Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

rpc: add limit for batch request and response size #26681

Merged
merged 32 commits into from
Jun 13, 2023
Merged
Show file tree
Hide file tree
Changes from 6 commits
Commits
Show all changes
32 commits
Select commit Hold shift + click to select a range
7f3d7e6
limit the number of batch requests to 100
mmsqe Feb 14, 2023
667a408
limit the size of the response packet to 10MB
mmsqe Feb 14, 2023
21aec8f
add batch limit related config
mmsqe Feb 14, 2023
6b8b39d
update doc
mmsqe Feb 14, 2023
b6993c4
Merge branch 'master' into add-rpc-limit
mmsqe Feb 14, 2023
c4ac65c
apply limit for server & client
mmsqe Feb 14, 2023
c9015fa
make batch related limit configurable
mmsqe Feb 15, 2023
6d2ce24
Merge branch 'master' into add-rpc-limit
mmsqe Feb 15, 2023
2c04aa0
add SetBatchLimits for client with default limit
mmsqe Feb 16, 2023
22bc552
Merge branch 'master' into add-rpc-limit
mmsqe Feb 16, 2023
a43fda5
rename namespace
mmsqe Feb 16, 2023
754137c
Merge branch 'master' into add-rpc-limit
mmsqe Feb 16, 2023
d7c8673
allow set limit with dial after client get init
mmsqe Feb 16, 2023
7fd2b77
set limit when init client
mmsqe Feb 16, 2023
733910c
rpc: configure client batch limits through options
fjl Feb 17, 2023
bae5a2f
node: refactor passing around rpc config
fjl Feb 17, 2023
6c6b8b1
rpc: increase default batch limits
fjl Feb 17, 2023
cebe226
rpc: simplify sending error response
fjl Feb 17, 2023
333dffb
rpc: rename variable
fjl Feb 17, 2023
b91f08a
rpc: add test for batch size limit
fjl Feb 17, 2023
fdf1b20
handle msg id for batch too large
mmsqe Feb 18, 2023
127079b
test batch request limit for non-call
mmsqe Mar 3, 2023
e82658a
rm non-call test
mmsqe Mar 6, 2023
bd5dfa6
Merge branch 'master' into add-rpc-limit
holiman May 31, 2023
47557d1
cmd/utils: fix docs on flags
holiman May 31, 2023
8e6018f
rpc: minor refactor of tests
holiman May 31, 2023
acf5730
rpc: improve client batch response handling
fjl Jun 8, 2023
82b5208
rpc: attach "batch too large" error to the first call
fjl Jun 8, 2023
f0688d6
rpc: remove default limits
fjl Jun 8, 2023
cd73291
rpc: remove added blank lines in invalid-batch.js
fjl Jun 8, 2023
7048bfc
rpc: remove special error handling for HTTP batch response length
fjl Jun 8, 2023
6841858
rpc: rename error
fjl Jun 9, 2023
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 3 additions & 1 deletion rpc/errors.go
Original file line number Diff line number Diff line change
Expand Up @@ -61,12 +61,14 @@ const (
errcodeDefault = -32000
errcodeNotificationsUnsupported = -32001
errcodeTimeout = -32002
errcodeResponseTooLarge = -32003
holiman marked this conversation as resolved.
Show resolved Hide resolved
errcodePanic = -32603
errcodeMarshalError = -32603
)

const (
errMsgTimeout = "request timed out"
errMsgTimeout = "request timed out"
errMsgResponseTooLarge = "response too large"
)

type methodNotFoundError struct{ method string }
Expand Down
81 changes: 60 additions & 21 deletions rpc/handler.go
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,11 @@ import (
"github.com/ethereum/go-ethereum/log"
)

// Default limits applied to batch requests. These guard the handler against
// resource exhaustion from oversized batches; newHandler uses them as the
// initial values of batchRequestLimit and batchResponseMaxSize.
const (
	// BatchRequestLimit is the maximum number of requests accepted in a
	// single batch.
	BatchRequestLimit = 100
	// BatchResponseMaxSize is the maximum total number of bytes returned
	// from the calls of a batch (10 MB).
	BatchResponseMaxSize = 10 * 1000 * 1000
)

// handler handles JSON-RPC messages. There is one handler per connection. Note that
// handler is not safe for concurrent use. Message handling never blocks indefinitely
// because RPCs are processed on background goroutines launched by handler.
Expand All @@ -49,17 +54,19 @@ import (
// h.removeRequestOp(op) // timeout, etc.
// }
type handler struct {
reg *serviceRegistry
unsubscribeCb *callback
idgen func() ID // subscription ID generator
respWait map[string]*requestOp // active client requests
clientSubs map[string]*ClientSubscription // active client subscriptions
callWG sync.WaitGroup // pending call goroutines
rootCtx context.Context // canceled by close()
cancelRoot func() // cancel function for rootCtx
conn jsonWriter // where responses will be sent
log log.Logger
allowSubscribe bool
reg *serviceRegistry
unsubscribeCb *callback
idgen func() ID // subscription ID generator
respWait map[string]*requestOp // active client requests
clientSubs map[string]*ClientSubscription // active client subscriptions
callWG sync.WaitGroup // pending call goroutines
rootCtx context.Context // canceled by close()
cancelRoot func() // cancel function for rootCtx
conn jsonWriter // where responses will be sent
log log.Logger
allowSubscribe bool
batchRequestLimit int
batchResponseMaxSize int

subLock sync.Mutex
serverSubs map[ID]*Subscription
Expand All @@ -73,16 +80,18 @@ type callProc struct {
func newHandler(connCtx context.Context, conn jsonWriter, idgen func() ID, reg *serviceRegistry) *handler {
rootCtx, cancelRoot := context.WithCancel(connCtx)
h := &handler{
reg: reg,
idgen: idgen,
conn: conn,
respWait: make(map[string]*requestOp),
clientSubs: make(map[string]*ClientSubscription),
rootCtx: rootCtx,
cancelRoot: cancelRoot,
allowSubscribe: true,
serverSubs: make(map[ID]*Subscription),
log: log.Root(),
reg: reg,
idgen: idgen,
conn: conn,
respWait: make(map[string]*requestOp),
clientSubs: make(map[string]*ClientSubscription),
rootCtx: rootCtx,
cancelRoot: cancelRoot,
allowSubscribe: true,
serverSubs: make(map[ID]*Subscription),
log: log.Root(),
batchRequestLimit: BatchRequestLimit,
batchResponseMaxSize: BatchResponseMaxSize,
}
if conn.remoteAddr() != "" {
h.log = h.log.New("conn", conn.remoteAddr())
Expand Down Expand Up @@ -149,6 +158,21 @@ func (b *batchCallBuffer) timeout(ctx context.Context, conn jsonWriter) {
b.doWrite(ctx, conn, true)
}

// responseTooLarge flushes the responses collected so far and answers each
// remaining unanswered call message with a "response too large" error.
// Calls that are notifications get no error response, since they expect no
// reply at all.
func (b *batchCallBuffer) responseTooLarge(ctx context.Context, conn jsonWriter) {
	b.mutex.Lock()
	defer b.mutex.Unlock()

	for _, call := range b.calls {
		if call.isNotification() {
			continue
		}
		tooLarge := &internalServerError{errcodeResponseTooLarge, errMsgResponseTooLarge}
		b.resp = append(b.resp, call.errorResponse(tooLarge))
	}
	b.doWrite(ctx, conn, true)
}

// doWrite actually writes the response.
// This assumes b.mutex is held.
func (b *batchCallBuffer) doWrite(ctx context.Context, conn jsonWriter, isErrorResponse bool) {
Expand All @@ -172,6 +196,14 @@ func (h *handler) handleBatch(msgs []*jsonrpcMessage) {
return
}

if len(msgs) > h.batchRequestLimit && h.batchRequestLimit != 0 {
h.startCallProc(func(cp *callProc) {
resp := errorMessage(&invalidRequestError{"batch too large"})
h.conn.writeJSON(cp.ctx, resp, true)
})
return
}

// Handle non-call messages first:
calls := make([]*jsonrpcMessage, 0, len(msgs))
for _, msg := range msgs {
Expand Down Expand Up @@ -203,6 +235,7 @@ func (h *handler) handleBatch(msgs []*jsonrpcMessage) {
})
}

resBytes := 0
for {
// No need to handle rest of calls if timed out.
if cp.ctx.Err() != nil {
Expand All @@ -214,6 +247,12 @@ func (h *handler) handleBatch(msgs []*jsonrpcMessage) {
}
resp := h.handleCallMsg(cp, msg)
callBuffer.pushResponse(resp)
if resp != nil && h.batchResponseMaxSize != 0 {
if resBytes += len(resp.Result); resBytes > h.batchResponseMaxSize {
callBuffer.responseTooLarge(cp.ctx, h.conn)
break
}
}
}
if timer != nil {
timer.Stop()
Expand Down