From 9e3b2803c46c37061ee943e617deac38206290eb Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Thu, 14 Jan 2021 15:36:36 -0500 Subject: [PATCH 01/80] Initial query limit overriding --- src/dbnode/generated/thrift/rpc.thrift | 11 + src/dbnode/generated/thrift/rpc/rpc.go | 897 ++++++++++++++++-- src/dbnode/generated/thrift/rpc/rpc_mock.go | 654 ------------- src/dbnode/generated/thrift/rpc/tchan-rpc.go | 52 +- .../server/tchannelthrift/node/service.go | 19 + src/dbnode/storage/limits/query_limits.go | 45 +- src/dbnode/storage/limits/types.go | 2 + 7 files changed, 910 insertions(+), 770 deletions(-) delete mode 100644 src/dbnode/generated/thrift/rpc/rpc_mock.go diff --git a/src/dbnode/generated/thrift/rpc.thrift b/src/dbnode/generated/thrift/rpc.thrift index e83cc7335f..4132fb210e 100644 --- a/src/dbnode/generated/thrift/rpc.thrift +++ b/src/dbnode/generated/thrift/rpc.thrift @@ -85,6 +85,7 @@ service Node { NodeWriteNewSeriesBackoffDurationResult setWriteNewSeriesBackoffDuration(1: NodeSetWriteNewSeriesBackoffDurationRequest req) throws (1: Error err) NodeWriteNewSeriesLimitPerShardPerSecondResult getWriteNewSeriesLimitPerShardPerSecond() throws (1: Error err) NodeWriteNewSeriesLimitPerShardPerSecondResult setWriteNewSeriesLimitPerShardPerSecond(1: NodeSetWriteNewSeriesLimitPerShardPerSecondRequest req) throws (1: Error err) + NodeQueryLimitOverridesResult setQueryLimitOverrides(1: NodeSetQueryLimitOverridesRequest req) throws (1: Error err) // Debug endpoints DebugProfileStartResult debugProfileStart(1: DebugProfileStartRequest req) throws (1: Error err) @@ -367,6 +368,16 @@ struct NodeSetWriteNewSeriesLimitPerShardPerSecondRequest { 1: required i64 writeNewSeriesLimitPerShardPerSecond } +struct NodeQueryLimitOverridesResult { + 1: optional i64 docsLimitOverride + 2: optional i64 bytesReadLimitOverride +} + +struct NodeSetQueryLimitOverridesRequest { + 1: optional i64 docsLimitOverride + 2: optional i64 bytesReadLimitOverride +} + service Cluster { HealthResult 
health() throws (1: Error err) void write(1: WriteRequest req) throws (1: Error err) diff --git a/src/dbnode/generated/thrift/rpc/rpc.go b/src/dbnode/generated/thrift/rpc/rpc.go index 678aa3b183..680dd5c9dc 100644 --- a/src/dbnode/generated/thrift/rpc/rpc.go +++ b/src/dbnode/generated/thrift/rpc/rpc.go @@ -1,4 +1,4 @@ -// Copyright (c) 2020 Uber Technologies, Inc. +// Copyright (c) 2021 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -9466,6 +9466,306 @@ func (p *NodeSetWriteNewSeriesLimitPerShardPerSecondRequest) String() string { return fmt.Sprintf("NodeSetWriteNewSeriesLimitPerShardPerSecondRequest(%+v)", *p) } +// Attributes: +// - DocsLimitOverride +// - BytesReadLimitOverride +type NodeQueryLimitOverridesResult_ struct { + DocsLimitOverride *int64 `thrift:"docsLimitOverride,1" db:"docsLimitOverride" json:"docsLimitOverride,omitempty"` + BytesReadLimitOverride *int64 `thrift:"bytesReadLimitOverride,2" db:"bytesReadLimitOverride" json:"bytesReadLimitOverride,omitempty"` +} + +func NewNodeQueryLimitOverridesResult_() *NodeQueryLimitOverridesResult_ { + return &NodeQueryLimitOverridesResult_{} +} + +var NodeQueryLimitOverridesResult__DocsLimitOverride_DEFAULT int64 + +func (p *NodeQueryLimitOverridesResult_) GetDocsLimitOverride() int64 { + if !p.IsSetDocsLimitOverride() { + return NodeQueryLimitOverridesResult__DocsLimitOverride_DEFAULT + } + return *p.DocsLimitOverride +} + +var NodeQueryLimitOverridesResult__BytesReadLimitOverride_DEFAULT int64 + +func (p *NodeQueryLimitOverridesResult_) GetBytesReadLimitOverride() int64 { + if !p.IsSetBytesReadLimitOverride() { + return NodeQueryLimitOverridesResult__BytesReadLimitOverride_DEFAULT + } + return *p.BytesReadLimitOverride +} +func (p *NodeQueryLimitOverridesResult_) IsSetDocsLimitOverride() bool { + return p.DocsLimitOverride != nil +} + +func (p 
*NodeQueryLimitOverridesResult_) IsSetBytesReadLimitOverride() bool { + return p.BytesReadLimitOverride != nil +} + +func (p *NodeQueryLimitOverridesResult_) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if err := p.ReadField1(iprot); err != nil { + return err + } + case 2: + if err := p.ReadField2(iprot); err != nil { + return err + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *NodeQueryLimitOverridesResult_) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.DocsLimitOverride = &v + } + return nil +} + +func (p *NodeQueryLimitOverridesResult_) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.BytesReadLimitOverride = &v + } + return nil +} + +func (p *NodeQueryLimitOverridesResult_) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("NodeQueryLimitOverridesResult"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write 
field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *NodeQueryLimitOverridesResult_) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetDocsLimitOverride() { + if err := oprot.WriteFieldBegin("docsLimitOverride", thrift.I64, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:docsLimitOverride: ", p), err) + } + if err := oprot.WriteI64(int64(*p.DocsLimitOverride)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.docsLimitOverride (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:docsLimitOverride: ", p), err) + } + } + return err +} + +func (p *NodeQueryLimitOverridesResult_) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetBytesReadLimitOverride() { + if err := oprot.WriteFieldBegin("bytesReadLimitOverride", thrift.I64, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:bytesReadLimitOverride: ", p), err) + } + if err := oprot.WriteI64(int64(*p.BytesReadLimitOverride)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.bytesReadLimitOverride (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:bytesReadLimitOverride: ", p), err) + } + } + return err +} + +func (p *NodeQueryLimitOverridesResult_) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("NodeQueryLimitOverridesResult_(%+v)", *p) +} + +// Attributes: +// - DocsLimitOverride +// - BytesReadLimitOverride +type NodeSetQueryLimitOverridesRequest struct { + DocsLimitOverride *int64 `thrift:"docsLimitOverride,1" db:"docsLimitOverride" json:"docsLimitOverride,omitempty"` + BytesReadLimitOverride *int64 `thrift:"bytesReadLimitOverride,2" 
db:"bytesReadLimitOverride" json:"bytesReadLimitOverride,omitempty"` +} + +func NewNodeSetQueryLimitOverridesRequest() *NodeSetQueryLimitOverridesRequest { + return &NodeSetQueryLimitOverridesRequest{} +} + +var NodeSetQueryLimitOverridesRequest_DocsLimitOverride_DEFAULT int64 + +func (p *NodeSetQueryLimitOverridesRequest) GetDocsLimitOverride() int64 { + if !p.IsSetDocsLimitOverride() { + return NodeSetQueryLimitOverridesRequest_DocsLimitOverride_DEFAULT + } + return *p.DocsLimitOverride +} + +var NodeSetQueryLimitOverridesRequest_BytesReadLimitOverride_DEFAULT int64 + +func (p *NodeSetQueryLimitOverridesRequest) GetBytesReadLimitOverride() int64 { + if !p.IsSetBytesReadLimitOverride() { + return NodeSetQueryLimitOverridesRequest_BytesReadLimitOverride_DEFAULT + } + return *p.BytesReadLimitOverride +} +func (p *NodeSetQueryLimitOverridesRequest) IsSetDocsLimitOverride() bool { + return p.DocsLimitOverride != nil +} + +func (p *NodeSetQueryLimitOverridesRequest) IsSetBytesReadLimitOverride() bool { + return p.BytesReadLimitOverride != nil +} + +func (p *NodeSetQueryLimitOverridesRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if err := p.ReadField1(iprot); err != nil { + return err + } + case 2: + if err := p.ReadField2(iprot); err != nil { + return err + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *NodeSetQueryLimitOverridesRequest) 
ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.DocsLimitOverride = &v + } + return nil +} + +func (p *NodeSetQueryLimitOverridesRequest) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.BytesReadLimitOverride = &v + } + return nil +} + +func (p *NodeSetQueryLimitOverridesRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("NodeSetQueryLimitOverridesRequest"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *NodeSetQueryLimitOverridesRequest) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetDocsLimitOverride() { + if err := oprot.WriteFieldBegin("docsLimitOverride", thrift.I64, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:docsLimitOverride: ", p), err) + } + if err := oprot.WriteI64(int64(*p.DocsLimitOverride)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.docsLimitOverride (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:docsLimitOverride: ", p), err) + } + } + return err +} + +func (p *NodeSetQueryLimitOverridesRequest) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetBytesReadLimitOverride() { + if err := oprot.WriteFieldBegin("bytesReadLimitOverride", thrift.I64, 2); err 
!= nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:bytesReadLimitOverride: ", p), err) + } + if err := oprot.WriteI64(int64(*p.BytesReadLimitOverride)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.bytesReadLimitOverride (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:bytesReadLimitOverride: ", p), err) + } + } + return err +} + +func (p *NodeSetQueryLimitOverridesRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("NodeSetQueryLimitOverridesRequest(%+v)", *p) +} + // Attributes: // - Ok // - Status @@ -14609,6 +14909,9 @@ type Node interface { SetWriteNewSeriesLimitPerShardPerSecond(req *NodeSetWriteNewSeriesLimitPerShardPerSecondRequest) (r *NodeWriteNewSeriesLimitPerShardPerSecondResult_, err error) // Parameters: // - Req + SetQueryLimitOverrides(req *NodeSetQueryLimitOverridesRequest) (r *NodeQueryLimitOverridesResult_, err error) + // Parameters: + // - Req DebugProfileStart(req *DebugProfileStartRequest) (r *DebugProfileStartResult_, err error) // Parameters: // - Req @@ -16954,6 +17257,87 @@ func (p *NodeClient) recvSetWriteNewSeriesLimitPerShardPerSecond() (value *NodeW return } +// Parameters: +// - Req +func (p *NodeClient) SetQueryLimitOverrides(req *NodeSetQueryLimitOverridesRequest) (r *NodeQueryLimitOverridesResult_, err error) { + if err = p.sendSetQueryLimitOverrides(req); err != nil { + return + } + return p.recvSetQueryLimitOverrides() +} + +func (p *NodeClient) sendSetQueryLimitOverrides(req *NodeSetQueryLimitOverridesRequest) (err error) { + oprot := p.OutputProtocol + if oprot == nil { + oprot = p.ProtocolFactory.GetProtocol(p.Transport) + p.OutputProtocol = oprot + } + p.SeqId++ + if err = oprot.WriteMessageBegin("setQueryLimitOverrides", thrift.CALL, p.SeqId); err != nil { + return + } + args := NodeSetQueryLimitOverridesArgs{ + Req: req, + } + if err = 
args.Write(oprot); err != nil { + return + } + if err = oprot.WriteMessageEnd(); err != nil { + return + } + return oprot.Flush() +} + +func (p *NodeClient) recvSetQueryLimitOverrides() (value *NodeQueryLimitOverridesResult_, err error) { + iprot := p.InputProtocol + if iprot == nil { + iprot = p.ProtocolFactory.GetProtocol(p.Transport) + p.InputProtocol = iprot + } + method, mTypeId, seqId, err := iprot.ReadMessageBegin() + if err != nil { + return + } + if method != "setQueryLimitOverrides" { + err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "setQueryLimitOverrides failed: wrong method name") + return + } + if p.SeqId != seqId { + err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "setQueryLimitOverrides failed: out of sequence response") + return + } + if mTypeId == thrift.EXCEPTION { + error93 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") + var error94 error + error94, err = error93.Read(iprot) + if err != nil { + return + } + if err = iprot.ReadMessageEnd(); err != nil { + return + } + err = error94 + return + } + if mTypeId != thrift.REPLY { + err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "setQueryLimitOverrides failed: invalid message type") + return + } + result := NodeSetQueryLimitOverridesResult{} + if err = result.Read(iprot); err != nil { + return + } + if err = iprot.ReadMessageEnd(); err != nil { + return + } + if result.Err != nil { + err = result.Err + return + } + value = result.GetSuccess() + return +} + // Parameters: // - Req func (p *NodeClient) DebugProfileStart(req *DebugProfileStartRequest) (r *DebugProfileStartResult_, err error) { @@ -17004,16 +17388,16 @@ func (p *NodeClient) recvDebugProfileStart() (value *DebugProfileStartResult_, e return } if mTypeId == thrift.EXCEPTION { - error93 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") - var error94 error - error94, err = error93.Read(iprot) 
+ error95 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") + var error96 error + error96, err = error95.Read(iprot) if err != nil { return } if err = iprot.ReadMessageEnd(); err != nil { return } - err = error94 + err = error96 return } if mTypeId != thrift.REPLY { @@ -17085,16 +17469,16 @@ func (p *NodeClient) recvDebugProfileStop() (value *DebugProfileStopResult_, err return } if mTypeId == thrift.EXCEPTION { - error95 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") - var error96 error - error96, err = error95.Read(iprot) + error97 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") + var error98 error + error98, err = error97.Read(iprot) if err != nil { return } if err = iprot.ReadMessageEnd(); err != nil { return } - err = error96 + err = error98 return } if mTypeId != thrift.REPLY { @@ -17166,16 +17550,16 @@ func (p *NodeClient) recvDebugIndexMemorySegments() (value *DebugIndexMemorySegm return } if mTypeId == thrift.EXCEPTION { - error97 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") - var error98 error - error98, err = error97.Read(iprot) + error99 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") + var error100 error + error100, err = error99.Read(iprot) if err != nil { return } if err = iprot.ReadMessageEnd(); err != nil { return } - err = error98 + err = error100 return } if mTypeId != thrift.REPLY { @@ -17217,40 +17601,41 @@ func (p *NodeProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { func NewNodeProcessor(handler Node) *NodeProcessor { - self99 := &NodeProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} - self99.processorMap["query"] = &nodeProcessorQuery{handler: handler} - self99.processorMap["aggregate"] = &nodeProcessorAggregate{handler: handler} - self99.processorMap["fetch"] = 
&nodeProcessorFetch{handler: handler} - self99.processorMap["write"] = &nodeProcessorWrite{handler: handler} - self99.processorMap["writeTagged"] = &nodeProcessorWriteTagged{handler: handler} - self99.processorMap["aggregateRaw"] = &nodeProcessorAggregateRaw{handler: handler} - self99.processorMap["fetchBatchRaw"] = &nodeProcessorFetchBatchRaw{handler: handler} - self99.processorMap["fetchBatchRawV2"] = &nodeProcessorFetchBatchRawV2{handler: handler} - self99.processorMap["fetchBlocksRaw"] = &nodeProcessorFetchBlocksRaw{handler: handler} - self99.processorMap["fetchTagged"] = &nodeProcessorFetchTagged{handler: handler} - self99.processorMap["fetchBlocksMetadataRawV2"] = &nodeProcessorFetchBlocksMetadataRawV2{handler: handler} - self99.processorMap["writeBatchRaw"] = &nodeProcessorWriteBatchRaw{handler: handler} - self99.processorMap["writeBatchRawV2"] = &nodeProcessorWriteBatchRawV2{handler: handler} - self99.processorMap["writeTaggedBatchRaw"] = &nodeProcessorWriteTaggedBatchRaw{handler: handler} - self99.processorMap["writeTaggedBatchRawV2"] = &nodeProcessorWriteTaggedBatchRawV2{handler: handler} - self99.processorMap["repair"] = &nodeProcessorRepair{handler: handler} - self99.processorMap["truncate"] = &nodeProcessorTruncate{handler: handler} - self99.processorMap["aggregateTiles"] = &nodeProcessorAggregateTiles{handler: handler} - self99.processorMap["health"] = &nodeProcessorHealth{handler: handler} - self99.processorMap["bootstrapped"] = &nodeProcessorBootstrapped{handler: handler} - self99.processorMap["bootstrappedInPlacementOrNoPlacement"] = &nodeProcessorBootstrappedInPlacementOrNoPlacement{handler: handler} - self99.processorMap["getPersistRateLimit"] = &nodeProcessorGetPersistRateLimit{handler: handler} - self99.processorMap["setPersistRateLimit"] = &nodeProcessorSetPersistRateLimit{handler: handler} - self99.processorMap["getWriteNewSeriesAsync"] = &nodeProcessorGetWriteNewSeriesAsync{handler: handler} - self99.processorMap["setWriteNewSeriesAsync"] = 
&nodeProcessorSetWriteNewSeriesAsync{handler: handler} - self99.processorMap["getWriteNewSeriesBackoffDuration"] = &nodeProcessorGetWriteNewSeriesBackoffDuration{handler: handler} - self99.processorMap["setWriteNewSeriesBackoffDuration"] = &nodeProcessorSetWriteNewSeriesBackoffDuration{handler: handler} - self99.processorMap["getWriteNewSeriesLimitPerShardPerSecond"] = &nodeProcessorGetWriteNewSeriesLimitPerShardPerSecond{handler: handler} - self99.processorMap["setWriteNewSeriesLimitPerShardPerSecond"] = &nodeProcessorSetWriteNewSeriesLimitPerShardPerSecond{handler: handler} - self99.processorMap["debugProfileStart"] = &nodeProcessorDebugProfileStart{handler: handler} - self99.processorMap["debugProfileStop"] = &nodeProcessorDebugProfileStop{handler: handler} - self99.processorMap["debugIndexMemorySegments"] = &nodeProcessorDebugIndexMemorySegments{handler: handler} - return self99 + self101 := &NodeProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} + self101.processorMap["query"] = &nodeProcessorQuery{handler: handler} + self101.processorMap["aggregate"] = &nodeProcessorAggregate{handler: handler} + self101.processorMap["fetch"] = &nodeProcessorFetch{handler: handler} + self101.processorMap["write"] = &nodeProcessorWrite{handler: handler} + self101.processorMap["writeTagged"] = &nodeProcessorWriteTagged{handler: handler} + self101.processorMap["aggregateRaw"] = &nodeProcessorAggregateRaw{handler: handler} + self101.processorMap["fetchBatchRaw"] = &nodeProcessorFetchBatchRaw{handler: handler} + self101.processorMap["fetchBatchRawV2"] = &nodeProcessorFetchBatchRawV2{handler: handler} + self101.processorMap["fetchBlocksRaw"] = &nodeProcessorFetchBlocksRaw{handler: handler} + self101.processorMap["fetchTagged"] = &nodeProcessorFetchTagged{handler: handler} + self101.processorMap["fetchBlocksMetadataRawV2"] = &nodeProcessorFetchBlocksMetadataRawV2{handler: handler} + self101.processorMap["writeBatchRaw"] = 
&nodeProcessorWriteBatchRaw{handler: handler} + self101.processorMap["writeBatchRawV2"] = &nodeProcessorWriteBatchRawV2{handler: handler} + self101.processorMap["writeTaggedBatchRaw"] = &nodeProcessorWriteTaggedBatchRaw{handler: handler} + self101.processorMap["writeTaggedBatchRawV2"] = &nodeProcessorWriteTaggedBatchRawV2{handler: handler} + self101.processorMap["repair"] = &nodeProcessorRepair{handler: handler} + self101.processorMap["truncate"] = &nodeProcessorTruncate{handler: handler} + self101.processorMap["aggregateTiles"] = &nodeProcessorAggregateTiles{handler: handler} + self101.processorMap["health"] = &nodeProcessorHealth{handler: handler} + self101.processorMap["bootstrapped"] = &nodeProcessorBootstrapped{handler: handler} + self101.processorMap["bootstrappedInPlacementOrNoPlacement"] = &nodeProcessorBootstrappedInPlacementOrNoPlacement{handler: handler} + self101.processorMap["getPersistRateLimit"] = &nodeProcessorGetPersistRateLimit{handler: handler} + self101.processorMap["setPersistRateLimit"] = &nodeProcessorSetPersistRateLimit{handler: handler} + self101.processorMap["getWriteNewSeriesAsync"] = &nodeProcessorGetWriteNewSeriesAsync{handler: handler} + self101.processorMap["setWriteNewSeriesAsync"] = &nodeProcessorSetWriteNewSeriesAsync{handler: handler} + self101.processorMap["getWriteNewSeriesBackoffDuration"] = &nodeProcessorGetWriteNewSeriesBackoffDuration{handler: handler} + self101.processorMap["setWriteNewSeriesBackoffDuration"] = &nodeProcessorSetWriteNewSeriesBackoffDuration{handler: handler} + self101.processorMap["getWriteNewSeriesLimitPerShardPerSecond"] = &nodeProcessorGetWriteNewSeriesLimitPerShardPerSecond{handler: handler} + self101.processorMap["setWriteNewSeriesLimitPerShardPerSecond"] = &nodeProcessorSetWriteNewSeriesLimitPerShardPerSecond{handler: handler} + self101.processorMap["setQueryLimitOverrides"] = &nodeProcessorSetQueryLimitOverrides{handler: handler} + self101.processorMap["debugProfileStart"] = 
&nodeProcessorDebugProfileStart{handler: handler} + self101.processorMap["debugProfileStop"] = &nodeProcessorDebugProfileStop{handler: handler} + self101.processorMap["debugIndexMemorySegments"] = &nodeProcessorDebugIndexMemorySegments{handler: handler} + return self101 } func (p *NodeProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { @@ -17263,12 +17648,12 @@ func (p *NodeProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, er } iprot.Skip(thrift.STRUCT) iprot.ReadMessageEnd() - x100 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) + x102 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId) - x100.Write(oprot) + x102.Write(oprot) oprot.WriteMessageEnd() oprot.Flush() - return false, x100 + return false, x102 } @@ -18702,13 +19087,66 @@ func (p *nodeProcessorGetWriteNewSeriesLimitPerShardPerSecond) Process(seqId int result := NodeGetWriteNewSeriesLimitPerShardPerSecondResult{} var retval *NodeWriteNewSeriesLimitPerShardPerSecondResult_ var err2 error - if retval, err2 = p.handler.GetWriteNewSeriesLimitPerShardPerSecond(); err2 != nil { + if retval, err2 = p.handler.GetWriteNewSeriesLimitPerShardPerSecond(); err2 != nil { + switch v := err2.(type) { + case *Error: + result.Err = v + default: + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getWriteNewSeriesLimitPerShardPerSecond: "+err2.Error()) + oprot.WriteMessageBegin("getWriteNewSeriesLimitPerShardPerSecond", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush() + return true, err2 + } + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("getWriteNewSeriesLimitPerShardPerSecond", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == 
nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type nodeProcessorSetWriteNewSeriesLimitPerShardPerSecond struct { + handler Node +} + +func (p *nodeProcessorSetWriteNewSeriesLimitPerShardPerSecond) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := NodeSetWriteNewSeriesLimitPerShardPerSecondArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("setWriteNewSeriesLimitPerShardPerSecond", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush() + return false, err + } + + iprot.ReadMessageEnd() + result := NodeSetWriteNewSeriesLimitPerShardPerSecondResult{} + var retval *NodeWriteNewSeriesLimitPerShardPerSecondResult_ + var err2 error + if retval, err2 = p.handler.SetWriteNewSeriesLimitPerShardPerSecond(args.Req); err2 != nil { switch v := err2.(type) { case *Error: result.Err = v default: - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getWriteNewSeriesLimitPerShardPerSecond: "+err2.Error()) - oprot.WriteMessageBegin("getWriteNewSeriesLimitPerShardPerSecond", thrift.EXCEPTION, seqId) + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing setWriteNewSeriesLimitPerShardPerSecond: "+err2.Error()) + oprot.WriteMessageBegin("setWriteNewSeriesLimitPerShardPerSecond", thrift.EXCEPTION, seqId) x.Write(oprot) oprot.WriteMessageEnd() oprot.Flush() @@ -18717,7 +19155,7 @@ func (p *nodeProcessorGetWriteNewSeriesLimitPerShardPerSecond) Process(seqId int } else { result.Success = retval } - if err2 = oprot.WriteMessageBegin("getWriteNewSeriesLimitPerShardPerSecond", thrift.REPLY, seqId); err2 != nil { + if err2 = oprot.WriteMessageBegin("setWriteNewSeriesLimitPerShardPerSecond", thrift.REPLY, 
seqId); err2 != nil { err = err2 } if err2 = result.Write(oprot); err == nil && err2 != nil { @@ -18735,16 +19173,16 @@ func (p *nodeProcessorGetWriteNewSeriesLimitPerShardPerSecond) Process(seqId int return true, err } -type nodeProcessorSetWriteNewSeriesLimitPerShardPerSecond struct { +type nodeProcessorSetQueryLimitOverrides struct { handler Node } -func (p *nodeProcessorSetWriteNewSeriesLimitPerShardPerSecond) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := NodeSetWriteNewSeriesLimitPerShardPerSecondArgs{} +func (p *nodeProcessorSetQueryLimitOverrides) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := NodeSetQueryLimitOverridesArgs{} if err = args.Read(iprot); err != nil { iprot.ReadMessageEnd() x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("setWriteNewSeriesLimitPerShardPerSecond", thrift.EXCEPTION, seqId) + oprot.WriteMessageBegin("setQueryLimitOverrides", thrift.EXCEPTION, seqId) x.Write(oprot) oprot.WriteMessageEnd() oprot.Flush() @@ -18752,16 +19190,16 @@ func (p *nodeProcessorSetWriteNewSeriesLimitPerShardPerSecond) Process(seqId int } iprot.ReadMessageEnd() - result := NodeSetWriteNewSeriesLimitPerShardPerSecondResult{} - var retval *NodeWriteNewSeriesLimitPerShardPerSecondResult_ + result := NodeSetQueryLimitOverridesResult{} + var retval *NodeQueryLimitOverridesResult_ var err2 error - if retval, err2 = p.handler.SetWriteNewSeriesLimitPerShardPerSecond(args.Req); err2 != nil { + if retval, err2 = p.handler.SetQueryLimitOverrides(args.Req); err2 != nil { switch v := err2.(type) { case *Error: result.Err = v default: - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing setWriteNewSeriesLimitPerShardPerSecond: "+err2.Error()) - oprot.WriteMessageBegin("setWriteNewSeriesLimitPerShardPerSecond", thrift.EXCEPTION, seqId) + x := 
thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing setQueryLimitOverrides: "+err2.Error()) + oprot.WriteMessageBegin("setQueryLimitOverrides", thrift.EXCEPTION, seqId) x.Write(oprot) oprot.WriteMessageEnd() oprot.Flush() @@ -18770,7 +19208,7 @@ func (p *nodeProcessorSetWriteNewSeriesLimitPerShardPerSecond) Process(seqId int } else { result.Success = retval } - if err2 = oprot.WriteMessageBegin("setWriteNewSeriesLimitPerShardPerSecond", thrift.REPLY, seqId); err2 != nil { + if err2 = oprot.WriteMessageBegin("setQueryLimitOverrides", thrift.REPLY, seqId); err2 != nil { err = err2 } if err2 = result.Write(oprot); err == nil && err2 != nil { @@ -25563,6 +26001,257 @@ func (p *NodeSetWriteNewSeriesLimitPerShardPerSecondResult) String() string { return fmt.Sprintf("NodeSetWriteNewSeriesLimitPerShardPerSecondResult(%+v)", *p) } +// Attributes: +// - Req +type NodeSetQueryLimitOverridesArgs struct { + Req *NodeSetQueryLimitOverridesRequest `thrift:"req,1" db:"req" json:"req"` +} + +func NewNodeSetQueryLimitOverridesArgs() *NodeSetQueryLimitOverridesArgs { + return &NodeSetQueryLimitOverridesArgs{} +} + +var NodeSetQueryLimitOverridesArgs_Req_DEFAULT *NodeSetQueryLimitOverridesRequest + +func (p *NodeSetQueryLimitOverridesArgs) GetReq() *NodeSetQueryLimitOverridesRequest { + if !p.IsSetReq() { + return NodeSetQueryLimitOverridesArgs_Req_DEFAULT + } + return p.Req +} +func (p *NodeSetQueryLimitOverridesArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *NodeSetQueryLimitOverridesArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if err := p.ReadField1(iprot); err != nil { 
+ return err + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *NodeSetQueryLimitOverridesArgs) ReadField1(iprot thrift.TProtocol) error { + p.Req = &NodeSetQueryLimitOverridesRequest{} + if err := p.Req.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *NodeSetQueryLimitOverridesArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("setQueryLimitOverrides_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *NodeSetQueryLimitOverridesArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) + } + if err := p.Req.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) + } + return err +} + +func (p *NodeSetQueryLimitOverridesArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("NodeSetQueryLimitOverridesArgs(%+v)", *p) +} + +// Attributes: +// - Success +// - Err +type NodeSetQueryLimitOverridesResult struct { + Success 
*NodeQueryLimitOverridesResult_ `thrift:"success,0" db:"success" json:"success,omitempty"` + Err *Error `thrift:"err,1" db:"err" json:"err,omitempty"` +} + +func NewNodeSetQueryLimitOverridesResult() *NodeSetQueryLimitOverridesResult { + return &NodeSetQueryLimitOverridesResult{} +} + +var NodeSetQueryLimitOverridesResult_Success_DEFAULT *NodeQueryLimitOverridesResult_ + +func (p *NodeSetQueryLimitOverridesResult) GetSuccess() *NodeQueryLimitOverridesResult_ { + if !p.IsSetSuccess() { + return NodeSetQueryLimitOverridesResult_Success_DEFAULT + } + return p.Success +} + +var NodeSetQueryLimitOverridesResult_Err_DEFAULT *Error + +func (p *NodeSetQueryLimitOverridesResult) GetErr() *Error { + if !p.IsSetErr() { + return NodeSetQueryLimitOverridesResult_Err_DEFAULT + } + return p.Err +} +func (p *NodeSetQueryLimitOverridesResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *NodeSetQueryLimitOverridesResult) IsSetErr() bool { + return p.Err != nil +} + +func (p *NodeSetQueryLimitOverridesResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if err := p.ReadField0(iprot); err != nil { + return err + } + case 1: + if err := p.ReadField1(iprot); err != nil { + return err + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *NodeSetQueryLimitOverridesResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = 
&NodeQueryLimitOverridesResult_{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *NodeSetQueryLimitOverridesResult) ReadField1(iprot thrift.TProtocol) error { + p.Err = &Error{ + Type: 0, + } + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *NodeSetQueryLimitOverridesResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("setQueryLimitOverrides_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *NodeSetQueryLimitOverridesResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *NodeSetQueryLimitOverridesResult) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetErr() { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := 
p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + } + return err +} + +func (p *NodeSetQueryLimitOverridesResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("NodeSetQueryLimitOverridesResult(%+v)", *p) +} + // Attributes: // - Req type NodeDebugProfileStartArgs struct { @@ -26410,16 +27099,16 @@ func (p *ClusterClient) recvHealth() (value *HealthResult_, err error) { return } if mTypeId == thrift.EXCEPTION { - error245 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") - var error246 error - error246, err = error245.Read(iprot) + error253 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") + var error254 error + error254, err = error253.Read(iprot) if err != nil { return } if err = iprot.ReadMessageEnd(); err != nil { return } - err = error246 + err = error254 return } if mTypeId != thrift.REPLY { @@ -26491,16 +27180,16 @@ func (p *ClusterClient) recvWrite() (err error) { return } if mTypeId == thrift.EXCEPTION { - error247 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") - var error248 error - error248, err = error247.Read(iprot) + error255 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") + var error256 error + error256, err = error255.Read(iprot) if err != nil { return } if err = iprot.ReadMessageEnd(); err != nil { return } - err = error248 + err = error256 return } if mTypeId != thrift.REPLY { @@ -26571,16 +27260,16 @@ func (p *ClusterClient) recvWriteTagged() (err error) { return } if mTypeId == thrift.EXCEPTION { - error249 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") - var error250 error - error250, err = 
error249.Read(iprot) + error257 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") + var error258 error + error258, err = error257.Read(iprot) if err != nil { return } if err = iprot.ReadMessageEnd(); err != nil { return } - err = error250 + err = error258 return } if mTypeId != thrift.REPLY { @@ -26651,16 +27340,16 @@ func (p *ClusterClient) recvQuery() (value *QueryResult_, err error) { return } if mTypeId == thrift.EXCEPTION { - error251 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") - var error252 error - error252, err = error251.Read(iprot) + error259 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") + var error260 error + error260, err = error259.Read(iprot) if err != nil { return } if err = iprot.ReadMessageEnd(); err != nil { return } - err = error252 + err = error260 return } if mTypeId != thrift.REPLY { @@ -26732,16 +27421,16 @@ func (p *ClusterClient) recvAggregate() (value *AggregateQueryResult_, err error return } if mTypeId == thrift.EXCEPTION { - error253 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") - var error254 error - error254, err = error253.Read(iprot) + error261 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") + var error262 error + error262, err = error261.Read(iprot) if err != nil { return } if err = iprot.ReadMessageEnd(); err != nil { return } - err = error254 + err = error262 return } if mTypeId != thrift.REPLY { @@ -26813,16 +27502,16 @@ func (p *ClusterClient) recvFetch() (value *FetchResult_, err error) { return } if mTypeId == thrift.EXCEPTION { - error255 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") - var error256 error - error256, err = error255.Read(iprot) + error263 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") 
+ var error264 error + error264, err = error263.Read(iprot) if err != nil { return } if err = iprot.ReadMessageEnd(); err != nil { return } - err = error256 + err = error264 return } if mTypeId != thrift.REPLY { @@ -26894,16 +27583,16 @@ func (p *ClusterClient) recvTruncate() (value *TruncateResult_, err error) { return } if mTypeId == thrift.EXCEPTION { - error257 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") - var error258 error - error258, err = error257.Read(iprot) + error265 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") + var error266 error + error266, err = error265.Read(iprot) if err != nil { return } if err = iprot.ReadMessageEnd(); err != nil { return } - err = error258 + err = error266 return } if mTypeId != thrift.REPLY { @@ -26945,15 +27634,15 @@ func (p *ClusterProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { func NewClusterProcessor(handler Cluster) *ClusterProcessor { - self259 := &ClusterProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} - self259.processorMap["health"] = &clusterProcessorHealth{handler: handler} - self259.processorMap["write"] = &clusterProcessorWrite{handler: handler} - self259.processorMap["writeTagged"] = &clusterProcessorWriteTagged{handler: handler} - self259.processorMap["query"] = &clusterProcessorQuery{handler: handler} - self259.processorMap["aggregate"] = &clusterProcessorAggregate{handler: handler} - self259.processorMap["fetch"] = &clusterProcessorFetch{handler: handler} - self259.processorMap["truncate"] = &clusterProcessorTruncate{handler: handler} - return self259 + self267 := &ClusterProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} + self267.processorMap["health"] = &clusterProcessorHealth{handler: handler} + self267.processorMap["write"] = &clusterProcessorWrite{handler: handler} + self267.processorMap["writeTagged"] = 
&clusterProcessorWriteTagged{handler: handler} + self267.processorMap["query"] = &clusterProcessorQuery{handler: handler} + self267.processorMap["aggregate"] = &clusterProcessorAggregate{handler: handler} + self267.processorMap["fetch"] = &clusterProcessorFetch{handler: handler} + self267.processorMap["truncate"] = &clusterProcessorTruncate{handler: handler} + return self267 } func (p *ClusterProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { @@ -26966,12 +27655,12 @@ func (p *ClusterProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, } iprot.Skip(thrift.STRUCT) iprot.ReadMessageEnd() - x260 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) + x268 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId) - x260.Write(oprot) + x268.Write(oprot) oprot.WriteMessageEnd() oprot.Flush() - return false, x260 + return false, x268 } diff --git a/src/dbnode/generated/thrift/rpc/rpc_mock.go b/src/dbnode/generated/thrift/rpc/rpc_mock.go deleted file mode 100644 index a7be5ca909..0000000000 --- a/src/dbnode/generated/thrift/rpc/rpc_mock.go +++ /dev/null @@ -1,654 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/m3db/m3/src/dbnode/generated/thrift/rpc/tchan-go - -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Package rpc is a generated GoMock package. 
-package rpc - -import ( - "reflect" - - "github.com/golang/mock/gomock" - "github.com/uber/tchannel-go/thrift" -) - -// MockTChanCluster is a mock of TChanCluster interface -type MockTChanCluster struct { - ctrl *gomock.Controller - recorder *MockTChanClusterMockRecorder -} - -// MockTChanClusterMockRecorder is the mock recorder for MockTChanCluster -type MockTChanClusterMockRecorder struct { - mock *MockTChanCluster -} - -// NewMockTChanCluster creates a new mock instance -func NewMockTChanCluster(ctrl *gomock.Controller) *MockTChanCluster { - mock := &MockTChanCluster{ctrl: ctrl} - mock.recorder = &MockTChanClusterMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use -func (m *MockTChanCluster) EXPECT() *MockTChanClusterMockRecorder { - return m.recorder -} - -// Aggregate mocks base method -func (m *MockTChanCluster) Aggregate(ctx thrift.Context, req *AggregateQueryRequest) (*AggregateQueryResult_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Aggregate", ctx, req) - ret0, _ := ret[0].(*AggregateQueryResult_) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Aggregate indicates an expected call of Aggregate -func (mr *MockTChanClusterMockRecorder) Aggregate(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Aggregate", reflect.TypeOf((*MockTChanCluster)(nil).Aggregate), ctx, req) -} - -// Fetch mocks base method -func (m *MockTChanCluster) Fetch(ctx thrift.Context, req *FetchRequest) (*FetchResult_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Fetch", ctx, req) - ret0, _ := ret[0].(*FetchResult_) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Fetch indicates an expected call of Fetch -func (mr *MockTChanClusterMockRecorder) Fetch(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Fetch", 
reflect.TypeOf((*MockTChanCluster)(nil).Fetch), ctx, req) -} - -// Health mocks base method -func (m *MockTChanCluster) Health(ctx thrift.Context) (*HealthResult_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Health", ctx) - ret0, _ := ret[0].(*HealthResult_) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Health indicates an expected call of Health -func (mr *MockTChanClusterMockRecorder) Health(ctx interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Health", reflect.TypeOf((*MockTChanCluster)(nil).Health), ctx) -} - -// Query mocks base method -func (m *MockTChanCluster) Query(ctx thrift.Context, req *QueryRequest) (*QueryResult_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Query", ctx, req) - ret0, _ := ret[0].(*QueryResult_) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Query indicates an expected call of Query -func (mr *MockTChanClusterMockRecorder) Query(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Query", reflect.TypeOf((*MockTChanCluster)(nil).Query), ctx, req) -} - -// Truncate mocks base method -func (m *MockTChanCluster) Truncate(ctx thrift.Context, req *TruncateRequest) (*TruncateResult_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Truncate", ctx, req) - ret0, _ := ret[0].(*TruncateResult_) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Truncate indicates an expected call of Truncate -func (mr *MockTChanClusterMockRecorder) Truncate(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Truncate", reflect.TypeOf((*MockTChanCluster)(nil).Truncate), ctx, req) -} - -// Write mocks base method -func (m *MockTChanCluster) Write(ctx thrift.Context, req *WriteRequest) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Write", ctx, req) - ret0, _ := ret[0].(error) - return ret0 -} - -// Write indicates an 
expected call of Write -func (mr *MockTChanClusterMockRecorder) Write(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Write", reflect.TypeOf((*MockTChanCluster)(nil).Write), ctx, req) -} - -// WriteTagged mocks base method -func (m *MockTChanCluster) WriteTagged(ctx thrift.Context, req *WriteTaggedRequest) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WriteTagged", ctx, req) - ret0, _ := ret[0].(error) - return ret0 -} - -// WriteTagged indicates an expected call of WriteTagged -func (mr *MockTChanClusterMockRecorder) WriteTagged(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteTagged", reflect.TypeOf((*MockTChanCluster)(nil).WriteTagged), ctx, req) -} - -// MockTChanNode is a mock of TChanNode interface -type MockTChanNode struct { - ctrl *gomock.Controller - recorder *MockTChanNodeMockRecorder -} - -// MockTChanNodeMockRecorder is the mock recorder for MockTChanNode -type MockTChanNodeMockRecorder struct { - mock *MockTChanNode -} - -// NewMockTChanNode creates a new mock instance -func NewMockTChanNode(ctrl *gomock.Controller) *MockTChanNode { - mock := &MockTChanNode{ctrl: ctrl} - mock.recorder = &MockTChanNodeMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use -func (m *MockTChanNode) EXPECT() *MockTChanNodeMockRecorder { - return m.recorder -} - -// Aggregate mocks base method -func (m *MockTChanNode) Aggregate(ctx thrift.Context, req *AggregateQueryRequest) (*AggregateQueryResult_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Aggregate", ctx, req) - ret0, _ := ret[0].(*AggregateQueryResult_) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Aggregate indicates an expected call of Aggregate -func (mr *MockTChanNodeMockRecorder) Aggregate(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Aggregate", reflect.TypeOf((*MockTChanNode)(nil).Aggregate), ctx, req) -} - -// AggregateRaw mocks base method -func (m *MockTChanNode) AggregateRaw(ctx thrift.Context, req *AggregateQueryRawRequest) (*AggregateQueryRawResult_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AggregateRaw", ctx, req) - ret0, _ := ret[0].(*AggregateQueryRawResult_) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// AggregateRaw indicates an expected call of AggregateRaw -func (mr *MockTChanNodeMockRecorder) AggregateRaw(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AggregateRaw", reflect.TypeOf((*MockTChanNode)(nil).AggregateRaw), ctx, req) -} - -// AggregateTiles mocks base method -func (m *MockTChanNode) AggregateTiles(ctx thrift.Context, req *AggregateTilesRequest) (*AggregateTilesResult_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AggregateTiles", ctx, req) - ret0, _ := ret[0].(*AggregateTilesResult_) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// AggregateTiles indicates an expected call of AggregateTiles -func (mr *MockTChanNodeMockRecorder) AggregateTiles(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AggregateTiles", reflect.TypeOf((*MockTChanNode)(nil).AggregateTiles), ctx, req) -} - -// Bootstrapped mocks base method -func (m *MockTChanNode) Bootstrapped(ctx thrift.Context) (*NodeBootstrappedResult_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Bootstrapped", ctx) - ret0, _ := ret[0].(*NodeBootstrappedResult_) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Bootstrapped indicates an expected call of Bootstrapped -func (mr *MockTChanNodeMockRecorder) Bootstrapped(ctx interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Bootstrapped", 
reflect.TypeOf((*MockTChanNode)(nil).Bootstrapped), ctx) -} - -// BootstrappedInPlacementOrNoPlacement mocks base method -func (m *MockTChanNode) BootstrappedInPlacementOrNoPlacement(ctx thrift.Context) (*NodeBootstrappedInPlacementOrNoPlacementResult_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "BootstrappedInPlacementOrNoPlacement", ctx) - ret0, _ := ret[0].(*NodeBootstrappedInPlacementOrNoPlacementResult_) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// BootstrappedInPlacementOrNoPlacement indicates an expected call of BootstrappedInPlacementOrNoPlacement -func (mr *MockTChanNodeMockRecorder) BootstrappedInPlacementOrNoPlacement(ctx interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BootstrappedInPlacementOrNoPlacement", reflect.TypeOf((*MockTChanNode)(nil).BootstrappedInPlacementOrNoPlacement), ctx) -} - -// DebugIndexMemorySegments mocks base method -func (m *MockTChanNode) DebugIndexMemorySegments(ctx thrift.Context, req *DebugIndexMemorySegmentsRequest) (*DebugIndexMemorySegmentsResult_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DebugIndexMemorySegments", ctx, req) - ret0, _ := ret[0].(*DebugIndexMemorySegmentsResult_) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DebugIndexMemorySegments indicates an expected call of DebugIndexMemorySegments -func (mr *MockTChanNodeMockRecorder) DebugIndexMemorySegments(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DebugIndexMemorySegments", reflect.TypeOf((*MockTChanNode)(nil).DebugIndexMemorySegments), ctx, req) -} - -// DebugProfileStart mocks base method -func (m *MockTChanNode) DebugProfileStart(ctx thrift.Context, req *DebugProfileStartRequest) (*DebugProfileStartResult_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DebugProfileStart", ctx, req) - ret0, _ := ret[0].(*DebugProfileStartResult_) - ret1, _ := ret[1].(error) - return ret0, 
ret1 -} - -// DebugProfileStart indicates an expected call of DebugProfileStart -func (mr *MockTChanNodeMockRecorder) DebugProfileStart(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DebugProfileStart", reflect.TypeOf((*MockTChanNode)(nil).DebugProfileStart), ctx, req) -} - -// DebugProfileStop mocks base method -func (m *MockTChanNode) DebugProfileStop(ctx thrift.Context, req *DebugProfileStopRequest) (*DebugProfileStopResult_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DebugProfileStop", ctx, req) - ret0, _ := ret[0].(*DebugProfileStopResult_) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DebugProfileStop indicates an expected call of DebugProfileStop -func (mr *MockTChanNodeMockRecorder) DebugProfileStop(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DebugProfileStop", reflect.TypeOf((*MockTChanNode)(nil).DebugProfileStop), ctx, req) -} - -// Fetch mocks base method -func (m *MockTChanNode) Fetch(ctx thrift.Context, req *FetchRequest) (*FetchResult_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Fetch", ctx, req) - ret0, _ := ret[0].(*FetchResult_) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Fetch indicates an expected call of Fetch -func (mr *MockTChanNodeMockRecorder) Fetch(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Fetch", reflect.TypeOf((*MockTChanNode)(nil).Fetch), ctx, req) -} - -// FetchBatchRaw mocks base method -func (m *MockTChanNode) FetchBatchRaw(ctx thrift.Context, req *FetchBatchRawRequest) (*FetchBatchRawResult_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FetchBatchRaw", ctx, req) - ret0, _ := ret[0].(*FetchBatchRawResult_) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// FetchBatchRaw indicates an expected call of FetchBatchRaw -func (mr *MockTChanNodeMockRecorder) 
FetchBatchRaw(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchBatchRaw", reflect.TypeOf((*MockTChanNode)(nil).FetchBatchRaw), ctx, req) -} - -// FetchBatchRawV2 mocks base method -func (m *MockTChanNode) FetchBatchRawV2(ctx thrift.Context, req *FetchBatchRawV2Request) (*FetchBatchRawResult_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FetchBatchRawV2", ctx, req) - ret0, _ := ret[0].(*FetchBatchRawResult_) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// FetchBatchRawV2 indicates an expected call of FetchBatchRawV2 -func (mr *MockTChanNodeMockRecorder) FetchBatchRawV2(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchBatchRawV2", reflect.TypeOf((*MockTChanNode)(nil).FetchBatchRawV2), ctx, req) -} - -// FetchBlocksMetadataRawV2 mocks base method -func (m *MockTChanNode) FetchBlocksMetadataRawV2(ctx thrift.Context, req *FetchBlocksMetadataRawV2Request) (*FetchBlocksMetadataRawV2Result_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FetchBlocksMetadataRawV2", ctx, req) - ret0, _ := ret[0].(*FetchBlocksMetadataRawV2Result_) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// FetchBlocksMetadataRawV2 indicates an expected call of FetchBlocksMetadataRawV2 -func (mr *MockTChanNodeMockRecorder) FetchBlocksMetadataRawV2(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchBlocksMetadataRawV2", reflect.TypeOf((*MockTChanNode)(nil).FetchBlocksMetadataRawV2), ctx, req) -} - -// FetchBlocksRaw mocks base method -func (m *MockTChanNode) FetchBlocksRaw(ctx thrift.Context, req *FetchBlocksRawRequest) (*FetchBlocksRawResult_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FetchBlocksRaw", ctx, req) - ret0, _ := ret[0].(*FetchBlocksRawResult_) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// FetchBlocksRaw indicates an 
expected call of FetchBlocksRaw -func (mr *MockTChanNodeMockRecorder) FetchBlocksRaw(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchBlocksRaw", reflect.TypeOf((*MockTChanNode)(nil).FetchBlocksRaw), ctx, req) -} - -// FetchTagged mocks base method -func (m *MockTChanNode) FetchTagged(ctx thrift.Context, req *FetchTaggedRequest) (*FetchTaggedResult_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FetchTagged", ctx, req) - ret0, _ := ret[0].(*FetchTaggedResult_) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// FetchTagged indicates an expected call of FetchTagged -func (mr *MockTChanNodeMockRecorder) FetchTagged(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchTagged", reflect.TypeOf((*MockTChanNode)(nil).FetchTagged), ctx, req) -} - -// GetPersistRateLimit mocks base method -func (m *MockTChanNode) GetPersistRateLimit(ctx thrift.Context) (*NodePersistRateLimitResult_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetPersistRateLimit", ctx) - ret0, _ := ret[0].(*NodePersistRateLimitResult_) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetPersistRateLimit indicates an expected call of GetPersistRateLimit -func (mr *MockTChanNodeMockRecorder) GetPersistRateLimit(ctx interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPersistRateLimit", reflect.TypeOf((*MockTChanNode)(nil).GetPersistRateLimit), ctx) -} - -// GetWriteNewSeriesAsync mocks base method -func (m *MockTChanNode) GetWriteNewSeriesAsync(ctx thrift.Context) (*NodeWriteNewSeriesAsyncResult_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetWriteNewSeriesAsync", ctx) - ret0, _ := ret[0].(*NodeWriteNewSeriesAsyncResult_) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetWriteNewSeriesAsync indicates an expected call of GetWriteNewSeriesAsync -func (mr 
*MockTChanNodeMockRecorder) GetWriteNewSeriesAsync(ctx interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWriteNewSeriesAsync", reflect.TypeOf((*MockTChanNode)(nil).GetWriteNewSeriesAsync), ctx) -} - -// GetWriteNewSeriesBackoffDuration mocks base method -func (m *MockTChanNode) GetWriteNewSeriesBackoffDuration(ctx thrift.Context) (*NodeWriteNewSeriesBackoffDurationResult_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetWriteNewSeriesBackoffDuration", ctx) - ret0, _ := ret[0].(*NodeWriteNewSeriesBackoffDurationResult_) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetWriteNewSeriesBackoffDuration indicates an expected call of GetWriteNewSeriesBackoffDuration -func (mr *MockTChanNodeMockRecorder) GetWriteNewSeriesBackoffDuration(ctx interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWriteNewSeriesBackoffDuration", reflect.TypeOf((*MockTChanNode)(nil).GetWriteNewSeriesBackoffDuration), ctx) -} - -// GetWriteNewSeriesLimitPerShardPerSecond mocks base method -func (m *MockTChanNode) GetWriteNewSeriesLimitPerShardPerSecond(ctx thrift.Context) (*NodeWriteNewSeriesLimitPerShardPerSecondResult_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetWriteNewSeriesLimitPerShardPerSecond", ctx) - ret0, _ := ret[0].(*NodeWriteNewSeriesLimitPerShardPerSecondResult_) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetWriteNewSeriesLimitPerShardPerSecond indicates an expected call of GetWriteNewSeriesLimitPerShardPerSecond -func (mr *MockTChanNodeMockRecorder) GetWriteNewSeriesLimitPerShardPerSecond(ctx interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWriteNewSeriesLimitPerShardPerSecond", reflect.TypeOf((*MockTChanNode)(nil).GetWriteNewSeriesLimitPerShardPerSecond), ctx) -} - -// Health mocks base method -func (m *MockTChanNode) Health(ctx thrift.Context) 
(*NodeHealthResult_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Health", ctx) - ret0, _ := ret[0].(*NodeHealthResult_) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Health indicates an expected call of Health -func (mr *MockTChanNodeMockRecorder) Health(ctx interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Health", reflect.TypeOf((*MockTChanNode)(nil).Health), ctx) -} - -// Query mocks base method -func (m *MockTChanNode) Query(ctx thrift.Context, req *QueryRequest) (*QueryResult_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Query", ctx, req) - ret0, _ := ret[0].(*QueryResult_) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Query indicates an expected call of Query -func (mr *MockTChanNodeMockRecorder) Query(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Query", reflect.TypeOf((*MockTChanNode)(nil).Query), ctx, req) -} - -// Repair mocks base method -func (m *MockTChanNode) Repair(ctx thrift.Context) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Repair", ctx) - ret0, _ := ret[0].(error) - return ret0 -} - -// Repair indicates an expected call of Repair -func (mr *MockTChanNodeMockRecorder) Repair(ctx interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Repair", reflect.TypeOf((*MockTChanNode)(nil).Repair), ctx) -} - -// SetPersistRateLimit mocks base method -func (m *MockTChanNode) SetPersistRateLimit(ctx thrift.Context, req *NodeSetPersistRateLimitRequest) (*NodePersistRateLimitResult_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetPersistRateLimit", ctx, req) - ret0, _ := ret[0].(*NodePersistRateLimitResult_) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// SetPersistRateLimit indicates an expected call of SetPersistRateLimit -func (mr *MockTChanNodeMockRecorder) SetPersistRateLimit(ctx, req interface{}) 
*gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetPersistRateLimit", reflect.TypeOf((*MockTChanNode)(nil).SetPersistRateLimit), ctx, req) -} - -// SetWriteNewSeriesAsync mocks base method -func (m *MockTChanNode) SetWriteNewSeriesAsync(ctx thrift.Context, req *NodeSetWriteNewSeriesAsyncRequest) (*NodeWriteNewSeriesAsyncResult_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetWriteNewSeriesAsync", ctx, req) - ret0, _ := ret[0].(*NodeWriteNewSeriesAsyncResult_) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// SetWriteNewSeriesAsync indicates an expected call of SetWriteNewSeriesAsync -func (mr *MockTChanNodeMockRecorder) SetWriteNewSeriesAsync(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetWriteNewSeriesAsync", reflect.TypeOf((*MockTChanNode)(nil).SetWriteNewSeriesAsync), ctx, req) -} - -// SetWriteNewSeriesBackoffDuration mocks base method -func (m *MockTChanNode) SetWriteNewSeriesBackoffDuration(ctx thrift.Context, req *NodeSetWriteNewSeriesBackoffDurationRequest) (*NodeWriteNewSeriesBackoffDurationResult_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetWriteNewSeriesBackoffDuration", ctx, req) - ret0, _ := ret[0].(*NodeWriteNewSeriesBackoffDurationResult_) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// SetWriteNewSeriesBackoffDuration indicates an expected call of SetWriteNewSeriesBackoffDuration -func (mr *MockTChanNodeMockRecorder) SetWriteNewSeriesBackoffDuration(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetWriteNewSeriesBackoffDuration", reflect.TypeOf((*MockTChanNode)(nil).SetWriteNewSeriesBackoffDuration), ctx, req) -} - -// SetWriteNewSeriesLimitPerShardPerSecond mocks base method -func (m *MockTChanNode) SetWriteNewSeriesLimitPerShardPerSecond(ctx thrift.Context, req *NodeSetWriteNewSeriesLimitPerShardPerSecondRequest) 
(*NodeWriteNewSeriesLimitPerShardPerSecondResult_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetWriteNewSeriesLimitPerShardPerSecond", ctx, req) - ret0, _ := ret[0].(*NodeWriteNewSeriesLimitPerShardPerSecondResult_) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// SetWriteNewSeriesLimitPerShardPerSecond indicates an expected call of SetWriteNewSeriesLimitPerShardPerSecond -func (mr *MockTChanNodeMockRecorder) SetWriteNewSeriesLimitPerShardPerSecond(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetWriteNewSeriesLimitPerShardPerSecond", reflect.TypeOf((*MockTChanNode)(nil).SetWriteNewSeriesLimitPerShardPerSecond), ctx, req) -} - -// Truncate mocks base method -func (m *MockTChanNode) Truncate(ctx thrift.Context, req *TruncateRequest) (*TruncateResult_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Truncate", ctx, req) - ret0, _ := ret[0].(*TruncateResult_) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Truncate indicates an expected call of Truncate -func (mr *MockTChanNodeMockRecorder) Truncate(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Truncate", reflect.TypeOf((*MockTChanNode)(nil).Truncate), ctx, req) -} - -// Write mocks base method -func (m *MockTChanNode) Write(ctx thrift.Context, req *WriteRequest) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Write", ctx, req) - ret0, _ := ret[0].(error) - return ret0 -} - -// Write indicates an expected call of Write -func (mr *MockTChanNodeMockRecorder) Write(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Write", reflect.TypeOf((*MockTChanNode)(nil).Write), ctx, req) -} - -// WriteBatchRaw mocks base method -func (m *MockTChanNode) WriteBatchRaw(ctx thrift.Context, req *WriteBatchRawRequest) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WriteBatchRaw", 
ctx, req) - ret0, _ := ret[0].(error) - return ret0 -} - -// WriteBatchRaw indicates an expected call of WriteBatchRaw -func (mr *MockTChanNodeMockRecorder) WriteBatchRaw(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteBatchRaw", reflect.TypeOf((*MockTChanNode)(nil).WriteBatchRaw), ctx, req) -} - -// WriteBatchRawV2 mocks base method -func (m *MockTChanNode) WriteBatchRawV2(ctx thrift.Context, req *WriteBatchRawV2Request) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WriteBatchRawV2", ctx, req) - ret0, _ := ret[0].(error) - return ret0 -} - -// WriteBatchRawV2 indicates an expected call of WriteBatchRawV2 -func (mr *MockTChanNodeMockRecorder) WriteBatchRawV2(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteBatchRawV2", reflect.TypeOf((*MockTChanNode)(nil).WriteBatchRawV2), ctx, req) -} - -// WriteTagged mocks base method -func (m *MockTChanNode) WriteTagged(ctx thrift.Context, req *WriteTaggedRequest) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WriteTagged", ctx, req) - ret0, _ := ret[0].(error) - return ret0 -} - -// WriteTagged indicates an expected call of WriteTagged -func (mr *MockTChanNodeMockRecorder) WriteTagged(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteTagged", reflect.TypeOf((*MockTChanNode)(nil).WriteTagged), ctx, req) -} - -// WriteTaggedBatchRaw mocks base method -func (m *MockTChanNode) WriteTaggedBatchRaw(ctx thrift.Context, req *WriteTaggedBatchRawRequest) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WriteTaggedBatchRaw", ctx, req) - ret0, _ := ret[0].(error) - return ret0 -} - -// WriteTaggedBatchRaw indicates an expected call of WriteTaggedBatchRaw -func (mr *MockTChanNodeMockRecorder) WriteTaggedBatchRaw(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteTaggedBatchRaw", reflect.TypeOf((*MockTChanNode)(nil).WriteTaggedBatchRaw), ctx, req) -} - -// WriteTaggedBatchRawV2 mocks base method -func (m *MockTChanNode) WriteTaggedBatchRawV2(ctx thrift.Context, req *WriteTaggedBatchRawV2Request) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WriteTaggedBatchRawV2", ctx, req) - ret0, _ := ret[0].(error) - return ret0 -} - -// WriteTaggedBatchRawV2 indicates an expected call of WriteTaggedBatchRawV2 -func (mr *MockTChanNodeMockRecorder) WriteTaggedBatchRawV2(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteTaggedBatchRawV2", reflect.TypeOf((*MockTChanNode)(nil).WriteTaggedBatchRawV2), ctx, req) -} diff --git a/src/dbnode/generated/thrift/rpc/tchan-rpc.go b/src/dbnode/generated/thrift/rpc/tchan-rpc.go index cf6c94667a..8c4b5b8473 100644 --- a/src/dbnode/generated/thrift/rpc/tchan-rpc.go +++ b/src/dbnode/generated/thrift/rpc/tchan-rpc.go @@ -1,6 +1,6 @@ // @generated Code generated by thrift-gen. Do not modify. -// Copyright (c) 2020 Uber Technologies, Inc. +// Copyright (c) 2021 Uber Technologies, Inc. 
// // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -67,6 +67,7 @@ type TChanNode interface { Query(ctx thrift.Context, req *QueryRequest) (*QueryResult_, error) Repair(ctx thrift.Context) error SetPersistRateLimit(ctx thrift.Context, req *NodeSetPersistRateLimitRequest) (*NodePersistRateLimitResult_, error) + SetQueryLimitOverrides(ctx thrift.Context, req *NodeSetQueryLimitOverridesRequest) (*NodeQueryLimitOverridesResult_, error) SetWriteNewSeriesAsync(ctx thrift.Context, req *NodeSetWriteNewSeriesAsyncRequest) (*NodeWriteNewSeriesAsyncResult_, error) SetWriteNewSeriesBackoffDuration(ctx thrift.Context, req *NodeSetWriteNewSeriesBackoffDurationRequest) (*NodeWriteNewSeriesBackoffDurationResult_, error) SetWriteNewSeriesLimitPerShardPerSecond(ctx thrift.Context, req *NodeSetWriteNewSeriesLimitPerShardPerSecondRequest) (*NodeWriteNewSeriesLimitPerShardPerSecondResult_, error) @@ -863,6 +864,24 @@ func (c *tchanNodeClient) SetPersistRateLimit(ctx thrift.Context, req *NodeSetPe return resp.GetSuccess(), err } +func (c *tchanNodeClient) SetQueryLimitOverrides(ctx thrift.Context, req *NodeSetQueryLimitOverridesRequest) (*NodeQueryLimitOverridesResult_, error) { + var resp NodeSetQueryLimitOverridesResult + args := NodeSetQueryLimitOverridesArgs{ + Req: req, + } + success, err := c.client.Call(ctx, c.thriftService, "setQueryLimitOverrides", &args, &resp) + if err == nil && !success { + switch { + case resp.Err != nil: + err = resp.Err + default: + err = fmt.Errorf("received no result or unknown exception for setQueryLimitOverrides") + } + } + + return resp.GetSuccess(), err +} + func (c *tchanNodeClient) SetWriteNewSeriesAsync(ctx thrift.Context, req *NodeSetWriteNewSeriesAsyncRequest) (*NodeWriteNewSeriesAsyncResult_, error) { var resp NodeSetWriteNewSeriesAsyncResult args := NodeSetWriteNewSeriesAsyncArgs{ @@ -1083,6 +1102,7 @@ func (s *tchanNodeServer) 
Methods() []string { "query", "repair", "setPersistRateLimit", + "setQueryLimitOverrides", "setWriteNewSeriesAsync", "setWriteNewSeriesBackoffDuration", "setWriteNewSeriesLimitPerShardPerSecond", @@ -1142,6 +1162,8 @@ func (s *tchanNodeServer) Handle(ctx thrift.Context, methodName string, protocol return s.handleRepair(ctx, protocol) case "setPersistRateLimit": return s.handleSetPersistRateLimit(ctx, protocol) + case "setQueryLimitOverrides": + return s.handleSetQueryLimitOverrides(ctx, protocol) case "setWriteNewSeriesAsync": return s.handleSetWriteNewSeriesAsync(ctx, protocol) case "setWriteNewSeriesBackoffDuration": @@ -1783,6 +1805,34 @@ func (s *tchanNodeServer) handleSetPersistRateLimit(ctx thrift.Context, protocol return err == nil, &res, nil } +func (s *tchanNodeServer) handleSetQueryLimitOverrides(ctx thrift.Context, protocol athrift.TProtocol) (bool, athrift.TStruct, error) { + var req NodeSetQueryLimitOverridesArgs + var res NodeSetQueryLimitOverridesResult + + if err := req.Read(protocol); err != nil { + return false, nil, err + } + + r, err := + s.handler.SetQueryLimitOverrides(ctx, req.Req) + + if err != nil { + switch v := err.(type) { + case *Error: + if v == nil { + return false, nil, fmt.Errorf("Handler for err returned non-nil error type *Error but nil value") + } + res.Err = v + default: + return false, nil, err + } + } else { + res.Success = r + } + + return err == nil, &res, nil +} + func (s *tchanNodeServer) handleSetWriteNewSeriesAsync(ctx thrift.Context, protocol athrift.TProtocol) (bool, athrift.TStruct, error) { var req NodeSetWriteNewSeriesAsyncArgs var res NodeSetWriteNewSeriesAsyncResult diff --git a/src/dbnode/network/server/tchannelthrift/node/service.go b/src/dbnode/network/server/tchannelthrift/node/service.go index 8a7d7a0f6a..6e78da6140 100644 --- a/src/dbnode/network/server/tchannelthrift/node/service.go +++ b/src/dbnode/network/server/tchannelthrift/node/service.go @@ -2082,6 +2082,25 @@ func (s *service) 
SetWriteNewSeriesLimitPerShardPerSecond( return s.GetWriteNewSeriesLimitPerShardPerSecond(ctx) } +func (s *service) SetQueryLimitOverrides( + ctx thrift.Context, + req *rpc.NodeSetQueryLimitOverridesRequest, +) ( + *rpc.NodeQueryLimitOverridesResult_, + error, +) { + db, err := s.startRPCWithDB() + if err != nil { + return nil, err + } + + queryLimits := db.Options().IndexOptions().QueryLimits() + queryLimits.BytesReadLimit().Override(req.BytesReadLimitOverride) + queryLimits.DocsLimit().Override(req.DocsLimitOverride) + + return &rpc.NodeQueryLimitOverridesResult_{}, nil +} + func (s *service) DebugProfileStart( ctx thrift.Context, req *rpc.DebugProfileStartRequest, diff --git a/src/dbnode/storage/limits/query_limits.go b/src/dbnode/storage/limits/query_limits.go index 44643d40ec..a3431c779d 100644 --- a/src/dbnode/storage/limits/query_limits.go +++ b/src/dbnode/storage/limits/query_limits.go @@ -22,6 +22,7 @@ package limits import ( "fmt" + "sync" "time" xerrors "github.com/m3db/m3/src/x/errors" @@ -41,11 +42,13 @@ type queryLimits struct { } type lookbackLimit struct { - name string - options LookbackLimitOptions - metrics lookbackLimitMetrics - recent *atomic.Int64 - stopCh chan struct{} + name string + options LookbackLimitOptions + metrics lookbackLimitMetrics + recent *atomic.Int64 + stopCh chan struct{} + overrideLock sync.RWMutex + overrideLimit *int64 } type lookbackLimitMetrics struct { @@ -72,12 +75,7 @@ func DefaultLookbackLimitOptions() LookbackLimitOptions { } // NewQueryLimits returns a new query limits manager. 
-func NewQueryLimits( - options Options, - // docsLimitOpts LookbackLimitOptions, - // bytesReadLimitOpts LookbackLimitOptions, - // instrumentOpts instrument.Options, -) (QueryLimits, error) { +func NewQueryLimits(options Options) (QueryLimits, error) { if err := options.Validate(); err != nil { return nil, err } @@ -162,6 +160,19 @@ func (q *queryLimits) AnyExceeded() error { return q.bytesReadLimit.exceeded() } +// Inc increments the current value and returns an error if above the limit. +func (q *lookbackLimit) Override(limit *int) { + q.overrideLock.Lock() + defer q.overrideLock.Unlock() + + if limit == nil { + q.overrideLimit = nil + } else { + v := int64(*limit) + q.overrideLimit = &v + } +} + // Inc increments the current value and returns an error if above the limit. func (q *lookbackLimit) Inc(val int, source []byte) error { if val < 0 { @@ -196,6 +207,18 @@ func (q *lookbackLimit) checkLimit(recent int64) error { "query aborted due to limit: name=%s, limit=%d, current=%d, within=%s", q.name, q.options.Limit, recent, q.options.Lookback))) } + + q.overrideLock.RLock() + overrideLimit := q.overrideLimit + q.overrideLock.RUnlock() + + if overrideLimit != nil && recent > *overrideLimit { + q.metrics.exceeded.Inc(1) + return xerrors.NewInvalidParamsError(NewQueryLimitExceededError(fmt.Sprintf( + "query aborted due to limit override: name=%s, limit=%d, current=%d, within=%s", + q.name, *overrideLimit, recent, q.options.Lookback))) + } + return nil } diff --git a/src/dbnode/storage/limits/types.go b/src/dbnode/storage/limits/types.go index d38db00452..922dfc9f82 100644 --- a/src/dbnode/storage/limits/types.go +++ b/src/dbnode/storage/limits/types.go @@ -52,6 +52,8 @@ type QueryLimits interface { type LookbackLimit interface { // Inc increments the recent value for the limit. Inc(new int, source []byte) error + // Override overrides the lookback limit value. + Override(limit *int) } // LookbackLimitOptions holds options for a lookback limit to be enforced. 
From d5ef8b45202d39834676f89e8bfa58e70616fff7 Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Thu, 14 Jan 2021 15:37:55 -0500 Subject: [PATCH 02/80] Initial query limit overriding 2 --- src/dbnode/storage/limits/query_limits.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/dbnode/storage/limits/query_limits.go b/src/dbnode/storage/limits/query_limits.go index a3431c779d..2a4db3cde5 100644 --- a/src/dbnode/storage/limits/query_limits.go +++ b/src/dbnode/storage/limits/query_limits.go @@ -201,7 +201,7 @@ func (q *lookbackLimit) exceeded() error { } func (q *lookbackLimit) checkLimit(recent int64) error { - if q.options.Limit > 0 && recent > q.options.Limit { + if q.options.Limit > 0 && recent >= q.options.Limit { q.metrics.exceeded.Inc(1) return xerrors.NewInvalidParamsError(NewQueryLimitExceededError(fmt.Sprintf( "query aborted due to limit: name=%s, limit=%d, current=%d, within=%s", @@ -212,7 +212,7 @@ func (q *lookbackLimit) checkLimit(recent int64) error { overrideLimit := q.overrideLimit q.overrideLock.RUnlock() - if overrideLimit != nil && recent > *overrideLimit { + if overrideLimit != nil && recent >= *overrideLimit { q.metrics.exceeded.Inc(1) return xerrors.NewInvalidParamsError(NewQueryLimitExceededError(fmt.Sprintf( "query aborted due to limit override: name=%s, limit=%d, current=%d, within=%s", From 5ecdb410afa843606f22ebe67a582296a0a63d93 Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Thu, 14 Jan 2021 15:46:42 -0500 Subject: [PATCH 03/80] Initial query limit overriding 3 --- src/dbnode/network/server/tchannelthrift/node/service.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/dbnode/network/server/tchannelthrift/node/service.go b/src/dbnode/network/server/tchannelthrift/node/service.go index 6e78da6140..4802b895cf 100644 --- a/src/dbnode/network/server/tchannelthrift/node/service.go +++ b/src/dbnode/network/server/tchannelthrift/node/service.go @@ -2098,7 +2098,10 @@ func (s *service) 
SetQueryLimitOverrides( queryLimits.BytesReadLimit().Override(req.BytesReadLimitOverride) queryLimits.DocsLimit().Override(req.DocsLimitOverride) - return &rpc.NodeQueryLimitOverridesResult_{}, nil + return &rpc.NodeQueryLimitOverridesResult_{ + BytesReadLimitOverride: req.BytesReadLimitOverride, + DocsLimitOverride: req.DocsLimitOverride, + }, nil } func (s *service) DebugProfileStart( From ab8384b716b88fbea184fa9ff5033274c5c3ef14 Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Thu, 14 Jan 2021 15:49:57 -0500 Subject: [PATCH 04/80] Initial query limit overriding 4 --- src/dbnode/storage/limits/query_limits.go | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/src/dbnode/storage/limits/query_limits.go b/src/dbnode/storage/limits/query_limits.go index 2a4db3cde5..e959d73f2e 100644 --- a/src/dbnode/storage/limits/query_limits.go +++ b/src/dbnode/storage/limits/query_limits.go @@ -201,24 +201,26 @@ func (q *lookbackLimit) exceeded() error { } func (q *lookbackLimit) checkLimit(recent int64) error { - if q.options.Limit > 0 && recent >= q.options.Limit { - q.metrics.exceeded.Inc(1) - return xerrors.NewInvalidParamsError(NewQueryLimitExceededError(fmt.Sprintf( - "query aborted due to limit: name=%s, limit=%d, current=%d, within=%s", - q.name, q.options.Limit, recent, q.options.Lookback))) - } - q.overrideLock.RLock() overrideLimit := q.overrideLimit q.overrideLock.RUnlock() - if overrideLimit != nil && recent >= *overrideLimit { + if overrideLimit == nil { + if q.options.Limit > 0 && recent >= q.options.Limit { + q.metrics.exceeded.Inc(1) + return xerrors.NewInvalidParamsError(NewQueryLimitExceededError(fmt.Sprintf( + "query aborted due to limit: name=%s, limit=%d, current=%d, within=%s", + q.name, q.options.Limit, recent, q.options.Lookback))) + } + return nil + } + + if recent >= *overrideLimit { q.metrics.exceeded.Inc(1) return xerrors.NewInvalidParamsError(NewQueryLimitExceededError(fmt.Sprintf( "query aborted due to limit 
override: name=%s, limit=%d, current=%d, within=%s", q.name, *overrideLimit, recent, q.options.Lookback))) } - return nil } From 06ad76666d0ffac1af4311f9a73d5109b5ba531b Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Thu, 14 Jan 2021 17:10:48 -0500 Subject: [PATCH 05/80] PR feedback --- .../server/tchannelthrift/node/service.go | 13 ++++- src/dbnode/storage/limits/query_limits.go | 58 +++++++++++-------- src/dbnode/storage/limits/types.go | 2 +- 3 files changed, 45 insertions(+), 28 deletions(-) diff --git a/src/dbnode/network/server/tchannelthrift/node/service.go b/src/dbnode/network/server/tchannelthrift/node/service.go index 4802b895cf..5f928d7360 100644 --- a/src/dbnode/network/server/tchannelthrift/node/service.go +++ b/src/dbnode/network/server/tchannelthrift/node/service.go @@ -2095,8 +2095,17 @@ func (s *service) SetQueryLimitOverrides( } queryLimits := db.Options().IndexOptions().QueryLimits() - queryLimits.BytesReadLimit().Override(req.BytesReadLimitOverride) - queryLimits.DocsLimit().Override(req.DocsLimitOverride) + if err := queryLimits.BytesReadLimit().Override(req.BytesReadLimitOverride); err != nil { + return nil, err + } + if err := queryLimits.DocsLimit().Override(req.DocsLimitOverride); err != nil { + return nil, err + } + + s.logger.Info("query limit overrides set", + zap.Int64p("bytes-read", req.BytesReadLimitOverride), + zap.Int64p("docs", req.DocsLimitOverride), + ) return &rpc.NodeQueryLimitOverridesResult_{ BytesReadLimitOverride: req.BytesReadLimitOverride, diff --git a/src/dbnode/storage/limits/query_limits.go b/src/dbnode/storage/limits/query_limits.go index e959d73f2e..e6d16ee030 100644 --- a/src/dbnode/storage/limits/query_limits.go +++ b/src/dbnode/storage/limits/query_limits.go @@ -42,13 +42,13 @@ type queryLimits struct { } type lookbackLimit struct { - name string - options LookbackLimitOptions - metrics lookbackLimitMetrics - recent *atomic.Int64 - stopCh chan struct{} - overrideLock sync.RWMutex - overrideLimit *int64 + name 
string + limit *int64 + options LookbackLimitOptions + metrics lookbackLimitMetrics + recent *atomic.Int64 + stopCh chan struct{} + overrideLock sync.RWMutex } type lookbackLimitMetrics struct { @@ -106,6 +106,7 @@ func newLookbackLimit( ) *lookbackLimit { return &lookbackLimit{ name: name, + limit: getLimit(opts, nil), options: opts, metrics: newLookbackLimitMetrics(instrumentOpts, name, sourceLoggerBuilder), recent: atomic.NewInt64(0), @@ -113,6 +114,19 @@ func newLookbackLimit( } } +func getLimit(opts LookbackLimitOptions, override *int64) *int64 { + if override != nil { + return override + } + + // Zero limit means no limit enforced. + var limit *int64 + if opts.Limit != 0 { + limit = &opts.Limit + } + return limit +} + func newLookbackLimitMetrics( instrumentOpts instrument.Options, name string, @@ -161,16 +175,16 @@ func (q *queryLimits) AnyExceeded() error { } // Inc increments the current value and returns an error if above the limit. -func (q *lookbackLimit) Override(limit *int) { +func (q *lookbackLimit) Override(limit *int64) error { + if limit != nil && *limit < 0 { + return fmt.Errorf("invalid negative query limit inc %d", *limit) + } + q.overrideLock.Lock() defer q.overrideLock.Unlock() - if limit == nil { - q.overrideLimit = nil - } else { - v := int64(*limit) - q.overrideLimit = &v - } + q.limit = getLimit(q.options, limit) + return nil } // Inc increments the current value and returns an error if above the limit. 
@@ -202,24 +216,18 @@ func (q *lookbackLimit) exceeded() error { func (q *lookbackLimit) checkLimit(recent int64) error { q.overrideLock.RLock() - overrideLimit := q.overrideLimit + limit := q.limit q.overrideLock.RUnlock() - if overrideLimit == nil { - if q.options.Limit > 0 && recent >= q.options.Limit { - q.metrics.exceeded.Inc(1) - return xerrors.NewInvalidParamsError(NewQueryLimitExceededError(fmt.Sprintf( - "query aborted due to limit: name=%s, limit=%d, current=%d, within=%s", - q.name, q.options.Limit, recent, q.options.Lookback))) - } + if limit == nil { return nil } - if recent >= *overrideLimit { + if recent >= *limit { q.metrics.exceeded.Inc(1) return xerrors.NewInvalidParamsError(NewQueryLimitExceededError(fmt.Sprintf( - "query aborted due to limit override: name=%s, limit=%d, current=%d, within=%s", - q.name, *overrideLimit, recent, q.options.Lookback))) + "query aborted due to limit: name=%s, limit=%d, current=%d, within=%s", + q.name, q.options.Limit, recent, q.options.Lookback))) } return nil } diff --git a/src/dbnode/storage/limits/types.go b/src/dbnode/storage/limits/types.go index 922dfc9f82..0f1d829eea 100644 --- a/src/dbnode/storage/limits/types.go +++ b/src/dbnode/storage/limits/types.go @@ -53,7 +53,7 @@ type LookbackLimit interface { // Inc increments the recent value for the limit. Inc(new int, source []byte) error // Override overrides the lookback limit value. - Override(limit *int) + Override(limit *int64) error } // LookbackLimitOptions holds options for a lookback limit to be enforced. 
From 6390dcfa9f3a6275f5b8c88f55694bdcac782f2f Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Thu, 14 Jan 2021 17:12:37 -0500 Subject: [PATCH 06/80] Build fix --- src/dbnode/storage/limits/noop_query_limits.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/dbnode/storage/limits/noop_query_limits.go b/src/dbnode/storage/limits/noop_query_limits.go index 672ea8d52d..b25caf5c24 100644 --- a/src/dbnode/storage/limits/noop_query_limits.go +++ b/src/dbnode/storage/limits/noop_query_limits.go @@ -54,6 +54,10 @@ func (q *noOpQueryLimits) Stop() { func (q *noOpQueryLimits) Start() { } +func (q *noOpLookbackLimit) Override(*int64) error { + return nil +} + func (q *noOpLookbackLimit) Inc(int, []byte) error { return nil } From c9b0ad08bb2e377fbc1e4940a11532f788dbbf33 Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Thu, 14 Jan 2021 17:14:18 -0500 Subject: [PATCH 07/80] Fix error wording --- src/dbnode/storage/limits/query_limits.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/dbnode/storage/limits/query_limits.go b/src/dbnode/storage/limits/query_limits.go index e6d16ee030..2871dda637 100644 --- a/src/dbnode/storage/limits/query_limits.go +++ b/src/dbnode/storage/limits/query_limits.go @@ -177,7 +177,7 @@ func (q *queryLimits) AnyExceeded() error { // Inc increments the current value and returns an error if above the limit. 
func (q *lookbackLimit) Override(limit *int64) error { if limit != nil && *limit < 0 { - return fmt.Errorf("invalid negative query limit inc %d", *limit) + return fmt.Errorf("invalid negative query limit override %d", *limit) } q.overrideLock.Lock() From 0bba45e09a1420f2511257460708ddcfd4926432 Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Thu, 14 Jan 2021 17:53:39 -0500 Subject: [PATCH 08/80] Build fix 2 --- src/dbnode/generated/thrift/rpc/rpc_mock.go | 669 ++++++++++++++++++++ src/dbnode/storage/limits/query_limits.go | 2 +- 2 files changed, 670 insertions(+), 1 deletion(-) create mode 100644 src/dbnode/generated/thrift/rpc/rpc_mock.go diff --git a/src/dbnode/generated/thrift/rpc/rpc_mock.go b/src/dbnode/generated/thrift/rpc/rpc_mock.go new file mode 100644 index 0000000000..3074161f26 --- /dev/null +++ b/src/dbnode/generated/thrift/rpc/rpc_mock.go @@ -0,0 +1,669 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/m3db/m3/src/dbnode/generated/thrift/rpc/tchan-go + +// Copyright (c) 2021 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package rpc is a generated GoMock package. +package rpc + +import ( + "reflect" + + "github.com/golang/mock/gomock" + "github.com/uber/tchannel-go/thrift" +) + +// MockTChanCluster is a mock of TChanCluster interface +type MockTChanCluster struct { + ctrl *gomock.Controller + recorder *MockTChanClusterMockRecorder +} + +// MockTChanClusterMockRecorder is the mock recorder for MockTChanCluster +type MockTChanClusterMockRecorder struct { + mock *MockTChanCluster +} + +// NewMockTChanCluster creates a new mock instance +func NewMockTChanCluster(ctrl *gomock.Controller) *MockTChanCluster { + mock := &MockTChanCluster{ctrl: ctrl} + mock.recorder = &MockTChanClusterMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockTChanCluster) EXPECT() *MockTChanClusterMockRecorder { + return m.recorder +} + +// Aggregate mocks base method +func (m *MockTChanCluster) Aggregate(ctx thrift.Context, req *AggregateQueryRequest) (*AggregateQueryResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Aggregate", ctx, req) + ret0, _ := ret[0].(*AggregateQueryResult_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Aggregate indicates an expected call of Aggregate +func (mr *MockTChanClusterMockRecorder) Aggregate(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Aggregate", reflect.TypeOf((*MockTChanCluster)(nil).Aggregate), ctx, req) +} + +// Fetch mocks base method +func (m *MockTChanCluster) Fetch(ctx thrift.Context, req *FetchRequest) (*FetchResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Fetch", ctx, req) + ret0, _ := 
ret[0].(*FetchResult_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Fetch indicates an expected call of Fetch +func (mr *MockTChanClusterMockRecorder) Fetch(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Fetch", reflect.TypeOf((*MockTChanCluster)(nil).Fetch), ctx, req) +} + +// Health mocks base method +func (m *MockTChanCluster) Health(ctx thrift.Context) (*HealthResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Health", ctx) + ret0, _ := ret[0].(*HealthResult_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Health indicates an expected call of Health +func (mr *MockTChanClusterMockRecorder) Health(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Health", reflect.TypeOf((*MockTChanCluster)(nil).Health), ctx) +} + +// Query mocks base method +func (m *MockTChanCluster) Query(ctx thrift.Context, req *QueryRequest) (*QueryResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Query", ctx, req) + ret0, _ := ret[0].(*QueryResult_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Query indicates an expected call of Query +func (mr *MockTChanClusterMockRecorder) Query(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Query", reflect.TypeOf((*MockTChanCluster)(nil).Query), ctx, req) +} + +// Truncate mocks base method +func (m *MockTChanCluster) Truncate(ctx thrift.Context, req *TruncateRequest) (*TruncateResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Truncate", ctx, req) + ret0, _ := ret[0].(*TruncateResult_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Truncate indicates an expected call of Truncate +func (mr *MockTChanClusterMockRecorder) Truncate(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Truncate", 
reflect.TypeOf((*MockTChanCluster)(nil).Truncate), ctx, req) +} + +// Write mocks base method +func (m *MockTChanCluster) Write(ctx thrift.Context, req *WriteRequest) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Write", ctx, req) + ret0, _ := ret[0].(error) + return ret0 +} + +// Write indicates an expected call of Write +func (mr *MockTChanClusterMockRecorder) Write(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Write", reflect.TypeOf((*MockTChanCluster)(nil).Write), ctx, req) +} + +// WriteTagged mocks base method +func (m *MockTChanCluster) WriteTagged(ctx thrift.Context, req *WriteTaggedRequest) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WriteTagged", ctx, req) + ret0, _ := ret[0].(error) + return ret0 +} + +// WriteTagged indicates an expected call of WriteTagged +func (mr *MockTChanClusterMockRecorder) WriteTagged(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteTagged", reflect.TypeOf((*MockTChanCluster)(nil).WriteTagged), ctx, req) +} + +// MockTChanNode is a mock of TChanNode interface +type MockTChanNode struct { + ctrl *gomock.Controller + recorder *MockTChanNodeMockRecorder +} + +// MockTChanNodeMockRecorder is the mock recorder for MockTChanNode +type MockTChanNodeMockRecorder struct { + mock *MockTChanNode +} + +// NewMockTChanNode creates a new mock instance +func NewMockTChanNode(ctrl *gomock.Controller) *MockTChanNode { + mock := &MockTChanNode{ctrl: ctrl} + mock.recorder = &MockTChanNodeMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockTChanNode) EXPECT() *MockTChanNodeMockRecorder { + return m.recorder +} + +// Aggregate mocks base method +func (m *MockTChanNode) Aggregate(ctx thrift.Context, req *AggregateQueryRequest) (*AggregateQueryResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, 
"Aggregate", ctx, req) + ret0, _ := ret[0].(*AggregateQueryResult_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Aggregate indicates an expected call of Aggregate +func (mr *MockTChanNodeMockRecorder) Aggregate(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Aggregate", reflect.TypeOf((*MockTChanNode)(nil).Aggregate), ctx, req) +} + +// AggregateRaw mocks base method +func (m *MockTChanNode) AggregateRaw(ctx thrift.Context, req *AggregateQueryRawRequest) (*AggregateQueryRawResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AggregateRaw", ctx, req) + ret0, _ := ret[0].(*AggregateQueryRawResult_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AggregateRaw indicates an expected call of AggregateRaw +func (mr *MockTChanNodeMockRecorder) AggregateRaw(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AggregateRaw", reflect.TypeOf((*MockTChanNode)(nil).AggregateRaw), ctx, req) +} + +// AggregateTiles mocks base method +func (m *MockTChanNode) AggregateTiles(ctx thrift.Context, req *AggregateTilesRequest) (*AggregateTilesResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AggregateTiles", ctx, req) + ret0, _ := ret[0].(*AggregateTilesResult_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AggregateTiles indicates an expected call of AggregateTiles +func (mr *MockTChanNodeMockRecorder) AggregateTiles(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AggregateTiles", reflect.TypeOf((*MockTChanNode)(nil).AggregateTiles), ctx, req) +} + +// Bootstrapped mocks base method +func (m *MockTChanNode) Bootstrapped(ctx thrift.Context) (*NodeBootstrappedResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Bootstrapped", ctx) + ret0, _ := ret[0].(*NodeBootstrappedResult_) + ret1, _ := ret[1].(error) + return ret0, 
ret1 +} + +// Bootstrapped indicates an expected call of Bootstrapped +func (mr *MockTChanNodeMockRecorder) Bootstrapped(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Bootstrapped", reflect.TypeOf((*MockTChanNode)(nil).Bootstrapped), ctx) +} + +// BootstrappedInPlacementOrNoPlacement mocks base method +func (m *MockTChanNode) BootstrappedInPlacementOrNoPlacement(ctx thrift.Context) (*NodeBootstrappedInPlacementOrNoPlacementResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BootstrappedInPlacementOrNoPlacement", ctx) + ret0, _ := ret[0].(*NodeBootstrappedInPlacementOrNoPlacementResult_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BootstrappedInPlacementOrNoPlacement indicates an expected call of BootstrappedInPlacementOrNoPlacement +func (mr *MockTChanNodeMockRecorder) BootstrappedInPlacementOrNoPlacement(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BootstrappedInPlacementOrNoPlacement", reflect.TypeOf((*MockTChanNode)(nil).BootstrappedInPlacementOrNoPlacement), ctx) +} + +// DebugIndexMemorySegments mocks base method +func (m *MockTChanNode) DebugIndexMemorySegments(ctx thrift.Context, req *DebugIndexMemorySegmentsRequest) (*DebugIndexMemorySegmentsResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DebugIndexMemorySegments", ctx, req) + ret0, _ := ret[0].(*DebugIndexMemorySegmentsResult_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DebugIndexMemorySegments indicates an expected call of DebugIndexMemorySegments +func (mr *MockTChanNodeMockRecorder) DebugIndexMemorySegments(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DebugIndexMemorySegments", reflect.TypeOf((*MockTChanNode)(nil).DebugIndexMemorySegments), ctx, req) +} + +// DebugProfileStart mocks base method +func (m *MockTChanNode) DebugProfileStart(ctx 
thrift.Context, req *DebugProfileStartRequest) (*DebugProfileStartResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DebugProfileStart", ctx, req) + ret0, _ := ret[0].(*DebugProfileStartResult_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DebugProfileStart indicates an expected call of DebugProfileStart +func (mr *MockTChanNodeMockRecorder) DebugProfileStart(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DebugProfileStart", reflect.TypeOf((*MockTChanNode)(nil).DebugProfileStart), ctx, req) +} + +// DebugProfileStop mocks base method +func (m *MockTChanNode) DebugProfileStop(ctx thrift.Context, req *DebugProfileStopRequest) (*DebugProfileStopResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DebugProfileStop", ctx, req) + ret0, _ := ret[0].(*DebugProfileStopResult_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DebugProfileStop indicates an expected call of DebugProfileStop +func (mr *MockTChanNodeMockRecorder) DebugProfileStop(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DebugProfileStop", reflect.TypeOf((*MockTChanNode)(nil).DebugProfileStop), ctx, req) +} + +// Fetch mocks base method +func (m *MockTChanNode) Fetch(ctx thrift.Context, req *FetchRequest) (*FetchResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Fetch", ctx, req) + ret0, _ := ret[0].(*FetchResult_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Fetch indicates an expected call of Fetch +func (mr *MockTChanNodeMockRecorder) Fetch(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Fetch", reflect.TypeOf((*MockTChanNode)(nil).Fetch), ctx, req) +} + +// FetchBatchRaw mocks base method +func (m *MockTChanNode) FetchBatchRaw(ctx thrift.Context, req *FetchBatchRawRequest) (*FetchBatchRawResult_, error) { + m.ctrl.T.Helper() 
+ ret := m.ctrl.Call(m, "FetchBatchRaw", ctx, req) + ret0, _ := ret[0].(*FetchBatchRawResult_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// FetchBatchRaw indicates an expected call of FetchBatchRaw +func (mr *MockTChanNodeMockRecorder) FetchBatchRaw(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchBatchRaw", reflect.TypeOf((*MockTChanNode)(nil).FetchBatchRaw), ctx, req) +} + +// FetchBatchRawV2 mocks base method +func (m *MockTChanNode) FetchBatchRawV2(ctx thrift.Context, req *FetchBatchRawV2Request) (*FetchBatchRawResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FetchBatchRawV2", ctx, req) + ret0, _ := ret[0].(*FetchBatchRawResult_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// FetchBatchRawV2 indicates an expected call of FetchBatchRawV2 +func (mr *MockTChanNodeMockRecorder) FetchBatchRawV2(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchBatchRawV2", reflect.TypeOf((*MockTChanNode)(nil).FetchBatchRawV2), ctx, req) +} + +// FetchBlocksMetadataRawV2 mocks base method +func (m *MockTChanNode) FetchBlocksMetadataRawV2(ctx thrift.Context, req *FetchBlocksMetadataRawV2Request) (*FetchBlocksMetadataRawV2Result_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FetchBlocksMetadataRawV2", ctx, req) + ret0, _ := ret[0].(*FetchBlocksMetadataRawV2Result_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// FetchBlocksMetadataRawV2 indicates an expected call of FetchBlocksMetadataRawV2 +func (mr *MockTChanNodeMockRecorder) FetchBlocksMetadataRawV2(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchBlocksMetadataRawV2", reflect.TypeOf((*MockTChanNode)(nil).FetchBlocksMetadataRawV2), ctx, req) +} + +// FetchBlocksRaw mocks base method +func (m *MockTChanNode) FetchBlocksRaw(ctx thrift.Context, req 
*FetchBlocksRawRequest) (*FetchBlocksRawResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FetchBlocksRaw", ctx, req) + ret0, _ := ret[0].(*FetchBlocksRawResult_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// FetchBlocksRaw indicates an expected call of FetchBlocksRaw +func (mr *MockTChanNodeMockRecorder) FetchBlocksRaw(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchBlocksRaw", reflect.TypeOf((*MockTChanNode)(nil).FetchBlocksRaw), ctx, req) +} + +// FetchTagged mocks base method +func (m *MockTChanNode) FetchTagged(ctx thrift.Context, req *FetchTaggedRequest) (*FetchTaggedResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FetchTagged", ctx, req) + ret0, _ := ret[0].(*FetchTaggedResult_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// FetchTagged indicates an expected call of FetchTagged +func (mr *MockTChanNodeMockRecorder) FetchTagged(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchTagged", reflect.TypeOf((*MockTChanNode)(nil).FetchTagged), ctx, req) +} + +// GetPersistRateLimit mocks base method +func (m *MockTChanNode) GetPersistRateLimit(ctx thrift.Context) (*NodePersistRateLimitResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPersistRateLimit", ctx) + ret0, _ := ret[0].(*NodePersistRateLimitResult_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPersistRateLimit indicates an expected call of GetPersistRateLimit +func (mr *MockTChanNodeMockRecorder) GetPersistRateLimit(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPersistRateLimit", reflect.TypeOf((*MockTChanNode)(nil).GetPersistRateLimit), ctx) +} + +// GetWriteNewSeriesAsync mocks base method +func (m *MockTChanNode) GetWriteNewSeriesAsync(ctx thrift.Context) (*NodeWriteNewSeriesAsyncResult_, error) { + 
m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetWriteNewSeriesAsync", ctx) + ret0, _ := ret[0].(*NodeWriteNewSeriesAsyncResult_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetWriteNewSeriesAsync indicates an expected call of GetWriteNewSeriesAsync +func (mr *MockTChanNodeMockRecorder) GetWriteNewSeriesAsync(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWriteNewSeriesAsync", reflect.TypeOf((*MockTChanNode)(nil).GetWriteNewSeriesAsync), ctx) +} + +// GetWriteNewSeriesBackoffDuration mocks base method +func (m *MockTChanNode) GetWriteNewSeriesBackoffDuration(ctx thrift.Context) (*NodeWriteNewSeriesBackoffDurationResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetWriteNewSeriesBackoffDuration", ctx) + ret0, _ := ret[0].(*NodeWriteNewSeriesBackoffDurationResult_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetWriteNewSeriesBackoffDuration indicates an expected call of GetWriteNewSeriesBackoffDuration +func (mr *MockTChanNodeMockRecorder) GetWriteNewSeriesBackoffDuration(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWriteNewSeriesBackoffDuration", reflect.TypeOf((*MockTChanNode)(nil).GetWriteNewSeriesBackoffDuration), ctx) +} + +// GetWriteNewSeriesLimitPerShardPerSecond mocks base method +func (m *MockTChanNode) GetWriteNewSeriesLimitPerShardPerSecond(ctx thrift.Context) (*NodeWriteNewSeriesLimitPerShardPerSecondResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetWriteNewSeriesLimitPerShardPerSecond", ctx) + ret0, _ := ret[0].(*NodeWriteNewSeriesLimitPerShardPerSecondResult_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetWriteNewSeriesLimitPerShardPerSecond indicates an expected call of GetWriteNewSeriesLimitPerShardPerSecond +func (mr *MockTChanNodeMockRecorder) GetWriteNewSeriesLimitPerShardPerSecond(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + 
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWriteNewSeriesLimitPerShardPerSecond", reflect.TypeOf((*MockTChanNode)(nil).GetWriteNewSeriesLimitPerShardPerSecond), ctx) +} + +// Health mocks base method +func (m *MockTChanNode) Health(ctx thrift.Context) (*NodeHealthResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Health", ctx) + ret0, _ := ret[0].(*NodeHealthResult_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Health indicates an expected call of Health +func (mr *MockTChanNodeMockRecorder) Health(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Health", reflect.TypeOf((*MockTChanNode)(nil).Health), ctx) +} + +// Query mocks base method +func (m *MockTChanNode) Query(ctx thrift.Context, req *QueryRequest) (*QueryResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Query", ctx, req) + ret0, _ := ret[0].(*QueryResult_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Query indicates an expected call of Query +func (mr *MockTChanNodeMockRecorder) Query(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Query", reflect.TypeOf((*MockTChanNode)(nil).Query), ctx, req) +} + +// Repair mocks base method +func (m *MockTChanNode) Repair(ctx thrift.Context) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Repair", ctx) + ret0, _ := ret[0].(error) + return ret0 +} + +// Repair indicates an expected call of Repair +func (mr *MockTChanNodeMockRecorder) Repair(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Repair", reflect.TypeOf((*MockTChanNode)(nil).Repair), ctx) +} + +// SetPersistRateLimit mocks base method +func (m *MockTChanNode) SetPersistRateLimit(ctx thrift.Context, req *NodeSetPersistRateLimitRequest) (*NodePersistRateLimitResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetPersistRateLimit", ctx, 
req) + ret0, _ := ret[0].(*NodePersistRateLimitResult_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SetPersistRateLimit indicates an expected call of SetPersistRateLimit +func (mr *MockTChanNodeMockRecorder) SetPersistRateLimit(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetPersistRateLimit", reflect.TypeOf((*MockTChanNode)(nil).SetPersistRateLimit), ctx, req) +} + +// SetQueryLimitOverrides mocks base method +func (m *MockTChanNode) SetQueryLimitOverrides(ctx thrift.Context, req *NodeSetQueryLimitOverridesRequest) (*NodeQueryLimitOverridesResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetQueryLimitOverrides", ctx, req) + ret0, _ := ret[0].(*NodeQueryLimitOverridesResult_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SetQueryLimitOverrides indicates an expected call of SetQueryLimitOverrides +func (mr *MockTChanNodeMockRecorder) SetQueryLimitOverrides(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetQueryLimitOverrides", reflect.TypeOf((*MockTChanNode)(nil).SetQueryLimitOverrides), ctx, req) +} + +// SetWriteNewSeriesAsync mocks base method +func (m *MockTChanNode) SetWriteNewSeriesAsync(ctx thrift.Context, req *NodeSetWriteNewSeriesAsyncRequest) (*NodeWriteNewSeriesAsyncResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetWriteNewSeriesAsync", ctx, req) + ret0, _ := ret[0].(*NodeWriteNewSeriesAsyncResult_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SetWriteNewSeriesAsync indicates an expected call of SetWriteNewSeriesAsync +func (mr *MockTChanNodeMockRecorder) SetWriteNewSeriesAsync(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetWriteNewSeriesAsync", reflect.TypeOf((*MockTChanNode)(nil).SetWriteNewSeriesAsync), ctx, req) +} + +// SetWriteNewSeriesBackoffDuration mocks base 
method +func (m *MockTChanNode) SetWriteNewSeriesBackoffDuration(ctx thrift.Context, req *NodeSetWriteNewSeriesBackoffDurationRequest) (*NodeWriteNewSeriesBackoffDurationResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetWriteNewSeriesBackoffDuration", ctx, req) + ret0, _ := ret[0].(*NodeWriteNewSeriesBackoffDurationResult_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SetWriteNewSeriesBackoffDuration indicates an expected call of SetWriteNewSeriesBackoffDuration +func (mr *MockTChanNodeMockRecorder) SetWriteNewSeriesBackoffDuration(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetWriteNewSeriesBackoffDuration", reflect.TypeOf((*MockTChanNode)(nil).SetWriteNewSeriesBackoffDuration), ctx, req) +} + +// SetWriteNewSeriesLimitPerShardPerSecond mocks base method +func (m *MockTChanNode) SetWriteNewSeriesLimitPerShardPerSecond(ctx thrift.Context, req *NodeSetWriteNewSeriesLimitPerShardPerSecondRequest) (*NodeWriteNewSeriesLimitPerShardPerSecondResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetWriteNewSeriesLimitPerShardPerSecond", ctx, req) + ret0, _ := ret[0].(*NodeWriteNewSeriesLimitPerShardPerSecondResult_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SetWriteNewSeriesLimitPerShardPerSecond indicates an expected call of SetWriteNewSeriesLimitPerShardPerSecond +func (mr *MockTChanNodeMockRecorder) SetWriteNewSeriesLimitPerShardPerSecond(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetWriteNewSeriesLimitPerShardPerSecond", reflect.TypeOf((*MockTChanNode)(nil).SetWriteNewSeriesLimitPerShardPerSecond), ctx, req) +} + +// Truncate mocks base method +func (m *MockTChanNode) Truncate(ctx thrift.Context, req *TruncateRequest) (*TruncateResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Truncate", ctx, req) + ret0, _ := ret[0].(*TruncateResult_) + ret1, _ := 
ret[1].(error) + return ret0, ret1 +} + +// Truncate indicates an expected call of Truncate +func (mr *MockTChanNodeMockRecorder) Truncate(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Truncate", reflect.TypeOf((*MockTChanNode)(nil).Truncate), ctx, req) +} + +// Write mocks base method +func (m *MockTChanNode) Write(ctx thrift.Context, req *WriteRequest) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Write", ctx, req) + ret0, _ := ret[0].(error) + return ret0 +} + +// Write indicates an expected call of Write +func (mr *MockTChanNodeMockRecorder) Write(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Write", reflect.TypeOf((*MockTChanNode)(nil).Write), ctx, req) +} + +// WriteBatchRaw mocks base method +func (m *MockTChanNode) WriteBatchRaw(ctx thrift.Context, req *WriteBatchRawRequest) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WriteBatchRaw", ctx, req) + ret0, _ := ret[0].(error) + return ret0 +} + +// WriteBatchRaw indicates an expected call of WriteBatchRaw +func (mr *MockTChanNodeMockRecorder) WriteBatchRaw(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteBatchRaw", reflect.TypeOf((*MockTChanNode)(nil).WriteBatchRaw), ctx, req) +} + +// WriteBatchRawV2 mocks base method +func (m *MockTChanNode) WriteBatchRawV2(ctx thrift.Context, req *WriteBatchRawV2Request) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WriteBatchRawV2", ctx, req) + ret0, _ := ret[0].(error) + return ret0 +} + +// WriteBatchRawV2 indicates an expected call of WriteBatchRawV2 +func (mr *MockTChanNodeMockRecorder) WriteBatchRawV2(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteBatchRawV2", reflect.TypeOf((*MockTChanNode)(nil).WriteBatchRawV2), ctx, req) +} + +// WriteTagged mocks 
base method +func (m *MockTChanNode) WriteTagged(ctx thrift.Context, req *WriteTaggedRequest) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WriteTagged", ctx, req) + ret0, _ := ret[0].(error) + return ret0 +} + +// WriteTagged indicates an expected call of WriteTagged +func (mr *MockTChanNodeMockRecorder) WriteTagged(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteTagged", reflect.TypeOf((*MockTChanNode)(nil).WriteTagged), ctx, req) +} + +// WriteTaggedBatchRaw mocks base method +func (m *MockTChanNode) WriteTaggedBatchRaw(ctx thrift.Context, req *WriteTaggedBatchRawRequest) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WriteTaggedBatchRaw", ctx, req) + ret0, _ := ret[0].(error) + return ret0 +} + +// WriteTaggedBatchRaw indicates an expected call of WriteTaggedBatchRaw +func (mr *MockTChanNodeMockRecorder) WriteTaggedBatchRaw(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteTaggedBatchRaw", reflect.TypeOf((*MockTChanNode)(nil).WriteTaggedBatchRaw), ctx, req) +} + +// WriteTaggedBatchRawV2 mocks base method +func (m *MockTChanNode) WriteTaggedBatchRawV2(ctx thrift.Context, req *WriteTaggedBatchRawV2Request) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WriteTaggedBatchRawV2", ctx, req) + ret0, _ := ret[0].(error) + return ret0 +} + +// WriteTaggedBatchRawV2 indicates an expected call of WriteTaggedBatchRawV2 +func (mr *MockTChanNodeMockRecorder) WriteTaggedBatchRawV2(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteTaggedBatchRawV2", reflect.TypeOf((*MockTChanNode)(nil).WriteTaggedBatchRawV2), ctx, req) +} diff --git a/src/dbnode/storage/limits/query_limits.go b/src/dbnode/storage/limits/query_limits.go index 2871dda637..1b770681e5 100644 --- a/src/dbnode/storage/limits/query_limits.go +++ 
b/src/dbnode/storage/limits/query_limits.go @@ -174,7 +174,7 @@ func (q *queryLimits) AnyExceeded() error { return q.bytesReadLimit.exceeded() } -// Inc increments the current value and returns an error if above the limit. +// Override overrides the limit set on construction. func (q *lookbackLimit) Override(limit *int64) error { if limit != nil && *limit < 0 { return fmt.Errorf("invalid negative query limit override %d", *limit) From b90cc5c9c9b116696693ae69056812bcd6f3d407 Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Thu, 14 Jan 2021 18:09:59 -0500 Subject: [PATCH 09/80] Fix limit test --- src/dbnode/storage/limits/query_limits_test.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/dbnode/storage/limits/query_limits_test.go b/src/dbnode/storage/limits/query_limits_test.go index 7739812c62..f79020e1e1 100644 --- a/src/dbnode/storage/limits/query_limits_test.go +++ b/src/dbnode/storage/limits/query_limits_test.go @@ -156,9 +156,10 @@ func TestLookbackLimit(t *testing.T) { } func verifyLimit(t *testing.T, limit *lookbackLimit, inc int, expectedLimit int64) int64 { + fmt.Println("A", limit, inc, expectedLimit) var exceededCount int64 err := limit.Inc(inc, nil) - if limit.current() <= expectedLimit || expectedLimit == 0 { + if limit.current() < expectedLimit || expectedLimit == 0 { require.NoError(t, err) } else { require.Error(t, err) @@ -167,7 +168,7 @@ func verifyLimit(t *testing.T, limit *lookbackLimit, inc int, expectedLimit int6 exceededCount++ } err = limit.exceeded() - if limit.current() <= expectedLimit || expectedLimit == 0 { + if limit.current() < expectedLimit || expectedLimit == 0 { require.NoError(t, err) } else { require.Error(t, err) From 7916470be2be55c7a1f4a3b353bf010f7ae64e5e Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Thu, 14 Jan 2021 18:10:36 -0500 Subject: [PATCH 10/80] Fix limit test 2 --- src/dbnode/storage/limits/query_limits_test.go | 1 - 1 file changed, 1 deletion(-) diff --git 
a/src/dbnode/storage/limits/query_limits_test.go b/src/dbnode/storage/limits/query_limits_test.go index f79020e1e1..3ba8795995 100644 --- a/src/dbnode/storage/limits/query_limits_test.go +++ b/src/dbnode/storage/limits/query_limits_test.go @@ -156,7 +156,6 @@ func TestLookbackLimit(t *testing.T) { } func verifyLimit(t *testing.T, limit *lookbackLimit, inc int, expectedLimit int64) int64 { - fmt.Println("A", limit, inc, expectedLimit) var exceededCount int64 err := limit.Inc(inc, nil) if limit.current() < expectedLimit || expectedLimit == 0 { From 5c6ae76cbcd6a68ac5f273005ee8e24de6c1dc90 Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Thu, 14 Jan 2021 18:27:09 -0500 Subject: [PATCH 11/80] Add Override test --- .../storage/limits/query_limits_test.go | 38 ++++++++++++++----- 1 file changed, 29 insertions(+), 9 deletions(-) diff --git a/src/dbnode/storage/limits/query_limits_test.go b/src/dbnode/storage/limits/query_limits_test.go index 3ba8795995..f4f511342a 100644 --- a/src/dbnode/storage/limits/query_limits_test.go +++ b/src/dbnode/storage/limits/query_limits_test.go @@ -110,19 +110,19 @@ func TestLookbackLimit(t *testing.T) { // Validate ascending while checking limits. 
var exceededCount int64 - exceededCount += verifyLimit(t, limit, 3, test.limit) + exceededCount += verifyLimit(t, limit, 3, limit.limit) require.Equal(t, int64(3), limit.current()) verifyMetrics(t, scope, name, 3, 0, 3, exceededCount) - exceededCount += verifyLimit(t, limit, 2, test.limit) + exceededCount += verifyLimit(t, limit, 2, limit.limit) require.Equal(t, int64(5), limit.current()) verifyMetrics(t, scope, name, 5, 0, 5, exceededCount) - exceededCount += verifyLimit(t, limit, 1, test.limit) + exceededCount += verifyLimit(t, limit, 1, limit.limit) require.Equal(t, int64(6), limit.current()) verifyMetrics(t, scope, name, 6, 0, 6, exceededCount) - exceededCount += verifyLimit(t, limit, 4, test.limit) + exceededCount += verifyLimit(t, limit, 4, limit.limit) require.Equal(t, int64(10), limit.current()) verifyMetrics(t, scope, name, 10, 0, 10, exceededCount) @@ -132,11 +132,11 @@ func TestLookbackLimit(t *testing.T) { verifyMetrics(t, scope, name, 0, 10, 10, exceededCount) // Validate ascending again post-reset. 
- exceededCount += verifyLimit(t, limit, 2, test.limit) + exceededCount += verifyLimit(t, limit, 2, limit.limit) require.Equal(t, int64(2), limit.current()) verifyMetrics(t, scope, name, 2, 10, 12, exceededCount) - exceededCount += verifyLimit(t, limit, 5, test.limit) + exceededCount += verifyLimit(t, limit, 5, limit.limit) require.Equal(t, int64(7), limit.current()) verifyMetrics(t, scope, name, 7, 10, 17, exceededCount) @@ -151,14 +151,34 @@ func TestLookbackLimit(t *testing.T) { require.Equal(t, int64(0), limit.current()) verifyMetrics(t, scope, name, 0, 0, 17, exceededCount) + + limit.reset() + + overrideZero := int64(0) + require.NoError(t, limit.Override(&overrideZero)) + + exceededCount += verifyLimit(t, limit, 0, &overrideZero) + require.Equal(t, int64(0), limit.current()) + + overrideNonZero := int64(2) + require.NoError(t, limit.Override(&overrideNonZero)) + + exceededCount += verifyLimit(t, limit, 1, &overrideNonZero) + require.Equal(t, int64(1), limit.current()) + + exceededCount += verifyLimit(t, limit, 1, &overrideNonZero) + require.Equal(t, int64(2), limit.current()) + + exceededCount += verifyLimit(t, limit, 1, &overrideNonZero) + require.Equal(t, int64(3), limit.current()) }) } } -func verifyLimit(t *testing.T, limit *lookbackLimit, inc int, expectedLimit int64) int64 { +func verifyLimit(t *testing.T, limit *lookbackLimit, inc int, expectedLimit *int64) int64 { var exceededCount int64 err := limit.Inc(inc, nil) - if limit.current() < expectedLimit || expectedLimit == 0 { + if expectedLimit == nil || limit.current() < *expectedLimit { require.NoError(t, err) } else { require.Error(t, err) @@ -167,7 +187,7 @@ func verifyLimit(t *testing.T, limit *lookbackLimit, inc int, expectedLimit int6 exceededCount++ } err = limit.exceeded() - if limit.current() < expectedLimit || expectedLimit == 0 { + if expectedLimit == nil || limit.current() < *expectedLimit { require.NoError(t, err) } else { require.Error(t, err) From 3285a6282a04f132d7331e7a3eefa8fe4e56e4dd 
Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Fri, 15 Jan 2021 11:08:03 -0500 Subject: [PATCH 12/80] Add Override test 2 --- src/dbnode/storage/limits/query_limits_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/dbnode/storage/limits/query_limits_test.go b/src/dbnode/storage/limits/query_limits_test.go index f4f511342a..1427fa3b63 100644 --- a/src/dbnode/storage/limits/query_limits_test.go +++ b/src/dbnode/storage/limits/query_limits_test.go @@ -165,12 +165,15 @@ func TestLookbackLimit(t *testing.T) { exceededCount += verifyLimit(t, limit, 1, &overrideNonZero) require.Equal(t, int64(1), limit.current()) + verifyMetrics(t, scope, name, 1, 0, 18, exceededCount) exceededCount += verifyLimit(t, limit, 1, &overrideNonZero) require.Equal(t, int64(2), limit.current()) + verifyMetrics(t, scope, name, 2, 0, 19, exceededCount) exceededCount += verifyLimit(t, limit, 1, &overrideNonZero) require.Equal(t, int64(3), limit.current()) + verifyMetrics(t, scope, name, 3, 0, 20, exceededCount) }) } } From 6df725bd794e7008c4bb4d7a4215664a2e2d3441 Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Fri, 15 Jan 2021 19:22:51 -0500 Subject: [PATCH 13/80] Back query limits by etcd --- src/dbnode/kvconfig/keys.go | 8 ++ .../server/tchannelthrift/node/service.go | 4 +- src/dbnode/persist/fs/retriever.go | 4 + src/dbnode/server/server.go | 95 +++++++++++++++---- .../storage/limits/noop_query_limits.go | 2 +- src/dbnode/storage/limits/query_limits.go | 68 +++++++------ src/dbnode/storage/limits/types.go | 8 +- 7 files changed, 131 insertions(+), 58 deletions(-) diff --git a/src/dbnode/kvconfig/keys.go b/src/dbnode/kvconfig/keys.go index e607be8d38..3145cc89df 100644 --- a/src/dbnode/kvconfig/keys.go +++ b/src/dbnode/kvconfig/keys.go @@ -49,4 +49,12 @@ const ( // ClientWriteConsistencyLevel is the KV config key for the runtime // configuration specifying the client write consistency level ClientWriteConsistencyLevel = "m3db.client.write-consistency-level" + + // DocsLimit is the 
KV config key for the docs matched query limit. + // Settings in string form "{limit},{lookback}", e.g. "1000,15s". + DocsLimit = "m3db.limits.docs" + + // BytesReadLimit is the KV config key for the bytes read query limit. + // Settings in string form "{limit},{lookback}", e.g. "1000,15s". + BytesReadLimit = "m3db.limits.bytes-read" ) diff --git a/src/dbnode/network/server/tchannelthrift/node/service.go b/src/dbnode/network/server/tchannelthrift/node/service.go index 5f928d7360..24776b0b51 100644 --- a/src/dbnode/network/server/tchannelthrift/node/service.go +++ b/src/dbnode/network/server/tchannelthrift/node/service.go @@ -2095,10 +2095,10 @@ func (s *service) SetQueryLimitOverrides( } queryLimits := db.Options().IndexOptions().QueryLimits() - if err := queryLimits.BytesReadLimit().Override(req.BytesReadLimitOverride); err != nil { + if err := queryLimits.BytesReadLimit().Update(req.BytesReadLimitOverride); err != nil { return nil, err } - if err := queryLimits.DocsLimit().Override(req.DocsLimitOverride); err != nil { + if err := queryLimits.DocsLimit().Update(req.DocsLimitOverride); err != nil { return nil, err } diff --git a/src/dbnode/persist/fs/retriever.go b/src/dbnode/persist/fs/retriever.go index a64b76da83..e6320ac040 100644 --- a/src/dbnode/persist/fs/retriever.go +++ b/src/dbnode/persist/fs/retriever.go @@ -564,6 +564,10 @@ func (r *blockRetriever) streamRequest( startTime time.Time, nsCtx namespace.Context, ) (bool, error) { + if err := r.queryLimits.AnyExceeded(); err != nil { + return false, err + } + req.shard = shard // NB(r): Clone the ID as we're not positive it will stay valid throughout // the lifecycle of the async request. 
diff --git a/src/dbnode/server/server.go b/src/dbnode/server/server.go index ef21426ef7..2f12f7b6ec 100644 --- a/src/dbnode/server/server.go +++ b/src/dbnode/server/server.go @@ -32,6 +32,7 @@ import ( "path" "runtime" "runtime/debug" + "strconv" "strings" "sync" "time" @@ -987,6 +988,8 @@ func Run(runOpts RunOptions) { runtimeOptsMgr, cfg.Limits.WriteNewSeriesPerSecond) kvWatchEncodersPerBlockLimit(syncCfg.KVStore, logger, runtimeOptsMgr, cfg.Limits.MaxEncodersPerBlock) + kvWatchQueryLimit(syncCfg.KVStore, logger, queryLimits, kvconfig.DocsLimit) + kvWatchQueryLimit(syncCfg.KVStore, logger, queryLimits, kvconfig.BytesReadLimit) }() // Wait for process interrupt. @@ -1111,7 +1114,7 @@ func kvWatchEncodersPerBlockLimit( ) { var initEncoderLimit int - value, err := store.Get(kvconfig.EncodersPerBlockLimitKey) + value, err := store.Set.Get(kvconfig.EncodersPerBlockLimitKey) if err == nil { protoValue := &commonpb.Int64Proto{} err = value.Unmarshal(protoValue) @@ -1159,6 +1162,81 @@ func kvWatchEncodersPerBlockLimit( }() } +func kvWatchQueryLimit( + store kv.Store, + logger *zap.Logger, + limit limits.LookbackLimit, + kvName string, +) { + options := limit.Options() + + parseOptionsFn := func(val string, defaultOpts LookbackLimitOptions) LookbackLimitOptions { + parts := strings.Split(val, ",") + if val == "" { + defaultOpts.Limit = nil + } else if len(parts) == 2 { + parsedLimit, err := strconv.ParseInt(parts[0], 10, 64) + if err != nil { + logger.Warn("error parsing query limit value", zap.Error(err), zap.String("name", kvName)) + } else { + defaultOpts.Limit = &parsedLimit + } + parsedLookback, err := time.ParseDuration(parts[1]) + if err != nil { + logger.Warn("error parsing query limit lookback", zap.Error(err), zap.String("name", kvName)) + } else { + defaultOpts.Lookback = parsedLookback + } + } + return defaultOpts + } + + value, err := store.Set.Get(kvName) + if err == nil { + protoValue := &commonpb.StringProto{} + err = value.Unmarshal(protoValue) + if err 
== nil { + options := parseOptionsFn(protoValue.Value, options) + } + } + + if err != nil { + if err != kv.ErrNotFound { + logger.Warn("error resolving encoder per block limit", zap.Error(err)) + } + } + + err = limit.Update(options) + if err != nil { + logger.Warn("unable to set query limit", zap.Error(err), zap.String("name", kvName)) + } + + watch, err := store.Watch(kvName) + if err != nil { + logger.Error("could not watch query limit", zap.Error(err), zap.String("name", kvName)) + return + } + + go func() { + protoValue := &commonpb.StringProto{} + for range watch.C() { + value := options + if newValue := watch.Get(); newValue != nil { + if err := newValue.Unmarshal(protoValue); err != nil { + logger.Warn("unable to parse new encoder per block limit", zap.Error(err)) + continue + } + value = parseOptionsFn(protoValue.Value, value) + } + + err = limit.Update(value) + if err != nil { + logger.Warn("unable to set query limit", zap.Error(err), zap.String("name", kvName)) + } + } + }() +} + func kvWatchClientConsistencyLevels( store kv.Store, logger *zap.Logger, @@ -1318,21 +1396,6 @@ func clusterLimitToPlacedShardLimit(topo topology.Topology, clusterLimit int) in return nodeLimit } -func setEncodersPerBlockLimitOnChange( - runtimeOptsMgr m3dbruntime.OptionsManager, - encoderLimit int, -) error { - runtimeOpts := runtimeOptsMgr.Get() - if runtimeOpts.EncodersPerBlockLimit() == encoderLimit { - // Not changed, no need to set the value and trigger a runtime options update - return nil - } - - newRuntimeOpts := runtimeOpts. 
- SetEncodersPerBlockLimit(encoderLimit) - return runtimeOptsMgr.Update(newRuntimeOpts) -} - func withEncodingAndPoolingOptions( cfg config.DBConfiguration, logger *zap.Logger, diff --git a/src/dbnode/storage/limits/noop_query_limits.go b/src/dbnode/storage/limits/noop_query_limits.go index b25caf5c24..1cc2ca2406 100644 --- a/src/dbnode/storage/limits/noop_query_limits.go +++ b/src/dbnode/storage/limits/noop_query_limits.go @@ -54,7 +54,7 @@ func (q *noOpQueryLimits) Stop() { func (q *noOpQueryLimits) Start() { } -func (q *noOpLookbackLimit) Override(*int64) error { +func (q *noOpLookbackLimit) Update(LookbackLimitOptions) error { return nil } diff --git a/src/dbnode/storage/limits/query_limits.go b/src/dbnode/storage/limits/query_limits.go index 1b770681e5..0871d6c87b 100644 --- a/src/dbnode/storage/limits/query_limits.go +++ b/src/dbnode/storage/limits/query_limits.go @@ -42,13 +42,14 @@ type queryLimits struct { } type lookbackLimit struct { - name string - limit *int64 - options LookbackLimitOptions - metrics lookbackLimitMetrics - recent *atomic.Int64 - stopCh chan struct{} - overrideLock sync.RWMutex + name string + limit *int64 + options LookbackLimitOptions + metrics lookbackLimitMetrics + recent *atomic.Int64 + ticker *time.Ticker + stopCh chan struct{} + lock sync.RWMutex } type lookbackLimitMetrics struct { @@ -69,7 +70,7 @@ var ( func DefaultLookbackLimitOptions() LookbackLimitOptions { return LookbackLimitOptions{ // Default to no limit. - Limit: 0, + Limit: nil, Lookback: defaultLookback, } } @@ -106,7 +107,6 @@ func newLookbackLimit( ) *lookbackLimit { return &lookbackLimit{ name: name, - limit: getLimit(opts, nil), options: opts, metrics: newLookbackLimitMetrics(instrumentOpts, name, sourceLoggerBuilder), recent: atomic.NewInt64(0), @@ -114,19 +114,6 @@ func newLookbackLimit( } } -func getLimit(opts LookbackLimitOptions, override *int64) *int64 { - if override != nil { - return override - } - - // Zero limit means no limit enforced. 
- var limit *int64 - if opts.Limit != 0 { - limit = &opts.Limit - } - return limit -} - func newLookbackLimitMetrics( instrumentOpts instrument.Options, name string, @@ -174,16 +161,25 @@ func (q *queryLimits) AnyExceeded() error { return q.bytesReadLimit.exceeded() } +func (q *lookbackLimit) Options() LookbackLimitOptions { + return q.options +} + // Override overrides the limit set on construction. -func (q *lookbackLimit) Override(limit *int64) error { - if limit != nil && *limit < 0 { - return fmt.Errorf("invalid negative query limit override %d", *limit) +func (q *lookbackLimit) Update(opts LookbackLimitOptions) error { + if err := opts.validate(); err != nil { + return err + } + + q.lock.Lock() + defer q.lock.Unlock() + + if q.options.Lookback != opts.Lookback { + q.ticker.Reset(opts.Lookback) } - q.overrideLock.Lock() - defer q.overrideLock.Unlock() + q.options = opts - q.limit = getLimit(q.options, limit) return nil } @@ -215,9 +211,9 @@ func (q *lookbackLimit) exceeded() error { } func (q *lookbackLimit) checkLimit(recent int64) error { - q.overrideLock.RLock() - limit := q.limit - q.overrideLock.RUnlock() + q.lock.RLock() + limit := q.options.Limit + q.lock.RUnlock() if limit == nil { return nil @@ -233,14 +229,14 @@ func (q *lookbackLimit) checkLimit(recent int64) error { } func (q *lookbackLimit) start() { - ticker := time.NewTicker(q.options.Lookback) + q.ticker = time.NewTicker(q.options.Lookback) go func() { for { select { - case <-ticker.C: + case <-q.ticker.C: q.reset() case <-q.stopCh: - ticker.Stop() + q.ticker.Stop() return } } @@ -268,8 +264,8 @@ func (q *lookbackLimit) reset() { } func (opts LookbackLimitOptions) validate() error { - if opts.Limit < 0 { - return fmt.Errorf("query limit requires limit >= 0 (%d)", opts.Limit) + if opts.Limit != nil && *opts.Limit < 0 { + return fmt.Errorf("query limit requires limit >= 0 or nil (%d)", *opts.Limit) } if opts.Lookback <= 0 { return fmt.Errorf("query limit requires lookback > 0 (%d)", 
opts.Lookback) diff --git a/src/dbnode/storage/limits/types.go b/src/dbnode/storage/limits/types.go index 0f1d829eea..2d24f861e9 100644 --- a/src/dbnode/storage/limits/types.go +++ b/src/dbnode/storage/limits/types.go @@ -50,16 +50,18 @@ type QueryLimits interface { // LookbackLimit provides an interface for a specific query limit. type LookbackLimit interface { + // Options returns the current limit options. + Options() LookbackLimitOptions // Inc increments the recent value for the limit. Inc(new int, source []byte) error - // Override overrides the lookback limit value. - Override(limit *int64) error + // Update changes the lookback limit settings. + Update(opts LookbackLimitOptions) error } // LookbackLimitOptions holds options for a lookback limit to be enforced. type LookbackLimitOptions struct { // Limit past which errors will be returned. - Limit int64 + Limit *int64 // Lookback is the period over which the limit is enforced. Lookback time.Duration } From e7e23aa7369612e823d8cf320786a79da90bd325 Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Fri, 15 Jan 2021 19:24:34 -0500 Subject: [PATCH 14/80] Back query limits by etcd 2 --- src/dbnode/network/server/tchannelthrift/node/service.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/dbnode/network/server/tchannelthrift/node/service.go b/src/dbnode/network/server/tchannelthrift/node/service.go index 24776b0b51..df6c39cba6 100644 --- a/src/dbnode/network/server/tchannelthrift/node/service.go +++ b/src/dbnode/network/server/tchannelthrift/node/service.go @@ -2094,6 +2094,9 @@ func (s *service) SetQueryLimitOverrides( return nil, err } + // TODO(ra): what is the protocol for exposing way to update etcd values? does it make sense to keep this + // endpoint and call kvStore.Set(...) on these vals, which then trigger the queryLimits.Update(...)? Or + // should we just delete this endpoint now and rely on some other more generic way to update etcd? 
queryLimits := db.Options().IndexOptions().QueryLimits() if err := queryLimits.BytesReadLimit().Update(req.BytesReadLimitOverride); err != nil { return nil, err From 6c4bca4a9b069d2dae07e7c6203b7ea15e4ccc19 Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Fri, 15 Jan 2021 19:25:42 -0500 Subject: [PATCH 15/80] Back query limits by etcd 3 --- src/dbnode/server/server.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/dbnode/server/server.go b/src/dbnode/server/server.go index 2f12f7b6ec..26198fe5ff 100644 --- a/src/dbnode/server/server.go +++ b/src/dbnode/server/server.go @@ -1114,7 +1114,7 @@ func kvWatchEncodersPerBlockLimit( ) { var initEncoderLimit int - value, err := store.Set.Get(kvconfig.EncodersPerBlockLimitKey) + value, err := store.Get(kvconfig.EncodersPerBlockLimitKey) if err == nil { protoValue := &commonpb.Int64Proto{} err = value.Unmarshal(protoValue) From 8f281b99af267696fbb1bf8a288a169ce7bc4d91 Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Mon, 18 Jan 2021 10:08:46 -0500 Subject: [PATCH 16/80] Build fix --- src/dbnode/network/server/tchannelthrift/node/service.go | 9 +-------- src/dbnode/storage/limits/noop_query_limits.go | 4 ++++ 2 files changed, 5 insertions(+), 8 deletions(-) diff --git a/src/dbnode/network/server/tchannelthrift/node/service.go b/src/dbnode/network/server/tchannelthrift/node/service.go index df6c39cba6..60788e4067 100644 --- a/src/dbnode/network/server/tchannelthrift/node/service.go +++ b/src/dbnode/network/server/tchannelthrift/node/service.go @@ -2089,7 +2089,7 @@ func (s *service) SetQueryLimitOverrides( *rpc.NodeQueryLimitOverridesResult_, error, ) { - db, err := s.startRPCWithDB() + _, err := s.startRPCWithDB() if err != nil { return nil, err } @@ -2097,13 +2097,6 @@ func (s *service) SetQueryLimitOverrides( // TODO(ra): what is the protocol for exposing way to update etcd values? does it make sense to keep this // endpoint and call kvStore.Set(...) 
on these vals, which then trigger the queryLimits.Update(...)? Or // should we just delete this endpoint now and rely on some other more generic way to update etcd? - queryLimits := db.Options().IndexOptions().QueryLimits() - if err := queryLimits.BytesReadLimit().Update(req.BytesReadLimitOverride); err != nil { - return nil, err - } - if err := queryLimits.DocsLimit().Update(req.DocsLimitOverride); err != nil { - return nil, err - } s.logger.Info("query limit overrides set", zap.Int64p("bytes-read", req.BytesReadLimitOverride), diff --git a/src/dbnode/storage/limits/noop_query_limits.go b/src/dbnode/storage/limits/noop_query_limits.go index 1cc2ca2406..6c1ab1cd89 100644 --- a/src/dbnode/storage/limits/noop_query_limits.go +++ b/src/dbnode/storage/limits/noop_query_limits.go @@ -54,6 +54,10 @@ func (q *noOpQueryLimits) Stop() { func (q *noOpQueryLimits) Start() { } +func (q *noOpLookbackLimit) Options() LookbackLimitOptions { + return LookbackLimitOptions{} +} + func (q *noOpLookbackLimit) Update(LookbackLimitOptions) error { return nil } From 7b0d797295c67676f5d775cefca5707b036ae973 Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Mon, 18 Jan 2021 10:12:49 -0500 Subject: [PATCH 17/80] Build fix 2 --- src/dbnode/server/server.go | 33 ++++++++++++++++++++++++++------- 1 file changed, 26 insertions(+), 7 deletions(-) diff --git a/src/dbnode/server/server.go b/src/dbnode/server/server.go index 26198fe5ff..5211ad52a2 100644 --- a/src/dbnode/server/server.go +++ b/src/dbnode/server/server.go @@ -452,11 +452,15 @@ func Run(runOpts RunOptions) { docsLimit := limits.DefaultLookbackLimitOptions() bytesReadLimit := limits.DefaultLookbackLimitOptions() if limitConfig := runOpts.Config.Limits.MaxRecentlyQueriedSeriesBlocks; limitConfig != nil { - docsLimit.Limit = limitConfig.Value + if limitConfig.Value != 0 { + docsLimit.Limit = &limitConfig.Value + } docsLimit.Lookback = limitConfig.Lookback } if limitConfig := runOpts.Config.Limits.MaxRecentlyQueriedSeriesDiskBytesRead; 
limitConfig != nil { - bytesReadLimit.Limit = limitConfig.Value + if limitConfig.Value != 0 { + bytesReadLimit.Limit = &limitConfig.Value + } bytesReadLimit.Lookback = limitConfig.Lookback } limitOpts := limits.NewOptions(). @@ -988,8 +992,8 @@ func Run(runOpts RunOptions) { runtimeOptsMgr, cfg.Limits.WriteNewSeriesPerSecond) kvWatchEncodersPerBlockLimit(syncCfg.KVStore, logger, runtimeOptsMgr, cfg.Limits.MaxEncodersPerBlock) - kvWatchQueryLimit(syncCfg.KVStore, logger, queryLimits, kvconfig.DocsLimit) - kvWatchQueryLimit(syncCfg.KVStore, logger, queryLimits, kvconfig.BytesReadLimit) + kvWatchQueryLimit(syncCfg.KVStore, logger, queryLimits.DocsLimit(), kvconfig.DocsLimit) + kvWatchQueryLimit(syncCfg.KVStore, logger, queryLimits.BytesReadLimit(), kvconfig.BytesReadLimit) }() // Wait for process interrupt. @@ -1170,7 +1174,7 @@ func kvWatchQueryLimit( ) { options := limit.Options() - parseOptionsFn := func(val string, defaultOpts LookbackLimitOptions) LookbackLimitOptions { + parseOptionsFn := func(val string, defaultOpts limits.LookbackLimitOptions) limits.LookbackLimitOptions { parts := strings.Split(val, ",") if val == "" { defaultOpts.Limit = nil @@ -1191,12 +1195,12 @@ func kvWatchQueryLimit( return defaultOpts } - value, err := store.Set.Get(kvName) + value, err := store.Get(kvName) if err == nil { protoValue := &commonpb.StringProto{} err = value.Unmarshal(protoValue) if err == nil { - options := parseOptionsFn(protoValue.Value, options) + options = parseOptionsFn(protoValue.Value, options) } } @@ -1396,6 +1400,21 @@ func clusterLimitToPlacedShardLimit(topo topology.Topology, clusterLimit int) in return nodeLimit } +func setEncodersPerBlockLimitOnChange( + runtimeOptsMgr m3dbruntime.OptionsManager, + encoderLimit int, +) error { + runtimeOpts := runtimeOptsMgr.Get() + if runtimeOpts.EncodersPerBlockLimit() == encoderLimit { + // Not changed, no need to set the value and trigger a runtime options update + return nil + } + + newRuntimeOpts := runtimeOpts. 
+ SetEncodersPerBlockLimit(encoderLimit) + return runtimeOptsMgr.Update(newRuntimeOpts) +} + func withEncodingAndPoolingOptions( cfg config.DBConfiguration, logger *zap.Logger, From 2ad8c096dcbf2fd620b6e48d1a980e7313688604 Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Mon, 18 Jan 2021 10:28:34 -0500 Subject: [PATCH 18/80] Build fix 3 --- src/dbnode/storage/limits/query_limits.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/dbnode/storage/limits/query_limits.go b/src/dbnode/storage/limits/query_limits.go index 0871d6c87b..a9dc593d44 100644 --- a/src/dbnode/storage/limits/query_limits.go +++ b/src/dbnode/storage/limits/query_limits.go @@ -174,11 +174,12 @@ func (q *lookbackLimit) Update(opts LookbackLimitOptions) error { q.lock.Lock() defer q.lock.Unlock() - if q.options.Lookback != opts.Lookback { - q.ticker.Reset(opts.Lookback) - } - + loobackUpdated := q.options.Lookback != opts.Lookback q.options = opts + if loobackUpdated { + q.stop() + q.start() + } return nil } From bc508605df02befca98167b177342e842f47f468 Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Mon, 18 Jan 2021 10:37:20 -0500 Subject: [PATCH 19/80] Build fix 4 --- .../storage/limits/query_limits_test.go | 37 +++++++++++-------- 1 file changed, 21 insertions(+), 16 deletions(-) diff --git a/src/dbnode/storage/limits/query_limits_test.go b/src/dbnode/storage/limits/query_limits_test.go index 1427fa3b63..8f58e6a80f 100644 --- a/src/dbnode/storage/limits/query_limits_test.go +++ b/src/dbnode/storage/limits/query_limits_test.go @@ -46,12 +46,13 @@ func testQueryLimitOptions( } func TestQueryLimits(t *testing.T) { + l := int64(1) docOpts := LookbackLimitOptions{ - Limit: 1, + Limit: &l, Lookback: time.Second, } bytesOpts := LookbackLimitOptions{ - Limit: 1, + Limit: &l, Lookback: time.Second, } opts := testQueryLimitOptions(docOpts, bytesOpts, instrument.NewOptions()) @@ -89,10 +90,10 @@ func TestQueryLimits(t *testing.T) { func TestLookbackLimit(t *testing.T) { for _, 
test := range []struct { name string - limit int64 + limit *int64 }{ - {name: "no limit", limit: 0}, - {name: "limit", limit: 5}, + {name: "no limit", limit: prt(0)}, + {name: "limit", limit: prt(5)}, } { t.Run(test.name, func(t *testing.T) { scope := tally.NewTestScope("", nil) @@ -154,24 +155,24 @@ func TestLookbackLimit(t *testing.T) { limit.reset() - overrideZero := int64(0) - require.NoError(t, limit.Override(&overrideZero)) + opts.Limit = prt(0) + require.NoError(t, limit.Update(opts)) - exceededCount += verifyLimit(t, limit, 0, &overrideZero) + exceededCount += verifyLimit(t, limit, 0, opts.Limit) require.Equal(t, int64(0), limit.current()) - overrideNonZero := int64(2) - require.NoError(t, limit.Override(&overrideNonZero)) + opts.Limit = prt(2) + require.NoError(t, limit.Update(opts)) - exceededCount += verifyLimit(t, limit, 1, &overrideNonZero) + exceededCount += verifyLimit(t, limit, 1, opts.Limit) require.Equal(t, int64(1), limit.current()) verifyMetrics(t, scope, name, 1, 0, 18, exceededCount) - exceededCount += verifyLimit(t, limit, 1, &overrideNonZero) + exceededCount += verifyLimit(t, limit, 1, opts.Limit) require.Equal(t, int64(2), limit.current()) verifyMetrics(t, scope, name, 2, 0, 19, exceededCount) - exceededCount += verifyLimit(t, limit, 1, &overrideNonZero) + exceededCount += verifyLimit(t, limit, 1, opts.Limit) require.Equal(t, int64(3), limit.current()) verifyMetrics(t, scope, name, 3, 0, 20, exceededCount) }) @@ -205,7 +206,7 @@ func TestLookbackReset(t *testing.T) { scope := tally.NewTestScope("", nil) iOpts := instrument.NewOptions().SetMetricsScope(scope) opts := LookbackLimitOptions{ - Limit: 5, + Limit: prt(5), Lookback: time.Millisecond * 100, } name := "test" @@ -263,7 +264,7 @@ func TestValidateLookbackLimitOptions(t *testing.T) { } { t.Run(test.name, func(t *testing.T) { err := LookbackLimitOptions{ - Limit: test.max, + Limit: prt(test.max), Lookback: test.lookback, }.validate() if test.expectError { @@ -316,7 +317,7 @@ func 
TestSourceLogger(t *testing.T) { scope = tally.NewTestScope("test", nil) iOpts = instrument.NewOptions().SetMetricsScope(scope) noLimit = LookbackLimitOptions{ - Limit: 0, + Limit: prt(0), Lookback: time.Millisecond * 100, } @@ -340,6 +341,10 @@ func TestSourceLogger(t *testing.T) { }, builder.records) } +func prt(i int64) *int64 { + return &i +} + // NB: creates test logger records that share an underlying record set, // differentiated by source logger name. type testBuilder struct { From f2476c5af8a87f2cd5fd1a9faedb80a4bb32a1cc Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Mon, 18 Jan 2021 10:44:31 -0500 Subject: [PATCH 20/80] Lint --- src/dbnode/server/server.go | 51 ++++++++++++++++++++----------------- 1 file changed, 27 insertions(+), 24 deletions(-) diff --git a/src/dbnode/server/server.go b/src/dbnode/server/server.go index 5211ad52a2..8c4a7d66d0 100644 --- a/src/dbnode/server/server.go +++ b/src/dbnode/server/server.go @@ -1174,38 +1174,17 @@ func kvWatchQueryLimit( ) { options := limit.Options() - parseOptionsFn := func(val string, defaultOpts limits.LookbackLimitOptions) limits.LookbackLimitOptions { - parts := strings.Split(val, ",") - if val == "" { - defaultOpts.Limit = nil - } else if len(parts) == 2 { - parsedLimit, err := strconv.ParseInt(parts[0], 10, 64) - if err != nil { - logger.Warn("error parsing query limit value", zap.Error(err), zap.String("name", kvName)) - } else { - defaultOpts.Limit = &parsedLimit - } - parsedLookback, err := time.ParseDuration(parts[1]) - if err != nil { - logger.Warn("error parsing query limit lookback", zap.Error(err), zap.String("name", kvName)) - } else { - defaultOpts.Lookback = parsedLookback - } - } - return defaultOpts - } - value, err := store.Get(kvName) if err == nil { protoValue := &commonpb.StringProto{} err = value.Unmarshal(protoValue) if err == nil { - options = parseOptionsFn(protoValue.Value, options) + options = parseLookbackLimitOptions(logger, kvName, protoValue.Value, options) } } if err != nil 
{ - if err != kv.ErrNotFound { + if errors.Is(err, kv.ErrNotFound) { logger.Warn("error resolving encoder per block limit", zap.Error(err)) } } @@ -1230,7 +1209,7 @@ func kvWatchQueryLimit( logger.Warn("unable to parse new encoder per block limit", zap.Error(err)) continue } - value = parseOptionsFn(protoValue.Value, value) + value = parseLookbackLimitOptions(logger, kvName, protoValue.Value, value) } err = limit.Update(value) @@ -1241,6 +1220,30 @@ func kvWatchQueryLimit( }() } +func parseLookbackLimitOptions(logger *zap.Logger, + kvName string, + val string, + defaultOpts limits.LookbackLimitOptions) limits.LookbackLimitOptions { + parts := strings.Split(val, ",") + if val == "" { + defaultOpts.Limit = nil + } else if len(parts) == 2 { + parsedLimit, err := strconv.ParseInt(parts[0], 10, 64) + if err != nil { + logger.Warn("error parsing query limit value", zap.Error(err), zap.String("name", kvName)) + } else { + defaultOpts.Limit = &parsedLimit + } + parsedLookback, err := time.ParseDuration(parts[1]) + if err != nil { + logger.Warn("error parsing query limit lookback", zap.Error(err), zap.String("name", kvName)) + } else { + defaultOpts.Lookback = parsedLookback + } + } + return defaultOpts +} + func kvWatchClientConsistencyLevels( store kv.Store, logger *zap.Logger, From 9f4d9b10558959e3f84b5a95b8f7d26263b31362 Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Mon, 18 Jan 2021 11:11:46 -0500 Subject: [PATCH 21/80] Removing endpoint to instead use etcd directly --- src/dbnode/generated/thrift/rpc.thrift | 11 - src/dbnode/generated/thrift/rpc/rpc.go | 151 +---- .../server/tchannelthrift/node/service.go | 27 - src/dbnode/persist/fs/retriever.go | 4 - src/m3em/generated/proto/m3em/m3em_mock.go | 556 ------------------ 5 files changed, 1 insertion(+), 748 deletions(-) delete mode 100644 src/m3em/generated/proto/m3em/m3em_mock.go diff --git a/src/dbnode/generated/thrift/rpc.thrift b/src/dbnode/generated/thrift/rpc.thrift index 4132fb210e..e83cc7335f 100644 --- 
a/src/dbnode/generated/thrift/rpc.thrift +++ b/src/dbnode/generated/thrift/rpc.thrift @@ -85,7 +85,6 @@ service Node { NodeWriteNewSeriesBackoffDurationResult setWriteNewSeriesBackoffDuration(1: NodeSetWriteNewSeriesBackoffDurationRequest req) throws (1: Error err) NodeWriteNewSeriesLimitPerShardPerSecondResult getWriteNewSeriesLimitPerShardPerSecond() throws (1: Error err) NodeWriteNewSeriesLimitPerShardPerSecondResult setWriteNewSeriesLimitPerShardPerSecond(1: NodeSetWriteNewSeriesLimitPerShardPerSecondRequest req) throws (1: Error err) - NodeQueryLimitOverridesResult setQueryLimitOverrides(1: NodeSetQueryLimitOverridesRequest req) throws (1: Error err) // Debug endpoints DebugProfileStartResult debugProfileStart(1: DebugProfileStartRequest req) throws (1: Error err) @@ -368,16 +367,6 @@ struct NodeSetWriteNewSeriesLimitPerShardPerSecondRequest { 1: required i64 writeNewSeriesLimitPerShardPerSecond } -struct NodeQueryLimitOverridesResult { - 1: optional i64 docsLimitOverride - 2: optional i64 bytesReadLimitOverride -} - -struct NodeSetQueryLimitOverridesRequest { - 1: optional i64 docsLimitOverride - 2: optional i64 bytesReadLimitOverride -} - service Cluster { HealthResult health() throws (1: Error err) void write(1: WriteRequest req) throws (1: Error err) diff --git a/src/dbnode/generated/thrift/rpc/rpc.go b/src/dbnode/generated/thrift/rpc/rpc.go index 680dd5c9dc..5bd6ea9c2d 100644 --- a/src/dbnode/generated/thrift/rpc/rpc.go +++ b/src/dbnode/generated/thrift/rpc/rpc.go @@ -28,6 +28,7 @@ import ( "database/sql/driver" "errors" "fmt" + "github.com/apache/thrift/lib/go/thrift" ) @@ -9466,156 +9467,6 @@ func (p *NodeSetWriteNewSeriesLimitPerShardPerSecondRequest) String() string { return fmt.Sprintf("NodeSetWriteNewSeriesLimitPerShardPerSecondRequest(%+v)", *p) } -// Attributes: -// - DocsLimitOverride -// - BytesReadLimitOverride -type NodeQueryLimitOverridesResult_ struct { - DocsLimitOverride *int64 `thrift:"docsLimitOverride,1" db:"docsLimitOverride" 
json:"docsLimitOverride,omitempty"` - BytesReadLimitOverride *int64 `thrift:"bytesReadLimitOverride,2" db:"bytesReadLimitOverride" json:"bytesReadLimitOverride,omitempty"` -} - -func NewNodeQueryLimitOverridesResult_() *NodeQueryLimitOverridesResult_ { - return &NodeQueryLimitOverridesResult_{} -} - -var NodeQueryLimitOverridesResult__DocsLimitOverride_DEFAULT int64 - -func (p *NodeQueryLimitOverridesResult_) GetDocsLimitOverride() int64 { - if !p.IsSetDocsLimitOverride() { - return NodeQueryLimitOverridesResult__DocsLimitOverride_DEFAULT - } - return *p.DocsLimitOverride -} - -var NodeQueryLimitOverridesResult__BytesReadLimitOverride_DEFAULT int64 - -func (p *NodeQueryLimitOverridesResult_) GetBytesReadLimitOverride() int64 { - if !p.IsSetBytesReadLimitOverride() { - return NodeQueryLimitOverridesResult__BytesReadLimitOverride_DEFAULT - } - return *p.BytesReadLimitOverride -} -func (p *NodeQueryLimitOverridesResult_) IsSetDocsLimitOverride() bool { - return p.DocsLimitOverride != nil -} - -func (p *NodeQueryLimitOverridesResult_) IsSetBytesReadLimitOverride() bool { - return p.BytesReadLimitOverride != nil -} - -func (p *NodeQueryLimitOverridesResult_) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.ReadField1(iprot); err != nil { - return err - } - case 2: - if err := p.ReadField2(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) 
- } - return nil -} - -func (p *NodeQueryLimitOverridesResult_) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.DocsLimitOverride = &v - } - return nil -} - -func (p *NodeQueryLimitOverridesResult_) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.BytesReadLimitOverride = &v - } - return nil -} - -func (p *NodeQueryLimitOverridesResult_) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("NodeQueryLimitOverridesResult"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *NodeQueryLimitOverridesResult_) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetDocsLimitOverride() { - if err := oprot.WriteFieldBegin("docsLimitOverride", thrift.I64, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:docsLimitOverride: ", p), err) - } - if err := oprot.WriteI64(int64(*p.DocsLimitOverride)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.docsLimitOverride (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:docsLimitOverride: ", p), err) - } - } - return err -} - -func (p *NodeQueryLimitOverridesResult_) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetBytesReadLimitOverride() { - if err := 
oprot.WriteFieldBegin("bytesReadLimitOverride", thrift.I64, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:bytesReadLimitOverride: ", p), err) - } - if err := oprot.WriteI64(int64(*p.BytesReadLimitOverride)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.bytesReadLimitOverride (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:bytesReadLimitOverride: ", p), err) - } - } - return err -} - -func (p *NodeQueryLimitOverridesResult_) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("NodeQueryLimitOverridesResult_(%+v)", *p) -} - // Attributes: // - DocsLimitOverride // - BytesReadLimitOverride diff --git a/src/dbnode/network/server/tchannelthrift/node/service.go b/src/dbnode/network/server/tchannelthrift/node/service.go index b0452e9a7b..5b60d27804 100644 --- a/src/dbnode/network/server/tchannelthrift/node/service.go +++ b/src/dbnode/network/server/tchannelthrift/node/service.go @@ -2084,33 +2084,6 @@ func (s *service) SetWriteNewSeriesLimitPerShardPerSecond( return s.GetWriteNewSeriesLimitPerShardPerSecond(ctx) } -func (s *service) SetQueryLimitOverrides( - ctx thrift.Context, - req *rpc.NodeSetQueryLimitOverridesRequest, -) ( - *rpc.NodeQueryLimitOverridesResult_, - error, -) { - _, err := s.startRPCWithDB() - if err != nil { - return nil, err - } - - // TODO(ra): what is the protocol for exposing way to update etcd values? does it make sense to keep this - // endpoint and call kvStore.Set(...) on these vals, which then trigger the queryLimits.Update(...)? Or - // should we just delete this endpoint now and rely on some other more generic way to update etcd? 
- - s.logger.Info("query limit overrides set", - zap.Int64p("bytes-read", req.BytesReadLimitOverride), - zap.Int64p("docs", req.DocsLimitOverride), - ) - - return &rpc.NodeQueryLimitOverridesResult_{ - BytesReadLimitOverride: req.BytesReadLimitOverride, - DocsLimitOverride: req.DocsLimitOverride, - }, nil -} - func (s *service) DebugProfileStart( ctx thrift.Context, req *rpc.DebugProfileStartRequest, diff --git a/src/dbnode/persist/fs/retriever.go b/src/dbnode/persist/fs/retriever.go index 3e59b126c6..a5bfab3511 100644 --- a/src/dbnode/persist/fs/retriever.go +++ b/src/dbnode/persist/fs/retriever.go @@ -564,10 +564,6 @@ func (r *blockRetriever) streamRequest( startTime time.Time, nsCtx namespace.Context, ) (bool, error) { - if err := r.queryLimits.AnyExceeded(); err != nil { - return false, err - } - req.shard = shard // NB(r): If the ID is a ident.BytesID then we can just hold diff --git a/src/m3em/generated/proto/m3em/m3em_mock.go b/src/m3em/generated/proto/m3em/m3em_mock.go deleted file mode 100644 index 24d6d67813..0000000000 --- a/src/m3em/generated/proto/m3em/m3em_mock.go +++ /dev/null @@ -1,556 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/m3db/m3/src/m3em/generated/proto/m3em (interfaces: OperatorClient,Operator_PushFileClient,Operator_PullFileClient,Operator_PullFileServer) - -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Package m3em is a generated GoMock package. -package m3em - -import ( - "context" - "reflect" - - "github.com/golang/mock/gomock" - "google.golang.org/grpc" - "google.golang.org/grpc/metadata" -) - -// MockOperatorClient is a mock of OperatorClient interface -type MockOperatorClient struct { - ctrl *gomock.Controller - recorder *MockOperatorClientMockRecorder -} - -// MockOperatorClientMockRecorder is the mock recorder for MockOperatorClient -type MockOperatorClientMockRecorder struct { - mock *MockOperatorClient -} - -// NewMockOperatorClient creates a new mock instance -func NewMockOperatorClient(ctrl *gomock.Controller) *MockOperatorClient { - mock := &MockOperatorClient{ctrl: ctrl} - mock.recorder = &MockOperatorClientMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use -func (m *MockOperatorClient) EXPECT() *MockOperatorClientMockRecorder { - return m.recorder -} - -// PullFile mocks base method -func (m *MockOperatorClient) PullFile(arg0 context.Context, arg1 *PullFileRequest, arg2 ...grpc.CallOption) (Operator_PullFileClient, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PullFile", varargs...) 
- ret0, _ := ret[0].(Operator_PullFileClient) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PullFile indicates an expected call of PullFile -func (mr *MockOperatorClientMockRecorder) PullFile(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PullFile", reflect.TypeOf((*MockOperatorClient)(nil).PullFile), varargs...) -} - -// PushFile mocks base method -func (m *MockOperatorClient) PushFile(arg0 context.Context, arg1 ...grpc.CallOption) (Operator_PushFileClient, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0} - for _, a := range arg1 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PushFile", varargs...) - ret0, _ := ret[0].(Operator_PushFileClient) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PushFile indicates an expected call of PushFile -func (mr *MockOperatorClientMockRecorder) PushFile(arg0 interface{}, arg1 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0}, arg1...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PushFile", reflect.TypeOf((*MockOperatorClient)(nil).PushFile), varargs...) -} - -// Setup mocks base method -func (m *MockOperatorClient) Setup(arg0 context.Context, arg1 *SetupRequest, arg2 ...grpc.CallOption) (*SetupResponse, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "Setup", varargs...) - ret0, _ := ret[0].(*SetupResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Setup indicates an expected call of Setup -func (mr *MockOperatorClientMockRecorder) Setup(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) 
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Setup", reflect.TypeOf((*MockOperatorClient)(nil).Setup), varargs...) -} - -// Start mocks base method -func (m *MockOperatorClient) Start(arg0 context.Context, arg1 *StartRequest, arg2 ...grpc.CallOption) (*StartResponse, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "Start", varargs...) - ret0, _ := ret[0].(*StartResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Start indicates an expected call of Start -func (mr *MockOperatorClientMockRecorder) Start(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockOperatorClient)(nil).Start), varargs...) -} - -// Stop mocks base method -func (m *MockOperatorClient) Stop(arg0 context.Context, arg1 *StopRequest, arg2 ...grpc.CallOption) (*StopResponse, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "Stop", varargs...) - ret0, _ := ret[0].(*StopResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Stop indicates an expected call of Stop -func (mr *MockOperatorClientMockRecorder) Stop(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", reflect.TypeOf((*MockOperatorClient)(nil).Stop), varargs...) 
-} - -// Teardown mocks base method -func (m *MockOperatorClient) Teardown(arg0 context.Context, arg1 *TeardownRequest, arg2 ...grpc.CallOption) (*TeardownResponse, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "Teardown", varargs...) - ret0, _ := ret[0].(*TeardownResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Teardown indicates an expected call of Teardown -func (mr *MockOperatorClientMockRecorder) Teardown(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Teardown", reflect.TypeOf((*MockOperatorClient)(nil).Teardown), varargs...) -} - -// MockOperator_PushFileClient is a mock of Operator_PushFileClient interface -type MockOperator_PushFileClient struct { - ctrl *gomock.Controller - recorder *MockOperator_PushFileClientMockRecorder -} - -// MockOperator_PushFileClientMockRecorder is the mock recorder for MockOperator_PushFileClient -type MockOperator_PushFileClientMockRecorder struct { - mock *MockOperator_PushFileClient -} - -// NewMockOperator_PushFileClient creates a new mock instance -func NewMockOperator_PushFileClient(ctrl *gomock.Controller) *MockOperator_PushFileClient { - mock := &MockOperator_PushFileClient{ctrl: ctrl} - mock.recorder = &MockOperator_PushFileClientMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use -func (m *MockOperator_PushFileClient) EXPECT() *MockOperator_PushFileClientMockRecorder { - return m.recorder -} - -// CloseAndRecv mocks base method -func (m *MockOperator_PushFileClient) CloseAndRecv() (*PushFileResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CloseAndRecv") - ret0, _ := ret[0].(*PushFileResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// CloseAndRecv indicates 
an expected call of CloseAndRecv -func (mr *MockOperator_PushFileClientMockRecorder) CloseAndRecv() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseAndRecv", reflect.TypeOf((*MockOperator_PushFileClient)(nil).CloseAndRecv)) -} - -// CloseSend mocks base method -func (m *MockOperator_PushFileClient) CloseSend() error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CloseSend") - ret0, _ := ret[0].(error) - return ret0 -} - -// CloseSend indicates an expected call of CloseSend -func (mr *MockOperator_PushFileClientMockRecorder) CloseSend() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseSend", reflect.TypeOf((*MockOperator_PushFileClient)(nil).CloseSend)) -} - -// Context mocks base method -func (m *MockOperator_PushFileClient) Context() context.Context { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Context") - ret0, _ := ret[0].(context.Context) - return ret0 -} - -// Context indicates an expected call of Context -func (mr *MockOperator_PushFileClientMockRecorder) Context() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockOperator_PushFileClient)(nil).Context)) -} - -// Header mocks base method -func (m *MockOperator_PushFileClient) Header() (metadata.MD, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Header") - ret0, _ := ret[0].(metadata.MD) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Header indicates an expected call of Header -func (mr *MockOperator_PushFileClientMockRecorder) Header() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Header", reflect.TypeOf((*MockOperator_PushFileClient)(nil).Header)) -} - -// RecvMsg mocks base method -func (m *MockOperator_PushFileClient) RecvMsg(arg0 interface{}) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RecvMsg", arg0) - ret0, _ := ret[0].(error) - return ret0 
-} - -// RecvMsg indicates an expected call of RecvMsg -func (mr *MockOperator_PushFileClientMockRecorder) RecvMsg(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockOperator_PushFileClient)(nil).RecvMsg), arg0) -} - -// Send mocks base method -func (m *MockOperator_PushFileClient) Send(arg0 *PushFileRequest) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Send", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// Send indicates an expected call of Send -func (mr *MockOperator_PushFileClientMockRecorder) Send(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockOperator_PushFileClient)(nil).Send), arg0) -} - -// SendMsg mocks base method -func (m *MockOperator_PushFileClient) SendMsg(arg0 interface{}) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SendMsg", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// SendMsg indicates an expected call of SendMsg -func (mr *MockOperator_PushFileClientMockRecorder) SendMsg(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockOperator_PushFileClient)(nil).SendMsg), arg0) -} - -// Trailer mocks base method -func (m *MockOperator_PushFileClient) Trailer() metadata.MD { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Trailer") - ret0, _ := ret[0].(metadata.MD) - return ret0 -} - -// Trailer indicates an expected call of Trailer -func (mr *MockOperator_PushFileClientMockRecorder) Trailer() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Trailer", reflect.TypeOf((*MockOperator_PushFileClient)(nil).Trailer)) -} - -// MockOperator_PullFileClient is a mock of Operator_PullFileClient interface -type MockOperator_PullFileClient struct { - ctrl *gomock.Controller - recorder 
*MockOperator_PullFileClientMockRecorder -} - -// MockOperator_PullFileClientMockRecorder is the mock recorder for MockOperator_PullFileClient -type MockOperator_PullFileClientMockRecorder struct { - mock *MockOperator_PullFileClient -} - -// NewMockOperator_PullFileClient creates a new mock instance -func NewMockOperator_PullFileClient(ctrl *gomock.Controller) *MockOperator_PullFileClient { - mock := &MockOperator_PullFileClient{ctrl: ctrl} - mock.recorder = &MockOperator_PullFileClientMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use -func (m *MockOperator_PullFileClient) EXPECT() *MockOperator_PullFileClientMockRecorder { - return m.recorder -} - -// CloseSend mocks base method -func (m *MockOperator_PullFileClient) CloseSend() error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CloseSend") - ret0, _ := ret[0].(error) - return ret0 -} - -// CloseSend indicates an expected call of CloseSend -func (mr *MockOperator_PullFileClientMockRecorder) CloseSend() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseSend", reflect.TypeOf((*MockOperator_PullFileClient)(nil).CloseSend)) -} - -// Context mocks base method -func (m *MockOperator_PullFileClient) Context() context.Context { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Context") - ret0, _ := ret[0].(context.Context) - return ret0 -} - -// Context indicates an expected call of Context -func (mr *MockOperator_PullFileClientMockRecorder) Context() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockOperator_PullFileClient)(nil).Context)) -} - -// Header mocks base method -func (m *MockOperator_PullFileClient) Header() (metadata.MD, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Header") - ret0, _ := ret[0].(metadata.MD) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Header indicates an expected call of Header 
-func (mr *MockOperator_PullFileClientMockRecorder) Header() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Header", reflect.TypeOf((*MockOperator_PullFileClient)(nil).Header)) -} - -// Recv mocks base method -func (m *MockOperator_PullFileClient) Recv() (*PullFileResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Recv") - ret0, _ := ret[0].(*PullFileResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Recv indicates an expected call of Recv -func (mr *MockOperator_PullFileClientMockRecorder) Recv() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Recv", reflect.TypeOf((*MockOperator_PullFileClient)(nil).Recv)) -} - -// RecvMsg mocks base method -func (m *MockOperator_PullFileClient) RecvMsg(arg0 interface{}) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RecvMsg", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// RecvMsg indicates an expected call of RecvMsg -func (mr *MockOperator_PullFileClientMockRecorder) RecvMsg(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockOperator_PullFileClient)(nil).RecvMsg), arg0) -} - -// SendMsg mocks base method -func (m *MockOperator_PullFileClient) SendMsg(arg0 interface{}) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SendMsg", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// SendMsg indicates an expected call of SendMsg -func (mr *MockOperator_PullFileClientMockRecorder) SendMsg(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockOperator_PullFileClient)(nil).SendMsg), arg0) -} - -// Trailer mocks base method -func (m *MockOperator_PullFileClient) Trailer() metadata.MD { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Trailer") - ret0, _ := ret[0].(metadata.MD) - return ret0 -} - -// 
Trailer indicates an expected call of Trailer -func (mr *MockOperator_PullFileClientMockRecorder) Trailer() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Trailer", reflect.TypeOf((*MockOperator_PullFileClient)(nil).Trailer)) -} - -// MockOperator_PullFileServer is a mock of Operator_PullFileServer interface -type MockOperator_PullFileServer struct { - ctrl *gomock.Controller - recorder *MockOperator_PullFileServerMockRecorder -} - -// MockOperator_PullFileServerMockRecorder is the mock recorder for MockOperator_PullFileServer -type MockOperator_PullFileServerMockRecorder struct { - mock *MockOperator_PullFileServer -} - -// NewMockOperator_PullFileServer creates a new mock instance -func NewMockOperator_PullFileServer(ctrl *gomock.Controller) *MockOperator_PullFileServer { - mock := &MockOperator_PullFileServer{ctrl: ctrl} - mock.recorder = &MockOperator_PullFileServerMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use -func (m *MockOperator_PullFileServer) EXPECT() *MockOperator_PullFileServerMockRecorder { - return m.recorder -} - -// Context mocks base method -func (m *MockOperator_PullFileServer) Context() context.Context { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Context") - ret0, _ := ret[0].(context.Context) - return ret0 -} - -// Context indicates an expected call of Context -func (mr *MockOperator_PullFileServerMockRecorder) Context() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockOperator_PullFileServer)(nil).Context)) -} - -// RecvMsg mocks base method -func (m *MockOperator_PullFileServer) RecvMsg(arg0 interface{}) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RecvMsg", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// RecvMsg indicates an expected call of RecvMsg -func (mr *MockOperator_PullFileServerMockRecorder) RecvMsg(arg0 interface{}) 
*gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockOperator_PullFileServer)(nil).RecvMsg), arg0) -} - -// Send mocks base method -func (m *MockOperator_PullFileServer) Send(arg0 *PullFileResponse) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Send", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// Send indicates an expected call of Send -func (mr *MockOperator_PullFileServerMockRecorder) Send(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockOperator_PullFileServer)(nil).Send), arg0) -} - -// SendHeader mocks base method -func (m *MockOperator_PullFileServer) SendHeader(arg0 metadata.MD) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SendHeader", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// SendHeader indicates an expected call of SendHeader -func (mr *MockOperator_PullFileServerMockRecorder) SendHeader(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendHeader", reflect.TypeOf((*MockOperator_PullFileServer)(nil).SendHeader), arg0) -} - -// SendMsg mocks base method -func (m *MockOperator_PullFileServer) SendMsg(arg0 interface{}) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SendMsg", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// SendMsg indicates an expected call of SendMsg -func (mr *MockOperator_PullFileServerMockRecorder) SendMsg(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockOperator_PullFileServer)(nil).SendMsg), arg0) -} - -// SetHeader mocks base method -func (m *MockOperator_PullFileServer) SetHeader(arg0 metadata.MD) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetHeader", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// SetHeader indicates an expected 
call of SetHeader -func (mr *MockOperator_PullFileServerMockRecorder) SetHeader(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHeader", reflect.TypeOf((*MockOperator_PullFileServer)(nil).SetHeader), arg0) -} - -// SetTrailer mocks base method -func (m *MockOperator_PullFileServer) SetTrailer(arg0 metadata.MD) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetTrailer", arg0) -} - -// SetTrailer indicates an expected call of SetTrailer -func (mr *MockOperator_PullFileServerMockRecorder) SetTrailer(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTrailer", reflect.TypeOf((*MockOperator_PullFileServer)(nil).SetTrailer), arg0) -} From 33a7bf47d54fe17a0922d2b256a8a6f3dce98ba2 Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Mon, 18 Jan 2021 11:13:25 -0500 Subject: [PATCH 22/80] Removing endpoint to instead use etcd directly 2 --- src/dbnode/generated/thrift/rpc/rpc.go | 718 +++---------------- src/dbnode/generated/thrift/rpc/rpc_mock.go | 669 ----------------- src/dbnode/generated/thrift/rpc/tchan-rpc.go | 50 -- 3 files changed, 89 insertions(+), 1348 deletions(-) delete mode 100644 src/dbnode/generated/thrift/rpc/rpc_mock.go diff --git a/src/dbnode/generated/thrift/rpc/rpc.go b/src/dbnode/generated/thrift/rpc/rpc.go index 5bd6ea9c2d..ea8421cec3 100644 --- a/src/dbnode/generated/thrift/rpc/rpc.go +++ b/src/dbnode/generated/thrift/rpc/rpc.go @@ -28,7 +28,6 @@ import ( "database/sql/driver" "errors" "fmt" - "github.com/apache/thrift/lib/go/thrift" ) @@ -9467,156 +9466,6 @@ func (p *NodeSetWriteNewSeriesLimitPerShardPerSecondRequest) String() string { return fmt.Sprintf("NodeSetWriteNewSeriesLimitPerShardPerSecondRequest(%+v)", *p) } -// Attributes: -// - DocsLimitOverride -// - BytesReadLimitOverride -type NodeSetQueryLimitOverridesRequest struct { - DocsLimitOverride *int64 `thrift:"docsLimitOverride,1" db:"docsLimitOverride" 
json:"docsLimitOverride,omitempty"` - BytesReadLimitOverride *int64 `thrift:"bytesReadLimitOverride,2" db:"bytesReadLimitOverride" json:"bytesReadLimitOverride,omitempty"` -} - -func NewNodeSetQueryLimitOverridesRequest() *NodeSetQueryLimitOverridesRequest { - return &NodeSetQueryLimitOverridesRequest{} -} - -var NodeSetQueryLimitOverridesRequest_DocsLimitOverride_DEFAULT int64 - -func (p *NodeSetQueryLimitOverridesRequest) GetDocsLimitOverride() int64 { - if !p.IsSetDocsLimitOverride() { - return NodeSetQueryLimitOverridesRequest_DocsLimitOverride_DEFAULT - } - return *p.DocsLimitOverride -} - -var NodeSetQueryLimitOverridesRequest_BytesReadLimitOverride_DEFAULT int64 - -func (p *NodeSetQueryLimitOverridesRequest) GetBytesReadLimitOverride() int64 { - if !p.IsSetBytesReadLimitOverride() { - return NodeSetQueryLimitOverridesRequest_BytesReadLimitOverride_DEFAULT - } - return *p.BytesReadLimitOverride -} -func (p *NodeSetQueryLimitOverridesRequest) IsSetDocsLimitOverride() bool { - return p.DocsLimitOverride != nil -} - -func (p *NodeSetQueryLimitOverridesRequest) IsSetBytesReadLimitOverride() bool { - return p.BytesReadLimitOverride != nil -} - -func (p *NodeSetQueryLimitOverridesRequest) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.ReadField1(iprot); err != nil { - return err - } - case 2: - if err := p.ReadField2(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return 
thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *NodeSetQueryLimitOverridesRequest) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.DocsLimitOverride = &v - } - return nil -} - -func (p *NodeSetQueryLimitOverridesRequest) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.BytesReadLimitOverride = &v - } - return nil -} - -func (p *NodeSetQueryLimitOverridesRequest) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("NodeSetQueryLimitOverridesRequest"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *NodeSetQueryLimitOverridesRequest) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetDocsLimitOverride() { - if err := oprot.WriteFieldBegin("docsLimitOverride", thrift.I64, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:docsLimitOverride: ", p), err) - } - if err := oprot.WriteI64(int64(*p.DocsLimitOverride)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.docsLimitOverride (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:docsLimitOverride: ", p), err) - } - } - return err -} - -func (p *NodeSetQueryLimitOverridesRequest) writeField2(oprot 
thrift.TProtocol) (err error) { - if p.IsSetBytesReadLimitOverride() { - if err := oprot.WriteFieldBegin("bytesReadLimitOverride", thrift.I64, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:bytesReadLimitOverride: ", p), err) - } - if err := oprot.WriteI64(int64(*p.BytesReadLimitOverride)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.bytesReadLimitOverride (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:bytesReadLimitOverride: ", p), err) - } - } - return err -} - -func (p *NodeSetQueryLimitOverridesRequest) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("NodeSetQueryLimitOverridesRequest(%+v)", *p) -} - // Attributes: // - Ok // - Status @@ -14760,9 +14609,6 @@ type Node interface { SetWriteNewSeriesLimitPerShardPerSecond(req *NodeSetWriteNewSeriesLimitPerShardPerSecondRequest) (r *NodeWriteNewSeriesLimitPerShardPerSecondResult_, err error) // Parameters: // - Req - SetQueryLimitOverrides(req *NodeSetQueryLimitOverridesRequest) (r *NodeQueryLimitOverridesResult_, err error) - // Parameters: - // - Req DebugProfileStart(req *DebugProfileStartRequest) (r *DebugProfileStartResult_, err error) // Parameters: // - Req @@ -17108,87 +16954,6 @@ func (p *NodeClient) recvSetWriteNewSeriesLimitPerShardPerSecond() (value *NodeW return } -// Parameters: -// - Req -func (p *NodeClient) SetQueryLimitOverrides(req *NodeSetQueryLimitOverridesRequest) (r *NodeQueryLimitOverridesResult_, err error) { - if err = p.sendSetQueryLimitOverrides(req); err != nil { - return - } - return p.recvSetQueryLimitOverrides() -} - -func (p *NodeClient) sendSetQueryLimitOverrides(req *NodeSetQueryLimitOverridesRequest) (err error) { - oprot := p.OutputProtocol - if oprot == nil { - oprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.OutputProtocol = oprot - } - p.SeqId++ - if err = 
oprot.WriteMessageBegin("setQueryLimitOverrides", thrift.CALL, p.SeqId); err != nil { - return - } - args := NodeSetQueryLimitOverridesArgs{ - Req: req, - } - if err = args.Write(oprot); err != nil { - return - } - if err = oprot.WriteMessageEnd(); err != nil { - return - } - return oprot.Flush() -} - -func (p *NodeClient) recvSetQueryLimitOverrides() (value *NodeQueryLimitOverridesResult_, err error) { - iprot := p.InputProtocol - if iprot == nil { - iprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.InputProtocol = iprot - } - method, mTypeId, seqId, err := iprot.ReadMessageBegin() - if err != nil { - return - } - if method != "setQueryLimitOverrides" { - err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "setQueryLimitOverrides failed: wrong method name") - return - } - if p.SeqId != seqId { - err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "setQueryLimitOverrides failed: out of sequence response") - return - } - if mTypeId == thrift.EXCEPTION { - error93 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") - var error94 error - error94, err = error93.Read(iprot) - if err != nil { - return - } - if err = iprot.ReadMessageEnd(); err != nil { - return - } - err = error94 - return - } - if mTypeId != thrift.REPLY { - err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "setQueryLimitOverrides failed: invalid message type") - return - } - result := NodeSetQueryLimitOverridesResult{} - if err = result.Read(iprot); err != nil { - return - } - if err = iprot.ReadMessageEnd(); err != nil { - return - } - if result.Err != nil { - err = result.Err - return - } - value = result.GetSuccess() - return -} - // Parameters: // - Req func (p *NodeClient) DebugProfileStart(req *DebugProfileStartRequest) (r *DebugProfileStartResult_, err error) { @@ -17239,16 +17004,16 @@ func (p *NodeClient) recvDebugProfileStart() (value *DebugProfileStartResult_, e return } if mTypeId == 
thrift.EXCEPTION { - error95 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") - var error96 error - error96, err = error95.Read(iprot) + error93 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") + var error94 error + error94, err = error93.Read(iprot) if err != nil { return } if err = iprot.ReadMessageEnd(); err != nil { return } - err = error96 + err = error94 return } if mTypeId != thrift.REPLY { @@ -17320,16 +17085,16 @@ func (p *NodeClient) recvDebugProfileStop() (value *DebugProfileStopResult_, err return } if mTypeId == thrift.EXCEPTION { - error97 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") - var error98 error - error98, err = error97.Read(iprot) + error95 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") + var error96 error + error96, err = error95.Read(iprot) if err != nil { return } if err = iprot.ReadMessageEnd(); err != nil { return } - err = error98 + err = error96 return } if mTypeId != thrift.REPLY { @@ -17401,16 +17166,16 @@ func (p *NodeClient) recvDebugIndexMemorySegments() (value *DebugIndexMemorySegm return } if mTypeId == thrift.EXCEPTION { - error99 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") - var error100 error - error100, err = error99.Read(iprot) + error97 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") + var error98 error + error98, err = error97.Read(iprot) if err != nil { return } if err = iprot.ReadMessageEnd(); err != nil { return } - err = error100 + err = error98 return } if mTypeId != thrift.REPLY { @@ -17452,41 +17217,40 @@ func (p *NodeProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { func NewNodeProcessor(handler Node) *NodeProcessor { - self101 := &NodeProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} - 
self101.processorMap["query"] = &nodeProcessorQuery{handler: handler} - self101.processorMap["aggregate"] = &nodeProcessorAggregate{handler: handler} - self101.processorMap["fetch"] = &nodeProcessorFetch{handler: handler} - self101.processorMap["write"] = &nodeProcessorWrite{handler: handler} - self101.processorMap["writeTagged"] = &nodeProcessorWriteTagged{handler: handler} - self101.processorMap["aggregateRaw"] = &nodeProcessorAggregateRaw{handler: handler} - self101.processorMap["fetchBatchRaw"] = &nodeProcessorFetchBatchRaw{handler: handler} - self101.processorMap["fetchBatchRawV2"] = &nodeProcessorFetchBatchRawV2{handler: handler} - self101.processorMap["fetchBlocksRaw"] = &nodeProcessorFetchBlocksRaw{handler: handler} - self101.processorMap["fetchTagged"] = &nodeProcessorFetchTagged{handler: handler} - self101.processorMap["fetchBlocksMetadataRawV2"] = &nodeProcessorFetchBlocksMetadataRawV2{handler: handler} - self101.processorMap["writeBatchRaw"] = &nodeProcessorWriteBatchRaw{handler: handler} - self101.processorMap["writeBatchRawV2"] = &nodeProcessorWriteBatchRawV2{handler: handler} - self101.processorMap["writeTaggedBatchRaw"] = &nodeProcessorWriteTaggedBatchRaw{handler: handler} - self101.processorMap["writeTaggedBatchRawV2"] = &nodeProcessorWriteTaggedBatchRawV2{handler: handler} - self101.processorMap["repair"] = &nodeProcessorRepair{handler: handler} - self101.processorMap["truncate"] = &nodeProcessorTruncate{handler: handler} - self101.processorMap["aggregateTiles"] = &nodeProcessorAggregateTiles{handler: handler} - self101.processorMap["health"] = &nodeProcessorHealth{handler: handler} - self101.processorMap["bootstrapped"] = &nodeProcessorBootstrapped{handler: handler} - self101.processorMap["bootstrappedInPlacementOrNoPlacement"] = &nodeProcessorBootstrappedInPlacementOrNoPlacement{handler: handler} - self101.processorMap["getPersistRateLimit"] = &nodeProcessorGetPersistRateLimit{handler: handler} - self101.processorMap["setPersistRateLimit"] = 
&nodeProcessorSetPersistRateLimit{handler: handler} - self101.processorMap["getWriteNewSeriesAsync"] = &nodeProcessorGetWriteNewSeriesAsync{handler: handler} - self101.processorMap["setWriteNewSeriesAsync"] = &nodeProcessorSetWriteNewSeriesAsync{handler: handler} - self101.processorMap["getWriteNewSeriesBackoffDuration"] = &nodeProcessorGetWriteNewSeriesBackoffDuration{handler: handler} - self101.processorMap["setWriteNewSeriesBackoffDuration"] = &nodeProcessorSetWriteNewSeriesBackoffDuration{handler: handler} - self101.processorMap["getWriteNewSeriesLimitPerShardPerSecond"] = &nodeProcessorGetWriteNewSeriesLimitPerShardPerSecond{handler: handler} - self101.processorMap["setWriteNewSeriesLimitPerShardPerSecond"] = &nodeProcessorSetWriteNewSeriesLimitPerShardPerSecond{handler: handler} - self101.processorMap["setQueryLimitOverrides"] = &nodeProcessorSetQueryLimitOverrides{handler: handler} - self101.processorMap["debugProfileStart"] = &nodeProcessorDebugProfileStart{handler: handler} - self101.processorMap["debugProfileStop"] = &nodeProcessorDebugProfileStop{handler: handler} - self101.processorMap["debugIndexMemorySegments"] = &nodeProcessorDebugIndexMemorySegments{handler: handler} - return self101 + self99 := &NodeProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} + self99.processorMap["query"] = &nodeProcessorQuery{handler: handler} + self99.processorMap["aggregate"] = &nodeProcessorAggregate{handler: handler} + self99.processorMap["fetch"] = &nodeProcessorFetch{handler: handler} + self99.processorMap["write"] = &nodeProcessorWrite{handler: handler} + self99.processorMap["writeTagged"] = &nodeProcessorWriteTagged{handler: handler} + self99.processorMap["aggregateRaw"] = &nodeProcessorAggregateRaw{handler: handler} + self99.processorMap["fetchBatchRaw"] = &nodeProcessorFetchBatchRaw{handler: handler} + self99.processorMap["fetchBatchRawV2"] = &nodeProcessorFetchBatchRawV2{handler: handler} + self99.processorMap["fetchBlocksRaw"] 
= &nodeProcessorFetchBlocksRaw{handler: handler} + self99.processorMap["fetchTagged"] = &nodeProcessorFetchTagged{handler: handler} + self99.processorMap["fetchBlocksMetadataRawV2"] = &nodeProcessorFetchBlocksMetadataRawV2{handler: handler} + self99.processorMap["writeBatchRaw"] = &nodeProcessorWriteBatchRaw{handler: handler} + self99.processorMap["writeBatchRawV2"] = &nodeProcessorWriteBatchRawV2{handler: handler} + self99.processorMap["writeTaggedBatchRaw"] = &nodeProcessorWriteTaggedBatchRaw{handler: handler} + self99.processorMap["writeTaggedBatchRawV2"] = &nodeProcessorWriteTaggedBatchRawV2{handler: handler} + self99.processorMap["repair"] = &nodeProcessorRepair{handler: handler} + self99.processorMap["truncate"] = &nodeProcessorTruncate{handler: handler} + self99.processorMap["aggregateTiles"] = &nodeProcessorAggregateTiles{handler: handler} + self99.processorMap["health"] = &nodeProcessorHealth{handler: handler} + self99.processorMap["bootstrapped"] = &nodeProcessorBootstrapped{handler: handler} + self99.processorMap["bootstrappedInPlacementOrNoPlacement"] = &nodeProcessorBootstrappedInPlacementOrNoPlacement{handler: handler} + self99.processorMap["getPersistRateLimit"] = &nodeProcessorGetPersistRateLimit{handler: handler} + self99.processorMap["setPersistRateLimit"] = &nodeProcessorSetPersistRateLimit{handler: handler} + self99.processorMap["getWriteNewSeriesAsync"] = &nodeProcessorGetWriteNewSeriesAsync{handler: handler} + self99.processorMap["setWriteNewSeriesAsync"] = &nodeProcessorSetWriteNewSeriesAsync{handler: handler} + self99.processorMap["getWriteNewSeriesBackoffDuration"] = &nodeProcessorGetWriteNewSeriesBackoffDuration{handler: handler} + self99.processorMap["setWriteNewSeriesBackoffDuration"] = &nodeProcessorSetWriteNewSeriesBackoffDuration{handler: handler} + self99.processorMap["getWriteNewSeriesLimitPerShardPerSecond"] = &nodeProcessorGetWriteNewSeriesLimitPerShardPerSecond{handler: handler} + 
self99.processorMap["setWriteNewSeriesLimitPerShardPerSecond"] = &nodeProcessorSetWriteNewSeriesLimitPerShardPerSecond{handler: handler} + self99.processorMap["debugProfileStart"] = &nodeProcessorDebugProfileStart{handler: handler} + self99.processorMap["debugProfileStop"] = &nodeProcessorDebugProfileStop{handler: handler} + self99.processorMap["debugIndexMemorySegments"] = &nodeProcessorDebugIndexMemorySegments{handler: handler} + return self99 } func (p *NodeProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { @@ -17499,12 +17263,12 @@ func (p *NodeProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, er } iprot.Skip(thrift.STRUCT) iprot.ReadMessageEnd() - x102 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) + x100 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId) - x102.Write(oprot) + x100.Write(oprot) oprot.WriteMessageEnd() oprot.Flush() - return false, x102 + return false, x100 } @@ -19024,59 +18788,6 @@ func (p *nodeProcessorSetWriteNewSeriesLimitPerShardPerSecond) Process(seqId int return true, err } -type nodeProcessorSetQueryLimitOverrides struct { - handler Node -} - -func (p *nodeProcessorSetQueryLimitOverrides) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := NodeSetQueryLimitOverridesArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("setQueryLimitOverrides", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return false, err - } - - iprot.ReadMessageEnd() - result := NodeSetQueryLimitOverridesResult{} - var retval *NodeQueryLimitOverridesResult_ - var err2 error - if retval, err2 = p.handler.SetQueryLimitOverrides(args.Req); err2 != nil { - switch v := err2.(type) { - 
case *Error: - result.Err = v - default: - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing setQueryLimitOverrides: "+err2.Error()) - oprot.WriteMessageBegin("setQueryLimitOverrides", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return true, err2 - } - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("setQueryLimitOverrides", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(); err == nil && err2 != nil { - err = err2 - } - if err != nil { - return - } - return true, err -} - type nodeProcessorDebugProfileStart struct { handler Node } @@ -25852,257 +25563,6 @@ func (p *NodeSetWriteNewSeriesLimitPerShardPerSecondResult) String() string { return fmt.Sprintf("NodeSetWriteNewSeriesLimitPerShardPerSecondResult(%+v)", *p) } -// Attributes: -// - Req -type NodeSetQueryLimitOverridesArgs struct { - Req *NodeSetQueryLimitOverridesRequest `thrift:"req,1" db:"req" json:"req"` -} - -func NewNodeSetQueryLimitOverridesArgs() *NodeSetQueryLimitOverridesArgs { - return &NodeSetQueryLimitOverridesArgs{} -} - -var NodeSetQueryLimitOverridesArgs_Req_DEFAULT *NodeSetQueryLimitOverridesRequest - -func (p *NodeSetQueryLimitOverridesArgs) GetReq() *NodeSetQueryLimitOverridesRequest { - if !p.IsSetReq() { - return NodeSetQueryLimitOverridesArgs_Req_DEFAULT - } - return p.Req -} -func (p *NodeSetQueryLimitOverridesArgs) IsSetReq() bool { - return p.Req != nil -} - -func (p *NodeSetQueryLimitOverridesArgs) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d 
read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.ReadField1(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *NodeSetQueryLimitOverridesArgs) ReadField1(iprot thrift.TProtocol) error { - p.Req = &NodeSetQueryLimitOverridesRequest{} - if err := p.Req.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) - } - return nil -} - -func (p *NodeSetQueryLimitOverridesArgs) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("setQueryLimitOverrides_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *NodeSetQueryLimitOverridesArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) - } - if err := p.Req.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) - } - return err -} - -func (p *NodeSetQueryLimitOverridesArgs) String() string { - if p == nil { - return "" - } - return 
fmt.Sprintf("NodeSetQueryLimitOverridesArgs(%+v)", *p) -} - -// Attributes: -// - Success -// - Err -type NodeSetQueryLimitOverridesResult struct { - Success *NodeQueryLimitOverridesResult_ `thrift:"success,0" db:"success" json:"success,omitempty"` - Err *Error `thrift:"err,1" db:"err" json:"err,omitempty"` -} - -func NewNodeSetQueryLimitOverridesResult() *NodeSetQueryLimitOverridesResult { - return &NodeSetQueryLimitOverridesResult{} -} - -var NodeSetQueryLimitOverridesResult_Success_DEFAULT *NodeQueryLimitOverridesResult_ - -func (p *NodeSetQueryLimitOverridesResult) GetSuccess() *NodeQueryLimitOverridesResult_ { - if !p.IsSetSuccess() { - return NodeSetQueryLimitOverridesResult_Success_DEFAULT - } - return p.Success -} - -var NodeSetQueryLimitOverridesResult_Err_DEFAULT *Error - -func (p *NodeSetQueryLimitOverridesResult) GetErr() *Error { - if !p.IsSetErr() { - return NodeSetQueryLimitOverridesResult_Err_DEFAULT - } - return p.Err -} -func (p *NodeSetQueryLimitOverridesResult) IsSetSuccess() bool { - return p.Success != nil -} - -func (p *NodeSetQueryLimitOverridesResult) IsSetErr() bool { - return p.Err != nil -} - -func (p *NodeSetQueryLimitOverridesResult) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 0: - if err := p.ReadField0(iprot); err != nil { - return err - } - case 1: - if err := p.ReadField1(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: 
", p), err) - } - return nil -} - -func (p *NodeSetQueryLimitOverridesResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = &NodeQueryLimitOverridesResult_{} - if err := p.Success.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) - } - return nil -} - -func (p *NodeSetQueryLimitOverridesResult) ReadField1(iprot thrift.TProtocol) error { - p.Err = &Error{ - Type: 0, - } - if err := p.Err.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) - } - return nil -} - -func (p *NodeSetQueryLimitOverridesResult) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("setQueryLimitOverrides_result"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField0(oprot); err != nil { - return err - } - if err := p.writeField1(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *NodeSetQueryLimitOverridesResult) writeField0(oprot thrift.TProtocol) (err error) { - if p.IsSetSuccess() { - if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) - } - if err := p.Success.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) - } - } - return err -} - -func (p *NodeSetQueryLimitOverridesResult) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetErr() { - if err := oprot.WriteFieldBegin("err", 
thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) - } - if err := p.Err.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) - } - } - return err -} - -func (p *NodeSetQueryLimitOverridesResult) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("NodeSetQueryLimitOverridesResult(%+v)", *p) -} - // Attributes: // - Req type NodeDebugProfileStartArgs struct { @@ -26950,16 +26410,16 @@ func (p *ClusterClient) recvHealth() (value *HealthResult_, err error) { return } if mTypeId == thrift.EXCEPTION { - error253 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") - var error254 error - error254, err = error253.Read(iprot) + error245 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") + var error246 error + error246, err = error245.Read(iprot) if err != nil { return } if err = iprot.ReadMessageEnd(); err != nil { return } - err = error254 + err = error246 return } if mTypeId != thrift.REPLY { @@ -27031,16 +26491,16 @@ func (p *ClusterClient) recvWrite() (err error) { return } if mTypeId == thrift.EXCEPTION { - error255 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") - var error256 error - error256, err = error255.Read(iprot) + error247 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") + var error248 error + error248, err = error247.Read(iprot) if err != nil { return } if err = iprot.ReadMessageEnd(); err != nil { return } - err = error256 + err = error248 return } if mTypeId != thrift.REPLY { @@ -27111,16 +26571,16 @@ func (p *ClusterClient) recvWriteTagged() (err error) { return } if mTypeId == thrift.EXCEPTION { - 
error257 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") - var error258 error - error258, err = error257.Read(iprot) + error249 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") + var error250 error + error250, err = error249.Read(iprot) if err != nil { return } if err = iprot.ReadMessageEnd(); err != nil { return } - err = error258 + err = error250 return } if mTypeId != thrift.REPLY { @@ -27191,16 +26651,16 @@ func (p *ClusterClient) recvQuery() (value *QueryResult_, err error) { return } if mTypeId == thrift.EXCEPTION { - error259 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") - var error260 error - error260, err = error259.Read(iprot) + error251 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") + var error252 error + error252, err = error251.Read(iprot) if err != nil { return } if err = iprot.ReadMessageEnd(); err != nil { return } - err = error260 + err = error252 return } if mTypeId != thrift.REPLY { @@ -27272,16 +26732,16 @@ func (p *ClusterClient) recvAggregate() (value *AggregateQueryResult_, err error return } if mTypeId == thrift.EXCEPTION { - error261 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") - var error262 error - error262, err = error261.Read(iprot) + error253 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") + var error254 error + error254, err = error253.Read(iprot) if err != nil { return } if err = iprot.ReadMessageEnd(); err != nil { return } - err = error262 + err = error254 return } if mTypeId != thrift.REPLY { @@ -27353,16 +26813,16 @@ func (p *ClusterClient) recvFetch() (value *FetchResult_, err error) { return } if mTypeId == thrift.EXCEPTION { - error263 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") - var error264 error - 
error264, err = error263.Read(iprot) + error255 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") + var error256 error + error256, err = error255.Read(iprot) if err != nil { return } if err = iprot.ReadMessageEnd(); err != nil { return } - err = error264 + err = error256 return } if mTypeId != thrift.REPLY { @@ -27434,16 +26894,16 @@ func (p *ClusterClient) recvTruncate() (value *TruncateResult_, err error) { return } if mTypeId == thrift.EXCEPTION { - error265 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") - var error266 error - error266, err = error265.Read(iprot) + error257 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") + var error258 error + error258, err = error257.Read(iprot) if err != nil { return } if err = iprot.ReadMessageEnd(); err != nil { return } - err = error266 + err = error258 return } if mTypeId != thrift.REPLY { @@ -27485,15 +26945,15 @@ func (p *ClusterProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { func NewClusterProcessor(handler Cluster) *ClusterProcessor { - self267 := &ClusterProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} - self267.processorMap["health"] = &clusterProcessorHealth{handler: handler} - self267.processorMap["write"] = &clusterProcessorWrite{handler: handler} - self267.processorMap["writeTagged"] = &clusterProcessorWriteTagged{handler: handler} - self267.processorMap["query"] = &clusterProcessorQuery{handler: handler} - self267.processorMap["aggregate"] = &clusterProcessorAggregate{handler: handler} - self267.processorMap["fetch"] = &clusterProcessorFetch{handler: handler} - self267.processorMap["truncate"] = &clusterProcessorTruncate{handler: handler} - return self267 + self259 := &ClusterProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} + self259.processorMap["health"] = 
&clusterProcessorHealth{handler: handler} + self259.processorMap["write"] = &clusterProcessorWrite{handler: handler} + self259.processorMap["writeTagged"] = &clusterProcessorWriteTagged{handler: handler} + self259.processorMap["query"] = &clusterProcessorQuery{handler: handler} + self259.processorMap["aggregate"] = &clusterProcessorAggregate{handler: handler} + self259.processorMap["fetch"] = &clusterProcessorFetch{handler: handler} + self259.processorMap["truncate"] = &clusterProcessorTruncate{handler: handler} + return self259 } func (p *ClusterProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { @@ -27506,12 +26966,12 @@ func (p *ClusterProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, } iprot.Skip(thrift.STRUCT) iprot.ReadMessageEnd() - x268 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) + x260 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId) - x268.Write(oprot) + x260.Write(oprot) oprot.WriteMessageEnd() oprot.Flush() - return false, x268 + return false, x260 } diff --git a/src/dbnode/generated/thrift/rpc/rpc_mock.go b/src/dbnode/generated/thrift/rpc/rpc_mock.go deleted file mode 100644 index 3074161f26..0000000000 --- a/src/dbnode/generated/thrift/rpc/rpc_mock.go +++ /dev/null @@ -1,669 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/m3db/m3/src/dbnode/generated/thrift/rpc/tchan-go - -// Copyright (c) 2021 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Package rpc is a generated GoMock package. 
-package rpc - -import ( - "reflect" - - "github.com/golang/mock/gomock" - "github.com/uber/tchannel-go/thrift" -) - -// MockTChanCluster is a mock of TChanCluster interface -type MockTChanCluster struct { - ctrl *gomock.Controller - recorder *MockTChanClusterMockRecorder -} - -// MockTChanClusterMockRecorder is the mock recorder for MockTChanCluster -type MockTChanClusterMockRecorder struct { - mock *MockTChanCluster -} - -// NewMockTChanCluster creates a new mock instance -func NewMockTChanCluster(ctrl *gomock.Controller) *MockTChanCluster { - mock := &MockTChanCluster{ctrl: ctrl} - mock.recorder = &MockTChanClusterMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use -func (m *MockTChanCluster) EXPECT() *MockTChanClusterMockRecorder { - return m.recorder -} - -// Aggregate mocks base method -func (m *MockTChanCluster) Aggregate(ctx thrift.Context, req *AggregateQueryRequest) (*AggregateQueryResult_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Aggregate", ctx, req) - ret0, _ := ret[0].(*AggregateQueryResult_) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Aggregate indicates an expected call of Aggregate -func (mr *MockTChanClusterMockRecorder) Aggregate(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Aggregate", reflect.TypeOf((*MockTChanCluster)(nil).Aggregate), ctx, req) -} - -// Fetch mocks base method -func (m *MockTChanCluster) Fetch(ctx thrift.Context, req *FetchRequest) (*FetchResult_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Fetch", ctx, req) - ret0, _ := ret[0].(*FetchResult_) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Fetch indicates an expected call of Fetch -func (mr *MockTChanClusterMockRecorder) Fetch(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Fetch", 
reflect.TypeOf((*MockTChanCluster)(nil).Fetch), ctx, req) -} - -// Health mocks base method -func (m *MockTChanCluster) Health(ctx thrift.Context) (*HealthResult_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Health", ctx) - ret0, _ := ret[0].(*HealthResult_) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Health indicates an expected call of Health -func (mr *MockTChanClusterMockRecorder) Health(ctx interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Health", reflect.TypeOf((*MockTChanCluster)(nil).Health), ctx) -} - -// Query mocks base method -func (m *MockTChanCluster) Query(ctx thrift.Context, req *QueryRequest) (*QueryResult_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Query", ctx, req) - ret0, _ := ret[0].(*QueryResult_) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Query indicates an expected call of Query -func (mr *MockTChanClusterMockRecorder) Query(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Query", reflect.TypeOf((*MockTChanCluster)(nil).Query), ctx, req) -} - -// Truncate mocks base method -func (m *MockTChanCluster) Truncate(ctx thrift.Context, req *TruncateRequest) (*TruncateResult_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Truncate", ctx, req) - ret0, _ := ret[0].(*TruncateResult_) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Truncate indicates an expected call of Truncate -func (mr *MockTChanClusterMockRecorder) Truncate(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Truncate", reflect.TypeOf((*MockTChanCluster)(nil).Truncate), ctx, req) -} - -// Write mocks base method -func (m *MockTChanCluster) Write(ctx thrift.Context, req *WriteRequest) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Write", ctx, req) - ret0, _ := ret[0].(error) - return ret0 -} - -// Write indicates an 
expected call of Write -func (mr *MockTChanClusterMockRecorder) Write(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Write", reflect.TypeOf((*MockTChanCluster)(nil).Write), ctx, req) -} - -// WriteTagged mocks base method -func (m *MockTChanCluster) WriteTagged(ctx thrift.Context, req *WriteTaggedRequest) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WriteTagged", ctx, req) - ret0, _ := ret[0].(error) - return ret0 -} - -// WriteTagged indicates an expected call of WriteTagged -func (mr *MockTChanClusterMockRecorder) WriteTagged(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteTagged", reflect.TypeOf((*MockTChanCluster)(nil).WriteTagged), ctx, req) -} - -// MockTChanNode is a mock of TChanNode interface -type MockTChanNode struct { - ctrl *gomock.Controller - recorder *MockTChanNodeMockRecorder -} - -// MockTChanNodeMockRecorder is the mock recorder for MockTChanNode -type MockTChanNodeMockRecorder struct { - mock *MockTChanNode -} - -// NewMockTChanNode creates a new mock instance -func NewMockTChanNode(ctrl *gomock.Controller) *MockTChanNode { - mock := &MockTChanNode{ctrl: ctrl} - mock.recorder = &MockTChanNodeMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use -func (m *MockTChanNode) EXPECT() *MockTChanNodeMockRecorder { - return m.recorder -} - -// Aggregate mocks base method -func (m *MockTChanNode) Aggregate(ctx thrift.Context, req *AggregateQueryRequest) (*AggregateQueryResult_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Aggregate", ctx, req) - ret0, _ := ret[0].(*AggregateQueryResult_) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Aggregate indicates an expected call of Aggregate -func (mr *MockTChanNodeMockRecorder) Aggregate(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Aggregate", reflect.TypeOf((*MockTChanNode)(nil).Aggregate), ctx, req) -} - -// AggregateRaw mocks base method -func (m *MockTChanNode) AggregateRaw(ctx thrift.Context, req *AggregateQueryRawRequest) (*AggregateQueryRawResult_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AggregateRaw", ctx, req) - ret0, _ := ret[0].(*AggregateQueryRawResult_) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// AggregateRaw indicates an expected call of AggregateRaw -func (mr *MockTChanNodeMockRecorder) AggregateRaw(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AggregateRaw", reflect.TypeOf((*MockTChanNode)(nil).AggregateRaw), ctx, req) -} - -// AggregateTiles mocks base method -func (m *MockTChanNode) AggregateTiles(ctx thrift.Context, req *AggregateTilesRequest) (*AggregateTilesResult_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AggregateTiles", ctx, req) - ret0, _ := ret[0].(*AggregateTilesResult_) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// AggregateTiles indicates an expected call of AggregateTiles -func (mr *MockTChanNodeMockRecorder) AggregateTiles(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AggregateTiles", reflect.TypeOf((*MockTChanNode)(nil).AggregateTiles), ctx, req) -} - -// Bootstrapped mocks base method -func (m *MockTChanNode) Bootstrapped(ctx thrift.Context) (*NodeBootstrappedResult_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Bootstrapped", ctx) - ret0, _ := ret[0].(*NodeBootstrappedResult_) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Bootstrapped indicates an expected call of Bootstrapped -func (mr *MockTChanNodeMockRecorder) Bootstrapped(ctx interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Bootstrapped", 
reflect.TypeOf((*MockTChanNode)(nil).Bootstrapped), ctx) -} - -// BootstrappedInPlacementOrNoPlacement mocks base method -func (m *MockTChanNode) BootstrappedInPlacementOrNoPlacement(ctx thrift.Context) (*NodeBootstrappedInPlacementOrNoPlacementResult_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "BootstrappedInPlacementOrNoPlacement", ctx) - ret0, _ := ret[0].(*NodeBootstrappedInPlacementOrNoPlacementResult_) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// BootstrappedInPlacementOrNoPlacement indicates an expected call of BootstrappedInPlacementOrNoPlacement -func (mr *MockTChanNodeMockRecorder) BootstrappedInPlacementOrNoPlacement(ctx interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BootstrappedInPlacementOrNoPlacement", reflect.TypeOf((*MockTChanNode)(nil).BootstrappedInPlacementOrNoPlacement), ctx) -} - -// DebugIndexMemorySegments mocks base method -func (m *MockTChanNode) DebugIndexMemorySegments(ctx thrift.Context, req *DebugIndexMemorySegmentsRequest) (*DebugIndexMemorySegmentsResult_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DebugIndexMemorySegments", ctx, req) - ret0, _ := ret[0].(*DebugIndexMemorySegmentsResult_) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DebugIndexMemorySegments indicates an expected call of DebugIndexMemorySegments -func (mr *MockTChanNodeMockRecorder) DebugIndexMemorySegments(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DebugIndexMemorySegments", reflect.TypeOf((*MockTChanNode)(nil).DebugIndexMemorySegments), ctx, req) -} - -// DebugProfileStart mocks base method -func (m *MockTChanNode) DebugProfileStart(ctx thrift.Context, req *DebugProfileStartRequest) (*DebugProfileStartResult_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DebugProfileStart", ctx, req) - ret0, _ := ret[0].(*DebugProfileStartResult_) - ret1, _ := ret[1].(error) - return ret0, 
ret1 -} - -// DebugProfileStart indicates an expected call of DebugProfileStart -func (mr *MockTChanNodeMockRecorder) DebugProfileStart(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DebugProfileStart", reflect.TypeOf((*MockTChanNode)(nil).DebugProfileStart), ctx, req) -} - -// DebugProfileStop mocks base method -func (m *MockTChanNode) DebugProfileStop(ctx thrift.Context, req *DebugProfileStopRequest) (*DebugProfileStopResult_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DebugProfileStop", ctx, req) - ret0, _ := ret[0].(*DebugProfileStopResult_) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DebugProfileStop indicates an expected call of DebugProfileStop -func (mr *MockTChanNodeMockRecorder) DebugProfileStop(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DebugProfileStop", reflect.TypeOf((*MockTChanNode)(nil).DebugProfileStop), ctx, req) -} - -// Fetch mocks base method -func (m *MockTChanNode) Fetch(ctx thrift.Context, req *FetchRequest) (*FetchResult_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Fetch", ctx, req) - ret0, _ := ret[0].(*FetchResult_) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Fetch indicates an expected call of Fetch -func (mr *MockTChanNodeMockRecorder) Fetch(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Fetch", reflect.TypeOf((*MockTChanNode)(nil).Fetch), ctx, req) -} - -// FetchBatchRaw mocks base method -func (m *MockTChanNode) FetchBatchRaw(ctx thrift.Context, req *FetchBatchRawRequest) (*FetchBatchRawResult_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FetchBatchRaw", ctx, req) - ret0, _ := ret[0].(*FetchBatchRawResult_) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// FetchBatchRaw indicates an expected call of FetchBatchRaw -func (mr *MockTChanNodeMockRecorder) 
FetchBatchRaw(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchBatchRaw", reflect.TypeOf((*MockTChanNode)(nil).FetchBatchRaw), ctx, req) -} - -// FetchBatchRawV2 mocks base method -func (m *MockTChanNode) FetchBatchRawV2(ctx thrift.Context, req *FetchBatchRawV2Request) (*FetchBatchRawResult_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FetchBatchRawV2", ctx, req) - ret0, _ := ret[0].(*FetchBatchRawResult_) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// FetchBatchRawV2 indicates an expected call of FetchBatchRawV2 -func (mr *MockTChanNodeMockRecorder) FetchBatchRawV2(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchBatchRawV2", reflect.TypeOf((*MockTChanNode)(nil).FetchBatchRawV2), ctx, req) -} - -// FetchBlocksMetadataRawV2 mocks base method -func (m *MockTChanNode) FetchBlocksMetadataRawV2(ctx thrift.Context, req *FetchBlocksMetadataRawV2Request) (*FetchBlocksMetadataRawV2Result_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FetchBlocksMetadataRawV2", ctx, req) - ret0, _ := ret[0].(*FetchBlocksMetadataRawV2Result_) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// FetchBlocksMetadataRawV2 indicates an expected call of FetchBlocksMetadataRawV2 -func (mr *MockTChanNodeMockRecorder) FetchBlocksMetadataRawV2(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchBlocksMetadataRawV2", reflect.TypeOf((*MockTChanNode)(nil).FetchBlocksMetadataRawV2), ctx, req) -} - -// FetchBlocksRaw mocks base method -func (m *MockTChanNode) FetchBlocksRaw(ctx thrift.Context, req *FetchBlocksRawRequest) (*FetchBlocksRawResult_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FetchBlocksRaw", ctx, req) - ret0, _ := ret[0].(*FetchBlocksRawResult_) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// FetchBlocksRaw indicates an 
expected call of FetchBlocksRaw -func (mr *MockTChanNodeMockRecorder) FetchBlocksRaw(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchBlocksRaw", reflect.TypeOf((*MockTChanNode)(nil).FetchBlocksRaw), ctx, req) -} - -// FetchTagged mocks base method -func (m *MockTChanNode) FetchTagged(ctx thrift.Context, req *FetchTaggedRequest) (*FetchTaggedResult_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FetchTagged", ctx, req) - ret0, _ := ret[0].(*FetchTaggedResult_) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// FetchTagged indicates an expected call of FetchTagged -func (mr *MockTChanNodeMockRecorder) FetchTagged(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchTagged", reflect.TypeOf((*MockTChanNode)(nil).FetchTagged), ctx, req) -} - -// GetPersistRateLimit mocks base method -func (m *MockTChanNode) GetPersistRateLimit(ctx thrift.Context) (*NodePersistRateLimitResult_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetPersistRateLimit", ctx) - ret0, _ := ret[0].(*NodePersistRateLimitResult_) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetPersistRateLimit indicates an expected call of GetPersistRateLimit -func (mr *MockTChanNodeMockRecorder) GetPersistRateLimit(ctx interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPersistRateLimit", reflect.TypeOf((*MockTChanNode)(nil).GetPersistRateLimit), ctx) -} - -// GetWriteNewSeriesAsync mocks base method -func (m *MockTChanNode) GetWriteNewSeriesAsync(ctx thrift.Context) (*NodeWriteNewSeriesAsyncResult_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetWriteNewSeriesAsync", ctx) - ret0, _ := ret[0].(*NodeWriteNewSeriesAsyncResult_) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetWriteNewSeriesAsync indicates an expected call of GetWriteNewSeriesAsync -func (mr 
*MockTChanNodeMockRecorder) GetWriteNewSeriesAsync(ctx interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWriteNewSeriesAsync", reflect.TypeOf((*MockTChanNode)(nil).GetWriteNewSeriesAsync), ctx) -} - -// GetWriteNewSeriesBackoffDuration mocks base method -func (m *MockTChanNode) GetWriteNewSeriesBackoffDuration(ctx thrift.Context) (*NodeWriteNewSeriesBackoffDurationResult_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetWriteNewSeriesBackoffDuration", ctx) - ret0, _ := ret[0].(*NodeWriteNewSeriesBackoffDurationResult_) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetWriteNewSeriesBackoffDuration indicates an expected call of GetWriteNewSeriesBackoffDuration -func (mr *MockTChanNodeMockRecorder) GetWriteNewSeriesBackoffDuration(ctx interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWriteNewSeriesBackoffDuration", reflect.TypeOf((*MockTChanNode)(nil).GetWriteNewSeriesBackoffDuration), ctx) -} - -// GetWriteNewSeriesLimitPerShardPerSecond mocks base method -func (m *MockTChanNode) GetWriteNewSeriesLimitPerShardPerSecond(ctx thrift.Context) (*NodeWriteNewSeriesLimitPerShardPerSecondResult_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetWriteNewSeriesLimitPerShardPerSecond", ctx) - ret0, _ := ret[0].(*NodeWriteNewSeriesLimitPerShardPerSecondResult_) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetWriteNewSeriesLimitPerShardPerSecond indicates an expected call of GetWriteNewSeriesLimitPerShardPerSecond -func (mr *MockTChanNodeMockRecorder) GetWriteNewSeriesLimitPerShardPerSecond(ctx interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWriteNewSeriesLimitPerShardPerSecond", reflect.TypeOf((*MockTChanNode)(nil).GetWriteNewSeriesLimitPerShardPerSecond), ctx) -} - -// Health mocks base method -func (m *MockTChanNode) Health(ctx thrift.Context) 
(*NodeHealthResult_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Health", ctx) - ret0, _ := ret[0].(*NodeHealthResult_) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Health indicates an expected call of Health -func (mr *MockTChanNodeMockRecorder) Health(ctx interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Health", reflect.TypeOf((*MockTChanNode)(nil).Health), ctx) -} - -// Query mocks base method -func (m *MockTChanNode) Query(ctx thrift.Context, req *QueryRequest) (*QueryResult_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Query", ctx, req) - ret0, _ := ret[0].(*QueryResult_) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Query indicates an expected call of Query -func (mr *MockTChanNodeMockRecorder) Query(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Query", reflect.TypeOf((*MockTChanNode)(nil).Query), ctx, req) -} - -// Repair mocks base method -func (m *MockTChanNode) Repair(ctx thrift.Context) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Repair", ctx) - ret0, _ := ret[0].(error) - return ret0 -} - -// Repair indicates an expected call of Repair -func (mr *MockTChanNodeMockRecorder) Repair(ctx interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Repair", reflect.TypeOf((*MockTChanNode)(nil).Repair), ctx) -} - -// SetPersistRateLimit mocks base method -func (m *MockTChanNode) SetPersistRateLimit(ctx thrift.Context, req *NodeSetPersistRateLimitRequest) (*NodePersistRateLimitResult_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetPersistRateLimit", ctx, req) - ret0, _ := ret[0].(*NodePersistRateLimitResult_) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// SetPersistRateLimit indicates an expected call of SetPersistRateLimit -func (mr *MockTChanNodeMockRecorder) SetPersistRateLimit(ctx, req interface{}) 
*gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetPersistRateLimit", reflect.TypeOf((*MockTChanNode)(nil).SetPersistRateLimit), ctx, req) -} - -// SetQueryLimitOverrides mocks base method -func (m *MockTChanNode) SetQueryLimitOverrides(ctx thrift.Context, req *NodeSetQueryLimitOverridesRequest) (*NodeQueryLimitOverridesResult_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetQueryLimitOverrides", ctx, req) - ret0, _ := ret[0].(*NodeQueryLimitOverridesResult_) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// SetQueryLimitOverrides indicates an expected call of SetQueryLimitOverrides -func (mr *MockTChanNodeMockRecorder) SetQueryLimitOverrides(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetQueryLimitOverrides", reflect.TypeOf((*MockTChanNode)(nil).SetQueryLimitOverrides), ctx, req) -} - -// SetWriteNewSeriesAsync mocks base method -func (m *MockTChanNode) SetWriteNewSeriesAsync(ctx thrift.Context, req *NodeSetWriteNewSeriesAsyncRequest) (*NodeWriteNewSeriesAsyncResult_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetWriteNewSeriesAsync", ctx, req) - ret0, _ := ret[0].(*NodeWriteNewSeriesAsyncResult_) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// SetWriteNewSeriesAsync indicates an expected call of SetWriteNewSeriesAsync -func (mr *MockTChanNodeMockRecorder) SetWriteNewSeriesAsync(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetWriteNewSeriesAsync", reflect.TypeOf((*MockTChanNode)(nil).SetWriteNewSeriesAsync), ctx, req) -} - -// SetWriteNewSeriesBackoffDuration mocks base method -func (m *MockTChanNode) SetWriteNewSeriesBackoffDuration(ctx thrift.Context, req *NodeSetWriteNewSeriesBackoffDurationRequest) (*NodeWriteNewSeriesBackoffDurationResult_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, 
"SetWriteNewSeriesBackoffDuration", ctx, req) - ret0, _ := ret[0].(*NodeWriteNewSeriesBackoffDurationResult_) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// SetWriteNewSeriesBackoffDuration indicates an expected call of SetWriteNewSeriesBackoffDuration -func (mr *MockTChanNodeMockRecorder) SetWriteNewSeriesBackoffDuration(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetWriteNewSeriesBackoffDuration", reflect.TypeOf((*MockTChanNode)(nil).SetWriteNewSeriesBackoffDuration), ctx, req) -} - -// SetWriteNewSeriesLimitPerShardPerSecond mocks base method -func (m *MockTChanNode) SetWriteNewSeriesLimitPerShardPerSecond(ctx thrift.Context, req *NodeSetWriteNewSeriesLimitPerShardPerSecondRequest) (*NodeWriteNewSeriesLimitPerShardPerSecondResult_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetWriteNewSeriesLimitPerShardPerSecond", ctx, req) - ret0, _ := ret[0].(*NodeWriteNewSeriesLimitPerShardPerSecondResult_) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// SetWriteNewSeriesLimitPerShardPerSecond indicates an expected call of SetWriteNewSeriesLimitPerShardPerSecond -func (mr *MockTChanNodeMockRecorder) SetWriteNewSeriesLimitPerShardPerSecond(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetWriteNewSeriesLimitPerShardPerSecond", reflect.TypeOf((*MockTChanNode)(nil).SetWriteNewSeriesLimitPerShardPerSecond), ctx, req) -} - -// Truncate mocks base method -func (m *MockTChanNode) Truncate(ctx thrift.Context, req *TruncateRequest) (*TruncateResult_, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Truncate", ctx, req) - ret0, _ := ret[0].(*TruncateResult_) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Truncate indicates an expected call of Truncate -func (mr *MockTChanNodeMockRecorder) Truncate(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Truncate", reflect.TypeOf((*MockTChanNode)(nil).Truncate), ctx, req) -} - -// Write mocks base method -func (m *MockTChanNode) Write(ctx thrift.Context, req *WriteRequest) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Write", ctx, req) - ret0, _ := ret[0].(error) - return ret0 -} - -// Write indicates an expected call of Write -func (mr *MockTChanNodeMockRecorder) Write(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Write", reflect.TypeOf((*MockTChanNode)(nil).Write), ctx, req) -} - -// WriteBatchRaw mocks base method -func (m *MockTChanNode) WriteBatchRaw(ctx thrift.Context, req *WriteBatchRawRequest) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WriteBatchRaw", ctx, req) - ret0, _ := ret[0].(error) - return ret0 -} - -// WriteBatchRaw indicates an expected call of WriteBatchRaw -func (mr *MockTChanNodeMockRecorder) WriteBatchRaw(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteBatchRaw", reflect.TypeOf((*MockTChanNode)(nil).WriteBatchRaw), ctx, req) -} - -// WriteBatchRawV2 mocks base method -func (m *MockTChanNode) WriteBatchRawV2(ctx thrift.Context, req *WriteBatchRawV2Request) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WriteBatchRawV2", ctx, req) - ret0, _ := ret[0].(error) - return ret0 -} - -// WriteBatchRawV2 indicates an expected call of WriteBatchRawV2 -func (mr *MockTChanNodeMockRecorder) WriteBatchRawV2(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteBatchRawV2", reflect.TypeOf((*MockTChanNode)(nil).WriteBatchRawV2), ctx, req) -} - -// WriteTagged mocks base method -func (m *MockTChanNode) WriteTagged(ctx thrift.Context, req *WriteTaggedRequest) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WriteTagged", ctx, req) - ret0, _ := ret[0].(error) - return 
ret0 -} - -// WriteTagged indicates an expected call of WriteTagged -func (mr *MockTChanNodeMockRecorder) WriteTagged(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteTagged", reflect.TypeOf((*MockTChanNode)(nil).WriteTagged), ctx, req) -} - -// WriteTaggedBatchRaw mocks base method -func (m *MockTChanNode) WriteTaggedBatchRaw(ctx thrift.Context, req *WriteTaggedBatchRawRequest) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WriteTaggedBatchRaw", ctx, req) - ret0, _ := ret[0].(error) - return ret0 -} - -// WriteTaggedBatchRaw indicates an expected call of WriteTaggedBatchRaw -func (mr *MockTChanNodeMockRecorder) WriteTaggedBatchRaw(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteTaggedBatchRaw", reflect.TypeOf((*MockTChanNode)(nil).WriteTaggedBatchRaw), ctx, req) -} - -// WriteTaggedBatchRawV2 mocks base method -func (m *MockTChanNode) WriteTaggedBatchRawV2(ctx thrift.Context, req *WriteTaggedBatchRawV2Request) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WriteTaggedBatchRawV2", ctx, req) - ret0, _ := ret[0].(error) - return ret0 -} - -// WriteTaggedBatchRawV2 indicates an expected call of WriteTaggedBatchRawV2 -func (mr *MockTChanNodeMockRecorder) WriteTaggedBatchRawV2(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteTaggedBatchRawV2", reflect.TypeOf((*MockTChanNode)(nil).WriteTaggedBatchRawV2), ctx, req) -} diff --git a/src/dbnode/generated/thrift/rpc/tchan-rpc.go b/src/dbnode/generated/thrift/rpc/tchan-rpc.go index 8c4b5b8473..fd65c45d8e 100644 --- a/src/dbnode/generated/thrift/rpc/tchan-rpc.go +++ b/src/dbnode/generated/thrift/rpc/tchan-rpc.go @@ -67,7 +67,6 @@ type TChanNode interface { Query(ctx thrift.Context, req *QueryRequest) (*QueryResult_, error) Repair(ctx thrift.Context) error SetPersistRateLimit(ctx 
thrift.Context, req *NodeSetPersistRateLimitRequest) (*NodePersistRateLimitResult_, error) - SetQueryLimitOverrides(ctx thrift.Context, req *NodeSetQueryLimitOverridesRequest) (*NodeQueryLimitOverridesResult_, error) SetWriteNewSeriesAsync(ctx thrift.Context, req *NodeSetWriteNewSeriesAsyncRequest) (*NodeWriteNewSeriesAsyncResult_, error) SetWriteNewSeriesBackoffDuration(ctx thrift.Context, req *NodeSetWriteNewSeriesBackoffDurationRequest) (*NodeWriteNewSeriesBackoffDurationResult_, error) SetWriteNewSeriesLimitPerShardPerSecond(ctx thrift.Context, req *NodeSetWriteNewSeriesLimitPerShardPerSecondRequest) (*NodeWriteNewSeriesLimitPerShardPerSecondResult_, error) @@ -864,24 +863,6 @@ func (c *tchanNodeClient) SetPersistRateLimit(ctx thrift.Context, req *NodeSetPe return resp.GetSuccess(), err } -func (c *tchanNodeClient) SetQueryLimitOverrides(ctx thrift.Context, req *NodeSetQueryLimitOverridesRequest) (*NodeQueryLimitOverridesResult_, error) { - var resp NodeSetQueryLimitOverridesResult - args := NodeSetQueryLimitOverridesArgs{ - Req: req, - } - success, err := c.client.Call(ctx, c.thriftService, "setQueryLimitOverrides", &args, &resp) - if err == nil && !success { - switch { - case resp.Err != nil: - err = resp.Err - default: - err = fmt.Errorf("received no result or unknown exception for setQueryLimitOverrides") - } - } - - return resp.GetSuccess(), err -} - func (c *tchanNodeClient) SetWriteNewSeriesAsync(ctx thrift.Context, req *NodeSetWriteNewSeriesAsyncRequest) (*NodeWriteNewSeriesAsyncResult_, error) { var resp NodeSetWriteNewSeriesAsyncResult args := NodeSetWriteNewSeriesAsyncArgs{ @@ -1102,7 +1083,6 @@ func (s *tchanNodeServer) Methods() []string { "query", "repair", "setPersistRateLimit", - "setQueryLimitOverrides", "setWriteNewSeriesAsync", "setWriteNewSeriesBackoffDuration", "setWriteNewSeriesLimitPerShardPerSecond", @@ -1162,8 +1142,6 @@ func (s *tchanNodeServer) Handle(ctx thrift.Context, methodName string, protocol return s.handleRepair(ctx, protocol) 
case "setPersistRateLimit": return s.handleSetPersistRateLimit(ctx, protocol) - case "setQueryLimitOverrides": - return s.handleSetQueryLimitOverrides(ctx, protocol) case "setWriteNewSeriesAsync": return s.handleSetWriteNewSeriesAsync(ctx, protocol) case "setWriteNewSeriesBackoffDuration": @@ -1805,34 +1783,6 @@ func (s *tchanNodeServer) handleSetPersistRateLimit(ctx thrift.Context, protocol return err == nil, &res, nil } -func (s *tchanNodeServer) handleSetQueryLimitOverrides(ctx thrift.Context, protocol athrift.TProtocol) (bool, athrift.TStruct, error) { - var req NodeSetQueryLimitOverridesArgs - var res NodeSetQueryLimitOverridesResult - - if err := req.Read(protocol); err != nil { - return false, nil, err - } - - r, err := - s.handler.SetQueryLimitOverrides(ctx, req.Req) - - if err != nil { - switch v := err.(type) { - case *Error: - if v == nil { - return false, nil, fmt.Errorf("Handler for err returned non-nil error type *Error but nil value") - } - res.Err = v - default: - return false, nil, err - } - } else { - res.Success = r - } - - return err == nil, &res, nil -} - func (s *tchanNodeServer) handleSetWriteNewSeriesAsync(ctx thrift.Context, protocol athrift.TProtocol) (bool, athrift.TStruct, error) { var req NodeSetWriteNewSeriesAsyncArgs var res NodeSetWriteNewSeriesAsyncResult From 32f148f55a8cfbf3da1c924a92fc4be58616eb91 Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Mon, 18 Jan 2021 11:14:56 -0500 Subject: [PATCH 23/80] Add back rpc mock --- src/dbnode/generated/thrift/rpc/rpc_mock.go | 654 ++++++++++++++++++++ 1 file changed, 654 insertions(+) create mode 100644 src/dbnode/generated/thrift/rpc/rpc_mock.go diff --git a/src/dbnode/generated/thrift/rpc/rpc_mock.go b/src/dbnode/generated/thrift/rpc/rpc_mock.go new file mode 100644 index 0000000000..a7be5ca909 --- /dev/null +++ b/src/dbnode/generated/thrift/rpc/rpc_mock.go @@ -0,0 +1,654 @@ +// Code generated by MockGen. DO NOT EDIT. 
+// Source: github.com/m3db/m3/src/dbnode/generated/thrift/rpc/tchan-go + +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package rpc is a generated GoMock package. 
+package rpc + +import ( + "reflect" + + "github.com/golang/mock/gomock" + "github.com/uber/tchannel-go/thrift" +) + +// MockTChanCluster is a mock of TChanCluster interface +type MockTChanCluster struct { + ctrl *gomock.Controller + recorder *MockTChanClusterMockRecorder +} + +// MockTChanClusterMockRecorder is the mock recorder for MockTChanCluster +type MockTChanClusterMockRecorder struct { + mock *MockTChanCluster +} + +// NewMockTChanCluster creates a new mock instance +func NewMockTChanCluster(ctrl *gomock.Controller) *MockTChanCluster { + mock := &MockTChanCluster{ctrl: ctrl} + mock.recorder = &MockTChanClusterMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockTChanCluster) EXPECT() *MockTChanClusterMockRecorder { + return m.recorder +} + +// Aggregate mocks base method +func (m *MockTChanCluster) Aggregate(ctx thrift.Context, req *AggregateQueryRequest) (*AggregateQueryResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Aggregate", ctx, req) + ret0, _ := ret[0].(*AggregateQueryResult_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Aggregate indicates an expected call of Aggregate +func (mr *MockTChanClusterMockRecorder) Aggregate(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Aggregate", reflect.TypeOf((*MockTChanCluster)(nil).Aggregate), ctx, req) +} + +// Fetch mocks base method +func (m *MockTChanCluster) Fetch(ctx thrift.Context, req *FetchRequest) (*FetchResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Fetch", ctx, req) + ret0, _ := ret[0].(*FetchResult_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Fetch indicates an expected call of Fetch +func (mr *MockTChanClusterMockRecorder) Fetch(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Fetch", 
reflect.TypeOf((*MockTChanCluster)(nil).Fetch), ctx, req) +} + +// Health mocks base method +func (m *MockTChanCluster) Health(ctx thrift.Context) (*HealthResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Health", ctx) + ret0, _ := ret[0].(*HealthResult_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Health indicates an expected call of Health +func (mr *MockTChanClusterMockRecorder) Health(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Health", reflect.TypeOf((*MockTChanCluster)(nil).Health), ctx) +} + +// Query mocks base method +func (m *MockTChanCluster) Query(ctx thrift.Context, req *QueryRequest) (*QueryResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Query", ctx, req) + ret0, _ := ret[0].(*QueryResult_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Query indicates an expected call of Query +func (mr *MockTChanClusterMockRecorder) Query(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Query", reflect.TypeOf((*MockTChanCluster)(nil).Query), ctx, req) +} + +// Truncate mocks base method +func (m *MockTChanCluster) Truncate(ctx thrift.Context, req *TruncateRequest) (*TruncateResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Truncate", ctx, req) + ret0, _ := ret[0].(*TruncateResult_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Truncate indicates an expected call of Truncate +func (mr *MockTChanClusterMockRecorder) Truncate(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Truncate", reflect.TypeOf((*MockTChanCluster)(nil).Truncate), ctx, req) +} + +// Write mocks base method +func (m *MockTChanCluster) Write(ctx thrift.Context, req *WriteRequest) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Write", ctx, req) + ret0, _ := ret[0].(error) + return ret0 +} + +// Write indicates an 
expected call of Write +func (mr *MockTChanClusterMockRecorder) Write(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Write", reflect.TypeOf((*MockTChanCluster)(nil).Write), ctx, req) +} + +// WriteTagged mocks base method +func (m *MockTChanCluster) WriteTagged(ctx thrift.Context, req *WriteTaggedRequest) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WriteTagged", ctx, req) + ret0, _ := ret[0].(error) + return ret0 +} + +// WriteTagged indicates an expected call of WriteTagged +func (mr *MockTChanClusterMockRecorder) WriteTagged(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteTagged", reflect.TypeOf((*MockTChanCluster)(nil).WriteTagged), ctx, req) +} + +// MockTChanNode is a mock of TChanNode interface +type MockTChanNode struct { + ctrl *gomock.Controller + recorder *MockTChanNodeMockRecorder +} + +// MockTChanNodeMockRecorder is the mock recorder for MockTChanNode +type MockTChanNodeMockRecorder struct { + mock *MockTChanNode +} + +// NewMockTChanNode creates a new mock instance +func NewMockTChanNode(ctrl *gomock.Controller) *MockTChanNode { + mock := &MockTChanNode{ctrl: ctrl} + mock.recorder = &MockTChanNodeMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockTChanNode) EXPECT() *MockTChanNodeMockRecorder { + return m.recorder +} + +// Aggregate mocks base method +func (m *MockTChanNode) Aggregate(ctx thrift.Context, req *AggregateQueryRequest) (*AggregateQueryResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Aggregate", ctx, req) + ret0, _ := ret[0].(*AggregateQueryResult_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Aggregate indicates an expected call of Aggregate +func (mr *MockTChanNodeMockRecorder) Aggregate(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Aggregate", reflect.TypeOf((*MockTChanNode)(nil).Aggregate), ctx, req) +} + +// AggregateRaw mocks base method +func (m *MockTChanNode) AggregateRaw(ctx thrift.Context, req *AggregateQueryRawRequest) (*AggregateQueryRawResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AggregateRaw", ctx, req) + ret0, _ := ret[0].(*AggregateQueryRawResult_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AggregateRaw indicates an expected call of AggregateRaw +func (mr *MockTChanNodeMockRecorder) AggregateRaw(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AggregateRaw", reflect.TypeOf((*MockTChanNode)(nil).AggregateRaw), ctx, req) +} + +// AggregateTiles mocks base method +func (m *MockTChanNode) AggregateTiles(ctx thrift.Context, req *AggregateTilesRequest) (*AggregateTilesResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AggregateTiles", ctx, req) + ret0, _ := ret[0].(*AggregateTilesResult_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AggregateTiles indicates an expected call of AggregateTiles +func (mr *MockTChanNodeMockRecorder) AggregateTiles(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AggregateTiles", reflect.TypeOf((*MockTChanNode)(nil).AggregateTiles), ctx, req) +} + +// Bootstrapped mocks base method +func (m *MockTChanNode) Bootstrapped(ctx thrift.Context) (*NodeBootstrappedResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Bootstrapped", ctx) + ret0, _ := ret[0].(*NodeBootstrappedResult_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Bootstrapped indicates an expected call of Bootstrapped +func (mr *MockTChanNodeMockRecorder) Bootstrapped(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Bootstrapped", 
reflect.TypeOf((*MockTChanNode)(nil).Bootstrapped), ctx) +} + +// BootstrappedInPlacementOrNoPlacement mocks base method +func (m *MockTChanNode) BootstrappedInPlacementOrNoPlacement(ctx thrift.Context) (*NodeBootstrappedInPlacementOrNoPlacementResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BootstrappedInPlacementOrNoPlacement", ctx) + ret0, _ := ret[0].(*NodeBootstrappedInPlacementOrNoPlacementResult_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BootstrappedInPlacementOrNoPlacement indicates an expected call of BootstrappedInPlacementOrNoPlacement +func (mr *MockTChanNodeMockRecorder) BootstrappedInPlacementOrNoPlacement(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BootstrappedInPlacementOrNoPlacement", reflect.TypeOf((*MockTChanNode)(nil).BootstrappedInPlacementOrNoPlacement), ctx) +} + +// DebugIndexMemorySegments mocks base method +func (m *MockTChanNode) DebugIndexMemorySegments(ctx thrift.Context, req *DebugIndexMemorySegmentsRequest) (*DebugIndexMemorySegmentsResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DebugIndexMemorySegments", ctx, req) + ret0, _ := ret[0].(*DebugIndexMemorySegmentsResult_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DebugIndexMemorySegments indicates an expected call of DebugIndexMemorySegments +func (mr *MockTChanNodeMockRecorder) DebugIndexMemorySegments(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DebugIndexMemorySegments", reflect.TypeOf((*MockTChanNode)(nil).DebugIndexMemorySegments), ctx, req) +} + +// DebugProfileStart mocks base method +func (m *MockTChanNode) DebugProfileStart(ctx thrift.Context, req *DebugProfileStartRequest) (*DebugProfileStartResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DebugProfileStart", ctx, req) + ret0, _ := ret[0].(*DebugProfileStartResult_) + ret1, _ := ret[1].(error) + return ret0, 
ret1 +} + +// DebugProfileStart indicates an expected call of DebugProfileStart +func (mr *MockTChanNodeMockRecorder) DebugProfileStart(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DebugProfileStart", reflect.TypeOf((*MockTChanNode)(nil).DebugProfileStart), ctx, req) +} + +// DebugProfileStop mocks base method +func (m *MockTChanNode) DebugProfileStop(ctx thrift.Context, req *DebugProfileStopRequest) (*DebugProfileStopResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DebugProfileStop", ctx, req) + ret0, _ := ret[0].(*DebugProfileStopResult_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DebugProfileStop indicates an expected call of DebugProfileStop +func (mr *MockTChanNodeMockRecorder) DebugProfileStop(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DebugProfileStop", reflect.TypeOf((*MockTChanNode)(nil).DebugProfileStop), ctx, req) +} + +// Fetch mocks base method +func (m *MockTChanNode) Fetch(ctx thrift.Context, req *FetchRequest) (*FetchResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Fetch", ctx, req) + ret0, _ := ret[0].(*FetchResult_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Fetch indicates an expected call of Fetch +func (mr *MockTChanNodeMockRecorder) Fetch(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Fetch", reflect.TypeOf((*MockTChanNode)(nil).Fetch), ctx, req) +} + +// FetchBatchRaw mocks base method +func (m *MockTChanNode) FetchBatchRaw(ctx thrift.Context, req *FetchBatchRawRequest) (*FetchBatchRawResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FetchBatchRaw", ctx, req) + ret0, _ := ret[0].(*FetchBatchRawResult_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// FetchBatchRaw indicates an expected call of FetchBatchRaw +func (mr *MockTChanNodeMockRecorder) 
FetchBatchRaw(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchBatchRaw", reflect.TypeOf((*MockTChanNode)(nil).FetchBatchRaw), ctx, req) +} + +// FetchBatchRawV2 mocks base method +func (m *MockTChanNode) FetchBatchRawV2(ctx thrift.Context, req *FetchBatchRawV2Request) (*FetchBatchRawResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FetchBatchRawV2", ctx, req) + ret0, _ := ret[0].(*FetchBatchRawResult_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// FetchBatchRawV2 indicates an expected call of FetchBatchRawV2 +func (mr *MockTChanNodeMockRecorder) FetchBatchRawV2(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchBatchRawV2", reflect.TypeOf((*MockTChanNode)(nil).FetchBatchRawV2), ctx, req) +} + +// FetchBlocksMetadataRawV2 mocks base method +func (m *MockTChanNode) FetchBlocksMetadataRawV2(ctx thrift.Context, req *FetchBlocksMetadataRawV2Request) (*FetchBlocksMetadataRawV2Result_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FetchBlocksMetadataRawV2", ctx, req) + ret0, _ := ret[0].(*FetchBlocksMetadataRawV2Result_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// FetchBlocksMetadataRawV2 indicates an expected call of FetchBlocksMetadataRawV2 +func (mr *MockTChanNodeMockRecorder) FetchBlocksMetadataRawV2(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchBlocksMetadataRawV2", reflect.TypeOf((*MockTChanNode)(nil).FetchBlocksMetadataRawV2), ctx, req) +} + +// FetchBlocksRaw mocks base method +func (m *MockTChanNode) FetchBlocksRaw(ctx thrift.Context, req *FetchBlocksRawRequest) (*FetchBlocksRawResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FetchBlocksRaw", ctx, req) + ret0, _ := ret[0].(*FetchBlocksRawResult_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// FetchBlocksRaw indicates an 
expected call of FetchBlocksRaw +func (mr *MockTChanNodeMockRecorder) FetchBlocksRaw(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchBlocksRaw", reflect.TypeOf((*MockTChanNode)(nil).FetchBlocksRaw), ctx, req) +} + +// FetchTagged mocks base method +func (m *MockTChanNode) FetchTagged(ctx thrift.Context, req *FetchTaggedRequest) (*FetchTaggedResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FetchTagged", ctx, req) + ret0, _ := ret[0].(*FetchTaggedResult_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// FetchTagged indicates an expected call of FetchTagged +func (mr *MockTChanNodeMockRecorder) FetchTagged(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchTagged", reflect.TypeOf((*MockTChanNode)(nil).FetchTagged), ctx, req) +} + +// GetPersistRateLimit mocks base method +func (m *MockTChanNode) GetPersistRateLimit(ctx thrift.Context) (*NodePersistRateLimitResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPersistRateLimit", ctx) + ret0, _ := ret[0].(*NodePersistRateLimitResult_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPersistRateLimit indicates an expected call of GetPersistRateLimit +func (mr *MockTChanNodeMockRecorder) GetPersistRateLimit(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPersistRateLimit", reflect.TypeOf((*MockTChanNode)(nil).GetPersistRateLimit), ctx) +} + +// GetWriteNewSeriesAsync mocks base method +func (m *MockTChanNode) GetWriteNewSeriesAsync(ctx thrift.Context) (*NodeWriteNewSeriesAsyncResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetWriteNewSeriesAsync", ctx) + ret0, _ := ret[0].(*NodeWriteNewSeriesAsyncResult_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetWriteNewSeriesAsync indicates an expected call of GetWriteNewSeriesAsync +func (mr 
*MockTChanNodeMockRecorder) GetWriteNewSeriesAsync(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWriteNewSeriesAsync", reflect.TypeOf((*MockTChanNode)(nil).GetWriteNewSeriesAsync), ctx) +} + +// GetWriteNewSeriesBackoffDuration mocks base method +func (m *MockTChanNode) GetWriteNewSeriesBackoffDuration(ctx thrift.Context) (*NodeWriteNewSeriesBackoffDurationResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetWriteNewSeriesBackoffDuration", ctx) + ret0, _ := ret[0].(*NodeWriteNewSeriesBackoffDurationResult_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetWriteNewSeriesBackoffDuration indicates an expected call of GetWriteNewSeriesBackoffDuration +func (mr *MockTChanNodeMockRecorder) GetWriteNewSeriesBackoffDuration(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWriteNewSeriesBackoffDuration", reflect.TypeOf((*MockTChanNode)(nil).GetWriteNewSeriesBackoffDuration), ctx) +} + +// GetWriteNewSeriesLimitPerShardPerSecond mocks base method +func (m *MockTChanNode) GetWriteNewSeriesLimitPerShardPerSecond(ctx thrift.Context) (*NodeWriteNewSeriesLimitPerShardPerSecondResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetWriteNewSeriesLimitPerShardPerSecond", ctx) + ret0, _ := ret[0].(*NodeWriteNewSeriesLimitPerShardPerSecondResult_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetWriteNewSeriesLimitPerShardPerSecond indicates an expected call of GetWriteNewSeriesLimitPerShardPerSecond +func (mr *MockTChanNodeMockRecorder) GetWriteNewSeriesLimitPerShardPerSecond(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWriteNewSeriesLimitPerShardPerSecond", reflect.TypeOf((*MockTChanNode)(nil).GetWriteNewSeriesLimitPerShardPerSecond), ctx) +} + +// Health mocks base method +func (m *MockTChanNode) Health(ctx thrift.Context) 
(*NodeHealthResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Health", ctx) + ret0, _ := ret[0].(*NodeHealthResult_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Health indicates an expected call of Health +func (mr *MockTChanNodeMockRecorder) Health(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Health", reflect.TypeOf((*MockTChanNode)(nil).Health), ctx) +} + +// Query mocks base method +func (m *MockTChanNode) Query(ctx thrift.Context, req *QueryRequest) (*QueryResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Query", ctx, req) + ret0, _ := ret[0].(*QueryResult_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Query indicates an expected call of Query +func (mr *MockTChanNodeMockRecorder) Query(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Query", reflect.TypeOf((*MockTChanNode)(nil).Query), ctx, req) +} + +// Repair mocks base method +func (m *MockTChanNode) Repair(ctx thrift.Context) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Repair", ctx) + ret0, _ := ret[0].(error) + return ret0 +} + +// Repair indicates an expected call of Repair +func (mr *MockTChanNodeMockRecorder) Repair(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Repair", reflect.TypeOf((*MockTChanNode)(nil).Repair), ctx) +} + +// SetPersistRateLimit mocks base method +func (m *MockTChanNode) SetPersistRateLimit(ctx thrift.Context, req *NodeSetPersistRateLimitRequest) (*NodePersistRateLimitResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetPersistRateLimit", ctx, req) + ret0, _ := ret[0].(*NodePersistRateLimitResult_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SetPersistRateLimit indicates an expected call of SetPersistRateLimit +func (mr *MockTChanNodeMockRecorder) SetPersistRateLimit(ctx, req interface{}) 
*gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetPersistRateLimit", reflect.TypeOf((*MockTChanNode)(nil).SetPersistRateLimit), ctx, req) +} + +// SetWriteNewSeriesAsync mocks base method +func (m *MockTChanNode) SetWriteNewSeriesAsync(ctx thrift.Context, req *NodeSetWriteNewSeriesAsyncRequest) (*NodeWriteNewSeriesAsyncResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetWriteNewSeriesAsync", ctx, req) + ret0, _ := ret[0].(*NodeWriteNewSeriesAsyncResult_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SetWriteNewSeriesAsync indicates an expected call of SetWriteNewSeriesAsync +func (mr *MockTChanNodeMockRecorder) SetWriteNewSeriesAsync(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetWriteNewSeriesAsync", reflect.TypeOf((*MockTChanNode)(nil).SetWriteNewSeriesAsync), ctx, req) +} + +// SetWriteNewSeriesBackoffDuration mocks base method +func (m *MockTChanNode) SetWriteNewSeriesBackoffDuration(ctx thrift.Context, req *NodeSetWriteNewSeriesBackoffDurationRequest) (*NodeWriteNewSeriesBackoffDurationResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetWriteNewSeriesBackoffDuration", ctx, req) + ret0, _ := ret[0].(*NodeWriteNewSeriesBackoffDurationResult_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SetWriteNewSeriesBackoffDuration indicates an expected call of SetWriteNewSeriesBackoffDuration +func (mr *MockTChanNodeMockRecorder) SetWriteNewSeriesBackoffDuration(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetWriteNewSeriesBackoffDuration", reflect.TypeOf((*MockTChanNode)(nil).SetWriteNewSeriesBackoffDuration), ctx, req) +} + +// SetWriteNewSeriesLimitPerShardPerSecond mocks base method +func (m *MockTChanNode) SetWriteNewSeriesLimitPerShardPerSecond(ctx thrift.Context, req *NodeSetWriteNewSeriesLimitPerShardPerSecondRequest) 
(*NodeWriteNewSeriesLimitPerShardPerSecondResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetWriteNewSeriesLimitPerShardPerSecond", ctx, req) + ret0, _ := ret[0].(*NodeWriteNewSeriesLimitPerShardPerSecondResult_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SetWriteNewSeriesLimitPerShardPerSecond indicates an expected call of SetWriteNewSeriesLimitPerShardPerSecond +func (mr *MockTChanNodeMockRecorder) SetWriteNewSeriesLimitPerShardPerSecond(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetWriteNewSeriesLimitPerShardPerSecond", reflect.TypeOf((*MockTChanNode)(nil).SetWriteNewSeriesLimitPerShardPerSecond), ctx, req) +} + +// Truncate mocks base method +func (m *MockTChanNode) Truncate(ctx thrift.Context, req *TruncateRequest) (*TruncateResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Truncate", ctx, req) + ret0, _ := ret[0].(*TruncateResult_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Truncate indicates an expected call of Truncate +func (mr *MockTChanNodeMockRecorder) Truncate(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Truncate", reflect.TypeOf((*MockTChanNode)(nil).Truncate), ctx, req) +} + +// Write mocks base method +func (m *MockTChanNode) Write(ctx thrift.Context, req *WriteRequest) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Write", ctx, req) + ret0, _ := ret[0].(error) + return ret0 +} + +// Write indicates an expected call of Write +func (mr *MockTChanNodeMockRecorder) Write(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Write", reflect.TypeOf((*MockTChanNode)(nil).Write), ctx, req) +} + +// WriteBatchRaw mocks base method +func (m *MockTChanNode) WriteBatchRaw(ctx thrift.Context, req *WriteBatchRawRequest) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WriteBatchRaw", 
ctx, req) + ret0, _ := ret[0].(error) + return ret0 +} + +// WriteBatchRaw indicates an expected call of WriteBatchRaw +func (mr *MockTChanNodeMockRecorder) WriteBatchRaw(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteBatchRaw", reflect.TypeOf((*MockTChanNode)(nil).WriteBatchRaw), ctx, req) +} + +// WriteBatchRawV2 mocks base method +func (m *MockTChanNode) WriteBatchRawV2(ctx thrift.Context, req *WriteBatchRawV2Request) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WriteBatchRawV2", ctx, req) + ret0, _ := ret[0].(error) + return ret0 +} + +// WriteBatchRawV2 indicates an expected call of WriteBatchRawV2 +func (mr *MockTChanNodeMockRecorder) WriteBatchRawV2(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteBatchRawV2", reflect.TypeOf((*MockTChanNode)(nil).WriteBatchRawV2), ctx, req) +} + +// WriteTagged mocks base method +func (m *MockTChanNode) WriteTagged(ctx thrift.Context, req *WriteTaggedRequest) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WriteTagged", ctx, req) + ret0, _ := ret[0].(error) + return ret0 +} + +// WriteTagged indicates an expected call of WriteTagged +func (mr *MockTChanNodeMockRecorder) WriteTagged(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteTagged", reflect.TypeOf((*MockTChanNode)(nil).WriteTagged), ctx, req) +} + +// WriteTaggedBatchRaw mocks base method +func (m *MockTChanNode) WriteTaggedBatchRaw(ctx thrift.Context, req *WriteTaggedBatchRawRequest) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WriteTaggedBatchRaw", ctx, req) + ret0, _ := ret[0].(error) + return ret0 +} + +// WriteTaggedBatchRaw indicates an expected call of WriteTaggedBatchRaw +func (mr *MockTChanNodeMockRecorder) WriteTaggedBatchRaw(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteTaggedBatchRaw", reflect.TypeOf((*MockTChanNode)(nil).WriteTaggedBatchRaw), ctx, req) +} + +// WriteTaggedBatchRawV2 mocks base method +func (m *MockTChanNode) WriteTaggedBatchRawV2(ctx thrift.Context, req *WriteTaggedBatchRawV2Request) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WriteTaggedBatchRawV2", ctx, req) + ret0, _ := ret[0].(error) + return ret0 +} + +// WriteTaggedBatchRawV2 indicates an expected call of WriteTaggedBatchRawV2 +func (mr *MockTChanNodeMockRecorder) WriteTaggedBatchRawV2(ctx, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteTaggedBatchRawV2", reflect.TypeOf((*MockTChanNode)(nil).WriteTaggedBatchRawV2), ctx, req) +} From 66a0a42d135cb21bf761b401e5d58435f29e9ab6 Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Mon, 18 Jan 2021 11:18:21 -0500 Subject: [PATCH 24/80] Fix test --- src/dbnode/integration/query_limit_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/dbnode/integration/query_limit_test.go b/src/dbnode/integration/query_limit_test.go index 53a00b7665..3ce610d52c 100644 --- a/src/dbnode/integration/query_limit_test.go +++ b/src/dbnode/integration/query_limit_test.go @@ -87,8 +87,9 @@ func newTestOptionsWithIndexedNamespace(t *testing.T) (TestOptions, namespace.Me func newTestSetupWithQueryLimits(t *testing.T, opts TestOptions) TestSetup { storageLimitsFn := func(storageOpts storage.Options) storage.Options { + limit := 1 queryLookback := limits.DefaultLookbackLimitOptions() - queryLookback.Limit = 1 + queryLookback.Limit = &limit queryLookback.Lookback = time.Hour limitOpts := limits.NewOptions(). 
From a39700f0190d6c717a02bd6f385e0f72f0c923d7 Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Mon, 18 Jan 2021 11:21:52 -0500 Subject: [PATCH 25/80] Fix test 2 --- src/dbnode/integration/query_limit_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/dbnode/integration/query_limit_test.go b/src/dbnode/integration/query_limit_test.go index 3ce610d52c..111f9d4a1b 100644 --- a/src/dbnode/integration/query_limit_test.go +++ b/src/dbnode/integration/query_limit_test.go @@ -87,7 +87,7 @@ func newTestOptionsWithIndexedNamespace(t *testing.T) (TestOptions, namespace.Me func newTestSetupWithQueryLimits(t *testing.T, opts TestOptions) TestSetup { storageLimitsFn := func(storageOpts storage.Options) storage.Options { - limit := 1 + limit := int64(1) queryLookback := limits.DefaultLookbackLimitOptions() queryLookback.Limit = &limit queryLookback.Lookback = time.Hour From 0e561872a10ac5069845750c0d1e6520901f38db Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Mon, 18 Jan 2021 11:31:55 -0500 Subject: [PATCH 26/80] Add back m3em_mock --- src/dbnode/generated/thrift/rpc/rpc.go | 3 +- src/dbnode/generated/thrift/rpc/tchan-rpc.go | 2 +- src/m3em/generated/proto/m3em/m3em_mock.go | 556 +++++++++++++++++++ 3 files changed, 559 insertions(+), 2 deletions(-) create mode 100644 src/m3em/generated/proto/m3em/m3em_mock.go diff --git a/src/dbnode/generated/thrift/rpc/rpc.go b/src/dbnode/generated/thrift/rpc/rpc.go index ea8421cec3..5b51ff530d 100644 --- a/src/dbnode/generated/thrift/rpc/rpc.go +++ b/src/dbnode/generated/thrift/rpc/rpc.go @@ -1,4 +1,4 @@ -// Copyright (c) 2021 Uber Technologies, Inc. +// Copyright (c) 2020 Uber Technologies, Inc. 
// // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -28,6 +28,7 @@ import ( "database/sql/driver" "errors" "fmt" + "github.com/apache/thrift/lib/go/thrift" ) diff --git a/src/dbnode/generated/thrift/rpc/tchan-rpc.go b/src/dbnode/generated/thrift/rpc/tchan-rpc.go index fd65c45d8e..cf6c94667a 100644 --- a/src/dbnode/generated/thrift/rpc/tchan-rpc.go +++ b/src/dbnode/generated/thrift/rpc/tchan-rpc.go @@ -1,6 +1,6 @@ // @generated Code generated by thrift-gen. Do not modify. -// Copyright (c) 2021 Uber Technologies, Inc. +// Copyright (c) 2020 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal diff --git a/src/m3em/generated/proto/m3em/m3em_mock.go b/src/m3em/generated/proto/m3em/m3em_mock.go new file mode 100644 index 0000000000..24d6d67813 --- /dev/null +++ b/src/m3em/generated/proto/m3em/m3em_mock.go @@ -0,0 +1,556 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/m3db/m3/src/m3em/generated/proto/m3em (interfaces: OperatorClient,Operator_PushFileClient,Operator_PullFileClient,Operator_PullFileServer) + +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package m3em is a generated GoMock package. +package m3em + +import ( + "context" + "reflect" + + "github.com/golang/mock/gomock" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" +) + +// MockOperatorClient is a mock of OperatorClient interface +type MockOperatorClient struct { + ctrl *gomock.Controller + recorder *MockOperatorClientMockRecorder +} + +// MockOperatorClientMockRecorder is the mock recorder for MockOperatorClient +type MockOperatorClientMockRecorder struct { + mock *MockOperatorClient +} + +// NewMockOperatorClient creates a new mock instance +func NewMockOperatorClient(ctrl *gomock.Controller) *MockOperatorClient { + mock := &MockOperatorClient{ctrl: ctrl} + mock.recorder = &MockOperatorClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockOperatorClient) EXPECT() *MockOperatorClientMockRecorder { + return m.recorder +} + +// PullFile mocks base method +func (m *MockOperatorClient) PullFile(arg0 context.Context, arg1 *PullFileRequest, arg2 ...grpc.CallOption) (Operator_PullFileClient, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PullFile", varargs...) 
+ ret0, _ := ret[0].(Operator_PullFileClient) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PullFile indicates an expected call of PullFile +func (mr *MockOperatorClientMockRecorder) PullFile(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PullFile", reflect.TypeOf((*MockOperatorClient)(nil).PullFile), varargs...) +} + +// PushFile mocks base method +func (m *MockOperatorClient) PushFile(arg0 context.Context, arg1 ...grpc.CallOption) (Operator_PushFileClient, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0} + for _, a := range arg1 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PushFile", varargs...) + ret0, _ := ret[0].(Operator_PushFileClient) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PushFile indicates an expected call of PushFile +func (mr *MockOperatorClientMockRecorder) PushFile(arg0 interface{}, arg1 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0}, arg1...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PushFile", reflect.TypeOf((*MockOperatorClient)(nil).PushFile), varargs...) +} + +// Setup mocks base method +func (m *MockOperatorClient) Setup(arg0 context.Context, arg1 *SetupRequest, arg2 ...grpc.CallOption) (*SetupResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "Setup", varargs...) + ret0, _ := ret[0].(*SetupResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Setup indicates an expected call of Setup +func (mr *MockOperatorClientMockRecorder) Setup(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) 
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Setup", reflect.TypeOf((*MockOperatorClient)(nil).Setup), varargs...) +} + +// Start mocks base method +func (m *MockOperatorClient) Start(arg0 context.Context, arg1 *StartRequest, arg2 ...grpc.CallOption) (*StartResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "Start", varargs...) + ret0, _ := ret[0].(*StartResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Start indicates an expected call of Start +func (mr *MockOperatorClientMockRecorder) Start(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockOperatorClient)(nil).Start), varargs...) +} + +// Stop mocks base method +func (m *MockOperatorClient) Stop(arg0 context.Context, arg1 *StopRequest, arg2 ...grpc.CallOption) (*StopResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "Stop", varargs...) + ret0, _ := ret[0].(*StopResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Stop indicates an expected call of Stop +func (mr *MockOperatorClientMockRecorder) Stop(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", reflect.TypeOf((*MockOperatorClient)(nil).Stop), varargs...) 
+} + +// Teardown mocks base method +func (m *MockOperatorClient) Teardown(arg0 context.Context, arg1 *TeardownRequest, arg2 ...grpc.CallOption) (*TeardownResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "Teardown", varargs...) + ret0, _ := ret[0].(*TeardownResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Teardown indicates an expected call of Teardown +func (mr *MockOperatorClientMockRecorder) Teardown(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Teardown", reflect.TypeOf((*MockOperatorClient)(nil).Teardown), varargs...) +} + +// MockOperator_PushFileClient is a mock of Operator_PushFileClient interface +type MockOperator_PushFileClient struct { + ctrl *gomock.Controller + recorder *MockOperator_PushFileClientMockRecorder +} + +// MockOperator_PushFileClientMockRecorder is the mock recorder for MockOperator_PushFileClient +type MockOperator_PushFileClientMockRecorder struct { + mock *MockOperator_PushFileClient +} + +// NewMockOperator_PushFileClient creates a new mock instance +func NewMockOperator_PushFileClient(ctrl *gomock.Controller) *MockOperator_PushFileClient { + mock := &MockOperator_PushFileClient{ctrl: ctrl} + mock.recorder = &MockOperator_PushFileClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockOperator_PushFileClient) EXPECT() *MockOperator_PushFileClientMockRecorder { + return m.recorder +} + +// CloseAndRecv mocks base method +func (m *MockOperator_PushFileClient) CloseAndRecv() (*PushFileResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CloseAndRecv") + ret0, _ := ret[0].(*PushFileResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CloseAndRecv indicates 
an expected call of CloseAndRecv +func (mr *MockOperator_PushFileClientMockRecorder) CloseAndRecv() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseAndRecv", reflect.TypeOf((*MockOperator_PushFileClient)(nil).CloseAndRecv)) +} + +// CloseSend mocks base method +func (m *MockOperator_PushFileClient) CloseSend() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CloseSend") + ret0, _ := ret[0].(error) + return ret0 +} + +// CloseSend indicates an expected call of CloseSend +func (mr *MockOperator_PushFileClientMockRecorder) CloseSend() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseSend", reflect.TypeOf((*MockOperator_PushFileClient)(nil).CloseSend)) +} + +// Context mocks base method +func (m *MockOperator_PushFileClient) Context() context.Context { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Context") + ret0, _ := ret[0].(context.Context) + return ret0 +} + +// Context indicates an expected call of Context +func (mr *MockOperator_PushFileClientMockRecorder) Context() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockOperator_PushFileClient)(nil).Context)) +} + +// Header mocks base method +func (m *MockOperator_PushFileClient) Header() (metadata.MD, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Header") + ret0, _ := ret[0].(metadata.MD) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Header indicates an expected call of Header +func (mr *MockOperator_PushFileClientMockRecorder) Header() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Header", reflect.TypeOf((*MockOperator_PushFileClient)(nil).Header)) +} + +// RecvMsg mocks base method +func (m *MockOperator_PushFileClient) RecvMsg(arg0 interface{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RecvMsg", arg0) + ret0, _ := ret[0].(error) + return ret0 
+} + +// RecvMsg indicates an expected call of RecvMsg +func (mr *MockOperator_PushFileClientMockRecorder) RecvMsg(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockOperator_PushFileClient)(nil).RecvMsg), arg0) +} + +// Send mocks base method +func (m *MockOperator_PushFileClient) Send(arg0 *PushFileRequest) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Send", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Send indicates an expected call of Send +func (mr *MockOperator_PushFileClientMockRecorder) Send(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockOperator_PushFileClient)(nil).Send), arg0) +} + +// SendMsg mocks base method +func (m *MockOperator_PushFileClient) SendMsg(arg0 interface{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SendMsg", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendMsg indicates an expected call of SendMsg +func (mr *MockOperator_PushFileClientMockRecorder) SendMsg(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockOperator_PushFileClient)(nil).SendMsg), arg0) +} + +// Trailer mocks base method +func (m *MockOperator_PushFileClient) Trailer() metadata.MD { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Trailer") + ret0, _ := ret[0].(metadata.MD) + return ret0 +} + +// Trailer indicates an expected call of Trailer +func (mr *MockOperator_PushFileClientMockRecorder) Trailer() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Trailer", reflect.TypeOf((*MockOperator_PushFileClient)(nil).Trailer)) +} + +// MockOperator_PullFileClient is a mock of Operator_PullFileClient interface +type MockOperator_PullFileClient struct { + ctrl *gomock.Controller + recorder 
*MockOperator_PullFileClientMockRecorder +} + +// MockOperator_PullFileClientMockRecorder is the mock recorder for MockOperator_PullFileClient +type MockOperator_PullFileClientMockRecorder struct { + mock *MockOperator_PullFileClient +} + +// NewMockOperator_PullFileClient creates a new mock instance +func NewMockOperator_PullFileClient(ctrl *gomock.Controller) *MockOperator_PullFileClient { + mock := &MockOperator_PullFileClient{ctrl: ctrl} + mock.recorder = &MockOperator_PullFileClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockOperator_PullFileClient) EXPECT() *MockOperator_PullFileClientMockRecorder { + return m.recorder +} + +// CloseSend mocks base method +func (m *MockOperator_PullFileClient) CloseSend() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CloseSend") + ret0, _ := ret[0].(error) + return ret0 +} + +// CloseSend indicates an expected call of CloseSend +func (mr *MockOperator_PullFileClientMockRecorder) CloseSend() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseSend", reflect.TypeOf((*MockOperator_PullFileClient)(nil).CloseSend)) +} + +// Context mocks base method +func (m *MockOperator_PullFileClient) Context() context.Context { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Context") + ret0, _ := ret[0].(context.Context) + return ret0 +} + +// Context indicates an expected call of Context +func (mr *MockOperator_PullFileClientMockRecorder) Context() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockOperator_PullFileClient)(nil).Context)) +} + +// Header mocks base method +func (m *MockOperator_PullFileClient) Header() (metadata.MD, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Header") + ret0, _ := ret[0].(metadata.MD) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Header indicates an expected call of Header 
+func (mr *MockOperator_PullFileClientMockRecorder) Header() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Header", reflect.TypeOf((*MockOperator_PullFileClient)(nil).Header)) +} + +// Recv mocks base method +func (m *MockOperator_PullFileClient) Recv() (*PullFileResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Recv") + ret0, _ := ret[0].(*PullFileResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Recv indicates an expected call of Recv +func (mr *MockOperator_PullFileClientMockRecorder) Recv() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Recv", reflect.TypeOf((*MockOperator_PullFileClient)(nil).Recv)) +} + +// RecvMsg mocks base method +func (m *MockOperator_PullFileClient) RecvMsg(arg0 interface{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RecvMsg", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// RecvMsg indicates an expected call of RecvMsg +func (mr *MockOperator_PullFileClientMockRecorder) RecvMsg(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockOperator_PullFileClient)(nil).RecvMsg), arg0) +} + +// SendMsg mocks base method +func (m *MockOperator_PullFileClient) SendMsg(arg0 interface{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SendMsg", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendMsg indicates an expected call of SendMsg +func (mr *MockOperator_PullFileClientMockRecorder) SendMsg(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockOperator_PullFileClient)(nil).SendMsg), arg0) +} + +// Trailer mocks base method +func (m *MockOperator_PullFileClient) Trailer() metadata.MD { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Trailer") + ret0, _ := ret[0].(metadata.MD) + return ret0 +} + +// 
Trailer indicates an expected call of Trailer +func (mr *MockOperator_PullFileClientMockRecorder) Trailer() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Trailer", reflect.TypeOf((*MockOperator_PullFileClient)(nil).Trailer)) +} + +// MockOperator_PullFileServer is a mock of Operator_PullFileServer interface +type MockOperator_PullFileServer struct { + ctrl *gomock.Controller + recorder *MockOperator_PullFileServerMockRecorder +} + +// MockOperator_PullFileServerMockRecorder is the mock recorder for MockOperator_PullFileServer +type MockOperator_PullFileServerMockRecorder struct { + mock *MockOperator_PullFileServer +} + +// NewMockOperator_PullFileServer creates a new mock instance +func NewMockOperator_PullFileServer(ctrl *gomock.Controller) *MockOperator_PullFileServer { + mock := &MockOperator_PullFileServer{ctrl: ctrl} + mock.recorder = &MockOperator_PullFileServerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockOperator_PullFileServer) EXPECT() *MockOperator_PullFileServerMockRecorder { + return m.recorder +} + +// Context mocks base method +func (m *MockOperator_PullFileServer) Context() context.Context { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Context") + ret0, _ := ret[0].(context.Context) + return ret0 +} + +// Context indicates an expected call of Context +func (mr *MockOperator_PullFileServerMockRecorder) Context() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockOperator_PullFileServer)(nil).Context)) +} + +// RecvMsg mocks base method +func (m *MockOperator_PullFileServer) RecvMsg(arg0 interface{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RecvMsg", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// RecvMsg indicates an expected call of RecvMsg +func (mr *MockOperator_PullFileServerMockRecorder) RecvMsg(arg0 interface{}) 
*gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockOperator_PullFileServer)(nil).RecvMsg), arg0) +} + +// Send mocks base method +func (m *MockOperator_PullFileServer) Send(arg0 *PullFileResponse) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Send", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Send indicates an expected call of Send +func (mr *MockOperator_PullFileServerMockRecorder) Send(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockOperator_PullFileServer)(nil).Send), arg0) +} + +// SendHeader mocks base method +func (m *MockOperator_PullFileServer) SendHeader(arg0 metadata.MD) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SendHeader", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendHeader indicates an expected call of SendHeader +func (mr *MockOperator_PullFileServerMockRecorder) SendHeader(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendHeader", reflect.TypeOf((*MockOperator_PullFileServer)(nil).SendHeader), arg0) +} + +// SendMsg mocks base method +func (m *MockOperator_PullFileServer) SendMsg(arg0 interface{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SendMsg", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendMsg indicates an expected call of SendMsg +func (mr *MockOperator_PullFileServerMockRecorder) SendMsg(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockOperator_PullFileServer)(nil).SendMsg), arg0) +} + +// SetHeader mocks base method +func (m *MockOperator_PullFileServer) SetHeader(arg0 metadata.MD) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetHeader", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetHeader indicates an expected 
call of SetHeader +func (mr *MockOperator_PullFileServerMockRecorder) SetHeader(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHeader", reflect.TypeOf((*MockOperator_PullFileServer)(nil).SetHeader), arg0) +} + +// SetTrailer mocks base method +func (m *MockOperator_PullFileServer) SetTrailer(arg0 metadata.MD) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetTrailer", arg0) +} + +// SetTrailer indicates an expected call of SetTrailer +func (mr *MockOperator_PullFileServerMockRecorder) SetTrailer(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTrailer", reflect.TypeOf((*MockOperator_PullFileServer)(nil).SetTrailer), arg0) +} From 2dbef2cf9f8abcbfc0a089de49ac8573a0745aae Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Mon, 18 Jan 2021 11:38:45 -0500 Subject: [PATCH 27/80] Fix gen --- src/dbnode/generated/thrift/rpc/rpc.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/dbnode/generated/thrift/rpc/rpc.go b/src/dbnode/generated/thrift/rpc/rpc.go index 5b51ff530d..cacba6df69 100644 --- a/src/dbnode/generated/thrift/rpc/rpc.go +++ b/src/dbnode/generated/thrift/rpc/rpc.go @@ -1,4 +1,4 @@ -// Copyright (c) 2020 Uber Technologies, Inc. +// Copyright (c) 2021 Uber Technologies, Inc. 
// // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal From fb292677dc397c65a9ca16452998e94f59d7e68d Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Mon, 18 Jan 2021 11:46:18 -0500 Subject: [PATCH 28/80] Gen --- src/dbnode/generated/thrift/rpc/rpc.go | 1 - 1 file changed, 1 deletion(-) diff --git a/src/dbnode/generated/thrift/rpc/rpc.go b/src/dbnode/generated/thrift/rpc/rpc.go index cacba6df69..ea8421cec3 100644 --- a/src/dbnode/generated/thrift/rpc/rpc.go +++ b/src/dbnode/generated/thrift/rpc/rpc.go @@ -28,7 +28,6 @@ import ( "database/sql/driver" "errors" "fmt" - "github.com/apache/thrift/lib/go/thrift" ) From 90bef8a97037bbd8978ae9e543d14d439455e375 Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Mon, 18 Jan 2021 11:53:33 -0500 Subject: [PATCH 29/80] Lint --- src/dbnode/server/server.go | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/src/dbnode/server/server.go b/src/dbnode/server/server.go index 8c4a7d66d0..99fb1ac14f 100644 --- a/src/dbnode/server/server.go +++ b/src/dbnode/server/server.go @@ -1181,16 +1181,11 @@ func kvWatchQueryLimit( if err == nil { options = parseLookbackLimitOptions(logger, kvName, protoValue.Value, options) } + } else if errors.Is(err, kv.ErrNotFound) { + logger.Warn("error resolving encoder per block limit", zap.Error(err)) } - if err != nil { - if errors.Is(err, kv.ErrNotFound) { - logger.Warn("error resolving encoder per block limit", zap.Error(err)) - } - } - - err = limit.Update(options) - if err != nil { + if err := limit.Update(options); err != nil { logger.Warn("unable to set query limit", zap.Error(err), zap.String("name", kvName)) } @@ -1212,8 +1207,7 @@ func kvWatchQueryLimit( value = parseLookbackLimitOptions(logger, kvName, protoValue.Value, value) } - err = limit.Update(value) - if err != nil { + if err := limit.Update(value); err != nil { logger.Warn("unable to set query limit", 
zap.Error(err), zap.String("name", kvName)) } } @@ -1223,7 +1217,8 @@ func kvWatchQueryLimit( func parseLookbackLimitOptions(logger *zap.Logger, kvName string, val string, - defaultOpts limits.LookbackLimitOptions) limits.LookbackLimitOptions { + defaultOpts limits.LookbackLimitOptions, +) limits.LookbackLimitOptions { parts := strings.Split(val, ",") if val == "" { defaultOpts.Limit = nil From d3747ec79f8eb1c607eaf30c12ac164a4adee4af Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Mon, 18 Jan 2021 12:17:48 -0500 Subject: [PATCH 30/80] Fix tests --- src/dbnode/storage/limits/query_limits.go | 1 - .../storage/limits/query_limits_test.go | 36 ++++++++++++------- 2 files changed, 24 insertions(+), 13 deletions(-) diff --git a/src/dbnode/storage/limits/query_limits.go b/src/dbnode/storage/limits/query_limits.go index a9dc593d44..d5049cc8cc 100644 --- a/src/dbnode/storage/limits/query_limits.go +++ b/src/dbnode/storage/limits/query_limits.go @@ -43,7 +43,6 @@ type queryLimits struct { type lookbackLimit struct { name string - limit *int64 options LookbackLimitOptions metrics lookbackLimitMetrics recent *atomic.Int64 diff --git a/src/dbnode/storage/limits/query_limits_test.go b/src/dbnode/storage/limits/query_limits_test.go index 8f58e6a80f..9bd198e202 100644 --- a/src/dbnode/storage/limits/query_limits_test.go +++ b/src/dbnode/storage/limits/query_limits_test.go @@ -92,7 +92,8 @@ func TestLookbackLimit(t *testing.T) { name string limit *int64 }{ - {name: "no limit", limit: prt(0)}, + {name: "no limit", limit: nil}, + {name: "zero limit", limit: prt(0)}, {name: "limit", limit: prt(5)}, } { t.Run(test.name, func(t *testing.T) { @@ -106,24 +107,30 @@ func TestLookbackLimit(t *testing.T) { limit := newLookbackLimit(iOpts, opts, name, &sourceLoggerBuilder{}) require.Equal(t, int64(0), limit.current()) + + var exceededCount int64 err := limit.exceeded() - require.NoError(t, err) + if test.limit == nil || *test.limit > 0 { + require.NoError(t, err) + } else { + 
require.Error(t, err) + exceededCount++ + } // Validate ascending while checking limits. - var exceededCount int64 - exceededCount += verifyLimit(t, limit, 3, limit.limit) + exceededCount += verifyLimit(t, limit, 3, test.limit) require.Equal(t, int64(3), limit.current()) verifyMetrics(t, scope, name, 3, 0, 3, exceededCount) - exceededCount += verifyLimit(t, limit, 2, limit.limit) + exceededCount += verifyLimit(t, limit, 2, test.limit) require.Equal(t, int64(5), limit.current()) verifyMetrics(t, scope, name, 5, 0, 5, exceededCount) - exceededCount += verifyLimit(t, limit, 1, limit.limit) + exceededCount += verifyLimit(t, limit, 1, test.limit) require.Equal(t, int64(6), limit.current()) verifyMetrics(t, scope, name, 6, 0, 6, exceededCount) - exceededCount += verifyLimit(t, limit, 4, limit.limit) + exceededCount += verifyLimit(t, limit, 4, test.limit) require.Equal(t, int64(10), limit.current()) verifyMetrics(t, scope, name, 10, 0, 10, exceededCount) @@ -133,11 +140,11 @@ func TestLookbackLimit(t *testing.T) { verifyMetrics(t, scope, name, 0, 10, 10, exceededCount) // Validate ascending again post-reset. 
- exceededCount += verifyLimit(t, limit, 2, limit.limit) + exceededCount += verifyLimit(t, limit, 2, test.limit) require.Equal(t, int64(2), limit.current()) verifyMetrics(t, scope, name, 2, 10, 12, exceededCount) - exceededCount += verifyLimit(t, limit, 5, limit.limit) + exceededCount += verifyLimit(t, limit, 5, test.limit) require.Equal(t, int64(7), limit.current()) verifyMetrics(t, scope, name, 7, 10, 17, exceededCount) @@ -182,7 +189,9 @@ func TestLookbackLimit(t *testing.T) { func verifyLimit(t *testing.T, limit *lookbackLimit, inc int, expectedLimit *int64) int64 { var exceededCount int64 err := limit.Inc(inc, nil) - if expectedLimit == nil || limit.current() < *expectedLimit { + if expectedLimit == nil { + require.NoError(t, err) + } else if limit.current() < *expectedLimit && *expectedLimit != 0 { require.NoError(t, err) } else { require.Error(t, err) @@ -190,8 +199,11 @@ func verifyLimit(t *testing.T, limit *lookbackLimit, inc int, expectedLimit *int require.True(t, IsQueryLimitExceededError(err)) exceededCount++ } + err = limit.exceeded() - if expectedLimit == nil || limit.current() < *expectedLimit { + if expectedLimit == nil { + require.NoError(t, err) + } else if limit.current() < *expectedLimit && *expectedLimit != 0 { require.NoError(t, err) } else { require.Error(t, err) @@ -317,7 +329,7 @@ func TestSourceLogger(t *testing.T) { scope = tally.NewTestScope("test", nil) iOpts = instrument.NewOptions().SetMetricsScope(scope) noLimit = LookbackLimitOptions{ - Limit: prt(0), + Limit: nil, Lookback: time.Millisecond * 100, } From 053739b06c7bd47153a76833583fad8341fbebec Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Mon, 18 Jan 2021 12:19:53 -0500 Subject: [PATCH 31/80] Rebased --- src/dbnode/storage/limits/query_limits_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/dbnode/storage/limits/query_limits_test.go b/src/dbnode/storage/limits/query_limits_test.go index a22745a31d..e3f1d5827e 100644 --- 
a/src/dbnode/storage/limits/query_limits_test.go +++ b/src/dbnode/storage/limits/query_limits_test.go @@ -58,7 +58,7 @@ func TestQueryLimits(t *testing.T) { Lookback: time.Second, } seriesOpts := LookbackLimitOptions{ - Limit: 1, + Limit: &l, Lookback: time.Second, } opts := testQueryLimitOptions(docOpts, bytesOpts, seriesOpts, instrument.NewOptions()) From 2272d5ebf8fc1a7e832d6905b2dfb9307d7aa136 Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Mon, 18 Jan 2021 12:24:57 -0500 Subject: [PATCH 32/80] Rebased 2 --- src/dbnode/kvconfig/keys.go | 8 ++++++-- src/dbnode/server/server.go | 7 +++++-- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/src/dbnode/kvconfig/keys.go b/src/dbnode/kvconfig/keys.go index 3145cc89df..feec213693 100644 --- a/src/dbnode/kvconfig/keys.go +++ b/src/dbnode/kvconfig/keys.go @@ -54,7 +54,11 @@ const ( // Settings in string form "{limit},{lookback}", e.g. "1000,15s". DocsLimit = "m3db.limits.docs" - // BytesReadLimit is the KV config key for the bytes read query limit. + // DiskBytesReadLimit is the KV config key for the disk bytes read query limit. // Settings in string form "{limit},{lookback}", e.g. "1000,15s". - BytesReadLimit = "m3db.limits.bytes-read" + DiskBytesReadLimit = "m3db.limits.disk-bytes-read" + + // DiskSeriesReadLimit is the KV config key for the disk series read query limit. + // Settings in string form "{limit},{lookback}", e.g. "1000,15s". 
+ DiskSeriesReadLimit = "m3db.limits.disk-series-read" ) diff --git a/src/dbnode/server/server.go b/src/dbnode/server/server.go index 71fd650d62..389b4491de 100644 --- a/src/dbnode/server/server.go +++ b/src/dbnode/server/server.go @@ -465,7 +465,9 @@ func Run(runOpts RunOptions) { bytesReadLimit.Lookback = limitConfig.Lookback } if limitConfig := runOpts.Config.Limits.MaxRecentlyQueriedSeriesDiskRead; limitConfig != nil { - diskSeriesReadLimit.Limit = limitConfig.Value + if limitConfig.Value != 0 { + diskSeriesReadLimit.Limit = &limitConfig.Value + } diskSeriesReadLimit.Lookback = limitConfig.Lookback } limitOpts := limits.NewOptions(). @@ -999,7 +1001,8 @@ func Run(runOpts RunOptions) { kvWatchEncodersPerBlockLimit(syncCfg.KVStore, logger, runtimeOptsMgr, cfg.Limits.MaxEncodersPerBlock) kvWatchQueryLimit(syncCfg.KVStore, logger, queryLimits.DocsLimit(), kvconfig.DocsLimit) - kvWatchQueryLimit(syncCfg.KVStore, logger, queryLimits.BytesReadLimit(), kvconfig.BytesReadLimit) + kvWatchQueryLimit(syncCfg.KVStore, logger, queryLimits.DiskSeriesReadLimit(), kvconfig.DiskBytesReadLimit) + kvWatchQueryLimit(syncCfg.KVStore, logger, queryLimits.BytesReadLimit(), kvconfig.DiskSeriesReadLimit) }() // Wait for process interrupt. From dddecfb5200f5e94b752792c1c10f191dee56c6d Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Mon, 18 Jan 2021 13:02:24 -0500 Subject: [PATCH 33/80] Test fix --- src/dbnode/persist/fs/retriever_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/dbnode/persist/fs/retriever_test.go b/src/dbnode/persist/fs/retriever_test.go index 54d57c0947..f1fc631a5a 100644 --- a/src/dbnode/persist/fs/retriever_test.go +++ b/src/dbnode/persist/fs/retriever_test.go @@ -805,12 +805,13 @@ func TestBlockRetrieverHandlesSeekByIndexEntryErrors(t *testing.T) { func TestLimitSeriesReadFromDisk(t *testing.T) { scope := tally.NewTestScope("test", nil) + limit := int64(1) limitOpts := limits.NewOptions(). 
SetInstrumentOptions(instrument.NewOptions().SetMetricsScope(scope)). SetBytesReadLimitOpts(limits.DefaultLookbackLimitOptions()). SetDocsLimitOpts(limits.DefaultLookbackLimitOptions()). SetDiskSeriesReadLimitOpts(limits.LookbackLimitOptions{ - Limit: 1, + Limit: &limit, Lookback: time.Second * 1, }) queryLimits, err := limits.NewQueryLimits(limitOpts) From 0bc43065d6a8669a90e42f651fa348f5cc80dfc0 Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Mon, 18 Jan 2021 13:29:15 -0500 Subject: [PATCH 34/80] Lint --- src/dbnode/storage/limits/query_limits_test.go | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/src/dbnode/storage/limits/query_limits_test.go b/src/dbnode/storage/limits/query_limits_test.go index e3f1d5827e..85c6873f15 100644 --- a/src/dbnode/storage/limits/query_limits_test.go +++ b/src/dbnode/storage/limits/query_limits_test.go @@ -211,9 +211,7 @@ func TestLookbackLimit(t *testing.T) { func verifyLimit(t *testing.T, limit *lookbackLimit, inc int, expectedLimit *int64) int64 { var exceededCount int64 err := limit.Inc(inc, nil) - if expectedLimit == nil { - require.NoError(t, err) - } else if limit.current() < *expectedLimit && *expectedLimit != 0 { + if expectedLimit == nil || (limit.current() < *expectedLimit && *expectedLimit != 0) { require.NoError(t, err) } else { require.Error(t, err) @@ -223,9 +221,7 @@ func verifyLimit(t *testing.T, limit *lookbackLimit, inc int, expectedLimit *int } err = limit.exceeded() - if expectedLimit == nil { - require.NoError(t, err) - } else if limit.current() < *expectedLimit && *expectedLimit != 0 { + if expectedLimit == nil || (limit.current() < *expectedLimit && *expectedLimit != 0) { require.NoError(t, err) } else { require.Error(t, err) From 8e249e665312405dc8ffedfa4694940d8d0dcb26 Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Mon, 18 Jan 2021 14:06:14 -0500 Subject: [PATCH 35/80] Test fix 2 --- src/dbnode/persist/fs/retriever_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff 
--git a/src/dbnode/persist/fs/retriever_test.go b/src/dbnode/persist/fs/retriever_test.go index f1fc631a5a..284dc4eaa8 100644 --- a/src/dbnode/persist/fs/retriever_test.go +++ b/src/dbnode/persist/fs/retriever_test.go @@ -805,7 +805,7 @@ func TestBlockRetrieverHandlesSeekByIndexEntryErrors(t *testing.T) { func TestLimitSeriesReadFromDisk(t *testing.T) { scope := tally.NewTestScope("test", nil) - limit := int64(1) + limit := int64(2) limitOpts := limits.NewOptions(). SetInstrumentOptions(instrument.NewOptions().SetMetricsScope(scope)). SetBytesReadLimitOpts(limits.DefaultLookbackLimitOptions()). From c8507c0fd45399fdca3840b8a37d1cb9bc25bfdc Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Mon, 18 Jan 2021 16:22:07 -0500 Subject: [PATCH 36/80] Fix logs --- src/dbnode/server/server.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/dbnode/server/server.go b/src/dbnode/server/server.go index 389b4491de..2a6aab971f 100644 --- a/src/dbnode/server/server.go +++ b/src/dbnode/server/server.go @@ -1191,7 +1191,7 @@ func kvWatchQueryLimit( options = parseLookbackLimitOptions(logger, kvName, protoValue.Value, options) } } else if errors.Is(err, kv.ErrNotFound) { - logger.Warn("error resolving encoder per block limit", zap.Error(err)) + logger.Warn("error resolving query limit", zap.Error(err), zap.String("name", kvName)) } if err := limit.Update(options); err != nil { @@ -1210,7 +1210,7 @@ func kvWatchQueryLimit( value := options if newValue := watch.Get(); newValue != nil { if err := newValue.Unmarshal(protoValue); err != nil { - logger.Warn("unable to parse new encoder per block limit", zap.Error(err)) + logger.Warn("unable to parse new query limit", zap.Error(err), zap.String("name", kvName)) continue } value = parseLookbackLimitOptions(logger, kvName, protoValue.Value, value) From 027aaae089ef6e76d01d301d2d77d431c15f0aeb Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Tue, 19 Jan 2021 11:22:32 -0500 Subject: [PATCH 37/80] Add limit logs and metrics 
--- src/dbnode/storage/limits/query_limits.go | 31 ++++++++++++++++++----- 1 file changed, 25 insertions(+), 6 deletions(-) diff --git a/src/dbnode/storage/limits/query_limits.go b/src/dbnode/storage/limits/query_limits.go index f288ef879c..2152f535c8 100644 --- a/src/dbnode/storage/limits/query_limits.go +++ b/src/dbnode/storage/limits/query_limits.go @@ -30,6 +30,7 @@ import ( "github.com/uber-go/tally" "go.uber.org/atomic" + "go.uber.org/zap" ) const ( @@ -46,6 +47,7 @@ type lookbackLimit struct { name string options LookbackLimitOptions metrics lookbackLimitMetrics + logger *zap.Logger recent *atomic.Int64 ticker *time.Ticker stopCh chan struct{} @@ -53,10 +55,12 @@ type lookbackLimit struct { } type lookbackLimitMetrics struct { - recentCount tally.Gauge - recentMax tally.Gauge - total tally.Counter - exceeded tally.Counter + optionsMax tally.Gauge + optionsLookback tally.Gauge + recentCount tally.Gauge + recentMax tally.Gauge + total tally.Counter + exceeded tally.Counter sourceLogger SourceLogger } @@ -113,6 +117,7 @@ func newLookbackLimit( name: name, options: opts, metrics: newLookbackLimitMetrics(instrumentOpts, name, sourceLoggerBuilder), + logger: instrumentOpts.Logger(), recent: atomic.NewInt64(0), stopCh: make(chan struct{}), } @@ -187,13 +192,20 @@ func (q *lookbackLimit) Update(opts LookbackLimitOptions) error { q.lock.Lock() defer q.lock.Unlock() - loobackUpdated := q.options.Lookback != opts.Lookback + old := q.options q.options = opts - if loobackUpdated { + + // If the lookback changed, replace the background goroutine that manages the periodic resetting. 
+ if q.options.Lookback != old.Lookback { q.stop() q.start() } + q.logger.Info("query limit options updated", + zap.String("name", q.name), + zap.Any("new", opts), + zap.Any("old", old)) + return nil } @@ -245,6 +257,7 @@ func (q *lookbackLimit) checkLimit(recent int64) error { func (q *lookbackLimit) start() { q.ticker = time.NewTicker(q.options.Lookback) go func() { + q.logger.Info("query limit interval started", zap.String("name", q.name)) for { select { case <-q.ticker.C: @@ -255,10 +268,16 @@ func (q *lookbackLimit) start() { } } }() + + q.metrics.optionsMax.Update(float64(*q.options.Limit)) + q.metrics.optionsLookback.Update(q.options.Lookback.Seconds()) } func (q *lookbackLimit) stop() { close(q.stopCh) + q.stopCh = make(chan struct{}) + + q.logger.Info("query limit interval stopped", zap.String("name", q.name)) } func (q *lookbackLimit) current() int64 { From 8b9b602a9bd79922ece9bfce353b15ddb2676b39 Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Tue, 19 Jan 2021 13:56:19 -0500 Subject: [PATCH 38/80] Test reset --- src/dbnode/storage/limits/query_limits.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/dbnode/storage/limits/query_limits.go b/src/dbnode/storage/limits/query_limits.go index 2152f535c8..c3f9dc4591 100644 --- a/src/dbnode/storage/limits/query_limits.go +++ b/src/dbnode/storage/limits/query_limits.go @@ -49,7 +49,6 @@ type lookbackLimit struct { metrics lookbackLimitMetrics logger *zap.Logger recent *atomic.Int64 - ticker *time.Ticker stopCh chan struct{} lock sync.RWMutex } @@ -255,15 +254,16 @@ func (q *lookbackLimit) checkLimit(recent int64) error { } func (q *lookbackLimit) start() { - q.ticker = time.NewTicker(q.options.Lookback) + ticker := time.NewTicker(q.options.Lookback) + ticker.Reset(q.options.Lookback) go func() { q.logger.Info("query limit interval started", zap.String("name", q.name)) for { select { - case <-q.ticker.C: + case <-ticker.C: q.reset() case <-q.stopCh: - q.ticker.Stop() + ticker.Stop() 
return } } From 98efb93645830484d8de8085a06925615c90ca40 Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Tue, 19 Jan 2021 16:32:22 -0500 Subject: [PATCH 39/80] Add etcd kv update endpoint 1 --- go.mod | 1 + src/cluster/generated/proto/kv/kv.pb.go | 326 ++++++++++++++++++ src/cluster/generated/proto/kv/kv.proto | 37 ++ src/query/api/v1/handler/database/kvstore.go | 187 ++++++++++ .../api/v1/handler/database/kvstore_test.go | 61 ++++ 5 files changed, 612 insertions(+) create mode 100644 src/cluster/generated/proto/kv/kv.pb.go create mode 100644 src/cluster/generated/proto/kv/kv.proto create mode 100644 src/query/api/v1/handler/database/kvstore.go create mode 100644 src/query/api/v1/handler/database/kvstore_test.go diff --git a/go.mod b/go.mod index 7cec7ccf03..b223fe63be 100644 --- a/go.mod +++ b/go.mod @@ -110,6 +110,7 @@ require ( golang.org/x/sys v0.0.0-20201009025420-dfb3f7c4e634 golang.org/x/tools v0.0.0-20201013201025-64a9e34f3752 google.golang.org/grpc v1.29.1 + google.golang.org/protobuf v1.23.0 gopkg.in/go-ini/ini.v1 v1.57.0 // indirect gopkg.in/go-playground/assert.v1 v1.2.1 // indirect gopkg.in/go-playground/validator.v9 v9.7.0 diff --git a/src/cluster/generated/proto/kv/kv.pb.go b/src/cluster/generated/proto/kv/kv.pb.go new file mode 100644 index 0000000000..bf32eb3292 --- /dev/null +++ b/src/cluster/generated/proto/kv/kv.pb.go @@ -0,0 +1,326 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/m3db/m3/src/cluster/generated/proto/kvtest/kvtest.proto + +// Copyright (c) 2018 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +/* +Package kvtest is a generated protocol buffer package. + +It is generated from these files: + github.com/m3db/m3/src/cluster/generated/proto/kvtest/kvtest.proto + +It has these top-level messages: + Foo +*/ +package kvtest + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type Foo struct { + Msg string `protobuf:"bytes,1,opt,name=msg,proto3" json:"msg,omitempty"` +} + +func (m *Foo) Reset() { *m = Foo{} } +func (m *Foo) String() string { return proto.CompactTextString(m) } +func (*Foo) ProtoMessage() {} +func (*Foo) Descriptor() ([]byte, []int) { return fileDescriptorKvtest, []int{0} } + +func (m *Foo) GetMsg() string { + if m != nil { + return m.Msg + } + return "" +} + +func init() { + proto.RegisterType((*Foo)(nil), "kvtest.Foo") +} +func (m *Foo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Foo) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Msg) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintKvtest(dAtA, i, uint64(len(m.Msg))) + i += copy(dAtA[i:], m.Msg) + } + return i, nil +} + +func encodeVarintKvtest(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Foo) Size() (n int) { + var l int + _ = l + l = len(m.Msg) + if l > 0 { + n += 1 + l + sovKvtest(uint64(l)) + } + return n +} + +func sovKvtest(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozKvtest(x uint64) (n int) { + return sovKvtest(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Foo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvtest + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + 
return fmt.Errorf("proto: Foo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Foo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvtest + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthKvtest + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Msg = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipKvtest(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthKvtest + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipKvtest(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowKvtest + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowKvtest + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowKvtest + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + 
} + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthKvtest + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowKvtest + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipKvtest(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthKvtest = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowKvtest = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("github.com/m3db/m3/src/cluster/generated/proto/kvtest/kvtest.proto", fileDescriptorKvtest) +} + +var fileDescriptorKvtest = []byte{ + // 134 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x4a, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0xcf, 0x35, 0x4e, 0x49, 0xd2, 0xcf, 0x35, 0xd6, 0x2f, + 0x2e, 0x4a, 0xd6, 0x4f, 0xce, 0x29, 0x2d, 0x2e, 0x49, 0x2d, 0xd2, 0x4f, 0x4f, 0xcd, 0x4b, 0x2d, + 0x4a, 0x2c, 0x49, 0x4d, 0xd1, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0xd7, 0xcf, 0x2e, 0x2b, 0x49, 0x2d, + 0x2e, 0x81, 0x52, 0x7a, 0x60, 0x31, 0x21, 0x36, 0x08, 0x4f, 0x49, 0x9c, 0x8b, 0xd9, 0x2d, 0x3f, + 0x5f, 0x48, 0x80, 0x8b, 0x39, 0xb7, 0x38, 0x5d, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, 0x08, 0xc4, + 0x74, 0x12, 0x38, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0x27, + 0x3c, 0x96, 0x63, 0x48, 0x62, 
0x03, 0xeb, 0x34, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xf6, 0x63, + 0xbf, 0x2b, 0x7f, 0x00, 0x00, 0x00, +} diff --git a/src/cluster/generated/proto/kv/kv.proto b/src/cluster/generated/proto/kv/kv.proto new file mode 100644 index 0000000000..33af6f43ea --- /dev/null +++ b/src/cluster/generated/proto/kv/kv.proto @@ -0,0 +1,37 @@ +// Copyright (c) 2021 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. +syntax = "proto3"; + +package kv; + +message KeyValueUpdate { + string key = 1; + string value = 2; + bool commit = 3; +} + +message KeyValueUpdateResult { + string key = 1; + string old = 2; + string new = 3; +} + + + diff --git a/src/query/api/v1/handler/database/kvstore.go b/src/query/api/v1/handler/database/kvstore.go new file mode 100644 index 0000000000..3c1e70865e --- /dev/null +++ b/src/query/api/v1/handler/database/kvstore.go @@ -0,0 +1,187 @@ +// Copyright (c) 2021 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package database + +import ( + "context" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/jhump/protoreflect/dynamic" + "github.com/m3db/m3/src/cluster/generated/proto/commonpb" + "github.com/m3db/m3/src/cluster/kv" + "github.com/m3db/m3/src/query/api/v1/handler/prometheus/handleroptions" + "github.com/m3db/m3/src/query/api/v1/options" + "github.com/m3db/m3/src/query/storage" + "github.com/m3db/m3/src/query/util/logging" + xerrors "github.com/m3db/m3/src/x/errors" + "github.com/m3db/m3/src/x/instrument" + xhttp "github.com/m3db/m3/src/x/net/http" + "google.golang.org/protobuf/runtime/protoiface" + + "github.com/gogo/protobuf/jsonpb" + "go.uber.org/zap" +) + +const ( + // KeyValueStoreURL is the url to edit key/value configuration values. + KeyValueStoreURL = "/search" + + // KeyValueStoreHTTPMethod is the HTTP method used with this resource. 
+ KeyValueStoreHTTPMethod = http.MethodPost + + defaultLimit = 1000 +) + +// KeyValueUpdate defines an update to a key's value. +type KeyValueUpdate struct { + // Key to update. + Key string `json:"key"` + // Value to update the key to. + Value json.RawMessage `json:"value"` + // Commit, if false, will not persist the update. If true, the + // update will be persisted. Used to test format of inputs. + Commit bool `json:"commit"` +} + +// KeyValueUpdateResult defines the result of an update to a key's value. +type KeyValueUpdateResult struct { + // Key to update. + Key string `json:"new"` + // Old is the value before the update. + Old string `json:"old"` + // New is the value after the update. + New string `json:"new"` + // Version of the key. + Version int `json:"version"` +} + +// KeyValueStoreHandler represents a handler for the search endpoint +type KeyValueStoreHandler struct { + storage storage.Storage + kvStore kv.Store + fetchOptionsBuilder handleroptions.FetchOptionsBuilder + instrumentOpts instrument.Options +} + +// NewKeyValueStoreHandler returns a new instance of handler +func NewKeyValueStoreHandler(opts options.HandlerOptions) (http.Handler, error) { + kvStore, err := opts.ClusterClient().KV() + if err != nil { + return nil, err + } + + m := dynamic.Message{} + err = m.UnmarshalJSON([]byte(`{"key":"foo","value": { "bar" : "baz" }}`)) + + return &KeyValueStoreHandler{ + storage: opts.Storage(), + kvStore: kvStore, + fetchOptionsBuilder: opts.FetchOptionsBuilder(), + instrumentOpts: opts.InstrumentOpts(), + }, nil +} + +func (h *KeyValueStoreHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + logger := logging.WithContext(r.Context(), h.instrumentOpts) + + update, err := h.parseBody(r) + if err != nil { + logger.Error("unable to parse request", zap.Error(err)) + xhttp.WriteError(w, err) + return + } + + results, err := h.update(r.Context(), logger, update) + if err != nil { + logger.Error("kv store error", + zap.Error(err), + 
zap.Any("update", update)) + xhttp.WriteError(w, err) + return + } + + xhttp.WriteJSONResponse(w, results, logger) +} + +func (h *KeyValueStoreHandler) parseBody(r *http.Request) (*KeyValueUpdate, error) { + body, err := ioutil.ReadAll(r.Body) + if err != nil { + return nil, xerrors.NewInvalidParamsError(err) + } + defer r.Body.Close() + + var parsed KeyValueUpdate + if err := json.Unmarshal(body, &parsed); err != nil { + return nil, xerrors.NewInvalidParamsError(err) + } + + return &parsed, nil +} + +func (h *KeyValueStoreHandler) update( + ctx context.Context, + logger *zap.Logger, + update *KeyValueUpdate, +) (*KeyValueUpdateResult, error) { + old, err := h.kvStore.Get(update.Key) + if err != nil && err != kv.ErrNotFound { + return nil, err + } + + oldProto := newKVProtoMessage(update.Key) + if old != nil { + if err := old.Unmarshal(oldProto); err != nil { + // Only log so we can overwrite corrupt existing entries. + logger.Error("cannot unmarshal old kv proto", zap.Error(err), zap.String("key", update.Key)) + } + } + + newProto := newKVProtoMessage(update.Key) + if err := jsonpb.UnmarshalString(string([]byte(update.Value)), newProto); err != nil { + return nil, err + } + + version, err := h.kvStore.Set(update.Key, newProto) + if err != nil { + return nil, err + } + + result := KeyValueUpdateResult{ + Key: update.Key, + Old: oldProto.String(), + New: newProto.String(), + Version: version, + } + + logger.Error("updated kv store", zap.Any("result", result)) + + return &result, nil +} + +func newKVProtoMessage(key string) protoiface.MessageV1 { + switch key { + case "abc": + return &commonpb.StringProto{} + } + return nil +} diff --git a/src/query/api/v1/handler/database/kvstore_test.go b/src/query/api/v1/handler/database/kvstore_test.go new file mode 100644 index 0000000000..407c66af3b --- /dev/null +++ b/src/query/api/v1/handler/database/kvstore_test.go @@ -0,0 +1,61 @@ +// Copyright (c) 2018 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package database + +import ( + "encoding/json" + "fmt" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/jhump/protoreflect/dynamic" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/types/known/structpb" +) + +func TestDynamic(t *testing.T) { + m := dynamic.Message{} + err := m.UnmarshalJSON([]byte(`{"key":"foo"}`)) + fmt.Println(m) + fmt.Println(err) +} + +func TestDynamic2(t *testing.T) { + m := map[string]interface{}{ + "foo": "bar", + "baz": 123, + } + b, err := json.Marshal(m) + require.NoError(t, err) + fmt.Println(string(b)) + s := &structpb.Struct{} + err = protojson.Unmarshal([]byte(`{"key":"foo"}`), s) + require.NoError(t, err) + fmt.Println(m) + fmt.Println(b) + fmt.Println(s) + + // v := &commonpb.StringProto{} + // err = protojson.Unmarshal([]byte(`{"value":"foo"}`), v) + // require.NoError(t, err) + // fmt.Println(v) +} From 6be4a30d54d88e133af128f94e24ecda2c42608e Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Tue, 19 Jan 2021 16:46:37 -0500 Subject: [PATCH 40/80] Add etcd kv update endpoint 2 --- src/cluster/generated/proto/kv/kv.pb.go | 326 ----------- src/cluster/generated/proto/kv/kv.proto | 37 -- src/dbnode/kvconfig/keys.go | 13 +- src/m3em/generated/proto/m3em/m3em_mock.go | 556 ------------------- src/query/api/v1/handler/database/kvstore.go | 13 +- 5 files changed, 13 insertions(+), 932 deletions(-) delete mode 100644 src/cluster/generated/proto/kv/kv.pb.go delete mode 100644 src/cluster/generated/proto/kv/kv.proto delete mode 100644 src/m3em/generated/proto/m3em/m3em_mock.go diff --git a/src/cluster/generated/proto/kv/kv.pb.go b/src/cluster/generated/proto/kv/kv.pb.go deleted file mode 100644 index bf32eb3292..0000000000 --- a/src/cluster/generated/proto/kv/kv.pb.go +++ /dev/null @@ -1,326 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: github.com/m3db/m3/src/cluster/generated/proto/kvtest/kvtest.proto - -// Copyright (c) 2018 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -/* -Package kvtest is a generated protocol buffer package. - -It is generated from these files: - github.com/m3db/m3/src/cluster/generated/proto/kvtest/kvtest.proto - -It has these top-level messages: - Foo -*/ -package kvtest - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -type Foo struct { - Msg string `protobuf:"bytes,1,opt,name=msg,proto3" json:"msg,omitempty"` -} - -func (m *Foo) Reset() { *m = Foo{} } -func (m *Foo) String() string { return proto.CompactTextString(m) } -func (*Foo) ProtoMessage() {} -func (*Foo) Descriptor() ([]byte, []int) { return fileDescriptorKvtest, []int{0} } - -func (m *Foo) GetMsg() string { - if m != nil { - return m.Msg - } - return "" -} - -func init() { - proto.RegisterType((*Foo)(nil), "kvtest.Foo") -} -func (m *Foo) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Foo) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Msg) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintKvtest(dAtA, i, uint64(len(m.Msg))) - i += copy(dAtA[i:], m.Msg) - } - return i, nil -} - -func encodeVarintKvtest(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *Foo) Size() (n int) { - var l int - _ = l - l = len(m.Msg) - if l > 0 { - n += 1 + l + sovKvtest(uint64(l)) - } - return n -} - -func sovKvtest(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozKvtest(x uint64) (n int) { - return sovKvtest(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Foo) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowKvtest - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - 
return fmt.Errorf("proto: Foo: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Foo: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowKvtest - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthKvtest - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Msg = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipKvtest(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthKvtest - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipKvtest(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowKvtest - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowKvtest - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowKvtest - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - 
} - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthKvtest - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowKvtest - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipKvtest(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthKvtest = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowKvtest = fmt.Errorf("proto: integer overflow") -) - -func init() { - proto.RegisterFile("github.com/m3db/m3/src/cluster/generated/proto/kvtest/kvtest.proto", fileDescriptorKvtest) -} - -var fileDescriptorKvtest = []byte{ - // 134 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x4a, 0xcf, 0x2c, 0xc9, - 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0xcf, 0x35, 0x4e, 0x49, 0xd2, 0xcf, 0x35, 0xd6, 0x2f, - 0x2e, 0x4a, 0xd6, 0x4f, 0xce, 0x29, 0x2d, 0x2e, 0x49, 0x2d, 0xd2, 0x4f, 0x4f, 0xcd, 0x4b, 0x2d, - 0x4a, 0x2c, 0x49, 0x4d, 0xd1, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0xd7, 0xcf, 0x2e, 0x2b, 0x49, 0x2d, - 0x2e, 0x81, 0x52, 0x7a, 0x60, 0x31, 0x21, 0x36, 0x08, 0x4f, 0x49, 0x9c, 0x8b, 0xd9, 0x2d, 0x3f, - 0x5f, 0x48, 0x80, 0x8b, 0x39, 0xb7, 0x38, 0x5d, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, 0x08, 0xc4, - 0x74, 0x12, 0x38, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0x27, - 0x3c, 0x96, 0x63, 0x48, 0x62, 
0x03, 0xeb, 0x34, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xf6, 0x63, - 0xbf, 0x2b, 0x7f, 0x00, 0x00, 0x00, -} diff --git a/src/cluster/generated/proto/kv/kv.proto b/src/cluster/generated/proto/kv/kv.proto deleted file mode 100644 index 33af6f43ea..0000000000 --- a/src/cluster/generated/proto/kv/kv.proto +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright (c) 2021 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
-syntax = "proto3"; - -package kv; - -message KeyValueUpdate { - string key = 1; - string value = 2; - bool commit = 3; -} - -message KeyValueUpdateResult { - string key = 1; - string old = 2; - string new = 3; -} - - - diff --git a/src/dbnode/kvconfig/keys.go b/src/dbnode/kvconfig/keys.go index feec213693..92c5d27292 100644 --- a/src/dbnode/kvconfig/keys.go +++ b/src/dbnode/kvconfig/keys.go @@ -50,15 +50,6 @@ const ( // configuration specifying the client write consistency level ClientWriteConsistencyLevel = "m3db.client.write-consistency-level" - // DocsLimit is the KV config key for the docs matched query limit. - // Settings in string form "{limit},{lookback}", e.g. "1000,15s". - DocsLimit = "m3db.limits.docs" - - // DiskBytesReadLimit is the KV config key for the disk bytes read query limit. - // Settings in string form "{limit},{lookback}", e.g. "1000,15s". - DiskBytesReadLimit = "m3db.limits.disk-bytes-read" - - // DiskSeriesReadLimit is the KV config key for the disk series read query limit. - // Settings in string form "{limit},{lookback}", e.g. "1000,15s". - DiskSeriesReadLimit = "m3db.limits.disk-series-read" + // QueryLimits is the KV config key for query limits enforced on each dbnode. + QueryLimits = "m3db.querylimits" ) diff --git a/src/m3em/generated/proto/m3em/m3em_mock.go b/src/m3em/generated/proto/m3em/m3em_mock.go deleted file mode 100644 index 24d6d67813..0000000000 --- a/src/m3em/generated/proto/m3em/m3em_mock.go +++ /dev/null @@ -1,556 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/m3db/m3/src/m3em/generated/proto/m3em (interfaces: OperatorClient,Operator_PushFileClient,Operator_PullFileClient,Operator_PullFileServer) - -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Package m3em is a generated GoMock package. 
-package m3em - -import ( - "context" - "reflect" - - "github.com/golang/mock/gomock" - "google.golang.org/grpc" - "google.golang.org/grpc/metadata" -) - -// MockOperatorClient is a mock of OperatorClient interface -type MockOperatorClient struct { - ctrl *gomock.Controller - recorder *MockOperatorClientMockRecorder -} - -// MockOperatorClientMockRecorder is the mock recorder for MockOperatorClient -type MockOperatorClientMockRecorder struct { - mock *MockOperatorClient -} - -// NewMockOperatorClient creates a new mock instance -func NewMockOperatorClient(ctrl *gomock.Controller) *MockOperatorClient { - mock := &MockOperatorClient{ctrl: ctrl} - mock.recorder = &MockOperatorClientMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use -func (m *MockOperatorClient) EXPECT() *MockOperatorClientMockRecorder { - return m.recorder -} - -// PullFile mocks base method -func (m *MockOperatorClient) PullFile(arg0 context.Context, arg1 *PullFileRequest, arg2 ...grpc.CallOption) (Operator_PullFileClient, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PullFile", varargs...) - ret0, _ := ret[0].(Operator_PullFileClient) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PullFile indicates an expected call of PullFile -func (mr *MockOperatorClientMockRecorder) PullFile(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PullFile", reflect.TypeOf((*MockOperatorClient)(nil).PullFile), varargs...) 
-} - -// PushFile mocks base method -func (m *MockOperatorClient) PushFile(arg0 context.Context, arg1 ...grpc.CallOption) (Operator_PushFileClient, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0} - for _, a := range arg1 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PushFile", varargs...) - ret0, _ := ret[0].(Operator_PushFileClient) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PushFile indicates an expected call of PushFile -func (mr *MockOperatorClientMockRecorder) PushFile(arg0 interface{}, arg1 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0}, arg1...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PushFile", reflect.TypeOf((*MockOperatorClient)(nil).PushFile), varargs...) -} - -// Setup mocks base method -func (m *MockOperatorClient) Setup(arg0 context.Context, arg1 *SetupRequest, arg2 ...grpc.CallOption) (*SetupResponse, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "Setup", varargs...) - ret0, _ := ret[0].(*SetupResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Setup indicates an expected call of Setup -func (mr *MockOperatorClientMockRecorder) Setup(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Setup", reflect.TypeOf((*MockOperatorClient)(nil).Setup), varargs...) -} - -// Start mocks base method -func (m *MockOperatorClient) Start(arg0 context.Context, arg1 *StartRequest, arg2 ...grpc.CallOption) (*StartResponse, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "Start", varargs...) 
- ret0, _ := ret[0].(*StartResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Start indicates an expected call of Start -func (mr *MockOperatorClientMockRecorder) Start(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockOperatorClient)(nil).Start), varargs...) -} - -// Stop mocks base method -func (m *MockOperatorClient) Stop(arg0 context.Context, arg1 *StopRequest, arg2 ...grpc.CallOption) (*StopResponse, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "Stop", varargs...) - ret0, _ := ret[0].(*StopResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Stop indicates an expected call of Stop -func (mr *MockOperatorClientMockRecorder) Stop(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", reflect.TypeOf((*MockOperatorClient)(nil).Stop), varargs...) -} - -// Teardown mocks base method -func (m *MockOperatorClient) Teardown(arg0 context.Context, arg1 *TeardownRequest, arg2 ...grpc.CallOption) (*TeardownResponse, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "Teardown", varargs...) - ret0, _ := ret[0].(*TeardownResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Teardown indicates an expected call of Teardown -func (mr *MockOperatorClientMockRecorder) Teardown(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) 
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Teardown", reflect.TypeOf((*MockOperatorClient)(nil).Teardown), varargs...) -} - -// MockOperator_PushFileClient is a mock of Operator_PushFileClient interface -type MockOperator_PushFileClient struct { - ctrl *gomock.Controller - recorder *MockOperator_PushFileClientMockRecorder -} - -// MockOperator_PushFileClientMockRecorder is the mock recorder for MockOperator_PushFileClient -type MockOperator_PushFileClientMockRecorder struct { - mock *MockOperator_PushFileClient -} - -// NewMockOperator_PushFileClient creates a new mock instance -func NewMockOperator_PushFileClient(ctrl *gomock.Controller) *MockOperator_PushFileClient { - mock := &MockOperator_PushFileClient{ctrl: ctrl} - mock.recorder = &MockOperator_PushFileClientMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use -func (m *MockOperator_PushFileClient) EXPECT() *MockOperator_PushFileClientMockRecorder { - return m.recorder -} - -// CloseAndRecv mocks base method -func (m *MockOperator_PushFileClient) CloseAndRecv() (*PushFileResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CloseAndRecv") - ret0, _ := ret[0].(*PushFileResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// CloseAndRecv indicates an expected call of CloseAndRecv -func (mr *MockOperator_PushFileClientMockRecorder) CloseAndRecv() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseAndRecv", reflect.TypeOf((*MockOperator_PushFileClient)(nil).CloseAndRecv)) -} - -// CloseSend mocks base method -func (m *MockOperator_PushFileClient) CloseSend() error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CloseSend") - ret0, _ := ret[0].(error) - return ret0 -} - -// CloseSend indicates an expected call of CloseSend -func (mr *MockOperator_PushFileClientMockRecorder) CloseSend() *gomock.Call { - mr.mock.ctrl.T.Helper() - return 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseSend", reflect.TypeOf((*MockOperator_PushFileClient)(nil).CloseSend)) -} - -// Context mocks base method -func (m *MockOperator_PushFileClient) Context() context.Context { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Context") - ret0, _ := ret[0].(context.Context) - return ret0 -} - -// Context indicates an expected call of Context -func (mr *MockOperator_PushFileClientMockRecorder) Context() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockOperator_PushFileClient)(nil).Context)) -} - -// Header mocks base method -func (m *MockOperator_PushFileClient) Header() (metadata.MD, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Header") - ret0, _ := ret[0].(metadata.MD) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Header indicates an expected call of Header -func (mr *MockOperator_PushFileClientMockRecorder) Header() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Header", reflect.TypeOf((*MockOperator_PushFileClient)(nil).Header)) -} - -// RecvMsg mocks base method -func (m *MockOperator_PushFileClient) RecvMsg(arg0 interface{}) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RecvMsg", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// RecvMsg indicates an expected call of RecvMsg -func (mr *MockOperator_PushFileClientMockRecorder) RecvMsg(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockOperator_PushFileClient)(nil).RecvMsg), arg0) -} - -// Send mocks base method -func (m *MockOperator_PushFileClient) Send(arg0 *PushFileRequest) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Send", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// Send indicates an expected call of Send -func (mr *MockOperator_PushFileClientMockRecorder) Send(arg0 interface{}) 
*gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockOperator_PushFileClient)(nil).Send), arg0) -} - -// SendMsg mocks base method -func (m *MockOperator_PushFileClient) SendMsg(arg0 interface{}) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SendMsg", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// SendMsg indicates an expected call of SendMsg -func (mr *MockOperator_PushFileClientMockRecorder) SendMsg(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockOperator_PushFileClient)(nil).SendMsg), arg0) -} - -// Trailer mocks base method -func (m *MockOperator_PushFileClient) Trailer() metadata.MD { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Trailer") - ret0, _ := ret[0].(metadata.MD) - return ret0 -} - -// Trailer indicates an expected call of Trailer -func (mr *MockOperator_PushFileClientMockRecorder) Trailer() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Trailer", reflect.TypeOf((*MockOperator_PushFileClient)(nil).Trailer)) -} - -// MockOperator_PullFileClient is a mock of Operator_PullFileClient interface -type MockOperator_PullFileClient struct { - ctrl *gomock.Controller - recorder *MockOperator_PullFileClientMockRecorder -} - -// MockOperator_PullFileClientMockRecorder is the mock recorder for MockOperator_PullFileClient -type MockOperator_PullFileClientMockRecorder struct { - mock *MockOperator_PullFileClient -} - -// NewMockOperator_PullFileClient creates a new mock instance -func NewMockOperator_PullFileClient(ctrl *gomock.Controller) *MockOperator_PullFileClient { - mock := &MockOperator_PullFileClient{ctrl: ctrl} - mock.recorder = &MockOperator_PullFileClientMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use -func (m *MockOperator_PullFileClient) EXPECT() 
*MockOperator_PullFileClientMockRecorder { - return m.recorder -} - -// CloseSend mocks base method -func (m *MockOperator_PullFileClient) CloseSend() error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CloseSend") - ret0, _ := ret[0].(error) - return ret0 -} - -// CloseSend indicates an expected call of CloseSend -func (mr *MockOperator_PullFileClientMockRecorder) CloseSend() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseSend", reflect.TypeOf((*MockOperator_PullFileClient)(nil).CloseSend)) -} - -// Context mocks base method -func (m *MockOperator_PullFileClient) Context() context.Context { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Context") - ret0, _ := ret[0].(context.Context) - return ret0 -} - -// Context indicates an expected call of Context -func (mr *MockOperator_PullFileClientMockRecorder) Context() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockOperator_PullFileClient)(nil).Context)) -} - -// Header mocks base method -func (m *MockOperator_PullFileClient) Header() (metadata.MD, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Header") - ret0, _ := ret[0].(metadata.MD) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Header indicates an expected call of Header -func (mr *MockOperator_PullFileClientMockRecorder) Header() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Header", reflect.TypeOf((*MockOperator_PullFileClient)(nil).Header)) -} - -// Recv mocks base method -func (m *MockOperator_PullFileClient) Recv() (*PullFileResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Recv") - ret0, _ := ret[0].(*PullFileResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Recv indicates an expected call of Recv -func (mr *MockOperator_PullFileClientMockRecorder) Recv() *gomock.Call { - mr.mock.ctrl.T.Helper() - return 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Recv", reflect.TypeOf((*MockOperator_PullFileClient)(nil).Recv)) -} - -// RecvMsg mocks base method -func (m *MockOperator_PullFileClient) RecvMsg(arg0 interface{}) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RecvMsg", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// RecvMsg indicates an expected call of RecvMsg -func (mr *MockOperator_PullFileClientMockRecorder) RecvMsg(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockOperator_PullFileClient)(nil).RecvMsg), arg0) -} - -// SendMsg mocks base method -func (m *MockOperator_PullFileClient) SendMsg(arg0 interface{}) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SendMsg", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// SendMsg indicates an expected call of SendMsg -func (mr *MockOperator_PullFileClientMockRecorder) SendMsg(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockOperator_PullFileClient)(nil).SendMsg), arg0) -} - -// Trailer mocks base method -func (m *MockOperator_PullFileClient) Trailer() metadata.MD { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Trailer") - ret0, _ := ret[0].(metadata.MD) - return ret0 -} - -// Trailer indicates an expected call of Trailer -func (mr *MockOperator_PullFileClientMockRecorder) Trailer() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Trailer", reflect.TypeOf((*MockOperator_PullFileClient)(nil).Trailer)) -} - -// MockOperator_PullFileServer is a mock of Operator_PullFileServer interface -type MockOperator_PullFileServer struct { - ctrl *gomock.Controller - recorder *MockOperator_PullFileServerMockRecorder -} - -// MockOperator_PullFileServerMockRecorder is the mock recorder for MockOperator_PullFileServer -type MockOperator_PullFileServerMockRecorder struct { 
- mock *MockOperator_PullFileServer -} - -// NewMockOperator_PullFileServer creates a new mock instance -func NewMockOperator_PullFileServer(ctrl *gomock.Controller) *MockOperator_PullFileServer { - mock := &MockOperator_PullFileServer{ctrl: ctrl} - mock.recorder = &MockOperator_PullFileServerMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use -func (m *MockOperator_PullFileServer) EXPECT() *MockOperator_PullFileServerMockRecorder { - return m.recorder -} - -// Context mocks base method -func (m *MockOperator_PullFileServer) Context() context.Context { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Context") - ret0, _ := ret[0].(context.Context) - return ret0 -} - -// Context indicates an expected call of Context -func (mr *MockOperator_PullFileServerMockRecorder) Context() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockOperator_PullFileServer)(nil).Context)) -} - -// RecvMsg mocks base method -func (m *MockOperator_PullFileServer) RecvMsg(arg0 interface{}) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RecvMsg", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// RecvMsg indicates an expected call of RecvMsg -func (mr *MockOperator_PullFileServerMockRecorder) RecvMsg(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockOperator_PullFileServer)(nil).RecvMsg), arg0) -} - -// Send mocks base method -func (m *MockOperator_PullFileServer) Send(arg0 *PullFileResponse) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Send", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// Send indicates an expected call of Send -func (mr *MockOperator_PullFileServerMockRecorder) Send(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", 
reflect.TypeOf((*MockOperator_PullFileServer)(nil).Send), arg0) -} - -// SendHeader mocks base method -func (m *MockOperator_PullFileServer) SendHeader(arg0 metadata.MD) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SendHeader", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// SendHeader indicates an expected call of SendHeader -func (mr *MockOperator_PullFileServerMockRecorder) SendHeader(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendHeader", reflect.TypeOf((*MockOperator_PullFileServer)(nil).SendHeader), arg0) -} - -// SendMsg mocks base method -func (m *MockOperator_PullFileServer) SendMsg(arg0 interface{}) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SendMsg", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// SendMsg indicates an expected call of SendMsg -func (mr *MockOperator_PullFileServerMockRecorder) SendMsg(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockOperator_PullFileServer)(nil).SendMsg), arg0) -} - -// SetHeader mocks base method -func (m *MockOperator_PullFileServer) SetHeader(arg0 metadata.MD) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetHeader", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// SetHeader indicates an expected call of SetHeader -func (mr *MockOperator_PullFileServerMockRecorder) SetHeader(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHeader", reflect.TypeOf((*MockOperator_PullFileServer)(nil).SetHeader), arg0) -} - -// SetTrailer mocks base method -func (m *MockOperator_PullFileServer) SetTrailer(arg0 metadata.MD) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetTrailer", arg0) -} - -// SetTrailer indicates an expected call of SetTrailer -func (mr *MockOperator_PullFileServerMockRecorder) SetTrailer(arg0 interface{}) *gomock.Call { - 
mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTrailer", reflect.TypeOf((*MockOperator_PullFileServer)(nil).SetTrailer), arg0) -} diff --git a/src/query/api/v1/handler/database/kvstore.go b/src/query/api/v1/handler/database/kvstore.go index 3c1e70865e..41cbcc2eec 100644 --- a/src/query/api/v1/handler/database/kvstore.go +++ b/src/query/api/v1/handler/database/kvstore.go @@ -27,8 +27,9 @@ import ( "net/http" "github.com/jhump/protoreflect/dynamic" - "github.com/m3db/m3/src/cluster/generated/proto/commonpb" + "github.com/m3db/m3/src/cluster/generated/proto/kvpb" "github.com/m3db/m3/src/cluster/kv" + "github.com/m3db/m3/src/dbnode/kvconfig" "github.com/m3db/m3/src/query/api/v1/handler/prometheus/handleroptions" "github.com/m3db/m3/src/query/api/v1/options" "github.com/m3db/m3/src/query/storage" @@ -180,8 +181,16 @@ func (h *KeyValueStoreHandler) update( func newKVProtoMessage(key string) protoiface.MessageV1 { switch key { - case "abc": + case NamespacesKey: + case BootstrapperKey: + case ClusterNewSeriesInsertLimitKey: + case EncodersPerBlockLimitKey: + case ClientBootstrapConsistencyLevel: + case ClientReadConsistencyLevel: + case ClientWriteConsistencyLevel: return &commonpb.StringProto{} + case kvconfig.QueryLimits: + return &kvpb.QueryLimits{} } return nil } From db34398754a432bb473a11be3181a2a602265a12 Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Tue, 19 Jan 2021 16:46:42 -0500 Subject: [PATCH 41/80] Add etcd kv update endpoint 3 --- src/cluster/generated/proto/kvpb/kv.pb.go | 1076 +++++++++++++++++++++ src/cluster/generated/proto/kvpb/kv.proto | 50 + 2 files changed, 1126 insertions(+) create mode 100644 src/cluster/generated/proto/kvpb/kv.pb.go create mode 100644 src/cluster/generated/proto/kvpb/kv.proto diff --git a/src/cluster/generated/proto/kvpb/kv.pb.go b/src/cluster/generated/proto/kvpb/kv.pb.go new file mode 100644 index 0000000000..5c177970b8 --- /dev/null +++ b/src/cluster/generated/proto/kvpb/kv.pb.go @@ -0,0 
+1,1076 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/m3db/m3/src/cluster/generated/proto/kvpb/kv.proto + +// Copyright (c) 2021 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +/* + Package kvpb is a generated protocol buffer package. + + It is generated from these files: + github.com/m3db/m3/src/cluster/generated/proto/kvpb/kv.proto + + It has these top-level messages: + KeyValueUpdate + KeyValueUpdateResult + QueryLimits + QueryLimit +*/ +package kvpb + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/golang/protobuf/ptypes/duration" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type KeyValueUpdate struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + Commit bool `protobuf:"varint,3,opt,name=commit,proto3" json:"commit,omitempty"` +} + +func (m *KeyValueUpdate) Reset() { *m = KeyValueUpdate{} } +func (m *KeyValueUpdate) String() string { return proto.CompactTextString(m) } +func (*KeyValueUpdate) ProtoMessage() {} +func (*KeyValueUpdate) Descriptor() ([]byte, []int) { return fileDescriptorKv, []int{0} } + +func (m *KeyValueUpdate) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *KeyValueUpdate) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +func (m *KeyValueUpdate) GetCommit() bool { + if m != nil { + return m.Commit + } + return false +} + +type KeyValueUpdateResult struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Old string `protobuf:"bytes,2,opt,name=old,proto3" json:"old,omitempty"` + New string `protobuf:"bytes,3,opt,name=new,proto3" json:"new,omitempty"` +} + +func (m *KeyValueUpdateResult) Reset() { *m = KeyValueUpdateResult{} } +func (m *KeyValueUpdateResult) String() string { return proto.CompactTextString(m) } +func (*KeyValueUpdateResult) ProtoMessage() {} +func (*KeyValueUpdateResult) Descriptor() ([]byte, []int) { return fileDescriptorKv, []int{1} } + +func (m *KeyValueUpdateResult) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *KeyValueUpdateResult) GetOld() string { + if m != nil { + return m.Old + } 
+ return "" +} + +func (m *KeyValueUpdateResult) GetNew() string { + if m != nil { + return m.New + } + return "" +} + +type QueryLimits struct { + DocsMatched *QueryLimit `protobuf:"bytes,1,opt,name=DocsMatched" json:"DocsMatched,omitempty"` + SeriesReadFromDisk *QueryLimit `protobuf:"bytes,2,opt,name=SeriesReadFromDisk" json:"SeriesReadFromDisk,omitempty"` + BytesReadFromDisk *QueryLimit `protobuf:"bytes,3,opt,name=BytesReadFromDisk" json:"BytesReadFromDisk,omitempty"` +} + +func (m *QueryLimits) Reset() { *m = QueryLimits{} } +func (m *QueryLimits) String() string { return proto.CompactTextString(m) } +func (*QueryLimits) ProtoMessage() {} +func (*QueryLimits) Descriptor() ([]byte, []int) { return fileDescriptorKv, []int{2} } + +func (m *QueryLimits) GetDocsMatched() *QueryLimit { + if m != nil { + return m.DocsMatched + } + return nil +} + +func (m *QueryLimits) GetSeriesReadFromDisk() *QueryLimit { + if m != nil { + return m.SeriesReadFromDisk + } + return nil +} + +func (m *QueryLimits) GetBytesReadFromDisk() *QueryLimit { + if m != nil { + return m.BytesReadFromDisk + } + return nil +} + +type QueryLimit struct { + Limit int64 `protobuf:"varint,1,opt,name=limit,proto3" json:"limit,omitempty"` + Lookback *google_protobuf.Duration `protobuf:"bytes,2,opt,name=lookback" json:"lookback,omitempty"` +} + +func (m *QueryLimit) Reset() { *m = QueryLimit{} } +func (m *QueryLimit) String() string { return proto.CompactTextString(m) } +func (*QueryLimit) ProtoMessage() {} +func (*QueryLimit) Descriptor() ([]byte, []int) { return fileDescriptorKv, []int{3} } + +func (m *QueryLimit) GetLimit() int64 { + if m != nil { + return m.Limit + } + return 0 +} + +func (m *QueryLimit) GetLookback() *google_protobuf.Duration { + if m != nil { + return m.Lookback + } + return nil +} + +func init() { + proto.RegisterType((*KeyValueUpdate)(nil), "kvpb.KeyValueUpdate") + proto.RegisterType((*KeyValueUpdateResult)(nil), "kvpb.KeyValueUpdateResult") + 
proto.RegisterType((*QueryLimits)(nil), "kvpb.QueryLimits") + proto.RegisterType((*QueryLimit)(nil), "kvpb.QueryLimit") +} +func (m *KeyValueUpdate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KeyValueUpdate) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Key) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintKv(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if len(m.Value) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintKv(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + if m.Commit { + dAtA[i] = 0x18 + i++ + if m.Commit { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *KeyValueUpdateResult) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KeyValueUpdateResult) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Key) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintKv(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if len(m.Old) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintKv(dAtA, i, uint64(len(m.Old))) + i += copy(dAtA[i:], m.Old) + } + if len(m.New) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintKv(dAtA, i, uint64(len(m.New))) + i += copy(dAtA[i:], m.New) + } + return i, nil +} + +func (m *QueryLimits) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryLimits) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.DocsMatched != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintKv(dAtA, i, uint64(m.DocsMatched.Size())) + n1, err := 
m.DocsMatched.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if m.SeriesReadFromDisk != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintKv(dAtA, i, uint64(m.SeriesReadFromDisk.Size())) + n2, err := m.SeriesReadFromDisk.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + if m.BytesReadFromDisk != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintKv(dAtA, i, uint64(m.BytesReadFromDisk.Size())) + n3, err := m.BytesReadFromDisk.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + return i, nil +} + +func (m *QueryLimit) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryLimit) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Limit != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintKv(dAtA, i, uint64(m.Limit)) + } + if m.Lookback != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintKv(dAtA, i, uint64(m.Lookback.Size())) + n4, err := m.Lookback.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + return i, nil +} + +func encodeVarintKv(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *KeyValueUpdate) Size() (n int) { + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovKv(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovKv(uint64(l)) + } + if m.Commit { + n += 2 + } + return n +} + +func (m *KeyValueUpdateResult) Size() (n int) { + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovKv(uint64(l)) + } + l = len(m.Old) + if l > 0 { + n += 1 + l + sovKv(uint64(l)) + } + l = len(m.New) + if l > 0 { + n += 1 + l + sovKv(uint64(l)) + } + return n +} + +func (m *QueryLimits) Size() (n int) { + var l int + _ = l + if m.DocsMatched != nil { + l = 
m.DocsMatched.Size() + n += 1 + l + sovKv(uint64(l)) + } + if m.SeriesReadFromDisk != nil { + l = m.SeriesReadFromDisk.Size() + n += 1 + l + sovKv(uint64(l)) + } + if m.BytesReadFromDisk != nil { + l = m.BytesReadFromDisk.Size() + n += 1 + l + sovKv(uint64(l)) + } + return n +} + +func (m *QueryLimit) Size() (n int) { + var l int + _ = l + if m.Limit != 0 { + n += 1 + sovKv(uint64(m.Limit)) + } + if m.Lookback != nil { + l = m.Lookback.Size() + n += 1 + l + sovKv(uint64(l)) + } + return n +} + +func sovKv(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozKv(x uint64) (n int) { + return sovKv(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *KeyValueUpdate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KeyValueUpdate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KeyValueUpdate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthKv + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 
2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthKv + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Commit = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipKv(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthKv + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KeyValueUpdateResult) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KeyValueUpdateResult: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KeyValueUpdateResult: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum 
{ + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthKv + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Old", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthKv + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Old = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field New", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthKv + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.New = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipKv(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthKv + } + 
if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryLimits) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryLimits: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryLimits: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DocsMatched", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthKv + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DocsMatched == nil { + m.DocsMatched = &QueryLimit{} + } + if err := m.DocsMatched.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SeriesReadFromDisk", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthKv + } + postIndex := iNdEx + msglen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if m.SeriesReadFromDisk == nil { + m.SeriesReadFromDisk = &QueryLimit{} + } + if err := m.SeriesReadFromDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BytesReadFromDisk", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthKv + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BytesReadFromDisk == nil { + m.BytesReadFromDisk = &QueryLimit{} + } + if err := m.BytesReadFromDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipKv(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthKv + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryLimit) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryLimit: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryLimit: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", 
wireType) + } + m.Limit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Limit |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Lookback", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthKv + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Lookback == nil { + m.Lookback = &google_protobuf.Duration{} + } + if err := m.Lookback.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipKv(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthKv + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipKv(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowKv + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowKv + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 
{ + if shift >= 64 { + return 0, ErrIntOverflowKv + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthKv + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowKv + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipKv(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthKv = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowKv = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("github.com/m3db/m3/src/cluster/generated/proto/kvpb/kv.proto", fileDescriptorKv) +} + +var fileDescriptorKv = []byte{ + // 352 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0xcd, 0x4a, 0xf3, 0x40, + 0x14, 0x86, 0xbf, 0x7c, 0xd1, 0xd2, 0x9e, 0x82, 0xc4, 0xa1, 0x48, 0x75, 0x11, 0x4a, 0x56, 0x5d, + 0x65, 0xa0, 0xc5, 0x9d, 0x88, 0x94, 0xe2, 0xc6, 0x0a, 0x3a, 0xa2, 0xe0, 0x32, 0x3f, 0xc7, 0x34, + 0x24, 0xe9, 0x94, 0xc9, 0x4c, 0x25, 0x77, 0xe1, 0x3d, 0xb9, 0x71, 0xe9, 0x25, 0x48, 0xbd, 0x11, + 0x99, 0x49, 0x6b, 0xfd, 0xa9, 0xbb, 0xf7, 0x3d, 0x3c, 0x79, 0x72, 0x92, 0x03, 0x27, 0x49, 0x2a, + 0xa7, 0x2a, 0xf4, 0x23, 0x5e, 0xd0, 0x62, 0x18, 0x87, 0xb4, 0x18, 0xd2, 0x52, 0x44, 0x34, 0xca, + 0x55, 0x29, 0x51, 0xd0, 0x04, 0x67, 0x28, 0x02, 0x89, 
0x31, 0x9d, 0x0b, 0x2e, 0x39, 0xcd, 0x16, + 0xf3, 0x90, 0x66, 0x0b, 0xdf, 0x34, 0xb2, 0xa3, 0xeb, 0x91, 0x9b, 0x70, 0x9e, 0xe4, 0x58, 0x13, + 0xa1, 0x7a, 0xa0, 0xb1, 0x12, 0x81, 0x4c, 0xf9, 0xac, 0xa6, 0xbc, 0x2b, 0xd8, 0xbb, 0xc0, 0xea, + 0x2e, 0xc8, 0x15, 0xde, 0xce, 0xe3, 0x40, 0x22, 0x71, 0xc0, 0xce, 0xb0, 0xea, 0x5a, 0x3d, 0xab, + 0xdf, 0x62, 0x3a, 0x92, 0x0e, 0xec, 0x2e, 0x34, 0xd0, 0xfd, 0x6f, 0x66, 0x75, 0x21, 0x07, 0xd0, + 0x88, 0x78, 0x51, 0xa4, 0xb2, 0x6b, 0xf7, 0xac, 0x7e, 0x93, 0xad, 0x9a, 0x37, 0x81, 0xce, 0x77, + 0x23, 0xc3, 0x52, 0xe5, 0x72, 0x8b, 0xd7, 0x01, 0x9b, 0xe7, 0xf1, 0xca, 0xaa, 0xa3, 0x9e, 0xcc, + 0xf0, 0xd1, 0x08, 0x5b, 0x4c, 0x47, 0xef, 0xd9, 0x82, 0xf6, 0xb5, 0x42, 0x51, 0x4d, 0xd2, 0x22, + 0x95, 0x25, 0x19, 0x40, 0x7b, 0xcc, 0xa3, 0xf2, 0x32, 0x90, 0xd1, 0x14, 0x63, 0x63, 0x6b, 0x0f, + 0x1c, 0x5f, 0x7f, 0xab, 0xbf, 0xe1, 0xd8, 0x57, 0x88, 0x9c, 0x01, 0xb9, 0x41, 0x91, 0x62, 0xc9, + 0x30, 0x88, 0xcf, 0x05, 0x2f, 0xc6, 0x69, 0x99, 0x99, 0xd7, 0x6e, 0x7b, 0x74, 0x0b, 0x4b, 0x4e, + 0x61, 0x7f, 0x54, 0xc9, 0x1f, 0x02, 0xfb, 0x0f, 0xc1, 0x6f, 0xd4, 0xbb, 0x07, 0xd8, 0x00, 0xfa, + 0x7f, 0xe6, 0x3a, 0x98, 0xed, 0x6d, 0x56, 0x17, 0x72, 0x0c, 0xcd, 0x9c, 0xf3, 0x2c, 0x0c, 0xa2, + 0xf5, 0x6e, 0x87, 0x7e, 0x7d, 0x3c, 0x7f, 0x7d, 0x3c, 0x7f, 0xbc, 0x3a, 0x1e, 0xfb, 0x44, 0x47, + 0xce, 0xcb, 0xd2, 0xb5, 0x5e, 0x97, 0xae, 0xf5, 0xb6, 0x74, 0xad, 0xa7, 0x77, 0xf7, 0x5f, 0xd8, + 0x30, 0xf8, 0xf0, 0x23, 0x00, 0x00, 0xff, 0xff, 0xf1, 0x5c, 0x84, 0x87, 0x3f, 0x02, 0x00, 0x00, +} diff --git a/src/cluster/generated/proto/kvpb/kv.proto b/src/cluster/generated/proto/kvpb/kv.proto new file mode 100644 index 0000000000..a22363ecc2 --- /dev/null +++ b/src/cluster/generated/proto/kvpb/kv.proto @@ -0,0 +1,50 @@ +// Copyright (c) 2021 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+syntax = "proto3"; + +package kvpb; + +import "google/protobuf/duration.proto"; + +message KeyValueUpdate { + string key = 1; + string value = 2; + bool commit = 3; +} + +message KeyValueUpdateResult { + string key = 1; + string old = 2; + string new = 3; +} + +message QueryLimits { + QueryLimit DocsMatched = 1; + QueryLimit SeriesReadFromDisk = 2; + QueryLimit BytesReadFromDisk = 3; +} + +message QueryLimit { + int64 limit = 1; + google.protobuf.Duration lookback = 2; +} + + + From 02fcd565e91a6253bc9a2157434868a6102f2990 Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Tue, 19 Jan 2021 17:41:43 -0500 Subject: [PATCH 42/80] Add etcd kv update endpoint 4 --- src/cluster/generated/proto/kvpb/kv.pb.go | 94 ++++++++------------ src/cluster/generated/proto/kvpb/kv.proto | 4 +- src/dbnode/server/server.go | 92 +++++++++---------- src/query/api/v1/handler/database/kvstore.go | 14 +-- 4 files changed, 86 insertions(+), 118 deletions(-) diff --git a/src/cluster/generated/proto/kvpb/kv.pb.go b/src/cluster/generated/proto/kvpb/kv.pb.go index 5c177970b8..47238df397 100644 --- a/src/cluster/generated/proto/kvpb/kv.pb.go +++ b/src/cluster/generated/proto/kvpb/kv.pb.go @@ -38,7 +38,6 @@ package kvpb import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" -import google_protobuf "github.com/golang/protobuf/ptypes/duration" import io "io" @@ -150,8 +149,8 @@ func (m *QueryLimits) GetBytesReadFromDisk() *QueryLimit { } type QueryLimit struct { - Limit int64 `protobuf:"varint,1,opt,name=limit,proto3" json:"limit,omitempty"` - Lookback *google_protobuf.Duration `protobuf:"bytes,2,opt,name=lookback" json:"lookback,omitempty"` + Limit int64 `protobuf:"varint,1,opt,name=limit,proto3" json:"limit,omitempty"` + LookbackSeconds int64 `protobuf:"varint,2,opt,name=lookbackSeconds,proto3" json:"lookbackSeconds,omitempty"` } func (m *QueryLimit) Reset() { *m = QueryLimit{} } @@ -166,11 +165,11 @@ func (m *QueryLimit) GetLimit() int64 { return 0 } -func (m *QueryLimit) 
GetLookback() *google_protobuf.Duration { +func (m *QueryLimit) GetLookbackSeconds() int64 { if m != nil { - return m.Lookback + return m.LookbackSeconds } - return nil + return 0 } func init() { @@ -323,15 +322,10 @@ func (m *QueryLimit) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintKv(dAtA, i, uint64(m.Limit)) } - if m.Lookback != nil { - dAtA[i] = 0x12 + if m.LookbackSeconds != 0 { + dAtA[i] = 0x10 i++ - i = encodeVarintKv(dAtA, i, uint64(m.Lookback.Size())) - n4, err := m.Lookback.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 + i = encodeVarintKv(dAtA, i, uint64(m.LookbackSeconds)) } return i, nil } @@ -404,9 +398,8 @@ func (m *QueryLimit) Size() (n int) { if m.Limit != 0 { n += 1 + sovKv(uint64(m.Limit)) } - if m.Lookback != nil { - l = m.Lookback.Size() - n += 1 + l + sovKv(uint64(l)) + if m.LookbackSeconds != 0 { + n += 1 + sovKv(uint64(m.LookbackSeconds)) } return n } @@ -887,10 +880,10 @@ func (m *QueryLimit) Unmarshal(dAtA []byte) error { } } case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Lookback", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LookbackSeconds", wireType) } - var msglen int + m.LookbackSeconds = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKv @@ -900,25 +893,11 @@ func (m *QueryLimit) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + m.LookbackSeconds |= (int64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthKv - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Lookback == nil { - m.Lookback = &google_protobuf.Duration{} - } - if err := m.Lookback.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipKv(dAtA[iNdEx:]) @@ -1050,27 +1029,26 @@ func init() { } var fileDescriptorKv = []byte{ - // 
352 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0xcd, 0x4a, 0xf3, 0x40, - 0x14, 0x86, 0xbf, 0x7c, 0xd1, 0xd2, 0x9e, 0x82, 0xc4, 0xa1, 0x48, 0x75, 0x11, 0x4a, 0x56, 0x5d, - 0x65, 0xa0, 0xc5, 0x9d, 0x88, 0x94, 0xe2, 0xc6, 0x0a, 0x3a, 0xa2, 0xe0, 0x32, 0x3f, 0xc7, 0x34, - 0x24, 0xe9, 0x94, 0xc9, 0x4c, 0x25, 0x77, 0xe1, 0x3d, 0xb9, 0x71, 0xe9, 0x25, 0x48, 0xbd, 0x11, - 0x99, 0x49, 0x6b, 0xfd, 0xa9, 0xbb, 0xf7, 0x3d, 0x3c, 0x79, 0x72, 0x92, 0x03, 0x27, 0x49, 0x2a, - 0xa7, 0x2a, 0xf4, 0x23, 0x5e, 0xd0, 0x62, 0x18, 0x87, 0xb4, 0x18, 0xd2, 0x52, 0x44, 0x34, 0xca, - 0x55, 0x29, 0x51, 0xd0, 0x04, 0x67, 0x28, 0x02, 0x89, 0x31, 0x9d, 0x0b, 0x2e, 0x39, 0xcd, 0x16, - 0xf3, 0x90, 0x66, 0x0b, 0xdf, 0x34, 0xb2, 0xa3, 0xeb, 0x91, 0x9b, 0x70, 0x9e, 0xe4, 0x58, 0x13, - 0xa1, 0x7a, 0xa0, 0xb1, 0x12, 0x81, 0x4c, 0xf9, 0xac, 0xa6, 0xbc, 0x2b, 0xd8, 0xbb, 0xc0, 0xea, - 0x2e, 0xc8, 0x15, 0xde, 0xce, 0xe3, 0x40, 0x22, 0x71, 0xc0, 0xce, 0xb0, 0xea, 0x5a, 0x3d, 0xab, - 0xdf, 0x62, 0x3a, 0x92, 0x0e, 0xec, 0x2e, 0x34, 0xd0, 0xfd, 0x6f, 0x66, 0x75, 0x21, 0x07, 0xd0, - 0x88, 0x78, 0x51, 0xa4, 0xb2, 0x6b, 0xf7, 0xac, 0x7e, 0x93, 0xad, 0x9a, 0x37, 0x81, 0xce, 0x77, - 0x23, 0xc3, 0x52, 0xe5, 0x72, 0x8b, 0xd7, 0x01, 0x9b, 0xe7, 0xf1, 0xca, 0xaa, 0xa3, 0x9e, 0xcc, - 0xf0, 0xd1, 0x08, 0x5b, 0x4c, 0x47, 0xef, 0xd9, 0x82, 0xf6, 0xb5, 0x42, 0x51, 0x4d, 0xd2, 0x22, - 0x95, 0x25, 0x19, 0x40, 0x7b, 0xcc, 0xa3, 0xf2, 0x32, 0x90, 0xd1, 0x14, 0x63, 0x63, 0x6b, 0x0f, - 0x1c, 0x5f, 0x7f, 0xab, 0xbf, 0xe1, 0xd8, 0x57, 0x88, 0x9c, 0x01, 0xb9, 0x41, 0x91, 0x62, 0xc9, - 0x30, 0x88, 0xcf, 0x05, 0x2f, 0xc6, 0x69, 0x99, 0x99, 0xd7, 0x6e, 0x7b, 0x74, 0x0b, 0x4b, 0x4e, - 0x61, 0x7f, 0x54, 0xc9, 0x1f, 0x02, 0xfb, 0x0f, 0xc1, 0x6f, 0xd4, 0xbb, 0x07, 0xd8, 0x00, 0xfa, - 0x7f, 0xe6, 0x3a, 0x98, 0xed, 0x6d, 0x56, 0x17, 0x72, 0x0c, 0xcd, 0x9c, 0xf3, 0x2c, 0x0c, 0xa2, - 0xf5, 0x6e, 0x87, 0x7e, 0x7d, 0x3c, 0x7f, 0x7d, 0x3c, 0x7f, 0xbc, 0x3a, 0x1e, 0xfb, 0x44, 
0x47, - 0xce, 0xcb, 0xd2, 0xb5, 0x5e, 0x97, 0xae, 0xf5, 0xb6, 0x74, 0xad, 0xa7, 0x77, 0xf7, 0x5f, 0xd8, - 0x30, 0xf8, 0xf0, 0x23, 0x00, 0x00, 0xff, 0xff, 0xf1, 0x5c, 0x84, 0x87, 0x3f, 0x02, 0x00, 0x00, + // 329 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0xc1, 0x4a, 0xeb, 0x40, + 0x14, 0x86, 0x6f, 0x6e, 0xee, 0x2d, 0xb7, 0xa7, 0x70, 0x8d, 0x43, 0x91, 0xae, 0x42, 0xc9, 0xaa, + 0xab, 0x0c, 0xb4, 0x5b, 0x11, 0x29, 0xc5, 0x8d, 0x15, 0x74, 0x8a, 0xee, 0x93, 0x99, 0x43, 0x1b, + 0x92, 0xe9, 0x94, 0x99, 0x49, 0x25, 0x6f, 0xe1, 0x3b, 0xb9, 0x71, 0xe9, 0x23, 0x48, 0x7d, 0x11, + 0x99, 0x69, 0xa1, 0x5a, 0xeb, 0xee, 0xff, 0x0f, 0x5f, 0xbe, 0x9c, 0x99, 0x81, 0xf3, 0x79, 0x61, + 0x17, 0x75, 0x9e, 0x72, 0x25, 0xa9, 0x1c, 0x89, 0x9c, 0xca, 0x11, 0x35, 0x9a, 0x53, 0x5e, 0xd5, + 0xc6, 0xa2, 0xa6, 0x73, 0x5c, 0xa2, 0xce, 0x2c, 0x0a, 0xba, 0xd2, 0xca, 0x2a, 0x5a, 0xae, 0x57, + 0x39, 0x2d, 0xd7, 0xa9, 0x6f, 0xe4, 0x8f, 0xab, 0xc9, 0x2d, 0xfc, 0xbf, 0xc6, 0xe6, 0x21, 0xab, + 0x6a, 0xbc, 0x5f, 0x89, 0xcc, 0x22, 0x89, 0x20, 0x2c, 0xb1, 0xe9, 0x05, 0xfd, 0x60, 0xd0, 0x66, + 0x2e, 0x92, 0x2e, 0xfc, 0x5d, 0x3b, 0xa0, 0xf7, 0xdb, 0xcf, 0xb6, 0x85, 0x9c, 0x41, 0x8b, 0x2b, + 0x29, 0x0b, 0xdb, 0x0b, 0xfb, 0xc1, 0xe0, 0x1f, 0xdb, 0xb5, 0x64, 0x0a, 0xdd, 0xaf, 0x46, 0x86, + 0xa6, 0xae, 0xec, 0x11, 0x6f, 0x04, 0xa1, 0xaa, 0xc4, 0xce, 0xea, 0xa2, 0x9b, 0x2c, 0xf1, 0xd1, + 0x0b, 0xdb, 0xcc, 0xc5, 0xe4, 0x39, 0x80, 0xce, 0x5d, 0x8d, 0xba, 0x99, 0x16, 0xb2, 0xb0, 0x86, + 0x0c, 0xa1, 0x33, 0x51, 0xdc, 0xdc, 0x64, 0x96, 0x2f, 0x50, 0x78, 0x5b, 0x67, 0x18, 0xa5, 0xee, + 0x2c, 0xe9, 0x9e, 0x63, 0x9f, 0x21, 0x72, 0x09, 0x64, 0x86, 0xba, 0x40, 0xc3, 0x30, 0x13, 0x57, + 0x5a, 0xc9, 0x49, 0x61, 0x4a, 0xff, 0xdb, 0x63, 0x9f, 0x1e, 0x61, 0xc9, 0x05, 0x9c, 0x8e, 0x1b, + 0x7b, 0x20, 0x08, 0x7f, 0x10, 0x7c, 0x47, 0x93, 0x29, 0xc0, 0x1e, 0x70, 0xf7, 0x59, 0xb9, 0xe0, + 0xb7, 0x0f, 0xd9, 0xb6, 0x90, 0x01, 0x9c, 0x54, 0x4a, 0x95, 0x79, 0xc6, 0xcb, 
0x19, 0x72, 0xb5, + 0x14, 0xc6, 0xaf, 0x18, 0xb2, 0xc3, 0xf1, 0x38, 0x7a, 0xd9, 0xc4, 0xc1, 0xeb, 0x26, 0x0e, 0xde, + 0x36, 0x71, 0xf0, 0xf4, 0x1e, 0xff, 0xca, 0x5b, 0xfe, 0x49, 0x47, 0x1f, 0x01, 0x00, 0x00, 0xff, + 0xff, 0xe6, 0xe7, 0x13, 0x0d, 0x12, 0x02, 0x00, 0x00, } diff --git a/src/cluster/generated/proto/kvpb/kv.proto b/src/cluster/generated/proto/kvpb/kv.proto index a22363ecc2..2623ae7d62 100644 --- a/src/cluster/generated/proto/kvpb/kv.proto +++ b/src/cluster/generated/proto/kvpb/kv.proto @@ -21,8 +21,6 @@ syntax = "proto3"; package kvpb; -import "google/protobuf/duration.proto"; - message KeyValueUpdate { string key = 1; string value = 2; @@ -43,7 +41,7 @@ message QueryLimits { message QueryLimit { int64 limit = 1; - google.protobuf.Duration lookback = 2; + int64 lookbackSeconds = 2; } diff --git a/src/dbnode/server/server.go b/src/dbnode/server/server.go index 2a6aab971f..c6cc8fc03a 100644 --- a/src/dbnode/server/server.go +++ b/src/dbnode/server/server.go @@ -32,7 +32,6 @@ import ( "path" "runtime" "runtime/debug" - "strconv" "strings" "sync" "time" @@ -40,6 +39,7 @@ import ( clusterclient "github.com/m3db/m3/src/cluster/client" "github.com/m3db/m3/src/cluster/client/etcd" "github.com/m3db/m3/src/cluster/generated/proto/commonpb" + "github.com/m3db/m3/src/cluster/generated/proto/kvpb" "github.com/m3db/m3/src/cluster/kv" "github.com/m3db/m3/src/cmd/services/m3dbnode/config" queryconfig "github.com/m3db/m3/src/cmd/services/m3query/config" @@ -1000,9 +1000,7 @@ func Run(runOpts RunOptions) { runtimeOptsMgr, cfg.Limits.WriteNewSeriesPerSecond) kvWatchEncodersPerBlockLimit(syncCfg.KVStore, logger, runtimeOptsMgr, cfg.Limits.MaxEncodersPerBlock) - kvWatchQueryLimit(syncCfg.KVStore, logger, queryLimits.DocsLimit(), kvconfig.DocsLimit) - kvWatchQueryLimit(syncCfg.KVStore, logger, queryLimits.DiskSeriesReadLimit(), kvconfig.DiskBytesReadLimit) - kvWatchQueryLimit(syncCfg.KVStore, logger, queryLimits.BytesReadLimit(), kvconfig.DiskSeriesReadLimit) + 
kvWatchQueryLimit(syncCfg.KVStore, logger, queryLimits) }() // Wait for process interrupt. @@ -1178,74 +1176,68 @@ func kvWatchEncodersPerBlockLimit( func kvWatchQueryLimit( store kv.Store, logger *zap.Logger, - limit limits.LookbackLimit, - kvName string, + limits limits.QueryLimits, ) { - options := limit.Options() - - value, err := store.Get(kvName) + value, err := store.Get(kvconfig.QueryLimits) if err == nil { - protoValue := &commonpb.StringProto{} + protoValue := &kvpb.QueryLimits{} err = value.Unmarshal(protoValue) - if err == nil { - options = parseLookbackLimitOptions(logger, kvName, protoValue.Value, options) + if err == nil && protoValue != nil { + updateQueryLimits(logger, limits, protoValue) } - } else if errors.Is(err, kv.ErrNotFound) { - logger.Warn("error resolving query limit", zap.Error(err), zap.String("name", kvName)) + } else if !errors.Is(err, kv.ErrNotFound) { + logger.Warn("error resolving query limit", zap.Error(err)) } - if err := limit.Update(options); err != nil { - logger.Warn("unable to set query limit", zap.Error(err), zap.String("name", kvName)) - } - - watch, err := store.Watch(kvName) + watch, err := store.Watch(kvconfig.QueryLimits) if err != nil { - logger.Error("could not watch query limit", zap.Error(err), zap.String("name", kvName)) + logger.Error("could not watch query limit", zap.Error(err)) return } go func() { - protoValue := &commonpb.StringProto{} + protoValue := &kvpb.QueryLimits{} for range watch.C() { - value := options if newValue := watch.Get(); newValue != nil { if err := newValue.Unmarshal(protoValue); err != nil { - logger.Warn("unable to parse new query limit", zap.Error(err), zap.String("name", kvName)) + logger.Warn("unable to parse new query limits", zap.Error(err)) continue } - value = parseLookbackLimitOptions(logger, kvName, protoValue.Value, value) - } - - if err := limit.Update(value); err != nil { - logger.Warn("unable to set query limit", zap.Error(err), zap.String("name", kvName)) + 
updateQueryLimits(logger, limits, protoValue) } } }() } -func parseLookbackLimitOptions(logger *zap.Logger, - kvName string, - val string, - defaultOpts limits.LookbackLimitOptions, -) limits.LookbackLimitOptions { - parts := strings.Split(val, ",") - if val == "" { - defaultOpts.Limit = nil - } else if len(parts) == 2 { - parsedLimit, err := strconv.ParseInt(parts[0], 10, 64) - if err != nil { - logger.Warn("error parsing query limit value", zap.Error(err), zap.String("name", kvName)) - } else { - defaultOpts.Limit = &parsedLimit - } - parsedLookback, err := time.ParseDuration(parts[1]) - if err != nil { - logger.Warn("error parsing query limit lookback", zap.Error(err), zap.String("name", kvName)) - } else { - defaultOpts.Lookback = parsedLookback - } +func updateQueryLimits(logger *zap.Logger, + limits limits.QueryLimits, + settings *kvpb.QueryLimits, +) { + if err := updateQueryLimit(logger, limits.DocsLimit(), settings.DocsMatched); err != nil { + logger.Error("error updating docs limit", zap.Error(err)) + } + if err := updateQueryLimit(logger, limits.DiskSeriesReadLimit(), settings.SeriesReadFromDisk); err != nil { + logger.Error("error updating series read limit", zap.Error(err)) + } + if err := updateQueryLimit(logger, limits.BytesReadLimit(), settings.BytesReadFromDisk); err != nil { + logger.Error("error updating bytes read limit", zap.Error(err)) + } +} + +func updateQueryLimit(logger *zap.Logger, + limit limits.LookbackLimit, + settings *kvpb.QueryLimit, +) error { + limitOpts := limits.LookbackLimitOptions{ + // If the settings are nil, then that means the limit is disabled. 
+ Limit: nil, + Lookback: limit.Options().Lookback, + } + if settings != nil { + limitOpts.Limit = &settings.Limit + limitOpts.Lookback = time.Second * time.Duration(settings.LookbackSeconds) } - return defaultOpts + return limit.Update(limitOpts) } func kvWatchClientConsistencyLevels( diff --git a/src/query/api/v1/handler/database/kvstore.go b/src/query/api/v1/handler/database/kvstore.go index 41cbcc2eec..fb6063172b 100644 --- a/src/query/api/v1/handler/database/kvstore.go +++ b/src/query/api/v1/handler/database/kvstore.go @@ -181,13 +181,13 @@ func (h *KeyValueStoreHandler) update( func newKVProtoMessage(key string) protoiface.MessageV1 { switch key { - case NamespacesKey: - case BootstrapperKey: - case ClusterNewSeriesInsertLimitKey: - case EncodersPerBlockLimitKey: - case ClientBootstrapConsistencyLevel: - case ClientReadConsistencyLevel: - case ClientWriteConsistencyLevel: + case kvconfig.NamespacesKey: + case kvconfig.BootstrapperKey: + case kvconfig.ClusterNewSeriesInsertLimitKey: + case kvconfig.EncodersPerBlockLimitKey: + case kvconfig.ClientBootstrapConsistencyLevel: + case kvconfig.ClientReadConsistencyLevel: + case kvconfig.ClientWriteConsistencyLevel: return &commonpb.StringProto{} case kvconfig.QueryLimits: return &kvpb.QueryLimits{} From 8351c8918621a649a7254520211b171767408c5b Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Tue, 19 Jan 2021 17:42:27 -0500 Subject: [PATCH 43/80] Add etcd kv update endpoint 5 --- src/query/api/v1/handler/database/kvstore.go | 1 + 1 file changed, 1 insertion(+) diff --git a/src/query/api/v1/handler/database/kvstore.go b/src/query/api/v1/handler/database/kvstore.go index fb6063172b..d9479c8359 100644 --- a/src/query/api/v1/handler/database/kvstore.go +++ b/src/query/api/v1/handler/database/kvstore.go @@ -27,6 +27,7 @@ import ( "net/http" "github.com/jhump/protoreflect/dynamic" + "github.com/m3db/m3/src/cluster/generated/proto/commonpb" "github.com/m3db/m3/src/cluster/generated/proto/kvpb" 
"github.com/m3db/m3/src/cluster/kv" "github.com/m3db/m3/src/dbnode/kvconfig" From 731ec5fe2e6bfeda99c54cbf1bdf0954d2df7deb Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Tue, 19 Jan 2021 17:50:38 -0500 Subject: [PATCH 44/80] Remove reset --- src/dbnode/storage/limits/query_limits.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/dbnode/storage/limits/query_limits.go b/src/dbnode/storage/limits/query_limits.go index c3f9dc4591..0fd22c8edc 100644 --- a/src/dbnode/storage/limits/query_limits.go +++ b/src/dbnode/storage/limits/query_limits.go @@ -182,7 +182,7 @@ func (q *lookbackLimit) Options() LookbackLimitOptions { return q.options } -// Override overrides the limit set on construction. +// Update updates the limit. func (q *lookbackLimit) Update(opts LookbackLimitOptions) error { if err := opts.validate(); err != nil { return err @@ -255,7 +255,6 @@ func (q *lookbackLimit) checkLimit(recent int64) error { func (q *lookbackLimit) start() { ticker := time.NewTicker(q.options.Lookback) - ticker.Reset(q.options.Lookback) go func() { q.logger.Info("query limit interval started", zap.String("name", q.name)) for { From ffcea5eff136a33fe7c272a2bd12954b69069c7f Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Tue, 19 Jan 2021 17:58:12 -0500 Subject: [PATCH 45/80] Add back mock --- src/dbnode/server/server.go | 3 + src/m3em/generated/proto/m3em/m3em_mock.go | 556 +++++++++++++++++++++ 2 files changed, 559 insertions(+) create mode 100644 src/m3em/generated/proto/m3em/m3em_mock.go diff --git a/src/dbnode/server/server.go b/src/dbnode/server/server.go index c6cc8fc03a..bc9d329fd6 100644 --- a/src/dbnode/server/server.go +++ b/src/dbnode/server/server.go @@ -1213,6 +1213,9 @@ func updateQueryLimits(logger *zap.Logger, limits limits.QueryLimits, settings *kvpb.QueryLimits, ) { + if settings == nil { + return + } if err := updateQueryLimit(logger, limits.DocsLimit(), settings.DocsMatched); err != nil { logger.Error("error updating docs limit", 
zap.Error(err)) } diff --git a/src/m3em/generated/proto/m3em/m3em_mock.go b/src/m3em/generated/proto/m3em/m3em_mock.go new file mode 100644 index 0000000000..24d6d67813 --- /dev/null +++ b/src/m3em/generated/proto/m3em/m3em_mock.go @@ -0,0 +1,556 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/m3db/m3/src/m3em/generated/proto/m3em (interfaces: OperatorClient,Operator_PushFileClient,Operator_PullFileClient,Operator_PullFileServer) + +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package m3em is a generated GoMock package. 
+package m3em + +import ( + "context" + "reflect" + + "github.com/golang/mock/gomock" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" +) + +// MockOperatorClient is a mock of OperatorClient interface +type MockOperatorClient struct { + ctrl *gomock.Controller + recorder *MockOperatorClientMockRecorder +} + +// MockOperatorClientMockRecorder is the mock recorder for MockOperatorClient +type MockOperatorClientMockRecorder struct { + mock *MockOperatorClient +} + +// NewMockOperatorClient creates a new mock instance +func NewMockOperatorClient(ctrl *gomock.Controller) *MockOperatorClient { + mock := &MockOperatorClient{ctrl: ctrl} + mock.recorder = &MockOperatorClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockOperatorClient) EXPECT() *MockOperatorClientMockRecorder { + return m.recorder +} + +// PullFile mocks base method +func (m *MockOperatorClient) PullFile(arg0 context.Context, arg1 *PullFileRequest, arg2 ...grpc.CallOption) (Operator_PullFileClient, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PullFile", varargs...) + ret0, _ := ret[0].(Operator_PullFileClient) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PullFile indicates an expected call of PullFile +func (mr *MockOperatorClientMockRecorder) PullFile(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PullFile", reflect.TypeOf((*MockOperatorClient)(nil).PullFile), varargs...) 
+} + +// PushFile mocks base method +func (m *MockOperatorClient) PushFile(arg0 context.Context, arg1 ...grpc.CallOption) (Operator_PushFileClient, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0} + for _, a := range arg1 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PushFile", varargs...) + ret0, _ := ret[0].(Operator_PushFileClient) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PushFile indicates an expected call of PushFile +func (mr *MockOperatorClientMockRecorder) PushFile(arg0 interface{}, arg1 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0}, arg1...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PushFile", reflect.TypeOf((*MockOperatorClient)(nil).PushFile), varargs...) +} + +// Setup mocks base method +func (m *MockOperatorClient) Setup(arg0 context.Context, arg1 *SetupRequest, arg2 ...grpc.CallOption) (*SetupResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "Setup", varargs...) + ret0, _ := ret[0].(*SetupResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Setup indicates an expected call of Setup +func (mr *MockOperatorClientMockRecorder) Setup(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Setup", reflect.TypeOf((*MockOperatorClient)(nil).Setup), varargs...) +} + +// Start mocks base method +func (m *MockOperatorClient) Start(arg0 context.Context, arg1 *StartRequest, arg2 ...grpc.CallOption) (*StartResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "Start", varargs...) 
+ ret0, _ := ret[0].(*StartResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Start indicates an expected call of Start +func (mr *MockOperatorClientMockRecorder) Start(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockOperatorClient)(nil).Start), varargs...) +} + +// Stop mocks base method +func (m *MockOperatorClient) Stop(arg0 context.Context, arg1 *StopRequest, arg2 ...grpc.CallOption) (*StopResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "Stop", varargs...) + ret0, _ := ret[0].(*StopResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Stop indicates an expected call of Stop +func (mr *MockOperatorClientMockRecorder) Stop(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", reflect.TypeOf((*MockOperatorClient)(nil).Stop), varargs...) +} + +// Teardown mocks base method +func (m *MockOperatorClient) Teardown(arg0 context.Context, arg1 *TeardownRequest, arg2 ...grpc.CallOption) (*TeardownResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "Teardown", varargs...) + ret0, _ := ret[0].(*TeardownResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Teardown indicates an expected call of Teardown +func (mr *MockOperatorClientMockRecorder) Teardown(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) 
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Teardown", reflect.TypeOf((*MockOperatorClient)(nil).Teardown), varargs...) +} + +// MockOperator_PushFileClient is a mock of Operator_PushFileClient interface +type MockOperator_PushFileClient struct { + ctrl *gomock.Controller + recorder *MockOperator_PushFileClientMockRecorder +} + +// MockOperator_PushFileClientMockRecorder is the mock recorder for MockOperator_PushFileClient +type MockOperator_PushFileClientMockRecorder struct { + mock *MockOperator_PushFileClient +} + +// NewMockOperator_PushFileClient creates a new mock instance +func NewMockOperator_PushFileClient(ctrl *gomock.Controller) *MockOperator_PushFileClient { + mock := &MockOperator_PushFileClient{ctrl: ctrl} + mock.recorder = &MockOperator_PushFileClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockOperator_PushFileClient) EXPECT() *MockOperator_PushFileClientMockRecorder { + return m.recorder +} + +// CloseAndRecv mocks base method +func (m *MockOperator_PushFileClient) CloseAndRecv() (*PushFileResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CloseAndRecv") + ret0, _ := ret[0].(*PushFileResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CloseAndRecv indicates an expected call of CloseAndRecv +func (mr *MockOperator_PushFileClientMockRecorder) CloseAndRecv() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseAndRecv", reflect.TypeOf((*MockOperator_PushFileClient)(nil).CloseAndRecv)) +} + +// CloseSend mocks base method +func (m *MockOperator_PushFileClient) CloseSend() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CloseSend") + ret0, _ := ret[0].(error) + return ret0 +} + +// CloseSend indicates an expected call of CloseSend +func (mr *MockOperator_PushFileClientMockRecorder) CloseSend() *gomock.Call { + mr.mock.ctrl.T.Helper() + return 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseSend", reflect.TypeOf((*MockOperator_PushFileClient)(nil).CloseSend)) +} + +// Context mocks base method +func (m *MockOperator_PushFileClient) Context() context.Context { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Context") + ret0, _ := ret[0].(context.Context) + return ret0 +} + +// Context indicates an expected call of Context +func (mr *MockOperator_PushFileClientMockRecorder) Context() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockOperator_PushFileClient)(nil).Context)) +} + +// Header mocks base method +func (m *MockOperator_PushFileClient) Header() (metadata.MD, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Header") + ret0, _ := ret[0].(metadata.MD) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Header indicates an expected call of Header +func (mr *MockOperator_PushFileClientMockRecorder) Header() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Header", reflect.TypeOf((*MockOperator_PushFileClient)(nil).Header)) +} + +// RecvMsg mocks base method +func (m *MockOperator_PushFileClient) RecvMsg(arg0 interface{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RecvMsg", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// RecvMsg indicates an expected call of RecvMsg +func (mr *MockOperator_PushFileClientMockRecorder) RecvMsg(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockOperator_PushFileClient)(nil).RecvMsg), arg0) +} + +// Send mocks base method +func (m *MockOperator_PushFileClient) Send(arg0 *PushFileRequest) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Send", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Send indicates an expected call of Send +func (mr *MockOperator_PushFileClientMockRecorder) Send(arg0 interface{}) 
*gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockOperator_PushFileClient)(nil).Send), arg0) +} + +// SendMsg mocks base method +func (m *MockOperator_PushFileClient) SendMsg(arg0 interface{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SendMsg", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendMsg indicates an expected call of SendMsg +func (mr *MockOperator_PushFileClientMockRecorder) SendMsg(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockOperator_PushFileClient)(nil).SendMsg), arg0) +} + +// Trailer mocks base method +func (m *MockOperator_PushFileClient) Trailer() metadata.MD { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Trailer") + ret0, _ := ret[0].(metadata.MD) + return ret0 +} + +// Trailer indicates an expected call of Trailer +func (mr *MockOperator_PushFileClientMockRecorder) Trailer() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Trailer", reflect.TypeOf((*MockOperator_PushFileClient)(nil).Trailer)) +} + +// MockOperator_PullFileClient is a mock of Operator_PullFileClient interface +type MockOperator_PullFileClient struct { + ctrl *gomock.Controller + recorder *MockOperator_PullFileClientMockRecorder +} + +// MockOperator_PullFileClientMockRecorder is the mock recorder for MockOperator_PullFileClient +type MockOperator_PullFileClientMockRecorder struct { + mock *MockOperator_PullFileClient +} + +// NewMockOperator_PullFileClient creates a new mock instance +func NewMockOperator_PullFileClient(ctrl *gomock.Controller) *MockOperator_PullFileClient { + mock := &MockOperator_PullFileClient{ctrl: ctrl} + mock.recorder = &MockOperator_PullFileClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockOperator_PullFileClient) EXPECT() 
*MockOperator_PullFileClientMockRecorder { + return m.recorder +} + +// CloseSend mocks base method +func (m *MockOperator_PullFileClient) CloseSend() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CloseSend") + ret0, _ := ret[0].(error) + return ret0 +} + +// CloseSend indicates an expected call of CloseSend +func (mr *MockOperator_PullFileClientMockRecorder) CloseSend() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseSend", reflect.TypeOf((*MockOperator_PullFileClient)(nil).CloseSend)) +} + +// Context mocks base method +func (m *MockOperator_PullFileClient) Context() context.Context { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Context") + ret0, _ := ret[0].(context.Context) + return ret0 +} + +// Context indicates an expected call of Context +func (mr *MockOperator_PullFileClientMockRecorder) Context() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockOperator_PullFileClient)(nil).Context)) +} + +// Header mocks base method +func (m *MockOperator_PullFileClient) Header() (metadata.MD, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Header") + ret0, _ := ret[0].(metadata.MD) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Header indicates an expected call of Header +func (mr *MockOperator_PullFileClientMockRecorder) Header() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Header", reflect.TypeOf((*MockOperator_PullFileClient)(nil).Header)) +} + +// Recv mocks base method +func (m *MockOperator_PullFileClient) Recv() (*PullFileResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Recv") + ret0, _ := ret[0].(*PullFileResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Recv indicates an expected call of Recv +func (mr *MockOperator_PullFileClientMockRecorder) Recv() *gomock.Call { + mr.mock.ctrl.T.Helper() + return 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Recv", reflect.TypeOf((*MockOperator_PullFileClient)(nil).Recv)) +} + +// RecvMsg mocks base method +func (m *MockOperator_PullFileClient) RecvMsg(arg0 interface{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RecvMsg", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// RecvMsg indicates an expected call of RecvMsg +func (mr *MockOperator_PullFileClientMockRecorder) RecvMsg(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockOperator_PullFileClient)(nil).RecvMsg), arg0) +} + +// SendMsg mocks base method +func (m *MockOperator_PullFileClient) SendMsg(arg0 interface{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SendMsg", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendMsg indicates an expected call of SendMsg +func (mr *MockOperator_PullFileClientMockRecorder) SendMsg(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockOperator_PullFileClient)(nil).SendMsg), arg0) +} + +// Trailer mocks base method +func (m *MockOperator_PullFileClient) Trailer() metadata.MD { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Trailer") + ret0, _ := ret[0].(metadata.MD) + return ret0 +} + +// Trailer indicates an expected call of Trailer +func (mr *MockOperator_PullFileClientMockRecorder) Trailer() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Trailer", reflect.TypeOf((*MockOperator_PullFileClient)(nil).Trailer)) +} + +// MockOperator_PullFileServer is a mock of Operator_PullFileServer interface +type MockOperator_PullFileServer struct { + ctrl *gomock.Controller + recorder *MockOperator_PullFileServerMockRecorder +} + +// MockOperator_PullFileServerMockRecorder is the mock recorder for MockOperator_PullFileServer +type MockOperator_PullFileServerMockRecorder struct { 
+ mock *MockOperator_PullFileServer +} + +// NewMockOperator_PullFileServer creates a new mock instance +func NewMockOperator_PullFileServer(ctrl *gomock.Controller) *MockOperator_PullFileServer { + mock := &MockOperator_PullFileServer{ctrl: ctrl} + mock.recorder = &MockOperator_PullFileServerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockOperator_PullFileServer) EXPECT() *MockOperator_PullFileServerMockRecorder { + return m.recorder +} + +// Context mocks base method +func (m *MockOperator_PullFileServer) Context() context.Context { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Context") + ret0, _ := ret[0].(context.Context) + return ret0 +} + +// Context indicates an expected call of Context +func (mr *MockOperator_PullFileServerMockRecorder) Context() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockOperator_PullFileServer)(nil).Context)) +} + +// RecvMsg mocks base method +func (m *MockOperator_PullFileServer) RecvMsg(arg0 interface{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RecvMsg", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// RecvMsg indicates an expected call of RecvMsg +func (mr *MockOperator_PullFileServerMockRecorder) RecvMsg(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockOperator_PullFileServer)(nil).RecvMsg), arg0) +} + +// Send mocks base method +func (m *MockOperator_PullFileServer) Send(arg0 *PullFileResponse) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Send", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Send indicates an expected call of Send +func (mr *MockOperator_PullFileServerMockRecorder) Send(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", 
reflect.TypeOf((*MockOperator_PullFileServer)(nil).Send), arg0) +} + +// SendHeader mocks base method +func (m *MockOperator_PullFileServer) SendHeader(arg0 metadata.MD) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SendHeader", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendHeader indicates an expected call of SendHeader +func (mr *MockOperator_PullFileServerMockRecorder) SendHeader(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendHeader", reflect.TypeOf((*MockOperator_PullFileServer)(nil).SendHeader), arg0) +} + +// SendMsg mocks base method +func (m *MockOperator_PullFileServer) SendMsg(arg0 interface{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SendMsg", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendMsg indicates an expected call of SendMsg +func (mr *MockOperator_PullFileServerMockRecorder) SendMsg(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockOperator_PullFileServer)(nil).SendMsg), arg0) +} + +// SetHeader mocks base method +func (m *MockOperator_PullFileServer) SetHeader(arg0 metadata.MD) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetHeader", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetHeader indicates an expected call of SetHeader +func (mr *MockOperator_PullFileServerMockRecorder) SetHeader(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHeader", reflect.TypeOf((*MockOperator_PullFileServer)(nil).SetHeader), arg0) +} + +// SetTrailer mocks base method +func (m *MockOperator_PullFileServer) SetTrailer(arg0 metadata.MD) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetTrailer", arg0) +} + +// SetTrailer indicates an expected call of SetTrailer +func (mr *MockOperator_PullFileServerMockRecorder) SetTrailer(arg0 interface{}) *gomock.Call { + 
mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTrailer", reflect.TypeOf((*MockOperator_PullFileServer)(nil).SetTrailer), arg0) +} From 6c42ae203e4e749253ca6be484659e6418e67c50 Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Tue, 19 Jan 2021 18:40:50 -0500 Subject: [PATCH 46/80] Fixing tests --- src/dbnode/storage/limits/query_limits.go | 34 ++++++---- .../storage/limits/query_limits_test.go | 64 +++++++++---------- src/dbnode/storage/limits/types.go | 5 +- 3 files changed, 57 insertions(+), 46 deletions(-) diff --git a/src/dbnode/storage/limits/query_limits.go b/src/dbnode/storage/limits/query_limits.go index 0fd22c8edc..c86f2f3a97 100644 --- a/src/dbnode/storage/limits/query_limits.go +++ b/src/dbnode/storage/limits/query_limits.go @@ -34,6 +34,8 @@ import ( ) const ( + disabledLimitValue = 0 + defaultLookback = time.Second * 15 ) @@ -54,7 +56,7 @@ type lookbackLimit struct { } type lookbackLimitMetrics struct { - optionsMax tally.Gauge + optionsLimit tally.Gauge optionsLookback tally.Gauge recentCount tally.Gauge recentMax tally.Gauge @@ -73,7 +75,7 @@ var ( func DefaultLookbackLimitOptions() LookbackLimitOptions { return LookbackLimitOptions{ // Default to no limit. 
- Limit: nil, + Limit: disabledLimitValue, Lookback: defaultLookback, } } @@ -135,10 +137,12 @@ func newLookbackLimitMetrics( instrumentOpts.SetMetricsScope(scope)) return lookbackLimitMetrics{ - recentCount: scope.Gauge(fmt.Sprintf("recent-count-%s", name)), - recentMax: scope.Gauge(fmt.Sprintf("recent-max-%s", name)), - total: scope.Counter(fmt.Sprintf("total-%s", name)), - exceeded: scope.Tagged(map[string]string{"limit": name}).Counter("exceeded"), + optionsLimit: scope.Gauge(fmt.Sprintf("current-limit%s", name)), + optionsLookback: scope.Gauge(fmt.Sprintf("current-lookback-%s", name)), + recentCount: scope.Gauge(fmt.Sprintf("recent-count-%s", name)), + recentMax: scope.Gauge(fmt.Sprintf("recent-max-%s", name)), + total: scope.Counter(fmt.Sprintf("total-%s", name)), + exceeded: scope.Tagged(map[string]string{"limit": name}).Counter("exceeded"), sourceLogger: sourceLogger, } @@ -237,14 +241,20 @@ func (q *lookbackLimit) exceeded() error { func (q *lookbackLimit) checkLimit(recent int64) error { q.lock.RLock() - limit := q.options.Limit + currentOpts := q.options q.lock.RUnlock() - if limit == nil { + if currentOpts.ForceExceeded { + q.metrics.exceeded.Inc(1) + return xerrors.NewInvalidParamsError(NewQueryLimitExceededError(fmt.Sprintf( + "query aborted due to forced limit: name=%s", q.name))) + } + + if currentOpts.Limit == disabledLimitValue { return nil } - if recent >= *limit { + if recent >= currentOpts.Limit { q.metrics.exceeded.Inc(1) return xerrors.NewInvalidParamsError(NewQueryLimitExceededError(fmt.Sprintf( "query aborted due to limit: name=%s, limit=%d, current=%d, within=%s", @@ -268,7 +278,7 @@ func (q *lookbackLimit) start() { } }() - q.metrics.optionsMax.Update(float64(*q.options.Limit)) + q.metrics.optionsLimit.Update(float64(q.options.Limit)) q.metrics.optionsLookback.Update(q.options.Lookback.Seconds()) } @@ -296,8 +306,8 @@ func (q *lookbackLimit) reset() { } func (opts LookbackLimitOptions) validate() error { - if opts.Limit != nil && 
*opts.Limit < 0 { - return fmt.Errorf("query limit requires limit >= 0 or nil (%d)", *opts.Limit) + if opts.Limit < 0 { + return fmt.Errorf("query limit requires limit >= 0 (%d)", opts.Limit) } if opts.Lookback <= 0 { return fmt.Errorf("query limit requires lookback > 0 (%d)", opts.Lookback) diff --git a/src/dbnode/storage/limits/query_limits_test.go b/src/dbnode/storage/limits/query_limits_test.go index 85c6873f15..93616d4131 100644 --- a/src/dbnode/storage/limits/query_limits_test.go +++ b/src/dbnode/storage/limits/query_limits_test.go @@ -50,15 +50,15 @@ func testQueryLimitOptions( func TestQueryLimits(t *testing.T) { l := int64(1) docOpts := LookbackLimitOptions{ - Limit: &l, + Limit: l, Lookback: time.Second, } bytesOpts := LookbackLimitOptions{ - Limit: &l, + Limit: l, Lookback: time.Second, } seriesOpts := LookbackLimitOptions{ - Limit: &l, + Limit: l, Lookback: time.Second, } opts := testQueryLimitOptions(docOpts, bytesOpts, seriesOpts, instrument.NewOptions()) @@ -111,19 +111,21 @@ func TestQueryLimits(t *testing.T) { func TestLookbackLimit(t *testing.T) { for _, test := range []struct { - name string - limit *int64 + name string + limit int64 + forceExceeded bool }{ - {name: "no limit", limit: nil}, - {name: "zero limit", limit: prt(0)}, - {name: "limit", limit: prt(5)}, + {name: "no limit", limit: 0}, + {name: "limit", limit: 5}, + {name: "force exceeded limit", limit: 5, forceExceeded: true}, } { t.Run(test.name, func(t *testing.T) { scope := tally.NewTestScope("", nil) iOpts := instrument.NewOptions().SetMetricsScope(scope) opts := LookbackLimitOptions{ - Limit: test.limit, - Lookback: time.Millisecond * 100, + Limit: test.limit, + Lookback: time.Millisecond * 100, + ForceExceeded: test.forceExceeded, } name := "test" limit := newLookbackLimit(iOpts, opts, name, &sourceLoggerBuilder{}) @@ -132,7 +134,7 @@ func TestLookbackLimit(t *testing.T) { var exceededCount int64 err := limit.exceeded() - if test.limit == nil || *test.limit > 0 { + if test.limit >= 
0 && !test.forceExceeded { require.NoError(t, err) } else { require.Error(t, err) @@ -140,19 +142,19 @@ func TestLookbackLimit(t *testing.T) { } // Validate ascending while checking limits. - exceededCount += verifyLimit(t, limit, 3, test.limit) + exceededCount += verifyLimit(t, limit, 3, test.limit, test.forceExceeded) require.Equal(t, int64(3), limit.current()) verifyMetrics(t, scope, name, 3, 0, 3, exceededCount) - exceededCount += verifyLimit(t, limit, 2, test.limit) + exceededCount += verifyLimit(t, limit, 2, test.limit, test.forceExceeded) require.Equal(t, int64(5), limit.current()) verifyMetrics(t, scope, name, 5, 0, 5, exceededCount) - exceededCount += verifyLimit(t, limit, 1, test.limit) + exceededCount += verifyLimit(t, limit, 1, test.limit, test.forceExceeded) require.Equal(t, int64(6), limit.current()) verifyMetrics(t, scope, name, 6, 0, 6, exceededCount) - exceededCount += verifyLimit(t, limit, 4, test.limit) + exceededCount += verifyLimit(t, limit, 4, test.limit, test.forceExceeded) require.Equal(t, int64(10), limit.current()) verifyMetrics(t, scope, name, 10, 0, 10, exceededCount) @@ -162,11 +164,11 @@ func TestLookbackLimit(t *testing.T) { verifyMetrics(t, scope, name, 0, 10, 10, exceededCount) // Validate ascending again post-reset. 
- exceededCount += verifyLimit(t, limit, 2, test.limit) + exceededCount += verifyLimit(t, limit, 2, test.limit, test.forceExceeded) require.Equal(t, int64(2), limit.current()) verifyMetrics(t, scope, name, 2, 10, 12, exceededCount) - exceededCount += verifyLimit(t, limit, 5, test.limit) + exceededCount += verifyLimit(t, limit, 5, test.limit, test.forceExceeded) require.Equal(t, int64(7), limit.current()) verifyMetrics(t, scope, name, 7, 10, 17, exceededCount) @@ -184,34 +186,34 @@ func TestLookbackLimit(t *testing.T) { limit.reset() - opts.Limit = prt(0) + opts.Limit = 0 require.NoError(t, limit.Update(opts)) - exceededCount += verifyLimit(t, limit, 0, opts.Limit) + exceededCount += verifyLimit(t, limit, 0, opts.Limit, test.forceExceeded) require.Equal(t, int64(0), limit.current()) - opts.Limit = prt(2) + opts.Limit = 2 require.NoError(t, limit.Update(opts)) - exceededCount += verifyLimit(t, limit, 1, opts.Limit) + exceededCount += verifyLimit(t, limit, 1, opts.Limit, test.forceExceeded) require.Equal(t, int64(1), limit.current()) verifyMetrics(t, scope, name, 1, 0, 18, exceededCount) - exceededCount += verifyLimit(t, limit, 1, opts.Limit) + exceededCount += verifyLimit(t, limit, 1, opts.Limit, test.forceExceeded) require.Equal(t, int64(2), limit.current()) verifyMetrics(t, scope, name, 2, 0, 19, exceededCount) - exceededCount += verifyLimit(t, limit, 1, opts.Limit) + exceededCount += verifyLimit(t, limit, 1, opts.Limit, test.forceExceeded) require.Equal(t, int64(3), limit.current()) verifyMetrics(t, scope, name, 3, 0, 20, exceededCount) }) } } -func verifyLimit(t *testing.T, limit *lookbackLimit, inc int, expectedLimit *int64) int64 { +func verifyLimit(t *testing.T, limit *lookbackLimit, inc int, expectedLimit int64, forceExceeded bool) int64 { var exceededCount int64 err := limit.Inc(inc, nil) - if expectedLimit == nil || (limit.current() < *expectedLimit && *expectedLimit != 0) { + if (limit.options.Limit == 0 || limit.current() < expectedLimit) && 
!forceExceeded { require.NoError(t, err) } else { require.Error(t, err) @@ -221,7 +223,7 @@ func verifyLimit(t *testing.T, limit *lookbackLimit, inc int, expectedLimit *int } err = limit.exceeded() - if expectedLimit == nil || (limit.current() < *expectedLimit && *expectedLimit != 0) { + if (limit.options.Limit == 0 || limit.current() < expectedLimit) && !forceExceeded { require.NoError(t, err) } else { require.Error(t, err) @@ -236,7 +238,7 @@ func TestLookbackReset(t *testing.T) { scope := tally.NewTestScope("", nil) iOpts := instrument.NewOptions().SetMetricsScope(scope) opts := LookbackLimitOptions{ - Limit: prt(5), + Limit: 5, Lookback: time.Millisecond * 100, } name := "test" @@ -294,7 +296,7 @@ func TestValidateLookbackLimitOptions(t *testing.T) { } { t.Run(test.name, func(t *testing.T) { err := LookbackLimitOptions{ - Limit: prt(test.max), + Limit: test.max, Lookback: test.lookback, }.validate() if test.expectError { @@ -347,7 +349,7 @@ func TestSourceLogger(t *testing.T) { scope = tally.NewTestScope("test", nil) iOpts = instrument.NewOptions().SetMetricsScope(scope) noLimit = LookbackLimitOptions{ - Limit: nil, + Limit: 0, Lookback: time.Millisecond * 100, } @@ -371,10 +373,6 @@ func TestSourceLogger(t *testing.T) { }, builder.records) } -func prt(i int64) *int64 { - return &i -} - // NB: creates test logger records that share an underlying record set, // differentiated by source logger name. type testBuilder struct { diff --git a/src/dbnode/storage/limits/types.go b/src/dbnode/storage/limits/types.go index db7522207f..22b46b4b57 100644 --- a/src/dbnode/storage/limits/types.go +++ b/src/dbnode/storage/limits/types.go @@ -63,9 +63,12 @@ type LookbackLimit interface { // LookbackLimitOptions holds options for a lookback limit to be enforced. type LookbackLimitOptions struct { // Limit past which errors will be returned. - Limit *int64 + // Zero disables the limit. + Limit int64 // Lookback is the period over which the limit is enforced. 
Lookback time.Duration + // ForceExceeded, if true, makes all calls to the limit behave as though the limit is exceeded. + ForceExceeded bool } // SourceLoggerBuilder builds a SourceLogger given instrument options. From a86d55f5af183613094e1a425dab3ed47dd51119 Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Tue, 19 Jan 2021 18:44:42 -0500 Subject: [PATCH 47/80] More build fixes --- src/cluster/generated/proto/kvpb/kv.pb.go | 86 +++- src/cluster/generated/proto/kvpb/kv.proto | 1 + src/dbnode/server/server.go | 20 +- src/dbnode/storage/limits/query_limits.go | 8 +- src/m3em/generated/proto/m3em/m3em_mock.go | 556 --------------------- 5 files changed, 78 insertions(+), 593 deletions(-) delete mode 100644 src/m3em/generated/proto/m3em/m3em_mock.go diff --git a/src/cluster/generated/proto/kvpb/kv.pb.go b/src/cluster/generated/proto/kvpb/kv.pb.go index 47238df397..7d9e488642 100644 --- a/src/cluster/generated/proto/kvpb/kv.pb.go +++ b/src/cluster/generated/proto/kvpb/kv.pb.go @@ -151,6 +151,7 @@ func (m *QueryLimits) GetBytesReadFromDisk() *QueryLimit { type QueryLimit struct { Limit int64 `protobuf:"varint,1,opt,name=limit,proto3" json:"limit,omitempty"` LookbackSeconds int64 `protobuf:"varint,2,opt,name=lookbackSeconds,proto3" json:"lookbackSeconds,omitempty"` + ForceExceeded bool `protobuf:"varint,3,opt,name=forceExceeded,proto3" json:"forceExceeded,omitempty"` } func (m *QueryLimit) Reset() { *m = QueryLimit{} } @@ -172,6 +173,13 @@ func (m *QueryLimit) GetLookbackSeconds() int64 { return 0 } +func (m *QueryLimit) GetForceExceeded() bool { + if m != nil { + return m.ForceExceeded + } + return false +} + func init() { proto.RegisterType((*KeyValueUpdate)(nil), "kvpb.KeyValueUpdate") proto.RegisterType((*KeyValueUpdateResult)(nil), "kvpb.KeyValueUpdateResult") @@ -327,6 +335,16 @@ func (m *QueryLimit) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintKv(dAtA, i, uint64(m.LookbackSeconds)) } + if m.ForceExceeded { + dAtA[i] = 0x18 + i++ + if m.ForceExceeded { + 
dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } return i, nil } @@ -401,6 +419,9 @@ func (m *QueryLimit) Size() (n int) { if m.LookbackSeconds != 0 { n += 1 + sovKv(uint64(m.LookbackSeconds)) } + if m.ForceExceeded { + n += 2 + } return n } @@ -898,6 +919,26 @@ func (m *QueryLimit) Unmarshal(dAtA []byte) error { break } } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ForceExceeded", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ForceExceeded = bool(v != 0) default: iNdEx = preIndex skippy, err := skipKv(dAtA[iNdEx:]) @@ -1029,26 +1070,27 @@ func init() { } var fileDescriptorKv = []byte{ - // 329 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0xc1, 0x4a, 0xeb, 0x40, - 0x14, 0x86, 0x6f, 0x6e, 0xee, 0x2d, 0xb7, 0xa7, 0x70, 0x8d, 0x43, 0x91, 0xae, 0x42, 0xc9, 0xaa, - 0xab, 0x0c, 0xb4, 0x5b, 0x11, 0x29, 0xc5, 0x8d, 0x15, 0x74, 0x8a, 0xee, 0x93, 0x99, 0x43, 0x1b, - 0x92, 0xe9, 0x94, 0x99, 0x49, 0x25, 0x6f, 0xe1, 0x3b, 0xb9, 0x71, 0xe9, 0x23, 0x48, 0x7d, 0x11, - 0x99, 0x69, 0xa1, 0x5a, 0xeb, 0xee, 0xff, 0x0f, 0x5f, 0xbe, 0x9c, 0x99, 0x81, 0xf3, 0x79, 0x61, - 0x17, 0x75, 0x9e, 0x72, 0x25, 0xa9, 0x1c, 0x89, 0x9c, 0xca, 0x11, 0x35, 0x9a, 0x53, 0x5e, 0xd5, - 0xc6, 0xa2, 0xa6, 0x73, 0x5c, 0xa2, 0xce, 0x2c, 0x0a, 0xba, 0xd2, 0xca, 0x2a, 0x5a, 0xae, 0x57, - 0x39, 0x2d, 0xd7, 0xa9, 0x6f, 0xe4, 0x8f, 0xab, 0xc9, 0x2d, 0xfc, 0xbf, 0xc6, 0xe6, 0x21, 0xab, - 0x6a, 0xbc, 0x5f, 0x89, 0xcc, 0x22, 0x89, 0x20, 0x2c, 0xb1, 0xe9, 0x05, 0xfd, 0x60, 0xd0, 0x66, - 0x2e, 0x92, 0x2e, 0xfc, 0x5d, 0x3b, 0xa0, 0xf7, 0xdb, 0xcf, 0xb6, 0x85, 0x9c, 0x41, 0x8b, 0x2b, - 0x29, 0x0b, 0xdb, 0x0b, 0xfb, 0xc1, 0xe0, 0x1f, 0xdb, 0xb5, 0x64, 0x0a, 0xdd, 0xaf, 0x46, 0x86, - 0xa6, 0xae, 
0xec, 0x11, 0x6f, 0x04, 0xa1, 0xaa, 0xc4, 0xce, 0xea, 0xa2, 0x9b, 0x2c, 0xf1, 0xd1, - 0x0b, 0xdb, 0xcc, 0xc5, 0xe4, 0x39, 0x80, 0xce, 0x5d, 0x8d, 0xba, 0x99, 0x16, 0xb2, 0xb0, 0x86, - 0x0c, 0xa1, 0x33, 0x51, 0xdc, 0xdc, 0x64, 0x96, 0x2f, 0x50, 0x78, 0x5b, 0x67, 0x18, 0xa5, 0xee, - 0x2c, 0xe9, 0x9e, 0x63, 0x9f, 0x21, 0x72, 0x09, 0x64, 0x86, 0xba, 0x40, 0xc3, 0x30, 0x13, 0x57, - 0x5a, 0xc9, 0x49, 0x61, 0x4a, 0xff, 0xdb, 0x63, 0x9f, 0x1e, 0x61, 0xc9, 0x05, 0x9c, 0x8e, 0x1b, - 0x7b, 0x20, 0x08, 0x7f, 0x10, 0x7c, 0x47, 0x93, 0x29, 0xc0, 0x1e, 0x70, 0xf7, 0x59, 0xb9, 0xe0, - 0xb7, 0x0f, 0xd9, 0xb6, 0x90, 0x01, 0x9c, 0x54, 0x4a, 0x95, 0x79, 0xc6, 0xcb, 0x19, 0x72, 0xb5, - 0x14, 0xc6, 0xaf, 0x18, 0xb2, 0xc3, 0xf1, 0x38, 0x7a, 0xd9, 0xc4, 0xc1, 0xeb, 0x26, 0x0e, 0xde, - 0x36, 0x71, 0xf0, 0xf4, 0x1e, 0xff, 0xca, 0x5b, 0xfe, 0x49, 0x47, 0x1f, 0x01, 0x00, 0x00, 0xff, - 0xff, 0xe6, 0xe7, 0x13, 0x0d, 0x12, 0x02, 0x00, 0x00, + // 347 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0xc1, 0x4e, 0xe3, 0x30, + 0x14, 0x45, 0x27, 0x93, 0x99, 0x6a, 0xfa, 0xaa, 0x99, 0xc9, 0x58, 0xd5, 0xa8, 0xab, 0xa8, 0x8a, + 0x58, 0x74, 0x15, 0x4b, 0xed, 0x16, 0x21, 0x54, 0x15, 0x36, 0x14, 0x09, 0x5c, 0xc1, 0x3e, 0xb1, + 0x1f, 0x6d, 0x94, 0xa4, 0xae, 0x6c, 0xa7, 0x90, 0xbf, 0xe0, 0x9f, 0xd8, 0xb0, 0xe4, 0x13, 0x50, + 0xf9, 0x11, 0x64, 0xb7, 0x52, 0x69, 0x29, 0xbb, 0x7b, 0xaf, 0xae, 0x8f, 0x9f, 0xfd, 0xe0, 0x78, + 0x9a, 0x99, 0x59, 0x95, 0xc6, 0x5c, 0x96, 0xb4, 0x1c, 0x88, 0x94, 0x96, 0x03, 0xaa, 0x15, 0xa7, + 0xbc, 0xa8, 0xb4, 0x41, 0x45, 0xa7, 0x38, 0x47, 0x95, 0x18, 0x14, 0x74, 0xa1, 0xa4, 0x91, 0x34, + 0x5f, 0x2e, 0x52, 0x9a, 0x2f, 0x63, 0xe7, 0xc8, 0x0f, 0x6b, 0xa3, 0x2b, 0xf8, 0x73, 0x81, 0xf5, + 0x6d, 0x52, 0x54, 0x78, 0xb3, 0x10, 0x89, 0x41, 0x12, 0x80, 0x9f, 0x63, 0xdd, 0xf1, 0xba, 0x5e, + 0xaf, 0xc9, 0xac, 0x24, 0x6d, 0xf8, 0xb9, 0xb4, 0x85, 0xce, 0x77, 0x97, 0xad, 0x0d, 0xf9, 0x0f, + 0x0d, 0x2e, 0xcb, 0x32, 0x33, 0x1d, 0xbf, 
0xeb, 0xf5, 0x7e, 0xb1, 0x8d, 0x8b, 0xc6, 0xd0, 0xde, + 0x25, 0x32, 0xd4, 0x55, 0x61, 0x0e, 0x70, 0x03, 0xf0, 0x65, 0x21, 0x36, 0x54, 0x2b, 0x6d, 0x32, + 0xc7, 0x7b, 0x07, 0x6c, 0x32, 0x2b, 0xa3, 0x27, 0x0f, 0x5a, 0xd7, 0x15, 0xaa, 0x7a, 0x9c, 0x95, + 0x99, 0xd1, 0xa4, 0x0f, 0xad, 0x91, 0xe4, 0xfa, 0x32, 0x31, 0x7c, 0x86, 0xc2, 0xd1, 0x5a, 0xfd, + 0x20, 0xb6, 0x6f, 0x89, 0xb7, 0x3d, 0xf6, 0xb1, 0x44, 0x4e, 0x81, 0x4c, 0x50, 0x65, 0xa8, 0x19, + 0x26, 0xe2, 0x5c, 0xc9, 0x72, 0x94, 0xe9, 0xdc, 0x5d, 0x7b, 0xe8, 0xe8, 0x81, 0x2e, 0x39, 0x81, + 0x7f, 0xc3, 0xda, 0xec, 0x01, 0xfc, 0x2f, 0x00, 0x9f, 0xab, 0x91, 0x02, 0xd8, 0x16, 0xec, 0x7f, + 0x16, 0x56, 0xb8, 0xe9, 0x7d, 0xb6, 0x36, 0xa4, 0x07, 0x7f, 0x0b, 0x29, 0xf3, 0x34, 0xe1, 0xf9, + 0x04, 0xb9, 0x9c, 0x0b, 0xed, 0x46, 0xf4, 0xd9, 0x7e, 0x4c, 0x8e, 0xe0, 0xf7, 0x9d, 0x54, 0x1c, + 0xcf, 0x1e, 0x38, 0xa2, 0x40, 0xb1, 0x59, 0xc0, 0x6e, 0x38, 0x0c, 0x9e, 0x57, 0xa1, 0xf7, 0xb2, + 0x0a, 0xbd, 0xd7, 0x55, 0xe8, 0x3d, 0xbe, 0x85, 0xdf, 0xd2, 0x86, 0x5b, 0xfc, 0xe0, 0x3d, 0x00, + 0x00, 0xff, 0xff, 0xd3, 0x2a, 0x27, 0x0a, 0x38, 0x02, 0x00, 0x00, } diff --git a/src/cluster/generated/proto/kvpb/kv.proto b/src/cluster/generated/proto/kvpb/kv.proto index 2623ae7d62..5b658422ba 100644 --- a/src/cluster/generated/proto/kvpb/kv.proto +++ b/src/cluster/generated/proto/kvpb/kv.proto @@ -42,6 +42,7 @@ message QueryLimits { message QueryLimit { int64 limit = 1; int64 lookbackSeconds = 2; + bool forceExceeded = 3; } diff --git a/src/dbnode/server/server.go b/src/dbnode/server/server.go index bc9d329fd6..dcf494b73a 100644 --- a/src/dbnode/server/server.go +++ b/src/dbnode/server/server.go @@ -453,21 +453,15 @@ func Run(runOpts RunOptions) { bytesReadLimit := limits.DefaultLookbackLimitOptions() diskSeriesReadLimit := limits.DefaultLookbackLimitOptions() if limitConfig := runOpts.Config.Limits.MaxRecentlyQueriedSeriesBlocks; limitConfig != nil { - if limitConfig.Value != 0 { - docsLimit.Limit = &limitConfig.Value - } + docsLimit.Limit = 
limitConfig.Value docsLimit.Lookback = limitConfig.Lookback } if limitConfig := runOpts.Config.Limits.MaxRecentlyQueriedSeriesDiskBytesRead; limitConfig != nil { - if limitConfig.Value != 0 { - bytesReadLimit.Limit = &limitConfig.Value - } + bytesReadLimit.Limit = limitConfig.Value bytesReadLimit.Lookback = limitConfig.Lookback } if limitConfig := runOpts.Config.Limits.MaxRecentlyQueriedSeriesDiskRead; limitConfig != nil { - if limitConfig.Value != 0 { - diskSeriesReadLimit.Limit = &limitConfig.Value - } + diskSeriesReadLimit.Limit = limitConfig.Value diskSeriesReadLimit.Lookback = limitConfig.Lookback } limitOpts := limits.NewOptions(). @@ -1233,12 +1227,14 @@ func updateQueryLimit(logger *zap.Logger, ) error { limitOpts := limits.LookbackLimitOptions{ // If the settings are nil, then that means the limit is disabled. - Limit: nil, - Lookback: limit.Options().Lookback, + Limit: limits.DisabledLimitValue, + Lookback: limit.Options().Lookback, + ForceExceeded: false, } if settings != nil { - limitOpts.Limit = &settings.Limit + limitOpts.Limit = settings.Limit limitOpts.Lookback = time.Second * time.Duration(settings.LookbackSeconds) + limitOpts.ForceExceeded = settings.ForceExceeded } return limit.Update(limitOpts) } diff --git a/src/dbnode/storage/limits/query_limits.go b/src/dbnode/storage/limits/query_limits.go index c86f2f3a97..4199576bf9 100644 --- a/src/dbnode/storage/limits/query_limits.go +++ b/src/dbnode/storage/limits/query_limits.go @@ -34,7 +34,9 @@ import ( ) const ( - disabledLimitValue = 0 + // DisabledLimitValue is the value, when set to + // a limit, disables the enforcement of that limit. + DisabledLimitValue = 0 defaultLookback = time.Second * 15 ) @@ -75,7 +77,7 @@ var ( func DefaultLookbackLimitOptions() LookbackLimitOptions { return LookbackLimitOptions{ // Default to no limit. 
- Limit: disabledLimitValue, + Limit: DisabledLimitValue, Lookback: defaultLookback, } } @@ -250,7 +252,7 @@ func (q *lookbackLimit) checkLimit(recent int64) error { "query aborted due to forced limit: name=%s", q.name))) } - if currentOpts.Limit == disabledLimitValue { + if currentOpts.Limit == DisabledLimitValue { return nil } diff --git a/src/m3em/generated/proto/m3em/m3em_mock.go b/src/m3em/generated/proto/m3em/m3em_mock.go deleted file mode 100644 index 24d6d67813..0000000000 --- a/src/m3em/generated/proto/m3em/m3em_mock.go +++ /dev/null @@ -1,556 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/m3db/m3/src/m3em/generated/proto/m3em (interfaces: OperatorClient,Operator_PushFileClient,Operator_PullFileClient,Operator_PullFileServer) - -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Package m3em is a generated GoMock package. 
-package m3em - -import ( - "context" - "reflect" - - "github.com/golang/mock/gomock" - "google.golang.org/grpc" - "google.golang.org/grpc/metadata" -) - -// MockOperatorClient is a mock of OperatorClient interface -type MockOperatorClient struct { - ctrl *gomock.Controller - recorder *MockOperatorClientMockRecorder -} - -// MockOperatorClientMockRecorder is the mock recorder for MockOperatorClient -type MockOperatorClientMockRecorder struct { - mock *MockOperatorClient -} - -// NewMockOperatorClient creates a new mock instance -func NewMockOperatorClient(ctrl *gomock.Controller) *MockOperatorClient { - mock := &MockOperatorClient{ctrl: ctrl} - mock.recorder = &MockOperatorClientMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use -func (m *MockOperatorClient) EXPECT() *MockOperatorClientMockRecorder { - return m.recorder -} - -// PullFile mocks base method -func (m *MockOperatorClient) PullFile(arg0 context.Context, arg1 *PullFileRequest, arg2 ...grpc.CallOption) (Operator_PullFileClient, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PullFile", varargs...) - ret0, _ := ret[0].(Operator_PullFileClient) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PullFile indicates an expected call of PullFile -func (mr *MockOperatorClientMockRecorder) PullFile(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PullFile", reflect.TypeOf((*MockOperatorClient)(nil).PullFile), varargs...) 
-} - -// PushFile mocks base method -func (m *MockOperatorClient) PushFile(arg0 context.Context, arg1 ...grpc.CallOption) (Operator_PushFileClient, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0} - for _, a := range arg1 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PushFile", varargs...) - ret0, _ := ret[0].(Operator_PushFileClient) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PushFile indicates an expected call of PushFile -func (mr *MockOperatorClientMockRecorder) PushFile(arg0 interface{}, arg1 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0}, arg1...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PushFile", reflect.TypeOf((*MockOperatorClient)(nil).PushFile), varargs...) -} - -// Setup mocks base method -func (m *MockOperatorClient) Setup(arg0 context.Context, arg1 *SetupRequest, arg2 ...grpc.CallOption) (*SetupResponse, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "Setup", varargs...) - ret0, _ := ret[0].(*SetupResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Setup indicates an expected call of Setup -func (mr *MockOperatorClientMockRecorder) Setup(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Setup", reflect.TypeOf((*MockOperatorClient)(nil).Setup), varargs...) -} - -// Start mocks base method -func (m *MockOperatorClient) Start(arg0 context.Context, arg1 *StartRequest, arg2 ...grpc.CallOption) (*StartResponse, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "Start", varargs...) 
- ret0, _ := ret[0].(*StartResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Start indicates an expected call of Start -func (mr *MockOperatorClientMockRecorder) Start(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockOperatorClient)(nil).Start), varargs...) -} - -// Stop mocks base method -func (m *MockOperatorClient) Stop(arg0 context.Context, arg1 *StopRequest, arg2 ...grpc.CallOption) (*StopResponse, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "Stop", varargs...) - ret0, _ := ret[0].(*StopResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Stop indicates an expected call of Stop -func (mr *MockOperatorClientMockRecorder) Stop(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", reflect.TypeOf((*MockOperatorClient)(nil).Stop), varargs...) -} - -// Teardown mocks base method -func (m *MockOperatorClient) Teardown(arg0 context.Context, arg1 *TeardownRequest, arg2 ...grpc.CallOption) (*TeardownResponse, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "Teardown", varargs...) - ret0, _ := ret[0].(*TeardownResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Teardown indicates an expected call of Teardown -func (mr *MockOperatorClientMockRecorder) Teardown(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) 
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Teardown", reflect.TypeOf((*MockOperatorClient)(nil).Teardown), varargs...) -} - -// MockOperator_PushFileClient is a mock of Operator_PushFileClient interface -type MockOperator_PushFileClient struct { - ctrl *gomock.Controller - recorder *MockOperator_PushFileClientMockRecorder -} - -// MockOperator_PushFileClientMockRecorder is the mock recorder for MockOperator_PushFileClient -type MockOperator_PushFileClientMockRecorder struct { - mock *MockOperator_PushFileClient -} - -// NewMockOperator_PushFileClient creates a new mock instance -func NewMockOperator_PushFileClient(ctrl *gomock.Controller) *MockOperator_PushFileClient { - mock := &MockOperator_PushFileClient{ctrl: ctrl} - mock.recorder = &MockOperator_PushFileClientMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use -func (m *MockOperator_PushFileClient) EXPECT() *MockOperator_PushFileClientMockRecorder { - return m.recorder -} - -// CloseAndRecv mocks base method -func (m *MockOperator_PushFileClient) CloseAndRecv() (*PushFileResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CloseAndRecv") - ret0, _ := ret[0].(*PushFileResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// CloseAndRecv indicates an expected call of CloseAndRecv -func (mr *MockOperator_PushFileClientMockRecorder) CloseAndRecv() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseAndRecv", reflect.TypeOf((*MockOperator_PushFileClient)(nil).CloseAndRecv)) -} - -// CloseSend mocks base method -func (m *MockOperator_PushFileClient) CloseSend() error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CloseSend") - ret0, _ := ret[0].(error) - return ret0 -} - -// CloseSend indicates an expected call of CloseSend -func (mr *MockOperator_PushFileClientMockRecorder) CloseSend() *gomock.Call { - mr.mock.ctrl.T.Helper() - return 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseSend", reflect.TypeOf((*MockOperator_PushFileClient)(nil).CloseSend)) -} - -// Context mocks base method -func (m *MockOperator_PushFileClient) Context() context.Context { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Context") - ret0, _ := ret[0].(context.Context) - return ret0 -} - -// Context indicates an expected call of Context -func (mr *MockOperator_PushFileClientMockRecorder) Context() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockOperator_PushFileClient)(nil).Context)) -} - -// Header mocks base method -func (m *MockOperator_PushFileClient) Header() (metadata.MD, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Header") - ret0, _ := ret[0].(metadata.MD) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Header indicates an expected call of Header -func (mr *MockOperator_PushFileClientMockRecorder) Header() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Header", reflect.TypeOf((*MockOperator_PushFileClient)(nil).Header)) -} - -// RecvMsg mocks base method -func (m *MockOperator_PushFileClient) RecvMsg(arg0 interface{}) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RecvMsg", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// RecvMsg indicates an expected call of RecvMsg -func (mr *MockOperator_PushFileClientMockRecorder) RecvMsg(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockOperator_PushFileClient)(nil).RecvMsg), arg0) -} - -// Send mocks base method -func (m *MockOperator_PushFileClient) Send(arg0 *PushFileRequest) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Send", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// Send indicates an expected call of Send -func (mr *MockOperator_PushFileClientMockRecorder) Send(arg0 interface{}) 
*gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockOperator_PushFileClient)(nil).Send), arg0) -} - -// SendMsg mocks base method -func (m *MockOperator_PushFileClient) SendMsg(arg0 interface{}) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SendMsg", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// SendMsg indicates an expected call of SendMsg -func (mr *MockOperator_PushFileClientMockRecorder) SendMsg(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockOperator_PushFileClient)(nil).SendMsg), arg0) -} - -// Trailer mocks base method -func (m *MockOperator_PushFileClient) Trailer() metadata.MD { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Trailer") - ret0, _ := ret[0].(metadata.MD) - return ret0 -} - -// Trailer indicates an expected call of Trailer -func (mr *MockOperator_PushFileClientMockRecorder) Trailer() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Trailer", reflect.TypeOf((*MockOperator_PushFileClient)(nil).Trailer)) -} - -// MockOperator_PullFileClient is a mock of Operator_PullFileClient interface -type MockOperator_PullFileClient struct { - ctrl *gomock.Controller - recorder *MockOperator_PullFileClientMockRecorder -} - -// MockOperator_PullFileClientMockRecorder is the mock recorder for MockOperator_PullFileClient -type MockOperator_PullFileClientMockRecorder struct { - mock *MockOperator_PullFileClient -} - -// NewMockOperator_PullFileClient creates a new mock instance -func NewMockOperator_PullFileClient(ctrl *gomock.Controller) *MockOperator_PullFileClient { - mock := &MockOperator_PullFileClient{ctrl: ctrl} - mock.recorder = &MockOperator_PullFileClientMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use -func (m *MockOperator_PullFileClient) EXPECT() 
*MockOperator_PullFileClientMockRecorder { - return m.recorder -} - -// CloseSend mocks base method -func (m *MockOperator_PullFileClient) CloseSend() error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CloseSend") - ret0, _ := ret[0].(error) - return ret0 -} - -// CloseSend indicates an expected call of CloseSend -func (mr *MockOperator_PullFileClientMockRecorder) CloseSend() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseSend", reflect.TypeOf((*MockOperator_PullFileClient)(nil).CloseSend)) -} - -// Context mocks base method -func (m *MockOperator_PullFileClient) Context() context.Context { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Context") - ret0, _ := ret[0].(context.Context) - return ret0 -} - -// Context indicates an expected call of Context -func (mr *MockOperator_PullFileClientMockRecorder) Context() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockOperator_PullFileClient)(nil).Context)) -} - -// Header mocks base method -func (m *MockOperator_PullFileClient) Header() (metadata.MD, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Header") - ret0, _ := ret[0].(metadata.MD) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Header indicates an expected call of Header -func (mr *MockOperator_PullFileClientMockRecorder) Header() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Header", reflect.TypeOf((*MockOperator_PullFileClient)(nil).Header)) -} - -// Recv mocks base method -func (m *MockOperator_PullFileClient) Recv() (*PullFileResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Recv") - ret0, _ := ret[0].(*PullFileResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Recv indicates an expected call of Recv -func (mr *MockOperator_PullFileClientMockRecorder) Recv() *gomock.Call { - mr.mock.ctrl.T.Helper() - return 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Recv", reflect.TypeOf((*MockOperator_PullFileClient)(nil).Recv)) -} - -// RecvMsg mocks base method -func (m *MockOperator_PullFileClient) RecvMsg(arg0 interface{}) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RecvMsg", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// RecvMsg indicates an expected call of RecvMsg -func (mr *MockOperator_PullFileClientMockRecorder) RecvMsg(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockOperator_PullFileClient)(nil).RecvMsg), arg0) -} - -// SendMsg mocks base method -func (m *MockOperator_PullFileClient) SendMsg(arg0 interface{}) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SendMsg", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// SendMsg indicates an expected call of SendMsg -func (mr *MockOperator_PullFileClientMockRecorder) SendMsg(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockOperator_PullFileClient)(nil).SendMsg), arg0) -} - -// Trailer mocks base method -func (m *MockOperator_PullFileClient) Trailer() metadata.MD { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Trailer") - ret0, _ := ret[0].(metadata.MD) - return ret0 -} - -// Trailer indicates an expected call of Trailer -func (mr *MockOperator_PullFileClientMockRecorder) Trailer() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Trailer", reflect.TypeOf((*MockOperator_PullFileClient)(nil).Trailer)) -} - -// MockOperator_PullFileServer is a mock of Operator_PullFileServer interface -type MockOperator_PullFileServer struct { - ctrl *gomock.Controller - recorder *MockOperator_PullFileServerMockRecorder -} - -// MockOperator_PullFileServerMockRecorder is the mock recorder for MockOperator_PullFileServer -type MockOperator_PullFileServerMockRecorder struct { 
- mock *MockOperator_PullFileServer -} - -// NewMockOperator_PullFileServer creates a new mock instance -func NewMockOperator_PullFileServer(ctrl *gomock.Controller) *MockOperator_PullFileServer { - mock := &MockOperator_PullFileServer{ctrl: ctrl} - mock.recorder = &MockOperator_PullFileServerMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use -func (m *MockOperator_PullFileServer) EXPECT() *MockOperator_PullFileServerMockRecorder { - return m.recorder -} - -// Context mocks base method -func (m *MockOperator_PullFileServer) Context() context.Context { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Context") - ret0, _ := ret[0].(context.Context) - return ret0 -} - -// Context indicates an expected call of Context -func (mr *MockOperator_PullFileServerMockRecorder) Context() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockOperator_PullFileServer)(nil).Context)) -} - -// RecvMsg mocks base method -func (m *MockOperator_PullFileServer) RecvMsg(arg0 interface{}) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RecvMsg", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// RecvMsg indicates an expected call of RecvMsg -func (mr *MockOperator_PullFileServerMockRecorder) RecvMsg(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockOperator_PullFileServer)(nil).RecvMsg), arg0) -} - -// Send mocks base method -func (m *MockOperator_PullFileServer) Send(arg0 *PullFileResponse) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Send", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// Send indicates an expected call of Send -func (mr *MockOperator_PullFileServerMockRecorder) Send(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", 
reflect.TypeOf((*MockOperator_PullFileServer)(nil).Send), arg0) -} - -// SendHeader mocks base method -func (m *MockOperator_PullFileServer) SendHeader(arg0 metadata.MD) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SendHeader", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// SendHeader indicates an expected call of SendHeader -func (mr *MockOperator_PullFileServerMockRecorder) SendHeader(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendHeader", reflect.TypeOf((*MockOperator_PullFileServer)(nil).SendHeader), arg0) -} - -// SendMsg mocks base method -func (m *MockOperator_PullFileServer) SendMsg(arg0 interface{}) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SendMsg", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// SendMsg indicates an expected call of SendMsg -func (mr *MockOperator_PullFileServerMockRecorder) SendMsg(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockOperator_PullFileServer)(nil).SendMsg), arg0) -} - -// SetHeader mocks base method -func (m *MockOperator_PullFileServer) SetHeader(arg0 metadata.MD) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetHeader", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// SetHeader indicates an expected call of SetHeader -func (mr *MockOperator_PullFileServerMockRecorder) SetHeader(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHeader", reflect.TypeOf((*MockOperator_PullFileServer)(nil).SetHeader), arg0) -} - -// SetTrailer mocks base method -func (m *MockOperator_PullFileServer) SetTrailer(arg0 metadata.MD) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetTrailer", arg0) -} - -// SetTrailer indicates an expected call of SetTrailer -func (mr *MockOperator_PullFileServerMockRecorder) SetTrailer(arg0 interface{}) *gomock.Call { - 
mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTrailer", reflect.TypeOf((*MockOperator_PullFileServer)(nil).SetTrailer), arg0) -} From 7dc8689975deb8802a56c2120f60b9a5c2b5393f Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Tue, 19 Jan 2021 18:49:54 -0500 Subject: [PATCH 48/80] Integration test fix --- src/dbnode/integration/query_limit_test.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/dbnode/integration/query_limit_test.go b/src/dbnode/integration/query_limit_test.go index 111f9d4a1b..53a00b7665 100644 --- a/src/dbnode/integration/query_limit_test.go +++ b/src/dbnode/integration/query_limit_test.go @@ -87,9 +87,8 @@ func newTestOptionsWithIndexedNamespace(t *testing.T) (TestOptions, namespace.Me func newTestSetupWithQueryLimits(t *testing.T, opts TestOptions) TestSetup { storageLimitsFn := func(storageOpts storage.Options) storage.Options { - limit := int64(1) queryLookback := limits.DefaultLookbackLimitOptions() - queryLookback.Limit = &limit + queryLookback.Limit = 1 queryLookback.Lookback = time.Hour limitOpts := limits.NewOptions(). From 3ec76733c2391c649487f6ab4b44746286909cfc Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Tue, 19 Jan 2021 19:02:44 -0500 Subject: [PATCH 49/80] Add back mock --- src/m3em/generated/proto/m3em/m3em_mock.go | 556 +++++++++++++++++++++ 1 file changed, 556 insertions(+) create mode 100644 src/m3em/generated/proto/m3em/m3em_mock.go diff --git a/src/m3em/generated/proto/m3em/m3em_mock.go b/src/m3em/generated/proto/m3em/m3em_mock.go new file mode 100644 index 0000000000..24d6d67813 --- /dev/null +++ b/src/m3em/generated/proto/m3em/m3em_mock.go @@ -0,0 +1,556 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/m3db/m3/src/m3em/generated/proto/m3em (interfaces: OperatorClient,Operator_PushFileClient,Operator_PullFileClient,Operator_PullFileServer) + +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package m3em is a generated GoMock package. 
+package m3em + +import ( + "context" + "reflect" + + "github.com/golang/mock/gomock" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" +) + +// MockOperatorClient is a mock of OperatorClient interface +type MockOperatorClient struct { + ctrl *gomock.Controller + recorder *MockOperatorClientMockRecorder +} + +// MockOperatorClientMockRecorder is the mock recorder for MockOperatorClient +type MockOperatorClientMockRecorder struct { + mock *MockOperatorClient +} + +// NewMockOperatorClient creates a new mock instance +func NewMockOperatorClient(ctrl *gomock.Controller) *MockOperatorClient { + mock := &MockOperatorClient{ctrl: ctrl} + mock.recorder = &MockOperatorClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockOperatorClient) EXPECT() *MockOperatorClientMockRecorder { + return m.recorder +} + +// PullFile mocks base method +func (m *MockOperatorClient) PullFile(arg0 context.Context, arg1 *PullFileRequest, arg2 ...grpc.CallOption) (Operator_PullFileClient, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PullFile", varargs...) + ret0, _ := ret[0].(Operator_PullFileClient) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PullFile indicates an expected call of PullFile +func (mr *MockOperatorClientMockRecorder) PullFile(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PullFile", reflect.TypeOf((*MockOperatorClient)(nil).PullFile), varargs...) 
+} + +// PushFile mocks base method +func (m *MockOperatorClient) PushFile(arg0 context.Context, arg1 ...grpc.CallOption) (Operator_PushFileClient, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0} + for _, a := range arg1 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PushFile", varargs...) + ret0, _ := ret[0].(Operator_PushFileClient) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PushFile indicates an expected call of PushFile +func (mr *MockOperatorClientMockRecorder) PushFile(arg0 interface{}, arg1 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0}, arg1...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PushFile", reflect.TypeOf((*MockOperatorClient)(nil).PushFile), varargs...) +} + +// Setup mocks base method +func (m *MockOperatorClient) Setup(arg0 context.Context, arg1 *SetupRequest, arg2 ...grpc.CallOption) (*SetupResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "Setup", varargs...) + ret0, _ := ret[0].(*SetupResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Setup indicates an expected call of Setup +func (mr *MockOperatorClientMockRecorder) Setup(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Setup", reflect.TypeOf((*MockOperatorClient)(nil).Setup), varargs...) +} + +// Start mocks base method +func (m *MockOperatorClient) Start(arg0 context.Context, arg1 *StartRequest, arg2 ...grpc.CallOption) (*StartResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "Start", varargs...) 
+ ret0, _ := ret[0].(*StartResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Start indicates an expected call of Start +func (mr *MockOperatorClientMockRecorder) Start(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockOperatorClient)(nil).Start), varargs...) +} + +// Stop mocks base method +func (m *MockOperatorClient) Stop(arg0 context.Context, arg1 *StopRequest, arg2 ...grpc.CallOption) (*StopResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "Stop", varargs...) + ret0, _ := ret[0].(*StopResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Stop indicates an expected call of Stop +func (mr *MockOperatorClientMockRecorder) Stop(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", reflect.TypeOf((*MockOperatorClient)(nil).Stop), varargs...) +} + +// Teardown mocks base method +func (m *MockOperatorClient) Teardown(arg0 context.Context, arg1 *TeardownRequest, arg2 ...grpc.CallOption) (*TeardownResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "Teardown", varargs...) + ret0, _ := ret[0].(*TeardownResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Teardown indicates an expected call of Teardown +func (mr *MockOperatorClientMockRecorder) Teardown(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) 
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Teardown", reflect.TypeOf((*MockOperatorClient)(nil).Teardown), varargs...) +} + +// MockOperator_PushFileClient is a mock of Operator_PushFileClient interface +type MockOperator_PushFileClient struct { + ctrl *gomock.Controller + recorder *MockOperator_PushFileClientMockRecorder +} + +// MockOperator_PushFileClientMockRecorder is the mock recorder for MockOperator_PushFileClient +type MockOperator_PushFileClientMockRecorder struct { + mock *MockOperator_PushFileClient +} + +// NewMockOperator_PushFileClient creates a new mock instance +func NewMockOperator_PushFileClient(ctrl *gomock.Controller) *MockOperator_PushFileClient { + mock := &MockOperator_PushFileClient{ctrl: ctrl} + mock.recorder = &MockOperator_PushFileClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockOperator_PushFileClient) EXPECT() *MockOperator_PushFileClientMockRecorder { + return m.recorder +} + +// CloseAndRecv mocks base method +func (m *MockOperator_PushFileClient) CloseAndRecv() (*PushFileResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CloseAndRecv") + ret0, _ := ret[0].(*PushFileResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CloseAndRecv indicates an expected call of CloseAndRecv +func (mr *MockOperator_PushFileClientMockRecorder) CloseAndRecv() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseAndRecv", reflect.TypeOf((*MockOperator_PushFileClient)(nil).CloseAndRecv)) +} + +// CloseSend mocks base method +func (m *MockOperator_PushFileClient) CloseSend() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CloseSend") + ret0, _ := ret[0].(error) + return ret0 +} + +// CloseSend indicates an expected call of CloseSend +func (mr *MockOperator_PushFileClientMockRecorder) CloseSend() *gomock.Call { + mr.mock.ctrl.T.Helper() + return 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseSend", reflect.TypeOf((*MockOperator_PushFileClient)(nil).CloseSend)) +} + +// Context mocks base method +func (m *MockOperator_PushFileClient) Context() context.Context { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Context") + ret0, _ := ret[0].(context.Context) + return ret0 +} + +// Context indicates an expected call of Context +func (mr *MockOperator_PushFileClientMockRecorder) Context() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockOperator_PushFileClient)(nil).Context)) +} + +// Header mocks base method +func (m *MockOperator_PushFileClient) Header() (metadata.MD, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Header") + ret0, _ := ret[0].(metadata.MD) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Header indicates an expected call of Header +func (mr *MockOperator_PushFileClientMockRecorder) Header() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Header", reflect.TypeOf((*MockOperator_PushFileClient)(nil).Header)) +} + +// RecvMsg mocks base method +func (m *MockOperator_PushFileClient) RecvMsg(arg0 interface{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RecvMsg", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// RecvMsg indicates an expected call of RecvMsg +func (mr *MockOperator_PushFileClientMockRecorder) RecvMsg(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockOperator_PushFileClient)(nil).RecvMsg), arg0) +} + +// Send mocks base method +func (m *MockOperator_PushFileClient) Send(arg0 *PushFileRequest) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Send", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Send indicates an expected call of Send +func (mr *MockOperator_PushFileClientMockRecorder) Send(arg0 interface{}) 
*gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockOperator_PushFileClient)(nil).Send), arg0) +} + +// SendMsg mocks base method +func (m *MockOperator_PushFileClient) SendMsg(arg0 interface{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SendMsg", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendMsg indicates an expected call of SendMsg +func (mr *MockOperator_PushFileClientMockRecorder) SendMsg(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockOperator_PushFileClient)(nil).SendMsg), arg0) +} + +// Trailer mocks base method +func (m *MockOperator_PushFileClient) Trailer() metadata.MD { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Trailer") + ret0, _ := ret[0].(metadata.MD) + return ret0 +} + +// Trailer indicates an expected call of Trailer +func (mr *MockOperator_PushFileClientMockRecorder) Trailer() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Trailer", reflect.TypeOf((*MockOperator_PushFileClient)(nil).Trailer)) +} + +// MockOperator_PullFileClient is a mock of Operator_PullFileClient interface +type MockOperator_PullFileClient struct { + ctrl *gomock.Controller + recorder *MockOperator_PullFileClientMockRecorder +} + +// MockOperator_PullFileClientMockRecorder is the mock recorder for MockOperator_PullFileClient +type MockOperator_PullFileClientMockRecorder struct { + mock *MockOperator_PullFileClient +} + +// NewMockOperator_PullFileClient creates a new mock instance +func NewMockOperator_PullFileClient(ctrl *gomock.Controller) *MockOperator_PullFileClient { + mock := &MockOperator_PullFileClient{ctrl: ctrl} + mock.recorder = &MockOperator_PullFileClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockOperator_PullFileClient) EXPECT() 
*MockOperator_PullFileClientMockRecorder { + return m.recorder +} + +// CloseSend mocks base method +func (m *MockOperator_PullFileClient) CloseSend() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CloseSend") + ret0, _ := ret[0].(error) + return ret0 +} + +// CloseSend indicates an expected call of CloseSend +func (mr *MockOperator_PullFileClientMockRecorder) CloseSend() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseSend", reflect.TypeOf((*MockOperator_PullFileClient)(nil).CloseSend)) +} + +// Context mocks base method +func (m *MockOperator_PullFileClient) Context() context.Context { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Context") + ret0, _ := ret[0].(context.Context) + return ret0 +} + +// Context indicates an expected call of Context +func (mr *MockOperator_PullFileClientMockRecorder) Context() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockOperator_PullFileClient)(nil).Context)) +} + +// Header mocks base method +func (m *MockOperator_PullFileClient) Header() (metadata.MD, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Header") + ret0, _ := ret[0].(metadata.MD) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Header indicates an expected call of Header +func (mr *MockOperator_PullFileClientMockRecorder) Header() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Header", reflect.TypeOf((*MockOperator_PullFileClient)(nil).Header)) +} + +// Recv mocks base method +func (m *MockOperator_PullFileClient) Recv() (*PullFileResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Recv") + ret0, _ := ret[0].(*PullFileResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Recv indicates an expected call of Recv +func (mr *MockOperator_PullFileClientMockRecorder) Recv() *gomock.Call { + mr.mock.ctrl.T.Helper() + return 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Recv", reflect.TypeOf((*MockOperator_PullFileClient)(nil).Recv)) +} + +// RecvMsg mocks base method +func (m *MockOperator_PullFileClient) RecvMsg(arg0 interface{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RecvMsg", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// RecvMsg indicates an expected call of RecvMsg +func (mr *MockOperator_PullFileClientMockRecorder) RecvMsg(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockOperator_PullFileClient)(nil).RecvMsg), arg0) +} + +// SendMsg mocks base method +func (m *MockOperator_PullFileClient) SendMsg(arg0 interface{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SendMsg", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendMsg indicates an expected call of SendMsg +func (mr *MockOperator_PullFileClientMockRecorder) SendMsg(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockOperator_PullFileClient)(nil).SendMsg), arg0) +} + +// Trailer mocks base method +func (m *MockOperator_PullFileClient) Trailer() metadata.MD { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Trailer") + ret0, _ := ret[0].(metadata.MD) + return ret0 +} + +// Trailer indicates an expected call of Trailer +func (mr *MockOperator_PullFileClientMockRecorder) Trailer() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Trailer", reflect.TypeOf((*MockOperator_PullFileClient)(nil).Trailer)) +} + +// MockOperator_PullFileServer is a mock of Operator_PullFileServer interface +type MockOperator_PullFileServer struct { + ctrl *gomock.Controller + recorder *MockOperator_PullFileServerMockRecorder +} + +// MockOperator_PullFileServerMockRecorder is the mock recorder for MockOperator_PullFileServer +type MockOperator_PullFileServerMockRecorder struct { 
+ mock *MockOperator_PullFileServer +} + +// NewMockOperator_PullFileServer creates a new mock instance +func NewMockOperator_PullFileServer(ctrl *gomock.Controller) *MockOperator_PullFileServer { + mock := &MockOperator_PullFileServer{ctrl: ctrl} + mock.recorder = &MockOperator_PullFileServerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockOperator_PullFileServer) EXPECT() *MockOperator_PullFileServerMockRecorder { + return m.recorder +} + +// Context mocks base method +func (m *MockOperator_PullFileServer) Context() context.Context { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Context") + ret0, _ := ret[0].(context.Context) + return ret0 +} + +// Context indicates an expected call of Context +func (mr *MockOperator_PullFileServerMockRecorder) Context() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockOperator_PullFileServer)(nil).Context)) +} + +// RecvMsg mocks base method +func (m *MockOperator_PullFileServer) RecvMsg(arg0 interface{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RecvMsg", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// RecvMsg indicates an expected call of RecvMsg +func (mr *MockOperator_PullFileServerMockRecorder) RecvMsg(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockOperator_PullFileServer)(nil).RecvMsg), arg0) +} + +// Send mocks base method +func (m *MockOperator_PullFileServer) Send(arg0 *PullFileResponse) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Send", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Send indicates an expected call of Send +func (mr *MockOperator_PullFileServerMockRecorder) Send(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", 
reflect.TypeOf((*MockOperator_PullFileServer)(nil).Send), arg0) +} + +// SendHeader mocks base method +func (m *MockOperator_PullFileServer) SendHeader(arg0 metadata.MD) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SendHeader", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendHeader indicates an expected call of SendHeader +func (mr *MockOperator_PullFileServerMockRecorder) SendHeader(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendHeader", reflect.TypeOf((*MockOperator_PullFileServer)(nil).SendHeader), arg0) +} + +// SendMsg mocks base method +func (m *MockOperator_PullFileServer) SendMsg(arg0 interface{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SendMsg", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendMsg indicates an expected call of SendMsg +func (mr *MockOperator_PullFileServerMockRecorder) SendMsg(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockOperator_PullFileServer)(nil).SendMsg), arg0) +} + +// SetHeader mocks base method +func (m *MockOperator_PullFileServer) SetHeader(arg0 metadata.MD) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetHeader", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetHeader indicates an expected call of SetHeader +func (mr *MockOperator_PullFileServerMockRecorder) SetHeader(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHeader", reflect.TypeOf((*MockOperator_PullFileServer)(nil).SetHeader), arg0) +} + +// SetTrailer mocks base method +func (m *MockOperator_PullFileServer) SetTrailer(arg0 metadata.MD) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetTrailer", arg0) +} + +// SetTrailer indicates an expected call of SetTrailer +func (mr *MockOperator_PullFileServerMockRecorder) SetTrailer(arg0 interface{}) *gomock.Call { + 
mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTrailer", reflect.TypeOf((*MockOperator_PullFileServer)(nil).SetTrailer), arg0) +} From 6a8f9ede2190f9577cdca239edbf4e2ca282fac7 Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Tue, 19 Jan 2021 19:09:05 -0500 Subject: [PATCH 50/80] PR feedback 1 --- src/dbnode/server/server.go | 25 ++++++++++++++--------- src/dbnode/storage/limits/query_limits.go | 7 +++++++ 2 files changed, 22 insertions(+), 10 deletions(-) diff --git a/src/dbnode/server/server.go b/src/dbnode/server/server.go index dcf494b73a..5851214365 100644 --- a/src/dbnode/server/server.go +++ b/src/dbnode/server/server.go @@ -1223,20 +1223,25 @@ func updateQueryLimits(logger *zap.Logger, func updateQueryLimit(logger *zap.Logger, limit limits.LookbackLimit, - settings *kvpb.QueryLimit, + limitOpts *kvpb.QueryLimit, ) error { - limitOpts := limits.LookbackLimitOptions{ + if limitOpts == nil { + return nil + } + + old := limit.Options() + new := limits.LookbackLimitOptions{ // If the settings are nil, then that means the limit is disabled. 
- Limit: limits.DisabledLimitValue, - Lookback: limit.Options().Lookback, - ForceExceeded: false, + Limit: limitOpts.Limit, + Lookback: time.Second * time.Duration(limitOpts.LookbackSeconds), + ForceExceeded: limitOpts.ForceExceeded, } - if settings != nil { - limitOpts.Limit = settings.Limit - limitOpts.Lookback = time.Second * time.Duration(settings.LookbackSeconds) - limitOpts.ForceExceeded = settings.ForceExceeded + + if old.Equals(new) { + return nil } - return limit.Update(limitOpts) + + return limit.Update(new) } func kvWatchClientConsistencyLevels( diff --git a/src/dbnode/storage/limits/query_limits.go b/src/dbnode/storage/limits/query_limits.go index 4199576bf9..2dc328d31f 100644 --- a/src/dbnode/storage/limits/query_limits.go +++ b/src/dbnode/storage/limits/query_limits.go @@ -307,6 +307,13 @@ func (q *lookbackLimit) reset() { q.recent.Store(0) } +// Equals returns true if the other options match the current. +func (opts LookbackLimitOptions) Equals(other LookbackLimitOptions) bool { + return opts.Limit == other.Limit && + opts.Lookback == other.Lookback && + opts.ForceExceeded == other.ForceExceeded +} + func (opts LookbackLimitOptions) validate() error { if opts.Limit < 0 { return fmt.Errorf("query limit requires limit >= 0 (%d)", opts.Limit) From c05b6c9d19ac544cd78c6990587dc62796d6ebc9 Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Tue, 19 Jan 2021 19:09:39 -0500 Subject: [PATCH 51/80] Comment --- src/dbnode/server/server.go | 1 - 1 file changed, 1 deletion(-) diff --git a/src/dbnode/server/server.go b/src/dbnode/server/server.go index 5851214365..01751cd338 100644 --- a/src/dbnode/server/server.go +++ b/src/dbnode/server/server.go @@ -1231,7 +1231,6 @@ func updateQueryLimit(logger *zap.Logger, old := limit.Options() new := limits.LookbackLimitOptions{ - // If the settings are nil, then that means the limit is disabled. 
Limit: limitOpts.Limit, Lookback: time.Second * time.Duration(limitOpts.LookbackSeconds), ForceExceeded: limitOpts.ForceExceeded, From 79cd2e410eb0b0b9af015a1a7b838675d66dfe73 Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Tue, 19 Jan 2021 19:17:35 -0500 Subject: [PATCH 52/80] Test fix --- src/dbnode/persist/fs/retriever_test.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/dbnode/persist/fs/retriever_test.go b/src/dbnode/persist/fs/retriever_test.go index 9335277856..063d81c710 100644 --- a/src/dbnode/persist/fs/retriever_test.go +++ b/src/dbnode/persist/fs/retriever_test.go @@ -815,13 +815,12 @@ func TestBlockRetrieverHandlesSeekByIndexEntryErrors(t *testing.T) { func TestLimitSeriesReadFromDisk(t *testing.T) { scope := tally.NewTestScope("test", nil) - limit := int64(2) limitOpts := limits.NewOptions(). SetInstrumentOptions(instrument.NewOptions().SetMetricsScope(scope)). SetBytesReadLimitOpts(limits.DefaultLookbackLimitOptions()). SetDocsLimitOpts(limits.DefaultLookbackLimitOptions()). 
SetDiskSeriesReadLimitOpts(limits.LookbackLimitOptions{ - Limit: &limit, + Limit: 2, Lookback: time.Second * 1, }) queryLimits, err := limits.NewQueryLimits(limitOpts) From 0a31c5332f642eed7c6cd24a5f2cfd37d29ea2ab Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Tue, 19 Jan 2021 19:48:41 -0500 Subject: [PATCH 53/80] Cleanup --- src/cluster/generated/proto/kvpb/kv.pb.go | 119 +++++++++--------- src/cluster/generated/proto/kvpb/kv.proto | 6 +- src/dbnode/server/server.go | 9 +- src/query/api/v1/handler/database/common.go | 1 + src/query/api/v1/handler/database/kvstore.go | 9 +- .../api/v1/handler/database/kvstore_test.go | 61 --------- 6 files changed, 70 insertions(+), 135 deletions(-) delete mode 100644 src/query/api/v1/handler/database/kvstore_test.go diff --git a/src/cluster/generated/proto/kvpb/kv.pb.go b/src/cluster/generated/proto/kvpb/kv.pb.go index 7d9e488642..d3c7c43f87 100644 --- a/src/cluster/generated/proto/kvpb/kv.pb.go +++ b/src/cluster/generated/proto/kvpb/kv.pb.go @@ -117,9 +117,9 @@ func (m *KeyValueUpdateResult) GetNew() string { } type QueryLimits struct { - DocsMatched *QueryLimit `protobuf:"bytes,1,opt,name=DocsMatched" json:"DocsMatched,omitempty"` - SeriesReadFromDisk *QueryLimit `protobuf:"bytes,2,opt,name=SeriesReadFromDisk" json:"SeriesReadFromDisk,omitempty"` - BytesReadFromDisk *QueryLimit `protobuf:"bytes,3,opt,name=BytesReadFromDisk" json:"BytesReadFromDisk,omitempty"` + MaxRecentlyQueriedSeriesBlocks *QueryLimit `protobuf:"bytes,1,opt,name=maxRecentlyQueriedSeriesBlocks" json:"maxRecentlyQueriedSeriesBlocks,omitempty"` + MaxRecentlyQueriedSeriesDiskBytesRead *QueryLimit `protobuf:"bytes,2,opt,name=maxRecentlyQueriedSeriesDiskBytesRead" json:"maxRecentlyQueriedSeriesDiskBytesRead,omitempty"` + MaxRecentlyQueriedSeriesDiskRead *QueryLimit `protobuf:"bytes,3,opt,name=maxRecentlyQueriedSeriesDiskRead" json:"maxRecentlyQueriedSeriesDiskRead,omitempty"` } func (m *QueryLimits) Reset() { *m = QueryLimits{} } @@ -127,23 +127,23 @@ func (m 
*QueryLimits) String() string { return proto.CompactTextStrin func (*QueryLimits) ProtoMessage() {} func (*QueryLimits) Descriptor() ([]byte, []int) { return fileDescriptorKv, []int{2} } -func (m *QueryLimits) GetDocsMatched() *QueryLimit { +func (m *QueryLimits) GetMaxRecentlyQueriedSeriesBlocks() *QueryLimit { if m != nil { - return m.DocsMatched + return m.MaxRecentlyQueriedSeriesBlocks } return nil } -func (m *QueryLimits) GetSeriesReadFromDisk() *QueryLimit { +func (m *QueryLimits) GetMaxRecentlyQueriedSeriesDiskBytesRead() *QueryLimit { if m != nil { - return m.SeriesReadFromDisk + return m.MaxRecentlyQueriedSeriesDiskBytesRead } return nil } -func (m *QueryLimits) GetBytesReadFromDisk() *QueryLimit { +func (m *QueryLimits) GetMaxRecentlyQueriedSeriesDiskRead() *QueryLimit { if m != nil { - return m.BytesReadFromDisk + return m.MaxRecentlyQueriedSeriesDiskRead } return nil } @@ -277,31 +277,31 @@ func (m *QueryLimits) MarshalTo(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.DocsMatched != nil { + if m.MaxRecentlyQueriedSeriesBlocks != nil { dAtA[i] = 0xa i++ - i = encodeVarintKv(dAtA, i, uint64(m.DocsMatched.Size())) - n1, err := m.DocsMatched.MarshalTo(dAtA[i:]) + i = encodeVarintKv(dAtA, i, uint64(m.MaxRecentlyQueriedSeriesBlocks.Size())) + n1, err := m.MaxRecentlyQueriedSeriesBlocks.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n1 } - if m.SeriesReadFromDisk != nil { + if m.MaxRecentlyQueriedSeriesDiskBytesRead != nil { dAtA[i] = 0x12 i++ - i = encodeVarintKv(dAtA, i, uint64(m.SeriesReadFromDisk.Size())) - n2, err := m.SeriesReadFromDisk.MarshalTo(dAtA[i:]) + i = encodeVarintKv(dAtA, i, uint64(m.MaxRecentlyQueriedSeriesDiskBytesRead.Size())) + n2, err := m.MaxRecentlyQueriedSeriesDiskBytesRead.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n2 } - if m.BytesReadFromDisk != nil { + if m.MaxRecentlyQueriedSeriesDiskRead != nil { dAtA[i] = 0x1a i++ - i = encodeVarintKv(dAtA, i, uint64(m.BytesReadFromDisk.Size())) - n3, err := 
m.BytesReadFromDisk.MarshalTo(dAtA[i:]) + i = encodeVarintKv(dAtA, i, uint64(m.MaxRecentlyQueriedSeriesDiskRead.Size())) + n3, err := m.MaxRecentlyQueriedSeriesDiskRead.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -395,16 +395,16 @@ func (m *KeyValueUpdateResult) Size() (n int) { func (m *QueryLimits) Size() (n int) { var l int _ = l - if m.DocsMatched != nil { - l = m.DocsMatched.Size() + if m.MaxRecentlyQueriedSeriesBlocks != nil { + l = m.MaxRecentlyQueriedSeriesBlocks.Size() n += 1 + l + sovKv(uint64(l)) } - if m.SeriesReadFromDisk != nil { - l = m.SeriesReadFromDisk.Size() + if m.MaxRecentlyQueriedSeriesDiskBytesRead != nil { + l = m.MaxRecentlyQueriedSeriesDiskBytesRead.Size() n += 1 + l + sovKv(uint64(l)) } - if m.BytesReadFromDisk != nil { - l = m.BytesReadFromDisk.Size() + if m.MaxRecentlyQueriedSeriesDiskRead != nil { + l = m.MaxRecentlyQueriedSeriesDiskRead.Size() n += 1 + l + sovKv(uint64(l)) } return n @@ -734,7 +734,7 @@ func (m *QueryLimits) Unmarshal(dAtA []byte) error { switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DocsMatched", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MaxRecentlyQueriedSeriesBlocks", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -758,16 +758,16 @@ func (m *QueryLimits) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.DocsMatched == nil { - m.DocsMatched = &QueryLimit{} + if m.MaxRecentlyQueriedSeriesBlocks == nil { + m.MaxRecentlyQueriedSeriesBlocks = &QueryLimit{} } - if err := m.DocsMatched.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.MaxRecentlyQueriedSeriesBlocks.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SeriesReadFromDisk", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MaxRecentlyQueriedSeriesDiskBytesRead", wireType) } 
var msglen int for shift := uint(0); ; shift += 7 { @@ -791,16 +791,16 @@ func (m *QueryLimits) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.SeriesReadFromDisk == nil { - m.SeriesReadFromDisk = &QueryLimit{} + if m.MaxRecentlyQueriedSeriesDiskBytesRead == nil { + m.MaxRecentlyQueriedSeriesDiskBytesRead = &QueryLimit{} } - if err := m.SeriesReadFromDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.MaxRecentlyQueriedSeriesDiskBytesRead.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BytesReadFromDisk", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MaxRecentlyQueriedSeriesDiskRead", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -824,10 +824,10 @@ func (m *QueryLimits) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.BytesReadFromDisk == nil { - m.BytesReadFromDisk = &QueryLimit{} + if m.MaxRecentlyQueriedSeriesDiskRead == nil { + m.MaxRecentlyQueriedSeriesDiskRead = &QueryLimit{} } - if err := m.BytesReadFromDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.MaxRecentlyQueriedSeriesDiskRead.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1070,27 +1070,28 @@ func init() { } var fileDescriptorKv = []byte{ - // 347 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0xc1, 0x4e, 0xe3, 0x30, - 0x14, 0x45, 0x27, 0x93, 0x99, 0x6a, 0xfa, 0xaa, 0x99, 0xc9, 0x58, 0xd5, 0xa8, 0xab, 0xa8, 0x8a, - 0x58, 0x74, 0x15, 0x4b, 0xed, 0x16, 0x21, 0x54, 0x15, 0x36, 0x14, 0x09, 0x5c, 0xc1, 0x3e, 0xb1, - 0x1f, 0x6d, 0x94, 0xa4, 0xae, 0x6c, 0xa7, 0x90, 0xbf, 0xe0, 0x9f, 0xd8, 0xb0, 0xe4, 0x13, 0x50, - 0xf9, 0x11, 0x64, 0xb7, 0x52, 0x69, 0x29, 0xbb, 0x7b, 0xaf, 0xae, 0x8f, 0x9f, 0xfd, 0xe0, 0x78, - 0x9a, 0x99, 0x59, 0x95, 0xc6, 0x5c, 
0x96, 0xb4, 0x1c, 0x88, 0x94, 0x96, 0x03, 0xaa, 0x15, 0xa7, - 0xbc, 0xa8, 0xb4, 0x41, 0x45, 0xa7, 0x38, 0x47, 0x95, 0x18, 0x14, 0x74, 0xa1, 0xa4, 0x91, 0x34, - 0x5f, 0x2e, 0x52, 0x9a, 0x2f, 0x63, 0xe7, 0xc8, 0x0f, 0x6b, 0xa3, 0x2b, 0xf8, 0x73, 0x81, 0xf5, - 0x6d, 0x52, 0x54, 0x78, 0xb3, 0x10, 0x89, 0x41, 0x12, 0x80, 0x9f, 0x63, 0xdd, 0xf1, 0xba, 0x5e, - 0xaf, 0xc9, 0xac, 0x24, 0x6d, 0xf8, 0xb9, 0xb4, 0x85, 0xce, 0x77, 0x97, 0xad, 0x0d, 0xf9, 0x0f, - 0x0d, 0x2e, 0xcb, 0x32, 0x33, 0x1d, 0xbf, 0xeb, 0xf5, 0x7e, 0xb1, 0x8d, 0x8b, 0xc6, 0xd0, 0xde, - 0x25, 0x32, 0xd4, 0x55, 0x61, 0x0e, 0x70, 0x03, 0xf0, 0x65, 0x21, 0x36, 0x54, 0x2b, 0x6d, 0x32, - 0xc7, 0x7b, 0x07, 0x6c, 0x32, 0x2b, 0xa3, 0x27, 0x0f, 0x5a, 0xd7, 0x15, 0xaa, 0x7a, 0x9c, 0x95, - 0x99, 0xd1, 0xa4, 0x0f, 0xad, 0x91, 0xe4, 0xfa, 0x32, 0x31, 0x7c, 0x86, 0xc2, 0xd1, 0x5a, 0xfd, - 0x20, 0xb6, 0x6f, 0x89, 0xb7, 0x3d, 0xf6, 0xb1, 0x44, 0x4e, 0x81, 0x4c, 0x50, 0x65, 0xa8, 0x19, - 0x26, 0xe2, 0x5c, 0xc9, 0x72, 0x94, 0xe9, 0xdc, 0x5d, 0x7b, 0xe8, 0xe8, 0x81, 0x2e, 0x39, 0x81, - 0x7f, 0xc3, 0xda, 0xec, 0x01, 0xfc, 0x2f, 0x00, 0x9f, 0xab, 0x91, 0x02, 0xd8, 0x16, 0xec, 0x7f, - 0x16, 0x56, 0xb8, 0xe9, 0x7d, 0xb6, 0x36, 0xa4, 0x07, 0x7f, 0x0b, 0x29, 0xf3, 0x34, 0xe1, 0xf9, - 0x04, 0xb9, 0x9c, 0x0b, 0xed, 0x46, 0xf4, 0xd9, 0x7e, 0x4c, 0x8e, 0xe0, 0xf7, 0x9d, 0x54, 0x1c, - 0xcf, 0x1e, 0x38, 0xa2, 0x40, 0xb1, 0x59, 0xc0, 0x6e, 0x38, 0x0c, 0x9e, 0x57, 0xa1, 0xf7, 0xb2, - 0x0a, 0xbd, 0xd7, 0x55, 0xe8, 0x3d, 0xbe, 0x85, 0xdf, 0xd2, 0x86, 0x5b, 0xfc, 0xe0, 0x3d, 0x00, - 0x00, 0xff, 0xff, 0xd3, 0x2a, 0x27, 0x0a, 0x38, 0x02, 0x00, 0x00, + // 361 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x52, 0xcf, 0x6e, 0xda, 0x30, + 0x18, 0x5f, 0xc8, 0x86, 0xc6, 0x87, 0xb6, 0x45, 0x16, 0x9a, 0x38, 0x45, 0x28, 0xda, 0x24, 0x4e, + 0xb1, 0x34, 0xae, 0x3b, 0xa1, 0xed, 0x54, 0x0e, 0xad, 0x51, 0xab, 0x1e, 0x7a, 0x49, 0xec, 0x0f, + 0x1a, 0xc5, 0x89, 0x51, 0xec, 0x50, 0xf2, 0x16, 0x7d, 
0x91, 0xbe, 0x47, 0x8f, 0x7d, 0x84, 0x8a, + 0xbe, 0x48, 0x65, 0x83, 0x84, 0xa8, 0xa0, 0xf4, 0x12, 0x7d, 0xbf, 0x5f, 0x7e, 0x7f, 0xe2, 0x7c, + 0x86, 0xbf, 0xf3, 0xcc, 0xdc, 0xd6, 0x69, 0xcc, 0x55, 0x41, 0x8b, 0x91, 0x48, 0x69, 0x31, 0xa2, + 0xba, 0xe2, 0x94, 0xcb, 0x5a, 0x1b, 0xac, 0xe8, 0x1c, 0x4b, 0xac, 0x12, 0x83, 0x82, 0x2e, 0x2a, + 0x65, 0x14, 0xcd, 0x97, 0x8b, 0x94, 0xe6, 0xcb, 0xd8, 0x21, 0xf2, 0xd9, 0xc2, 0xe8, 0x1c, 0xbe, + 0x9f, 0x61, 0x73, 0x95, 0xc8, 0x1a, 0x2f, 0x17, 0x22, 0x31, 0x48, 0x02, 0xf0, 0x73, 0x6c, 0xfa, + 0xde, 0xc0, 0x1b, 0x76, 0x98, 0x1d, 0x49, 0x0f, 0xbe, 0x2c, 0xad, 0xa0, 0xdf, 0x72, 0xdc, 0x06, + 0x90, 0x9f, 0xd0, 0xe6, 0xaa, 0x28, 0x32, 0xd3, 0xf7, 0x07, 0xde, 0xf0, 0x2b, 0xdb, 0xa2, 0x68, + 0x02, 0xbd, 0xfd, 0x44, 0x86, 0xba, 0x96, 0xe6, 0x40, 0x6e, 0x00, 0xbe, 0x92, 0x62, 0x9b, 0x6a, + 0x47, 0xcb, 0x94, 0x78, 0xe7, 0x02, 0x3b, 0xcc, 0x8e, 0xd1, 0x43, 0x0b, 0xba, 0x17, 0x35, 0x56, + 0xcd, 0x24, 0x2b, 0x32, 0xa3, 0xc9, 0x35, 0x84, 0x45, 0xb2, 0x62, 0xc8, 0xb1, 0x34, 0xb2, 0xb1, + 0x6f, 0x32, 0x14, 0x53, 0xfb, 0xd4, 0x63, 0xa9, 0x78, 0xae, 0x5d, 0x41, 0xf7, 0x4f, 0x10, 0xdb, + 0xe3, 0xc5, 0x3b, 0x2b, 0x3b, 0xe1, 0x23, 0x33, 0xf8, 0x7d, 0x4c, 0xf1, 0x2f, 0xd3, 0xf9, 0xb8, + 0x31, 0xa8, 0x19, 0x26, 0x9b, 0xef, 0x3d, 0x54, 0xf0, 0x31, 0x3b, 0xb9, 0x81, 0xc1, 0x7b, 0x42, + 0x57, 0xe1, 0x1f, 0xa9, 0x38, 0xe9, 0x8c, 0x2a, 0x80, 0x9d, 0xde, 0x6e, 0x4e, 0xda, 0xc1, 0xfd, + 0x14, 0x9f, 0x6d, 0x00, 0x19, 0xc2, 0x0f, 0xa9, 0x54, 0x9e, 0x26, 0x3c, 0x9f, 0x22, 0x57, 0xa5, + 0xd0, 0xee, 0x4c, 0x3e, 0x7b, 0x4b, 0x93, 0x5f, 0xf0, 0x6d, 0xa6, 0x2a, 0x8e, 0xff, 0x57, 0x1c, + 0x51, 0xa0, 0xd8, 0xae, 0x7a, 0x9f, 0x1c, 0x07, 0x8f, 0xeb, 0xd0, 0x7b, 0x5a, 0x87, 0xde, 0xf3, + 0x3a, 0xf4, 0xee, 0x5f, 0xc2, 0x4f, 0x69, 0xdb, 0x5d, 0xb1, 0xd1, 0x6b, 0x00, 0x00, 0x00, 0xff, + 0xff, 0x4e, 0xb0, 0xcd, 0x62, 0xa2, 0x02, 0x00, 0x00, } diff --git a/src/cluster/generated/proto/kvpb/kv.proto b/src/cluster/generated/proto/kvpb/kv.proto index 5b658422ba..c75b236f83 100644 
--- a/src/cluster/generated/proto/kvpb/kv.proto +++ b/src/cluster/generated/proto/kvpb/kv.proto @@ -34,9 +34,9 @@ message KeyValueUpdateResult { } message QueryLimits { - QueryLimit DocsMatched = 1; - QueryLimit SeriesReadFromDisk = 2; - QueryLimit BytesReadFromDisk = 3; + QueryLimit maxRecentlyQueriedSeriesBlocks = 1; + QueryLimit maxRecentlyQueriedSeriesDiskBytesRead = 2; + QueryLimit maxRecentlyQueriedSeriesDiskRead = 3; } message QueryLimit { diff --git a/src/dbnode/server/server.go b/src/dbnode/server/server.go index 01751cd338..89dddb1d06 100644 --- a/src/dbnode/server/server.go +++ b/src/dbnode/server/server.go @@ -1210,19 +1210,18 @@ func updateQueryLimits(logger *zap.Logger, if settings == nil { return } - if err := updateQueryLimit(logger, limits.DocsLimit(), settings.DocsMatched); err != nil { + if err := updateQueryLimit(limits.DocsLimit(), settings.MaxRecentlyQueriedSeriesBlocks); err != nil { logger.Error("error updating docs limit", zap.Error(err)) } - if err := updateQueryLimit(logger, limits.DiskSeriesReadLimit(), settings.SeriesReadFromDisk); err != nil { + if err := updateQueryLimit(limits.DiskSeriesReadLimit(), settings.MaxRecentlyQueriedSeriesDiskRead); err != nil { logger.Error("error updating series read limit", zap.Error(err)) } - if err := updateQueryLimit(logger, limits.BytesReadLimit(), settings.BytesReadFromDisk); err != nil { + if err := updateQueryLimit(limits.BytesReadLimit(), settings.MaxRecentlyQueriedSeriesDiskBytesRead); err != nil { logger.Error("error updating bytes read limit", zap.Error(err)) } } -func updateQueryLimit(logger *zap.Logger, - limit limits.LookbackLimit, +func updateQueryLimit(limit limits.LookbackLimit, limitOpts *kvpb.QueryLimit, ) error { if limitOpts == nil { diff --git a/src/query/api/v1/handler/database/common.go b/src/query/api/v1/handler/database/common.go index 70854ea06b..907cb19c67 100644 --- a/src/query/api/v1/handler/database/common.go +++ b/src/query/api/v1/handler/database/common.go @@ -18,6 +18,7 
@@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. +// Package database contains API endpoints for managing the database. package database import ( diff --git a/src/query/api/v1/handler/database/kvstore.go b/src/query/api/v1/handler/database/kvstore.go index d9479c8359..109832d337 100644 --- a/src/query/api/v1/handler/database/kvstore.go +++ b/src/query/api/v1/handler/database/kvstore.go @@ -23,10 +23,10 @@ package database import ( "context" "encoding/json" + "errors" "io/ioutil" "net/http" - "github.com/jhump/protoreflect/dynamic" "github.com/m3db/m3/src/cluster/generated/proto/commonpb" "github.com/m3db/m3/src/cluster/generated/proto/kvpb" "github.com/m3db/m3/src/cluster/kv" @@ -50,8 +50,6 @@ const ( // KeyValueStoreHTTPMethod is the HTTP method used with this resource. KeyValueStoreHTTPMethod = http.MethodPost - - defaultLimit = 1000 ) // KeyValueUpdate defines an update to a key's value. @@ -92,9 +90,6 @@ func NewKeyValueStoreHandler(opts options.HandlerOptions) (http.Handler, error) return nil, err } - m := dynamic.Message{} - err = m.UnmarshalJSON([]byte(`{"key":"foo","value": { "bar" : "baz" }}`)) - return &KeyValueStoreHandler{ storage: opts.Storage(), kvStore: kvStore, @@ -146,7 +141,7 @@ func (h *KeyValueStoreHandler) update( update *KeyValueUpdate, ) (*KeyValueUpdateResult, error) { old, err := h.kvStore.Get(update.Key) - if err != nil && err != kv.ErrNotFound { + if err != nil && !errors.Is(err, kv.ErrNotFound) { return nil, err } diff --git a/src/query/api/v1/handler/database/kvstore_test.go b/src/query/api/v1/handler/database/kvstore_test.go deleted file mode 100644 index 407c66af3b..0000000000 --- a/src/query/api/v1/handler/database/kvstore_test.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright (c) 2018 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -package database - -import ( - "encoding/json" - "fmt" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/jhump/protoreflect/dynamic" - "google.golang.org/protobuf/encoding/protojson" - "google.golang.org/protobuf/types/known/structpb" -) - -func TestDynamic(t *testing.T) { - m := dynamic.Message{} - err := m.UnmarshalJSON([]byte(`{"key":"foo"}`)) - fmt.Println(m) - fmt.Println(err) -} - -func TestDynamic2(t *testing.T) { - m := map[string]interface{}{ - "foo": "bar", - "baz": 123, - } - b, err := json.Marshal(m) - require.NoError(t, err) - fmt.Println(string(b)) - s := &structpb.Struct{} - err = protojson.Unmarshal([]byte(`{"key":"foo"}`), s) - require.NoError(t, err) - fmt.Println(m) - fmt.Println(b) - fmt.Println(s) - - // v := &commonpb.StringProto{} - // err = protojson.Unmarshal([]byte(`{"value":"foo"}`), v) - // require.NoError(t, err) - // fmt.Println(v) -} From 38cabbd65d827c70e0752a531652e7d2fcf68b0d Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Tue, 19 Jan 2021 19:58:53 -0500 Subject: [PATCH 54/80] Lint --- src/dbnode/persist/fs/retriever.go | 5 ++--- src/dbnode/storage/limits/query_limits.go | 2 +- src/query/api/v1/handler/database/kvstore.go | 4 ++-- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/src/dbnode/persist/fs/retriever.go b/src/dbnode/persist/fs/retriever.go index 9fd1f353f2..43c95eb159 100644 --- a/src/dbnode/persist/fs/retriever.go +++ b/src/dbnode/persist/fs/retriever.go @@ -594,7 +594,6 @@ func (r *blockRetriever) streamRequest( shard uint32, id ident.ID, startTime time.Time, - nsCtx namespace.Context, ) error { req.resultWg.Add(1) if err := r.queryLimits.DiskSeriesReadLimit().Inc(1, req.source); err != nil { @@ -671,7 +670,7 @@ func (r *blockRetriever) Stream( } } - err = r.streamRequest(ctx, req, shard, id, startTime, nsCtx) + err = r.streamRequest(ctx, req, shard, id, startTime) if err != nil { req.resultWg.Done() return xio.EmptyBlockReader, err @@ -707,7 +706,7 @@ func (r *blockRetriever) 
StreamWideEntry( req.streamReqType = streamWideEntryReq req.wideFilter = filter - err = r.streamRequest(ctx, req, shard, id, startTime, nsCtx) + err = r.streamRequest(ctx, req, shard, id, startTime) if err != nil { req.resultWg.Done() return block.EmptyStreamedWideEntry, err diff --git a/src/dbnode/storage/limits/query_limits.go b/src/dbnode/storage/limits/query_limits.go index 2dc328d31f..38a34a7ff9 100644 --- a/src/dbnode/storage/limits/query_limits.go +++ b/src/dbnode/storage/limits/query_limits.go @@ -34,7 +34,7 @@ import ( ) const ( - // DisabledLimitValue is the value, when set to + // DisabledLimitValue is the value which, when set to // a limit, disables the enforcement of that limit. DisabledLimitValue = 0 diff --git a/src/query/api/v1/handler/database/kvstore.go b/src/query/api/v1/handler/database/kvstore.go index 109832d337..431418042c 100644 --- a/src/query/api/v1/handler/database/kvstore.go +++ b/src/query/api/v1/handler/database/kvstore.go @@ -66,7 +66,7 @@ type KeyValueUpdate struct { // KeyValueUpdateResult defines the result of an update to a key's value. type KeyValueUpdateResult struct { // Key to update. - Key string `json:"new"` + Key string `json:"key"` // Old is the value before the update. Old string `json:"old"` // New is the value after the update. 
@@ -136,7 +136,7 @@ func (h *KeyValueStoreHandler) parseBody(r *http.Request) (*KeyValueUpdate, erro } func (h *KeyValueStoreHandler) update( - ctx context.Context, + _ context.Context, logger *zap.Logger, update *KeyValueUpdate, ) (*KeyValueUpdateResult, error) { From a9621f7d2ad3940b2edf37393d7d9a1fb9fea3c8 Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Tue, 19 Jan 2021 19:59:47 -0500 Subject: [PATCH 55/80] Lint 2 --- src/query/api/v1/handler/database/kvstore.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/query/api/v1/handler/database/kvstore.go b/src/query/api/v1/handler/database/kvstore.go index 431418042c..3ee76ae4aa 100644 --- a/src/query/api/v1/handler/database/kvstore.go +++ b/src/query/api/v1/handler/database/kvstore.go @@ -38,10 +38,10 @@ import ( xerrors "github.com/m3db/m3/src/x/errors" "github.com/m3db/m3/src/x/instrument" xhttp "github.com/m3db/m3/src/x/net/http" - "google.golang.org/protobuf/runtime/protoiface" "github.com/gogo/protobuf/jsonpb" "go.uber.org/zap" + "google.golang.org/protobuf/runtime/protoiface" ) const ( From f488d682d0aac6ca738d80bdc2c3577fa0b34ecf Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Tue, 19 Jan 2021 20:05:25 -0500 Subject: [PATCH 56/80] Lint 3 --- src/dbnode/persist/fs/retriever_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/dbnode/persist/fs/retriever_test.go b/src/dbnode/persist/fs/retriever_test.go index 063d81c710..0fdca0196c 100644 --- a/src/dbnode/persist/fs/retriever_test.go +++ b/src/dbnode/persist/fs/retriever_test.go @@ -833,8 +833,8 @@ func TestLimitSeriesReadFromDisk(t *testing.T) { require.NoError(t, err) req := &retrieveRequest{} retriever := publicRetriever.(*blockRetriever) - _ = retriever.streamRequest(context.NewContext(), req, 0, ident.StringID("id"), time.Now(), namespace.Context{}) - err = retriever.streamRequest(context.NewContext(), req, 0, ident.StringID("id"), time.Now(), namespace.Context{}) + _ = 
retriever.streamRequest(context.NewContext(), req, 0, ident.StringID("id"), time.Now()) + err = retriever.streamRequest(context.NewContext(), req, 0, ident.StringID("id"), time.Now()) require.Error(t, err) require.Contains(t, err.Error(), "query aborted due to limit") From c5e06831683be4bb52905e7dfef9b5770c8a3987 Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Tue, 19 Jan 2021 20:45:47 -0500 Subject: [PATCH 57/80] Cleanup 2 --- src/dbnode/storage/limits/query_limits.go | 13 +++----- src/query/api/v1/handler/database/common.go | 14 +++++++++ src/query/api/v1/handler/database/kvstore.go | 32 +++++++------------- 3 files changed, 30 insertions(+), 29 deletions(-) diff --git a/src/dbnode/storage/limits/query_limits.go b/src/dbnode/storage/limits/query_limits.go index 38a34a7ff9..85d66e2d7b 100644 --- a/src/dbnode/storage/limits/query_limits.go +++ b/src/dbnode/storage/limits/query_limits.go @@ -1,4 +1,4 @@ -// Copyright (c) 2020 Uber Technologies, Inc. +// Copyright (c) 2021 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -34,11 +34,8 @@ import ( ) const ( - // DisabledLimitValue is the value which, when set to - // a limit, disables the enforcement of that limit. - DisabledLimitValue = 0 - - defaultLookback = time.Second * 15 + disabledLimitValue = 0 + defaultLookback = time.Second * 15 ) type queryLimits struct { @@ -77,7 +74,7 @@ var ( func DefaultLookbackLimitOptions() LookbackLimitOptions { return LookbackLimitOptions{ // Default to no limit. 
- Limit: DisabledLimitValue, + Limit: disabledLimitValue, Lookback: defaultLookback, } } @@ -252,7 +249,7 @@ func (q *lookbackLimit) checkLimit(recent int64) error { "query aborted due to forced limit: name=%s", q.name))) } - if currentOpts.Limit == DisabledLimitValue { + if currentOpts.Limit == disabledLimitValue { return nil } diff --git a/src/query/api/v1/handler/database/common.go b/src/query/api/v1/handler/database/common.go index 907cb19c67..0d2fc2e93c 100644 --- a/src/query/api/v1/handler/database/common.go +++ b/src/query/api/v1/handler/database/common.go @@ -49,12 +49,19 @@ func RegisterRoutes( instrumentOpts instrument.Options, namespaceValidator options.NamespaceValidator, ) error { + kvStore, err := client.KV() + if err != nil { + return err + } + createHandler, err := NewCreateHandler(client, cfg, embeddedDbCfg, defaults, instrumentOpts, namespaceValidator) if err != nil { return err } + kvStoreHandler := NewKeyValueStoreHandler(kvStore, instrumentOpts) + // Register the same handler under two different endpoints. This just makes explaining things in // our documentation easier so we can separate out concepts, but share the underlying code. 
if err := r.Register(queryhttp.RegisterOptions{ @@ -71,6 +78,13 @@ func RegisterRoutes( }); err != nil { return err } + if err := r.Register(queryhttp.RegisterOptions{ + Path: KeyValueStoreURL, + Handler: kvStoreHandler, + Methods: []string{KeyValueStoreHTTPMethod}, + }); err != nil { + return err + } return nil } diff --git a/src/query/api/v1/handler/database/kvstore.go b/src/query/api/v1/handler/database/kvstore.go index 3ee76ae4aa..562535b7e9 100644 --- a/src/query/api/v1/handler/database/kvstore.go +++ b/src/query/api/v1/handler/database/kvstore.go @@ -31,9 +31,6 @@ import ( "github.com/m3db/m3/src/cluster/generated/proto/kvpb" "github.com/m3db/m3/src/cluster/kv" "github.com/m3db/m3/src/dbnode/kvconfig" - "github.com/m3db/m3/src/query/api/v1/handler/prometheus/handleroptions" - "github.com/m3db/m3/src/query/api/v1/options" - "github.com/m3db/m3/src/query/storage" "github.com/m3db/m3/src/query/util/logging" xerrors "github.com/m3db/m3/src/x/errors" "github.com/m3db/m3/src/x/instrument" @@ -46,8 +43,7 @@ import ( const ( // KeyValueStoreURL is the url to edit key/value configuration values. - KeyValueStoreURL = "/search" - + KeyValueStoreURL = "/kvstore" // KeyValueStoreHTTPMethod is the HTTP method used with this resource. 
KeyValueStoreHTTPMethod = http.MethodPost ) @@ -75,27 +71,21 @@ type KeyValueUpdateResult struct { Version int `json:"version"` } -// KeyValueStoreHandler represents a handler for the search endpoint +// KeyValueStoreHandler represents a handler for the key/value store endpoint type KeyValueStoreHandler struct { - storage storage.Storage - kvStore kv.Store - fetchOptionsBuilder handleroptions.FetchOptionsBuilder - instrumentOpts instrument.Options + kvStore kv.Store + instrumentOpts instrument.Options } // NewKeyValueStoreHandler returns a new instance of handler -func NewKeyValueStoreHandler(opts options.HandlerOptions) (http.Handler, error) { - kvStore, err := opts.ClusterClient().KV() - if err != nil { - return nil, err - } - +func NewKeyValueStoreHandler( + kvStore kv.Store, + instrumentOpts instrument.Options, +) http.Handler { return &KeyValueStoreHandler{ - storage: opts.Storage(), - kvStore: kvStore, - fetchOptionsBuilder: opts.FetchOptionsBuilder(), - instrumentOpts: opts.InstrumentOpts(), - }, nil + kvStore: kvStore, + instrumentOpts: instrumentOpts, + } } func (h *KeyValueStoreHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { From 316a7d352e85c0bcbf8821b0c21ab18601dfa43d Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Tue, 19 Jan 2021 20:58:38 -0500 Subject: [PATCH 58/80] Dep order --- src/dbnode/storage/limits/query_limits.go | 6 +++--- src/query/api/v1/handler/database/kvstore.go | 17 ++++++++++------- 2 files changed, 13 insertions(+), 10 deletions(-) diff --git a/src/dbnode/storage/limits/query_limits.go b/src/dbnode/storage/limits/query_limits.go index 85d66e2d7b..672cf3000e 100644 --- a/src/dbnode/storage/limits/query_limits.go +++ b/src/dbnode/storage/limits/query_limits.go @@ -25,12 +25,12 @@ import ( "sync" "time" - xerrors "github.com/m3db/m3/src/x/errors" - "github.com/m3db/m3/src/x/instrument" - "github.com/uber-go/tally" "go.uber.org/atomic" "go.uber.org/zap" + + xerrors "github.com/m3db/m3/src/x/errors" + 
"github.com/m3db/m3/src/x/instrument" ) const ( diff --git a/src/query/api/v1/handler/database/kvstore.go b/src/query/api/v1/handler/database/kvstore.go index 562535b7e9..e882b8368c 100644 --- a/src/query/api/v1/handler/database/kvstore.go +++ b/src/query/api/v1/handler/database/kvstore.go @@ -27,6 +27,10 @@ import ( "io/ioutil" "net/http" + "github.com/gogo/protobuf/jsonpb" + "go.uber.org/zap" + "google.golang.org/protobuf/runtime/protoiface" + "github.com/m3db/m3/src/cluster/generated/proto/commonpb" "github.com/m3db/m3/src/cluster/generated/proto/kvpb" "github.com/m3db/m3/src/cluster/kv" @@ -35,10 +39,6 @@ import ( xerrors "github.com/m3db/m3/src/x/errors" "github.com/m3db/m3/src/x/instrument" xhttp "github.com/m3db/m3/src/x/net/http" - - "github.com/gogo/protobuf/jsonpb" - "go.uber.org/zap" - "google.golang.org/protobuf/runtime/protoiface" ) const ( @@ -148,9 +148,12 @@ func (h *KeyValueStoreHandler) update( return nil, err } - version, err := h.kvStore.Set(update.Key, newProto) - if err != nil { - return nil, err + var version int + if update.Commit { + version, err = h.kvStore.Set(update.Key, newProto) + if err != nil { + return nil, err + } } result := KeyValueUpdateResult{ From e3480a85e620b24eea5977fe8a8724470d947211 Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Wed, 20 Jan 2021 10:35:26 -0500 Subject: [PATCH 59/80] Fix stop race --- src/dbnode/storage/limits/query_limits.go | 31 +++++++++++++---------- 1 file changed, 18 insertions(+), 13 deletions(-) diff --git a/src/dbnode/storage/limits/query_limits.go b/src/dbnode/storage/limits/query_limits.go index 672cf3000e..e00f991a66 100644 --- a/src/dbnode/storage/limits/query_limits.go +++ b/src/dbnode/storage/limits/query_limits.go @@ -45,13 +45,14 @@ type queryLimits struct { } type lookbackLimit struct { - name string - options LookbackLimitOptions - metrics lookbackLimitMetrics - logger *zap.Logger - recent *atomic.Int64 - stopCh chan struct{} - lock sync.RWMutex + name string + options 
LookbackLimitOptions + metrics lookbackLimitMetrics + logger *zap.Logger + recent *atomic.Int64 + stopCh chan struct{} + stoppedCh chan struct{} + lock sync.RWMutex } type lookbackLimitMetrics struct { @@ -114,12 +115,13 @@ func newLookbackLimit( sourceLoggerBuilder SourceLoggerBuilder, ) *lookbackLimit { return &lookbackLimit{ - name: name, - options: opts, - metrics: newLookbackLimitMetrics(instrumentOpts, name, sourceLoggerBuilder), - logger: instrumentOpts.Logger(), - recent: atomic.NewInt64(0), - stopCh: make(chan struct{}), + name: name, + options: opts, + metrics: newLookbackLimitMetrics(instrumentOpts, name, sourceLoggerBuilder), + logger: instrumentOpts.Logger(), + recent: atomic.NewInt64(0), + stopCh: make(chan struct{}), + stoppedCh: make(chan struct{}), } } @@ -272,6 +274,7 @@ func (q *lookbackLimit) start() { q.reset() case <-q.stopCh: ticker.Stop() + q.stoppedCh <- struct{}{} return } } @@ -283,7 +286,9 @@ func (q *lookbackLimit) start() { func (q *lookbackLimit) stop() { close(q.stopCh) + <-q.stoppedCh q.stopCh = make(chan struct{}) + q.stoppedCh = make(chan struct{}) q.logger.Info("query limit interval stopped", zap.String("name", q.name)) } From 450accee37a6c97f40a112f97f8db85db02959d3 Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Wed, 20 Jan 2021 11:54:47 -0500 Subject: [PATCH 60/80] Fix integration test --- src/query/api/v1/handler/database/common.go | 7 +------ src/query/api/v1/handler/database/kvstore.go | 16 +++++++++++----- 2 files changed, 12 insertions(+), 11 deletions(-) diff --git a/src/query/api/v1/handler/database/common.go b/src/query/api/v1/handler/database/common.go index 0d2fc2e93c..5572141176 100644 --- a/src/query/api/v1/handler/database/common.go +++ b/src/query/api/v1/handler/database/common.go @@ -49,18 +49,13 @@ func RegisterRoutes( instrumentOpts instrument.Options, namespaceValidator options.NamespaceValidator, ) error { - kvStore, err := client.KV() - if err != nil { - return err - } - createHandler, err := 
NewCreateHandler(client, cfg, embeddedDbCfg, defaults, instrumentOpts, namespaceValidator) if err != nil { return err } - kvStoreHandler := NewKeyValueStoreHandler(kvStore, instrumentOpts) + kvStoreHandler := NewKeyValueStoreHandler(client, instrumentOpts) // Register the same handler under two different endpoints. This just makes explaining things in // our documentation easier so we can separate out concepts, but share the underlying code. diff --git a/src/query/api/v1/handler/database/kvstore.go b/src/query/api/v1/handler/database/kvstore.go index e882b8368c..cced1d2c5a 100644 --- a/src/query/api/v1/handler/database/kvstore.go +++ b/src/query/api/v1/handler/database/kvstore.go @@ -31,6 +31,7 @@ import ( "go.uber.org/zap" "google.golang.org/protobuf/runtime/protoiface" + clusterclient "github.com/m3db/m3/src/cluster/client" "github.com/m3db/m3/src/cluster/generated/proto/commonpb" "github.com/m3db/m3/src/cluster/generated/proto/kvpb" "github.com/m3db/m3/src/cluster/kv" @@ -73,17 +74,17 @@ type KeyValueUpdateResult struct { // KeyValueStoreHandler represents a handler for the key/value store endpoint type KeyValueStoreHandler struct { - kvStore kv.Store + client clusterclient.Client instrumentOpts instrument.Options } // NewKeyValueStoreHandler returns a new instance of handler func NewKeyValueStoreHandler( - kvStore kv.Store, + client clusterclient.Client, instrumentOpts instrument.Options, ) http.Handler { return &KeyValueStoreHandler{ - kvStore: kvStore, + client: client, instrumentOpts: instrumentOpts, } } @@ -130,7 +131,12 @@ func (h *KeyValueStoreHandler) update( logger *zap.Logger, update *KeyValueUpdate, ) (*KeyValueUpdateResult, error) { - old, err := h.kvStore.Get(update.Key) + kvStore, err := h.client.KV() + if err != nil { + return nil, err + } + + old, err := kvStore.Get(update.Key) if err != nil && errors.Is(err, kv.ErrNotFound) { return nil, err } @@ -150,7 +156,7 @@ func (h *KeyValueStoreHandler) update( var version int if update.Commit { - 
version, err = h.kvStore.Set(update.Key, newProto) + version, err = kvStore.Set(update.Key, newProto) if err != nil { return nil, err } From 5208b2c5f188fef6b4e4c3d62110b5c6f4026a4b Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Wed, 20 Jan 2021 13:09:35 -0500 Subject: [PATCH 61/80] PR feedback --- src/dbnode/kvconfig/keys.go | 2 +- src/dbnode/server/server.go | 2 ++ src/dbnode/storage/limits/query_limits.go | 8 ++++---- src/dbnode/storage/limits/query_limits_test.go | 4 ++-- src/query/api/v1/handler/database/kvstore.go | 8 ++++---- 5 files changed, 13 insertions(+), 11 deletions(-) diff --git a/src/dbnode/kvconfig/keys.go b/src/dbnode/kvconfig/keys.go index 92c5d27292..7d90d48cab 100644 --- a/src/dbnode/kvconfig/keys.go +++ b/src/dbnode/kvconfig/keys.go @@ -51,5 +51,5 @@ const ( ClientWriteConsistencyLevel = "m3db.client.write-consistency-level" // QueryLimits is the KV config key for query limits enforced on each dbnode. - QueryLimits = "m3db.querylimits" + QueryLimits = "m3db.query.limits" ) diff --git a/src/dbnode/server/server.go b/src/dbnode/server/server.go index 89dddb1d06..f442ec4e2b 100644 --- a/src/dbnode/server/server.go +++ b/src/dbnode/server/server.go @@ -1181,6 +1181,8 @@ func kvWatchQueryLimit( } } else if !errors.Is(err, kv.ErrNotFound) { logger.Warn("error resolving query limit", zap.Error(err)) + } else { + logger.Info("query limit key not set") } watch, err := store.Watch(kvconfig.QueryLimits) diff --git a/src/dbnode/storage/limits/query_limits.go b/src/dbnode/storage/limits/query_limits.go index e00f991a66..edf0c68208 100644 --- a/src/dbnode/storage/limits/query_limits.go +++ b/src/dbnode/storage/limits/query_limits.go @@ -1,4 +1,4 @@ -// Copyright (c) 2021 Uber Technologies, Inc. +// Copyright (c) 2020 Uber Technologies, Inc. 
// // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -25,12 +25,12 @@ import ( "sync" "time" + xerrors "github.com/m3db/m3/src/x/errors" + "github.com/m3db/m3/src/x/instrument" + "github.com/uber-go/tally" "go.uber.org/atomic" "go.uber.org/zap" - - xerrors "github.com/m3db/m3/src/x/errors" - "github.com/m3db/m3/src/x/instrument" ) const ( diff --git a/src/dbnode/storage/limits/query_limits_test.go b/src/dbnode/storage/limits/query_limits_test.go index 93616d4131..6ce32708a9 100644 --- a/src/dbnode/storage/limits/query_limits_test.go +++ b/src/dbnode/storage/limits/query_limits_test.go @@ -213,7 +213,7 @@ func TestLookbackLimit(t *testing.T) { func verifyLimit(t *testing.T, limit *lookbackLimit, inc int, expectedLimit int64, forceExceeded bool) int64 { var exceededCount int64 err := limit.Inc(inc, nil) - if (limit.options.Limit == 0 || limit.current() < expectedLimit) && !forceExceeded { + if (expectedLimit == 0 || limit.current() < expectedLimit) && !forceExceeded { require.NoError(t, err) } else { require.Error(t, err) @@ -223,7 +223,7 @@ func verifyLimit(t *testing.T, limit *lookbackLimit, inc int, expectedLimit int6 } err = limit.exceeded() - if (limit.options.Limit == 0 || limit.current() < expectedLimit) && !forceExceeded { + if (expectedLimit == 0 || limit.current() < expectedLimit) && !forceExceeded { require.NoError(t, err) } else { require.Error(t, err) diff --git a/src/query/api/v1/handler/database/kvstore.go b/src/query/api/v1/handler/database/kvstore.go index cced1d2c5a..cbb7284ce9 100644 --- a/src/query/api/v1/handler/database/kvstore.go +++ b/src/query/api/v1/handler/database/kvstore.go @@ -27,10 +27,6 @@ import ( "io/ioutil" "net/http" - "github.com/gogo/protobuf/jsonpb" - "go.uber.org/zap" - "google.golang.org/protobuf/runtime/protoiface" - clusterclient "github.com/m3db/m3/src/cluster/client" 
"github.com/m3db/m3/src/cluster/generated/proto/commonpb" "github.com/m3db/m3/src/cluster/generated/proto/kvpb" @@ -40,6 +36,10 @@ import ( xerrors "github.com/m3db/m3/src/x/errors" "github.com/m3db/m3/src/x/instrument" xhttp "github.com/m3db/m3/src/x/net/http" + + "github.com/gogo/protobuf/jsonpb" + "go.uber.org/zap" + "google.golang.org/protobuf/runtime/protoiface" ) const ( From f880627baa57f2829150bed9ee788869ad1dea9f Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Wed, 20 Jan 2021 13:33:01 -0500 Subject: [PATCH 62/80] PR feedback 2 --- src/dbnode/server/server.go | 2 -- src/dbnode/storage/limits/query_limits.go | 8 ++++---- src/query/api/v1/handler/database/kvstore.go | 8 ++++---- 3 files changed, 8 insertions(+), 10 deletions(-) diff --git a/src/dbnode/server/server.go b/src/dbnode/server/server.go index f442ec4e2b..89dddb1d06 100644 --- a/src/dbnode/server/server.go +++ b/src/dbnode/server/server.go @@ -1181,8 +1181,6 @@ func kvWatchQueryLimit( } } else if !errors.Is(err, kv.ErrNotFound) { logger.Warn("error resolving query limit", zap.Error(err)) - } else { - logger.Info("query limit key not set") } watch, err := store.Watch(kvconfig.QueryLimits) diff --git a/src/dbnode/storage/limits/query_limits.go b/src/dbnode/storage/limits/query_limits.go index edf0c68208..e00f991a66 100644 --- a/src/dbnode/storage/limits/query_limits.go +++ b/src/dbnode/storage/limits/query_limits.go @@ -1,4 +1,4 @@ -// Copyright (c) 2020 Uber Technologies, Inc. +// Copyright (c) 2021 Uber Technologies, Inc. 
// // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -25,12 +25,12 @@ import ( "sync" "time" - xerrors "github.com/m3db/m3/src/x/errors" - "github.com/m3db/m3/src/x/instrument" - "github.com/uber-go/tally" "go.uber.org/atomic" "go.uber.org/zap" + + xerrors "github.com/m3db/m3/src/x/errors" + "github.com/m3db/m3/src/x/instrument" ) const ( diff --git a/src/query/api/v1/handler/database/kvstore.go b/src/query/api/v1/handler/database/kvstore.go index cbb7284ce9..cced1d2c5a 100644 --- a/src/query/api/v1/handler/database/kvstore.go +++ b/src/query/api/v1/handler/database/kvstore.go @@ -27,6 +27,10 @@ import ( "io/ioutil" "net/http" + "github.com/gogo/protobuf/jsonpb" + "go.uber.org/zap" + "google.golang.org/protobuf/runtime/protoiface" + clusterclient "github.com/m3db/m3/src/cluster/client" "github.com/m3db/m3/src/cluster/generated/proto/commonpb" "github.com/m3db/m3/src/cluster/generated/proto/kvpb" @@ -36,10 +40,6 @@ import ( xerrors "github.com/m3db/m3/src/x/errors" "github.com/m3db/m3/src/x/instrument" xhttp "github.com/m3db/m3/src/x/net/http" - - "github.com/gogo/protobuf/jsonpb" - "go.uber.org/zap" - "google.golang.org/protobuf/runtime/protoiface" ) const ( From 4cdea4cf06757da6bcfec94d0098e55ab735d024 Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Wed, 20 Jan 2021 14:08:46 -0500 Subject: [PATCH 63/80] Add kvstore test --- src/query/api/v1/handler/database/kvstore.go | 19 ++-- .../api/v1/handler/database/kvstore_test.go | 106 ++++++++++++++++++ 2 files changed, 116 insertions(+), 9 deletions(-) create mode 100644 src/query/api/v1/handler/database/kvstore_test.go diff --git a/src/query/api/v1/handler/database/kvstore.go b/src/query/api/v1/handler/database/kvstore.go index cced1d2c5a..697a99dc05 100644 --- a/src/query/api/v1/handler/database/kvstore.go +++ b/src/query/api/v1/handler/database/kvstore.go @@ -21,7 +21,6 @@ package database import ( - 
"context" "encoding/json" "errors" "io/ioutil" @@ -99,7 +98,14 @@ func (h *KeyValueStoreHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) return } - results, err := h.update(r.Context(), logger, update) + kvStore, err := h.client.KV() + if err != nil { + logger.Error("unable to get kv store", zap.Error(err)) + xhttp.WriteError(w, err) + return + } + + results, err := h.update(logger, kvStore, update) if err != nil { logger.Error("kv store error", zap.Error(err), @@ -127,17 +133,12 @@ func (h *KeyValueStoreHandler) parseBody(r *http.Request) (*KeyValueUpdate, erro } func (h *KeyValueStoreHandler) update( - _ context.Context, logger *zap.Logger, + kvStore kv.Store, update *KeyValueUpdate, ) (*KeyValueUpdateResult, error) { - kvStore, err := h.client.KV() - if err != nil { - return nil, err - } - old, err := kvStore.Get(update.Key) - if err != nil && errors.Is(err, kv.ErrNotFound) { + if err != nil && !errors.Is(err, kv.ErrNotFound) { return nil, err } diff --git a/src/query/api/v1/handler/database/kvstore_test.go b/src/query/api/v1/handler/database/kvstore_test.go new file mode 100644 index 0000000000..87982e115c --- /dev/null +++ b/src/query/api/v1/handler/database/kvstore_test.go @@ -0,0 +1,106 @@ +// Copyright (c) 2018 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package database + +import ( + "encoding/json" + "testing" + + "github.com/golang/mock/gomock" + "github.com/m3db/m3/src/cluster/generated/proto/kvpb" + "github.com/m3db/m3/src/cluster/kv" + "github.com/m3db/m3/src/dbnode/kvconfig" + + "github.com/stretchr/testify/require" + "go.uber.org/zap" +) + +func TestUpdateQueryLimits(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + tests := []struct { + name string + limits *kvpb.QueryLimits + expectedJSON string + }{ + { + name: `only block`, + limits: &kvpb.QueryLimits{ + MaxRecentlyQueriedSeriesBlocks: &kvpb.QueryLimit{ + Limit: 1, + LookbackSeconds: 15, + ForceExceeded: true, + }, + }, + expectedJSON: "maxRecentlyQueriedSeriesBlocks: ", + }, + } + + for _, test := range tests { + limitJSON, err := json.Marshal(test.limits) + require.NoError(t, err) + + update := &KeyValueUpdate{ + Key: kvconfig.QueryLimits, + Value: json.RawMessage(limitJSON), + Commit: false, + } + + storeMock := kv.NewMockStore(ctrl) + + // (A) test no old value. + storeMock.EXPECT().Get(kvconfig.QueryLimits).Return(nil, kv.ErrNotFound) + + handler := &KeyValueStoreHandler{} + r, err := handler.update(zap.NewNop(), storeMock, update) + require.NoError(t, err) + require.Equal(t, kvconfig.QueryLimits, r.Key) + require.Equal(t, "", r.Old) + require.Equal(t, test.expectedJSON, r.New) + require.Equal(t, 0, r.Version) + + // (B) test old value. 
+ mockVal := kv.NewMockValue(ctrl) + storeMock.EXPECT().Get(kvconfig.QueryLimits).Return(mockVal, nil) + mockVal.EXPECT().Unmarshal(gomock.Any()).DoAndReturn(func(v *kvpb.QueryLimits) error { + v.MaxRecentlyQueriedSeriesBlocks = &kvpb.QueryLimit{ + Limit: 10, + LookbackSeconds: 30, + ForceExceeded: false, + } + v.MaxRecentlyQueriedSeriesDiskBytesRead = &kvpb.QueryLimit{ + Limit: 100, + LookbackSeconds: 300, + ForceExceeded: false, + } + return nil + }) + + handler = &KeyValueStoreHandler{} + r, err = handler.update(zap.NewNop(), storeMock, update) + require.NoError(t, err) + require.Equal(t, kvconfig.QueryLimits, r.Key) + require.Equal(t, "maxRecentlyQueriedSeriesBlocks: maxRecentlyQueriedSeriesDiskBytesRead: ", r.Old) + require.Equal(t, test.expectedJSON, r.New) + require.Equal(t, 0, r.Version) + } +} From 9fc90b8c0ae76ad83ebd5d0af46fab7ff3b55dcd Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Wed, 20 Jan 2021 14:19:26 -0500 Subject: [PATCH 64/80] More tests --- .../api/v1/handler/database/kvstore_test.go | 87 +++++++++++++++++-- 1 file changed, 82 insertions(+), 5 deletions(-) diff --git a/src/query/api/v1/handler/database/kvstore_test.go b/src/query/api/v1/handler/database/kvstore_test.go index 87982e115c..144bdcf9c9 100644 --- a/src/query/api/v1/handler/database/kvstore_test.go +++ b/src/query/api/v1/handler/database/kvstore_test.go @@ -38,12 +38,26 @@ func TestUpdateQueryLimits(t *testing.T) { defer ctrl.Finish() tests := []struct { - name string - limits *kvpb.QueryLimits - expectedJSON string + name string + limits *kvpb.QueryLimits + commit bool + expectedJSON string + expectedError string }{ { - name: `only block`, + name: `nil`, + limits: nil, + commit: true, + expectedJSON: "", + }, + { + name: `empty`, + limits: &kvpb.QueryLimits{}, + commit: true, + expectedJSON: "", + }, + { + name: `only block - commit`, limits: &kvpb.QueryLimits{ MaxRecentlyQueriedSeriesBlocks: &kvpb.QueryLimit{ Limit: 1, @@ -51,8 +65,65 @@ func TestUpdateQueryLimits(t *testing.T) { 
ForceExceeded: true, }, }, + commit: true, expectedJSON: "maxRecentlyQueriedSeriesBlocks: ", }, + { + name: `only block - no commit`, + limits: &kvpb.QueryLimits{ + MaxRecentlyQueriedSeriesBlocks: &kvpb.QueryLimit{ + Limit: 1, + LookbackSeconds: 15, + ForceExceeded: true, + }, + }, + commit: false, + expectedJSON: "maxRecentlyQueriedSeriesBlocks: ", + }, + { + name: `all - commit`, + limits: &kvpb.QueryLimits{ + MaxRecentlyQueriedSeriesBlocks: &kvpb.QueryLimit{ + Limit: 1, + LookbackSeconds: 15, + ForceExceeded: true, + }, + MaxRecentlyQueriedSeriesDiskBytesRead: &kvpb.QueryLimit{ + Limit: 1, + LookbackSeconds: 15, + ForceExceeded: true, + }, + MaxRecentlyQueriedSeriesDiskRead: &kvpb.QueryLimit{ + Limit: 1, + LookbackSeconds: 15, + ForceExceeded: true, + }, + }, + commit: true, + expectedJSON: "maxRecentlyQueriedSeriesBlocks: maxRecentlyQueriedSeriesDiskBytesRead: maxRecentlyQueriedSeriesDiskRead: ", + }, + { + name: `all - no commit`, + limits: &kvpb.QueryLimits{ + MaxRecentlyQueriedSeriesBlocks: &kvpb.QueryLimit{ + Limit: 1, + LookbackSeconds: 15, + ForceExceeded: true, + }, + MaxRecentlyQueriedSeriesDiskBytesRead: &kvpb.QueryLimit{ + Limit: 1, + LookbackSeconds: 15, + ForceExceeded: true, + }, + MaxRecentlyQueriedSeriesDiskRead: &kvpb.QueryLimit{ + Limit: 1, + LookbackSeconds: 15, + ForceExceeded: true, + }, + }, + commit: false, + expectedJSON: "maxRecentlyQueriedSeriesBlocks: maxRecentlyQueriedSeriesDiskBytesRead: maxRecentlyQueriedSeriesDiskRead: ", + }, } for _, test := range tests { @@ -62,13 +133,16 @@ func TestUpdateQueryLimits(t *testing.T) { update := &KeyValueUpdate{ Key: kvconfig.QueryLimits, Value: json.RawMessage(limitJSON), - Commit: false, + Commit: test.commit, } storeMock := kv.NewMockStore(ctrl) // (A) test no old value. 
storeMock.EXPECT().Get(kvconfig.QueryLimits).Return(nil, kv.ErrNotFound) + if test.commit { + storeMock.EXPECT().Set(kvconfig.QueryLimits, gomock.Any()).Return(0, nil) + } handler := &KeyValueStoreHandler{} r, err := handler.update(zap.NewNop(), storeMock, update) @@ -94,6 +168,9 @@ func TestUpdateQueryLimits(t *testing.T) { } return nil }) + if test.commit { + storeMock.EXPECT().Set(kvconfig.QueryLimits, gomock.Any()).Return(0, nil) + } handler = &KeyValueStoreHandler{} r, err = handler.update(zap.NewNop(), storeMock, update) From 9b422adbf3fbc531bc54ef115422ceb937a75fc4 Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Wed, 20 Jan 2021 14:21:49 -0500 Subject: [PATCH 65/80] More tests 2 --- .../api/v1/handler/database/kvstore_test.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/query/api/v1/handler/database/kvstore_test.go b/src/query/api/v1/handler/database/kvstore_test.go index 144bdcf9c9..3943dee9d8 100644 --- a/src/query/api/v1/handler/database/kvstore_test.go +++ b/src/query/api/v1/handler/database/kvstore_test.go @@ -24,13 +24,13 @@ import ( "encoding/json" "testing" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + "github.com/golang/mock/gomock" "github.com/m3db/m3/src/cluster/generated/proto/kvpb" "github.com/m3db/m3/src/cluster/kv" "github.com/m3db/m3/src/dbnode/kvconfig" - - "github.com/stretchr/testify/require" - "go.uber.org/zap" ) func TestUpdateQueryLimits(t *testing.T) { @@ -66,7 +66,7 @@ func TestUpdateQueryLimits(t *testing.T) { }, }, commit: true, - expectedJSON: "maxRecentlyQueriedSeriesBlocks: ", + expectedJSON: `maxRecentlyQueriedSeriesBlocks: `, }, { name: `only block - no commit`, @@ -78,7 +78,7 @@ func TestUpdateQueryLimits(t *testing.T) { }, }, commit: false, - expectedJSON: "maxRecentlyQueriedSeriesBlocks: ", + expectedJSON: `maxRecentlyQueriedSeriesBlocks: `, }, { name: `all - commit`, @@ -100,7 +100,7 @@ func TestUpdateQueryLimits(t *testing.T) { }, }, commit: true, - expectedJSON: 
"maxRecentlyQueriedSeriesBlocks: maxRecentlyQueriedSeriesDiskBytesRead: maxRecentlyQueriedSeriesDiskRead: ", + expectedJSON: `maxRecentlyQueriedSeriesBlocks: maxRecentlyQueriedSeriesDiskBytesRead: maxRecentlyQueriedSeriesDiskRead: `, }, { name: `all - no commit`, @@ -122,7 +122,7 @@ func TestUpdateQueryLimits(t *testing.T) { }, }, commit: false, - expectedJSON: "maxRecentlyQueriedSeriesBlocks: maxRecentlyQueriedSeriesDiskBytesRead: maxRecentlyQueriedSeriesDiskRead: ", + expectedJSON: `maxRecentlyQueriedSeriesBlocks: maxRecentlyQueriedSeriesDiskBytesRead: maxRecentlyQueriedSeriesDiskRead: `, }, } @@ -176,7 +176,7 @@ func TestUpdateQueryLimits(t *testing.T) { r, err = handler.update(zap.NewNop(), storeMock, update) require.NoError(t, err) require.Equal(t, kvconfig.QueryLimits, r.Key) - require.Equal(t, "maxRecentlyQueriedSeriesBlocks: maxRecentlyQueriedSeriesDiskBytesRead: ", r.Old) + require.Equal(t, `maxRecentlyQueriedSeriesBlocks: maxRecentlyQueriedSeriesDiskBytesRead: `, r.Old) require.Equal(t, test.expectedJSON, r.New) require.Equal(t, 0, r.Version) } From a3c08570b4543412542519485ceb864545eed5d3 Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Wed, 20 Jan 2021 14:27:15 -0500 Subject: [PATCH 66/80] More tests 3 --- src/query/api/v1/handler/database/kvstore.go | 25 +++++++++++++------ .../api/v1/handler/database/kvstore_test.go | 7 ++++-- 2 files changed, 23 insertions(+), 9 deletions(-) diff --git a/src/query/api/v1/handler/database/kvstore.go b/src/query/api/v1/handler/database/kvstore.go index 697a99dc05..7772ca7cde 100644 --- a/src/query/api/v1/handler/database/kvstore.go +++ b/src/query/api/v1/handler/database/kvstore.go @@ -23,6 +23,7 @@ package database import ( "encoding/json" "errors" + "fmt" "io/ioutil" "net/http" @@ -34,6 +35,7 @@ import ( "github.com/m3db/m3/src/cluster/generated/proto/commonpb" "github.com/m3db/m3/src/cluster/generated/proto/kvpb" "github.com/m3db/m3/src/cluster/kv" + nsproto "github.com/m3db/m3/src/dbnode/generated/proto/namespace" 
"github.com/m3db/m3/src/dbnode/kvconfig" "github.com/m3db/m3/src/query/util/logging" xerrors "github.com/m3db/m3/src/x/errors" @@ -142,7 +144,11 @@ func (h *KeyValueStoreHandler) update( return nil, err } - oldProto := newKVProtoMessage(update.Key) + oldProto, err := newKVProtoMessage(update.Key) + if err != nil { + return nil, err + } + if old != nil { if err := old.Unmarshal(oldProto); err != nil { // Only log so we can overwrite corrupt existing entries. @@ -150,7 +156,11 @@ func (h *KeyValueStoreHandler) update( } } - newProto := newKVProtoMessage(update.Key) + newProto, err := newKVProtoMessage(update.Key) + if err != nil { + return nil, err + } + if err := jsonpb.UnmarshalString(string([]byte(update.Value)), newProto); err != nil { return nil, err } @@ -175,18 +185,19 @@ func (h *KeyValueStoreHandler) update( return &result, nil } -func newKVProtoMessage(key string) protoiface.MessageV1 { +func newKVProtoMessage(key string) (protoiface.MessageV1, error) { switch key { case kvconfig.NamespacesKey: - case kvconfig.BootstrapperKey: + return &nsproto.Registry{}, nil case kvconfig.ClusterNewSeriesInsertLimitKey: case kvconfig.EncodersPerBlockLimitKey: + return &commonpb.Int64Proto{}, nil case kvconfig.ClientBootstrapConsistencyLevel: case kvconfig.ClientReadConsistencyLevel: case kvconfig.ClientWriteConsistencyLevel: - return &commonpb.StringProto{} + return &commonpb.StringProto{}, nil case kvconfig.QueryLimits: - return &kvpb.QueryLimits{} + return &kvpb.QueryLimits{}, nil } - return nil + return nil, fmt.Errorf("unsupported kvstore key %s", key) } diff --git a/src/query/api/v1/handler/database/kvstore_test.go b/src/query/api/v1/handler/database/kvstore_test.go index 3943dee9d8..6712aa1287 100644 --- a/src/query/api/v1/handler/database/kvstore_test.go +++ b/src/query/api/v1/handler/database/kvstore_test.go @@ -99,7 +99,8 @@ func TestUpdateQueryLimits(t *testing.T) { ForceExceeded: true, }, }, - commit: true, + commit: true, + // nolint: lll expectedJSON: 
`maxRecentlyQueriedSeriesBlocks: maxRecentlyQueriedSeriesDiskBytesRead: maxRecentlyQueriedSeriesDiskRead: `, }, { @@ -121,7 +122,8 @@ func TestUpdateQueryLimits(t *testing.T) { ForceExceeded: true, }, }, - commit: false, + commit: false, + // nolint: lll expectedJSON: `maxRecentlyQueriedSeriesBlocks: maxRecentlyQueriedSeriesDiskBytesRead: maxRecentlyQueriedSeriesDiskRead: `, }, } @@ -176,6 +178,7 @@ func TestUpdateQueryLimits(t *testing.T) { r, err = handler.update(zap.NewNop(), storeMock, update) require.NoError(t, err) require.Equal(t, kvconfig.QueryLimits, r.Key) + // nolint: lll require.Equal(t, `maxRecentlyQueriedSeriesBlocks: maxRecentlyQueriedSeriesDiskBytesRead: `, r.Old) require.Equal(t, test.expectedJSON, r.New) require.Equal(t, 0, r.Version) From 34a5e3e5b3149f7196ed0959aa7e7047dfc18214 Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Wed, 20 Jan 2021 14:52:20 -0500 Subject: [PATCH 67/80] Lint --- src/cluster/generated/proto/kvpb/kv.proto | 3 --- src/query/api/v1/handler/database/kvstore_test.go | 2 +- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/src/cluster/generated/proto/kvpb/kv.proto b/src/cluster/generated/proto/kvpb/kv.proto index c75b236f83..ef2b2f60d5 100644 --- a/src/cluster/generated/proto/kvpb/kv.proto +++ b/src/cluster/generated/proto/kvpb/kv.proto @@ -44,6 +44,3 @@ message QueryLimit { int64 lookbackSeconds = 2; bool forceExceeded = 3; } - - - diff --git a/src/query/api/v1/handler/database/kvstore_test.go b/src/query/api/v1/handler/database/kvstore_test.go index 6712aa1287..5fdade14a9 100644 --- a/src/query/api/v1/handler/database/kvstore_test.go +++ b/src/query/api/v1/handler/database/kvstore_test.go @@ -24,10 +24,10 @@ import ( "encoding/json" "testing" + "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" "go.uber.org/zap" - "github.com/golang/mock/gomock" "github.com/m3db/m3/src/cluster/generated/proto/kvpb" "github.com/m3db/m3/src/cluster/kv" "github.com/m3db/m3/src/dbnode/kvconfig" From 
bdd3ad4491b8da3be60f726cece989fd0c92b9d5 Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Wed, 20 Jan 2021 17:12:45 -0500 Subject: [PATCH 68/80] Fallback to config-based limits if unset in dynamic --- src/dbnode/server/server.go | 57 ++++++++++++-------- src/query/api/v1/handler/database/kvstore.go | 5 +- 2 files changed, 38 insertions(+), 24 deletions(-) diff --git a/src/dbnode/server/server.go b/src/dbnode/server/server.go index 89dddb1d06..a013c0f56a 100644 --- a/src/dbnode/server/server.go +++ b/src/dbnode/server/server.go @@ -994,7 +994,7 @@ func Run(runOpts RunOptions) { runtimeOptsMgr, cfg.Limits.WriteNewSeriesPerSecond) kvWatchEncodersPerBlockLimit(syncCfg.KVStore, logger, runtimeOptsMgr, cfg.Limits.MaxEncodersPerBlock) - kvWatchQueryLimit(syncCfg.KVStore, logger, queryLimits) + kvWatchQueryLimit(syncCfg.KVStore, logger, queryLimits, limitOpts) }() // Wait for process interrupt. @@ -1171,13 +1171,14 @@ func kvWatchQueryLimit( store kv.Store, logger *zap.Logger, limits limits.QueryLimits, + defaultOpts limits.Options, ) { value, err := store.Get(kvconfig.QueryLimits) if err == nil { - protoValue := &kvpb.QueryLimits{} - err = value.Unmarshal(protoValue) - if err == nil && protoValue != nil { - updateQueryLimits(logger, limits, protoValue) + dynamicLimits := &kvpb.QueryLimits{} + err = value.Unmarshal(dynamicLimits) + if err == nil && dynamicLimits != nil { + updateQueryLimits(logger, limits, dynamicLimits, defaultOpts) } } else if !errors.Is(err, kv.ErrNotFound) { logger.Warn("error resolving query limit", zap.Error(err)) @@ -1190,14 +1191,14 @@ func kvWatchQueryLimit( } go func() { - protoValue := &kvpb.QueryLimits{} + dynamicLimits := &kvpb.QueryLimits{} for range watch.C() { if newValue := watch.Get(); newValue != nil { - if err := newValue.Unmarshal(protoValue); err != nil { + if err := newValue.Unmarshal(dynamicLimits); err != nil { logger.Warn("unable to parse new query limits", zap.Error(err)) continue } - updateQueryLimits(logger, limits, protoValue) 
+ updateQueryLimits(logger, limits, dynamicLimits, defaultOpts) } } }() @@ -1205,36 +1206,48 @@ func kvWatchQueryLimit( func updateQueryLimits(logger *zap.Logger, limits limits.QueryLimits, - settings *kvpb.QueryLimits, + dynamicLimits *kvpb.QueryLimits, + defaultOpts limits.Options, ) { - if settings == nil { + if dynamicLimits == nil { return } - if err := updateQueryLimit(limits.DocsLimit(), settings.MaxRecentlyQueriedSeriesBlocks); err != nil { + if err := updateQueryLimit(limits.DocsLimit(), + dynamicLimits.MaxRecentlyQueriedSeriesBlocks, + defaultOpts.DocsLimitOpts(), + ); err != nil { logger.Error("error updating docs limit", zap.Error(err)) } - if err := updateQueryLimit(limits.DiskSeriesReadLimit(), settings.MaxRecentlyQueriedSeriesDiskRead); err != nil { + if err := updateQueryLimit(limits.DiskSeriesReadLimit(), + dynamicLimits.MaxRecentlyQueriedSeriesDiskRead, + defaultOpts.DiskSeriesReadLimitOpts(), + ); err != nil { logger.Error("error updating series read limit", zap.Error(err)) } - if err := updateQueryLimit(limits.BytesReadLimit(), settings.MaxRecentlyQueriedSeriesDiskBytesRead); err != nil { + if err := updateQueryLimit(limits.BytesReadLimit(), + dynamicLimits.MaxRecentlyQueriedSeriesDiskBytesRead, + defaultOpts.BytesReadLimitOpts(), + ); err != nil { logger.Error("error updating bytes read limit", zap.Error(err)) } } func updateQueryLimit(limit limits.LookbackLimit, - limitOpts *kvpb.QueryLimit, + dynamicLimit *kvpb.QueryLimit, + defaultOpts limits.LookbackLimitOptions, ) error { - if limitOpts == nil { - return nil + // Default to the config-based limits if unset in dynamic limits. + // Otherwise, use the dynamic limit. 
+ new := defaultOpts + if dynamicLimit != nil { + new = limits.LookbackLimitOptions{ + Limit: dynamicLimit.Limit, + Lookback: time.Duration(dynamicLimit.LookbackSeconds) * time.Second, + ForceExceeded: dynamicLimit.ForceExceeded, + } } old := limit.Options() - new := limits.LookbackLimitOptions{ - Limit: limitOpts.Limit, - Lookback: time.Second * time.Duration(limitOpts.LookbackSeconds), - ForceExceeded: limitOpts.ForceExceeded, - } - if old.Equals(new) { return nil } diff --git a/src/query/api/v1/handler/database/kvstore.go b/src/query/api/v1/handler/database/kvstore.go index 7772ca7cde..8b3c5a9b0e 100644 --- a/src/query/api/v1/handler/database/kvstore.go +++ b/src/query/api/v1/handler/database/kvstore.go @@ -37,6 +37,7 @@ import ( "github.com/m3db/m3/src/cluster/kv" nsproto "github.com/m3db/m3/src/dbnode/generated/proto/namespace" "github.com/m3db/m3/src/dbnode/kvconfig" + "github.com/m3db/m3/src/query/api/v1/handler" "github.com/m3db/m3/src/query/util/logging" xerrors "github.com/m3db/m3/src/x/errors" "github.com/m3db/m3/src/x/instrument" @@ -45,7 +46,7 @@ import ( const ( // KeyValueStoreURL is the url to edit key/value configuration values. - KeyValueStoreURL = "/kvstore" + KeyValueStoreURL = handler.RoutePrefixV1 + "/kvstore" // KeyValueStoreHTTPMethod is the HTTP method used with this resource. 
KeyValueStoreHTTPMethod = http.MethodPost ) @@ -180,7 +181,7 @@ func (h *KeyValueStoreHandler) update( Version: version, } - logger.Error("updated kv store", zap.Any("result", result)) + logger.Info("kv store", zap.Any("update", *update), zap.Any("result", result)) return &result, nil } From aa89830b36e395a3da90385cb18d6457b2f75392 Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Wed, 20 Jan 2021 17:15:15 -0500 Subject: [PATCH 69/80] Fixes from feedback --- src/query/api/v1/handler/database/kvstore_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/query/api/v1/handler/database/kvstore_test.go b/src/query/api/v1/handler/database/kvstore_test.go index 5fdade14a9..4a6f047bab 100644 --- a/src/query/api/v1/handler/database/kvstore_test.go +++ b/src/query/api/v1/handler/database/kvstore_test.go @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Uber Technologies, Inc. +// Copyright (c) 2021 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal From 314e43f25a4ac7a8291f31456c82a0cd4587aac3 Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Wed, 20 Jan 2021 17:30:29 -0500 Subject: [PATCH 70/80] Add docs around dynamic limits --- .../operational_guide/resource_limits.md | 28 +++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/site/content/operational_guide/resource_limits.md b/site/content/operational_guide/resource_limits.md index f19268009f..cf23e8e2c4 100644 --- a/site/content/operational_guide/resource_limits.md +++ b/site/content/operational_guide/resource_limits.md @@ -94,6 +94,34 @@ limits: maxOutstandingReadRequests: 0 ``` +### Dynamic configuration + +Query limits can be dynamically driven by etcd to adjust limits without redeploying. By updating the `m3db.query.limits` key in etcd, specific limits can be overriden. 
M3Coordinator exposes an API for updating etcd key/value pairs and so this API can be used for modifying these dynamic overrides. For example, + +``` +curl -vvvsSf -X POST 0.0.0.0:7201/api/v1/kvstore -d '{ + "m3db.query.limits", + "value":{ + "maxRecentlyQueriedSeriesDiskBytesRead": { + "limit":0, + "lookbackSeconds":15, + "forceExceeded":false + }, + "maxRecentlyQueriedSeriesBlocks": { + "limit":0, + "lookbackSeconds":15, + "forceExceeded":false + } + }, + "commit":true +}' +``` + +Usage notes: +- The `commit` flag allows for dry-run API calls to see the old and new limits that would be applied. +- Omitting a limit from the `value` results in that limit to be driven by the config-based settings. +- The `forceExceeded` flag makes the limit behave as though it is permanently exceeded, thus failing all queries. This is useful for dynamically shutting down all queries in cases where load may be exceeding provisioned resources. + ## M3 Query and M3 Coordinator ### Deployment From 24b7e8768ed5e48e88024865bb6f85ca0dae3aaf Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Wed, 20 Jan 2021 17:37:46 -0500 Subject: [PATCH 71/80] More docs --- site/content/operational_guide/resource_limits.md | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/site/content/operational_guide/resource_limits.md b/site/content/operational_guide/resource_limits.md index cf23e8e2c4..8f3f9235b7 100644 --- a/site/content/operational_guide/resource_limits.md +++ b/site/content/operational_guide/resource_limits.md @@ -53,6 +53,8 @@ per second safely with your deployment and you want to use the default lookback of `15s` then you would multiply 10,000 by 15 to get 150,000 as a max value with a 15s lookback. +The third limit is `maxRecentlyQueriedSeriesDiskRead + ### Annotated configuration ```yaml @@ -82,6 +84,18 @@ limits: # and read until the lookback period resets. 
lookback: 15s + # If set, will enforce a maximum cap on the bytes read from disk that make up time series objects themselves (not their data). + # This limit can be used to ensure queries that match an extremely high volume of series can be limited before even + # reading the underlying series data from disk. + maxRecentlyQueriedSeriesDiskRead: + # Value sets the maximum disk bytes read to make up the time series objects. + value: 0 + # Lookback sets the time window that this limit is enforced over, every + # lookback period the global count is reset to zero and when the limit + # is reached it will reject any further time series blocks being matched + # and read until the lookback period resets. + lookback: 15s + # If set then will limit the number of parallel write batch requests to the # database and return errors if hit. maxOutstandingWriteRequests: 0 From 8596169a8abbd97dcfc48b7747014ef56919e4f1 Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Wed, 20 Jan 2021 20:21:46 -0500 Subject: [PATCH 72/80] More doc updates --- .../operational_guide/resource_limits.md | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/site/content/operational_guide/resource_limits.md b/site/content/operational_guide/resource_limits.md index 8f3f9235b7..8e69134733 100644 --- a/site/content/operational_guide/resource_limits.md +++ b/site/content/operational_guide/resource_limits.md @@ -53,7 +53,15 @@ per second safely with your deployment and you want to use the default lookback of `15s` then you would multiply 10,000 by 15 to get 150,000 as a max value with a 15s lookback. -The third limit is `maxRecentlyQueriedSeriesDiskRead +The third limit is `maxRecentlyQueriedSeriesDiskRead` caps the bytes associated with +series IDs matched by a given query. 
This originally was distinct from +`maxRecentlyQueriedSeriesBlocks`, which also limits the memory cost of specific series +matched in-memory, because of an inefficiency in how allocations would occur even for series +known to not be present on disk for a given shard. This inefficiency has been resolved +https://github.com/m3db/m3/pull/3103 and therefore this limit should be tracking memory cost +linearly compared to `maxRecentlyQueriedSeriesBlocks`. It is recommended to defer to using +`maxRecentlyQueriedSeriesBlocks` over `maxRecentlyQueriedSeriesDiskRead` given both should +be capping the resources in the same manner now. ### Annotated configuration @@ -125,6 +133,11 @@ curl -vvvsSf -X POST 0.0.0.0:7201/api/v1/kvstore -d '{ "limit":0, "lookbackSeconds":15, "forceExceeded":false + }, + "maxRecentlyQueriedSeriesDiskRead": { + "limit":0, + "lookbackSeconds":15, + "forceExceeded":false } }, "commit":true @@ -132,7 +145,7 @@ curl -vvvsSf -X POST 0.0.0.0:7201/api/v1/kvstore -d '{ ``` Usage notes: -- The `commit` flag allows for dry-run API calls to see the old and new limits that would be applied. +- Setting the `commit` flag to false allows for dry-run API calls to see the old and new limits that would be applied. - Omitting a limit from the `value` results in that limit to be driven by the config-based settings. - The `forceExceeded` flag makes the limit behave as though it is permanently exceeded, thus failing all queries. This is useful for dynamically shutting down all queries in cases where load may be exceeding provisioned resources. 
From 0967ae61c1b8d32c81b10b407617cb53c4f59ec2 Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Wed, 20 Jan 2021 20:38:25 -0500 Subject: [PATCH 73/80] Reorganize limit setting code --- src/dbnode/server/server.go | 68 ++++++++++++++++++++----------------- 1 file changed, 37 insertions(+), 31 deletions(-) diff --git a/src/dbnode/server/server.go b/src/dbnode/server/server.go index a013c0f56a..587901f5b7 100644 --- a/src/dbnode/server/server.go +++ b/src/dbnode/server/server.go @@ -1205,54 +1205,60 @@ func kvWatchQueryLimit( } func updateQueryLimits(logger *zap.Logger, - limits limits.QueryLimits, + queryLimits limits.QueryLimits, dynamicLimits *kvpb.QueryLimits, defaultOpts limits.Options, ) { - if dynamicLimits == nil { - return + var ( + // Default to the config-based limits if unset in dynamic limits. + // Otherwise, use the dynamic limit. + docsLimitOpts = defaultOpts.DocsLimitOpts() + diskSeriesReadLimitOpts = defaultOpts.DiskSeriesReadLimitOpts() + bytesReadLimitOpts = defaultOpts.BytesReadLimitOpts() + ) + if dynamicLimits != nil { + if dynamicLimits.MaxRecentlyQueriedSeriesBlocks != nil { + docsLimitOpts = dynamicLimitToLimitOpts(dynamicLimits.MaxRecentlyQueriedSeriesBlocks) + } + if dynamicLimits.MaxRecentlyQueriedSeriesDiskRead != nil { + diskSeriesReadLimitOpts = dynamicLimitToLimitOpts(dynamicLimits.MaxRecentlyQueriedSeriesDiskRead) + } + if dynamicLimits.MaxRecentlyQueriedSeriesDiskBytesRead != nil { + bytesReadLimitOpts = dynamicLimitToLimitOpts(dynamicLimits.MaxRecentlyQueriedSeriesDiskBytesRead) + } } - if err := updateQueryLimit(limits.DocsLimit(), - dynamicLimits.MaxRecentlyQueriedSeriesBlocks, - defaultOpts.DocsLimitOpts(), - ); err != nil { + + if err := updateQueryLimit(queryLimits.DocsLimit(), docsLimitOpts); err != nil { logger.Error("error updating docs limit", zap.Error(err)) } - if err := updateQueryLimit(limits.DiskSeriesReadLimit(), - dynamicLimits.MaxRecentlyQueriedSeriesDiskRead, - defaultOpts.DiskSeriesReadLimitOpts(), - ); err != nil { 
+ + if err := updateQueryLimit(queryLimits.DiskSeriesReadLimit(), diskSeriesReadLimitOpts); err != nil { logger.Error("error updating series read limit", zap.Error(err)) } - if err := updateQueryLimit(limits.BytesReadLimit(), - dynamicLimits.MaxRecentlyQueriedSeriesDiskBytesRead, - defaultOpts.BytesReadLimitOpts(), - ); err != nil { + + if err := updateQueryLimit(queryLimits.BytesReadLimit(), bytesReadLimitOpts); err != nil { logger.Error("error updating bytes read limit", zap.Error(err)) } } -func updateQueryLimit(limit limits.LookbackLimit, - dynamicLimit *kvpb.QueryLimit, - defaultOpts limits.LookbackLimitOptions, +func updateQueryLimit( + limit limits.LookbackLimit, + newOpts limits.LookbackLimitOptions, ) error { - // Default to the config-based limits if unset in dynamic limits. - // Otherwise, use the dynamic limit. - new := defaultOpts - if dynamicLimit != nil { - new = limits.LookbackLimitOptions{ - Limit: dynamicLimit.Limit, - Lookback: time.Duration(dynamicLimit.LookbackSeconds) * time.Second, - ForceExceeded: dynamicLimit.ForceExceeded, - } - } - old := limit.Options() - if old.Equals(new) { + if old.Equals(newOpts) { return nil } - return limit.Update(new) + return limit.Update(newOpts) +} + +func dynamicLimitToLimitOpts(dynamicLimit *kvpb.QueryLimit) limits.LookbackLimitOptions { + return limits.LookbackLimitOptions{ + Limit: dynamicLimit.Limit, + Lookback: time.Duration(dynamicLimit.LookbackSeconds) * time.Second, + ForceExceeded: dynamicLimit.ForceExceeded, + } } func kvWatchClientConsistencyLevels( From 6f540d6d20065cbdc5170c6766853f0382addfd3 Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Wed, 20 Jan 2021 20:40:41 -0500 Subject: [PATCH 74/80] Reorganize limit setting code 2 --- src/dbnode/server/server.go | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/src/dbnode/server/server.go b/src/dbnode/server/server.go index 587901f5b7..d01bb32986 100644 --- a/src/dbnode/server/server.go +++ 
b/src/dbnode/server/server.go @@ -1206,25 +1206,25 @@ func kvWatchQueryLimit( func updateQueryLimits(logger *zap.Logger, queryLimits limits.QueryLimits, - dynamicLimits *kvpb.QueryLimits, - defaultOpts limits.Options, + dynamicOpts *kvpb.QueryLimits, + configOpts limits.Options, ) { var ( // Default to the config-based limits if unset in dynamic limits. // Otherwise, use the dynamic limit. - docsLimitOpts = defaultOpts.DocsLimitOpts() - diskSeriesReadLimitOpts = defaultOpts.DiskSeriesReadLimitOpts() - bytesReadLimitOpts = defaultOpts.BytesReadLimitOpts() + docsLimitOpts = configOpts.DocsLimitOpts() + diskSeriesReadLimitOpts = configOpts.DiskSeriesReadLimitOpts() + bytesReadLimitOpts = configOpts.BytesReadLimitOpts() ) - if dynamicLimits != nil { - if dynamicLimits.MaxRecentlyQueriedSeriesBlocks != nil { - docsLimitOpts = dynamicLimitToLimitOpts(dynamicLimits.MaxRecentlyQueriedSeriesBlocks) + if dynamicOpts != nil { + if dynamicOpts.MaxRecentlyQueriedSeriesBlocks != nil { + docsLimitOpts = dynamicLimitToLimitOpts(dynamicOpts.MaxRecentlyQueriedSeriesBlocks) } - if dynamicLimits.MaxRecentlyQueriedSeriesDiskRead != nil { - diskSeriesReadLimitOpts = dynamicLimitToLimitOpts(dynamicLimits.MaxRecentlyQueriedSeriesDiskRead) + if dynamicOpts.MaxRecentlyQueriedSeriesDiskRead != nil { + diskSeriesReadLimitOpts = dynamicLimitToLimitOpts(dynamicOpts.MaxRecentlyQueriedSeriesDiskRead) } - if dynamicLimits.MaxRecentlyQueriedSeriesDiskBytesRead != nil { - bytesReadLimitOpts = dynamicLimitToLimitOpts(dynamicLimits.MaxRecentlyQueriedSeriesDiskBytesRead) + if dynamicOpts.MaxRecentlyQueriedSeriesDiskBytesRead != nil { + bytesReadLimitOpts = dynamicLimitToLimitOpts(dynamicOpts.MaxRecentlyQueriedSeriesDiskBytesRead) } } From 21732ef2dd1a5646e0bd178b8397d875a38cc790 Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Wed, 20 Jan 2021 20:48:41 -0500 Subject: [PATCH 75/80] Add more comprehensive locking --- src/dbnode/storage/limits/query_limits.go | 33 ++++++++++++++++++----- 1 file changed, 26 
insertions(+), 7 deletions(-) diff --git a/src/dbnode/storage/limits/query_limits.go b/src/dbnode/storage/limits/query_limits.go index e00f991a66..eb26de9f55 100644 --- a/src/dbnode/storage/limits/query_limits.go +++ b/src/dbnode/storage/limits/query_limits.go @@ -162,15 +162,19 @@ func (q *queryLimits) DiskSeriesReadLimit() LookbackLimit { } func (q *queryLimits) Start() { - q.docsLimit.start() - q.seriesDiskReadLimit.start() - q.bytesReadLimit.start() + // Lock on explicit start to avoid any collision with asynchronous updating + // which will call stop/start if the lookback has changed. + q.docsLimit.startWithLock() + q.seriesDiskReadLimit.startWithLock() + q.bytesReadLimit.startWithLock() } func (q *queryLimits) Stop() { - q.docsLimit.stop() - q.seriesDiskReadLimit.stop() - q.bytesReadLimit.stop() + // Lock on explicit start to avoid any collision with asynchronous updating + // which will call stop/start if the lookback has changed. + q.docsLimit.stopWithLock() + q.seriesDiskReadLimit.stopWithLock() + q.bytesReadLimit.stopWithLock() } func (q *queryLimits) AnyExceeded() error { @@ -184,7 +188,10 @@ func (q *queryLimits) AnyExceeded() error { } func (q *lookbackLimit) Options() LookbackLimitOptions { - return q.options + q.lock.RLock() + o := q.options + q.lock.RUnlock() + return o } // Update updates the limit. 
@@ -264,6 +271,18 @@ func (q *lookbackLimit) checkLimit(recent int64) error { return nil } +func (q *lookbackLimit) startWithLock() { + q.lock.Lock() + defer q.lock.Unlock() + q.start() +} + +func (q *lookbackLimit) stopWithLock() { + q.lock.Lock() + defer q.lock.Unlock() + q.stop() +} + func (q *lookbackLimit) start() { ticker := time.NewTicker(q.options.Lookback) go func() { From 4a4d5cdff289b6d33bc8c220665d754339bf43a9 Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Wed, 20 Jan 2021 20:51:04 -0500 Subject: [PATCH 76/80] Update wording --- site/content/operational_guide/resource_limits.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/site/content/operational_guide/resource_limits.md b/site/content/operational_guide/resource_limits.md index 8e69134733..2416eeba8e 100644 --- a/site/content/operational_guide/resource_limits.md +++ b/site/content/operational_guide/resource_limits.md @@ -53,13 +53,13 @@ per second safely with your deployment and you want to use the default lookback of `15s` then you would multiply 10,000 by 15 to get 150,000 as a max value with a 15s lookback. -The third limit is `maxRecentlyQueriedSeriesDiskRead` caps the bytes associated with -series IDs matched by a given query. This originally was distinct from +The third limit `maxRecentlyQueriedSeriesDiskRead` caps the bytes associated with +series IDs matched by incoming queries. This originally was distinct from the limit `maxRecentlyQueriedSeriesBlocks`, which also limits the memory cost of specific series matched in-memory, because of an inefficiency in how allocations would occur even for series known to not be present on disk for a given shard. This inefficiency has been resolved https://github.com/m3db/m3/pull/3103 and therefore this limit should be tracking memory cost -linearly compared to `maxRecentlyQueriedSeriesBlocks`. It is recommended to defer to using +linearly relative to `maxRecentlyQueriedSeriesBlocks`. 
It is recommended to defer to using `maxRecentlyQueriedSeriesBlocks` over `maxRecentlyQueriedSeriesDiskRead` given both should be capping the resources in the same manner now. From 5e00b2389bfb161c344af1babc67f350822d063a Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Wed, 20 Jan 2021 23:11:10 -0500 Subject: [PATCH 77/80] Update docs more --- site/content/operational_guide/resource_limits.md | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/site/content/operational_guide/resource_limits.md b/site/content/operational_guide/resource_limits.md index 2416eeba8e..5a531ec1aa 100644 --- a/site/content/operational_guide/resource_limits.md +++ b/site/content/operational_guide/resource_limits.md @@ -122,7 +122,7 @@ Query limits can be dynamically driven by etcd to adjust limits without redeploy ``` curl -vvvsSf -X POST 0.0.0.0:7201/api/v1/kvstore -d '{ - "m3db.query.limits", + "key": "m3db.query.limits", "value":{ "maxRecentlyQueriedSeriesDiskBytesRead": { "limit":0, @@ -144,6 +144,15 @@ curl -vvvsSf -X POST 0.0.0.0:7201/api/v1/kvstore -d '{ }' ``` +To remove all overrides, omit all limits from the `value` +``` +curl -vvvsSf -X POST 0.0.0.0:7201/api/v1/kvstore -d '{ + "key": "m3db.query.limits", + "value":{}, + "commit":true +}' +``` + Usage notes: - Setting the `commit` flag to false allows for dry-run API calls to see the old and new limits that would be applied. - Omitting a limit from the `value` results in that limit to be driven by the config-based settings. 
From a943da000c72c03fe7fa7aa17811674973969732 Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Thu, 21 Jan 2021 00:10:52 -0500 Subject: [PATCH 78/80] Update docs more 2 --- .../operational_guide/resource_limits.md | 23 +++++++++---------- 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/site/content/operational_guide/resource_limits.md b/site/content/operational_guide/resource_limits.md index 5a531ec1aa..b9de3cd458 100644 --- a/site/content/operational_guide/resource_limits.md +++ b/site/content/operational_guide/resource_limits.md @@ -53,15 +53,14 @@ per second safely with your deployment and you want to use the default lookback of `15s` then you would multiply 10,000 by 15 to get 150,000 as a max value with a 15s lookback. -The third limit `maxRecentlyQueriedSeriesDiskRead` caps the bytes associated with -series IDs matched by incoming queries. This originally was distinct from the limit -`maxRecentlyQueriedSeriesBlocks`, which also limits the memory cost of specific series -matched in-memory, because of an inefficiency in how allocations would occur even for series -known to not be present on disk for a given shard. This inefficiency has been resolved -https://github.com/m3db/m3/pull/3103 and therefore this limit should be tracking memory cost -linearly relative to `maxRecentlyQueriedSeriesBlocks`. It is recommended to defer to using -`maxRecentlyQueriedSeriesBlocks` over `maxRecentlyQueriedSeriesDiskRead` given both should -be capping the resources in the same manner now. +The third limit `maxRecentlyQueriedSeriesDiskRead` caps the series IDs matched by incoming +queries. This originally was distinct from the limit `maxRecentlyQueriedSeriesBlocks`, which +also limits the memory cost of specific series matched, because of an inefficiency +in how allocations would occur even for series known to not be present on disk for a given +shard. 
This inefficiency has been resolved https://github.com/m3db/m3/pull/3103 and therefore +this limit should be tracking memory cost linearly relative to `maxRecentlyQueriedSeriesBlocks`. +It is recommended to defer to using `maxRecentlyQueriedSeriesBlocks` over +`maxRecentlyQueriedSeriesDiskRead` given both should cap the resources similarly. ### Annotated configuration @@ -92,9 +91,9 @@ limits: # and read until the lookback period resets. lookback: 15s - # If set, will enforce a maximum cap on the bytes read from disk that make up time series objects themselves (not their data). - # This limit can be used to ensure queries that match an extremely high volume of series can be limited before even - # reading the underlying series data from disk. + # If set, will enforce a maximum cap on the series read from disk. + # This limit can be used to ensure queries that match an extremely high + # volume of series can be limited before even reading the underlying series data from disk. maxRecentlyQueriedSeriesDiskRead: # Value sets the maximum disk bytes read to make up the time series objects. value: 0 From b659237c02f16f188366b3a6dadac214d1dad665 Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Thu, 21 Jan 2021 10:32:38 -0500 Subject: [PATCH 79/80] PR feedback --- site/content/operational_guide/resource_limits.md | 4 ++-- src/dbnode/server/server.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/site/content/operational_guide/resource_limits.md b/site/content/operational_guide/resource_limits.md index b9de3cd458..3b6041388c 100644 --- a/site/content/operational_guide/resource_limits.md +++ b/site/content/operational_guide/resource_limits.md @@ -91,11 +91,11 @@ limits: # and read until the lookback period resets. lookback: 15s - # If set, will enforce a maximum cap on the series read from disk. + # If set, will enforce a maximum on the series read from disk. 
# This limit can be used to ensure queries that match an extremely high # volume of series can be limited before even reading the underlying series data from disk. maxRecentlyQueriedSeriesDiskRead: - # Value sets the maximum disk bytes read to make up the time series objects. + # Value sets the maximum number of series read from disk. value: 0 # Lookback sets the time window that this limit is enforced over, every # lookback period the global count is reset to zero and when the limit diff --git a/src/dbnode/server/server.go b/src/dbnode/server/server.go index d01bb32986..1a9e5cd378 100644 --- a/src/dbnode/server/server.go +++ b/src/dbnode/server/server.go @@ -1177,7 +1177,7 @@ func kvWatchQueryLimit( if err == nil { dynamicLimits := &kvpb.QueryLimits{} err = value.Unmarshal(dynamicLimits) - if err == nil && dynamicLimits != nil { + if err == nil { updateQueryLimits(logger, limits, dynamicLimits, defaultOpts) } } else if !errors.Is(err, kv.ErrNotFound) { From e07440d21e14192aea877abff180b24d4458b7d4 Mon Sep 17 00:00:00 2001 From: Ryan Allen Date: Thu, 21 Jan 2021 10:45:10 -0500 Subject: [PATCH 80/80] PR feedback 2 --- src/dbnode/storage/limits/query_limits.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/dbnode/storage/limits/query_limits.go b/src/dbnode/storage/limits/query_limits.go index eb26de9f55..5de39875a5 100644 --- a/src/dbnode/storage/limits/query_limits.go +++ b/src/dbnode/storage/limits/query_limits.go @@ -170,7 +170,7 @@ func (q *queryLimits) Start() { } func (q *queryLimits) Stop() { - // Lock on explicit start to avoid any collision with asynchronous updating + // Lock on explicit stop to avoid any collision with asynchronous updating // which will call stop/start if the lookback has changed. q.docsLimit.stopWithLock() q.seriesDiskReadLimit.stopWithLock()