diff --git a/app/app.go b/app/app.go index e9be7f35..3dab3c10 100644 --- a/app/app.go +++ b/app/app.go @@ -401,6 +401,7 @@ func New( keys[marketmoduletypes.StoreKey], keys[marketmoduletypes.MemStoreKey], app.GetSubspace(marketmoduletypes.ModuleName), + app.BankKeeper, ) marketModule := marketmodule.NewAppModule(appCodec, app.MarketKeeper, app.AccountKeeper, app.BankKeeper) diff --git a/docs/static/openapi.yml b/docs/static/openapi.yml index 74927ba5..59c1be82 100644 --- a/docs/static/openapi.yml +++ b/docs/static/openapi.yml @@ -727,6 +727,380 @@ paths: type: string tags: - Query + '/cosmos/auth/v1beta1/module_accounts/{name}': + get: + summary: ModuleAccountByName returns the module account info by module name + operationId: CosmosAuthV1Beta1ModuleAccountByName + responses: + '200': + description: A successful response. + schema: + type: object + properties: + account: + type: object + properties: + '@type': + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at least + + one "/" character. The last segment of the URL's path must + represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in a + canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary all + types that they + + expect it to use in the context of Any. However, for URLs + which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. 
Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in the + official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + additionalProperties: {} + description: >- + `Any` contains an arbitrary serialized protocol buffer message + along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in + the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default + use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the last + '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. 
Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding a + field + + `value` which holds the custom JSON in addition to the `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + description: >- + QueryModuleAccountByNameResponse is the response type for the + Query/ModuleAccountByName RPC method. + default: + description: An unexpected error response. + schema: + type: object + properties: + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + '@type': + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. 
Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + additionalProperties: {} + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. 
+ Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: name + in: path + required: true + type: string + tags: + - Query /cosmos/auth/v1beta1/params: get: summary: Params queries all parameters. @@ -3341,7 +3715,7 @@ paths: type: string format: byte title: original proposer of the block - description: Header defines the structure of a Tendermint block header. + description: Header defines the structure of a block header. data: type: object properties: @@ -3602,8 +3976,8 @@ paths: format: byte title: original proposer of the block description: >- - Header defines the structure of a - Tendermint block header. + Header defines the structure of a block + header. commit: type: object properties: @@ -3684,7 +4058,7 @@ paths: format: byte title: >- PublicKey defines the keys available for - use with Tendermint Validators + use with Validators voting_power: type: string format: int64 @@ -3708,7 +4082,7 @@ paths: format: byte title: >- PublicKey defines the keys available for - use with Tendermint Validators + use with Validators voting_power: type: string format: int64 @@ -3740,7 +4114,7 @@ paths: format: byte title: >- PublicKey defines the keys available for - use with Tendermint Validators + use with Validators voting_power: type: string format: int64 @@ -4121,7 +4495,7 @@ paths: type: string format: byte title: original proposer of the block - description: Header defines the structure of a Tendermint block header. 
+ description: Header defines the structure of a block header. data: type: object properties: @@ -4382,8 +4756,8 @@ paths: format: byte title: original proposer of the block description: >- - Header defines the structure of a - Tendermint block header. + Header defines the structure of a block + header. commit: type: object properties: @@ -4464,7 +4838,7 @@ paths: format: byte title: >- PublicKey defines the keys available for - use with Tendermint Validators + use with Validators voting_power: type: string format: int64 @@ -4488,7 +4862,7 @@ paths: format: byte title: >- PublicKey defines the keys available for - use with Tendermint Validators + use with Validators voting_power: type: string format: int64 @@ -4520,7 +4894,7 @@ paths: format: byte title: >- PublicKey defines the keys available for - use with Tendermint Validators + use with Validators voting_power: type: string format: int64 @@ -13814,7 +14188,7 @@ paths: type: string format: byte title: original proposer of the block - description: Header defines the structure of a Tendermint block header. + description: Header defines the structure of a block header. valset: type: array items: @@ -29199,10 +29573,10 @@ paths: format: uint64 tags: - Query - /market/market/params: + /market/portal/params: get: summary: Parameters queries the parameters of the module. - operationId: MarketMarketParams + operationId: MarketPortalParams responses: '200': description: A successful response. @@ -29235,21 +29609,79 @@ paths: additionalProperties: {} tags: - Query - /tendermint/spn/monitoringp/connection_channel_id: + '/pendulum-labs/market/market/book/{denomA}/{denomB}/{orderType}': get: - summary: Queries a ConnectionChannelID by index. - operationId: TendermintSpnMonitoringpConnectionChannelID + summary: Queries a list of Book items. + operationId: PendulumlabsMarketMarketBook responses: '200': description: A successful response. 
schema: type: object properties: - ConnectionChannelID: + book: + type: array + items: + type: object + properties: + uid: + type: string + format: uint64 + owner: + type: string + status: + type: string + orderType: + type: string + denomAsk: + type: string + denomBid: + type: string + amount: + type: string + rate: + type: array + items: + type: string + prev: + type: string + format: uint64 + next: + type: string + format: uint64 + beg_time: + type: string + format: int64 + upd_time: + type: string + format: int64 + pagination: type: object properties: - channelID: + next_key: + type: string + format: byte + title: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently + total: type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: >- + PageResponse is to be embedded in gRPC response messages where + the + + corresponding request message has used PageRequest. + + message SomeResponse { + repeated Bar results = 1; + PageResponse page = 2; + } default: description: An unexpected error response. schema: @@ -29268,23 +29700,103 @@ paths: '@type': type: string additionalProperties: {} + parameters: + - name: denomA + in: path + required: true + type: string + - name: denomB + in: path + required: true + type: string + - name: orderType + in: path + required: true + type: string + - name: pagination.key + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. + in: query + required: false + type: string + format: byte + - name: pagination.offset + description: >- + offset is a numeric offset that can be used when key is unavailable. + + It is less efficient than using key. Only one of offset or key + should + + be set. 
+ in: query + required: false + type: string + format: uint64 + - name: pagination.limit + description: >- + limit is the total number of results to be returned in the result + page. + + If left empty it will default to a value to be set by each app. + in: query + required: false + type: string + format: uint64 + - name: pagination.count_total + description: >- + count_total is set to true to indicate that the result set should + include + + a count of the total number of items available for pagination in + UIs. + + count_total is only respected when offset is used. It is ignored + when key + + is set. + in: query + required: false + type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. + + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean tags: - Query - /tendermint/spn/monitoringp/consumer_client_id: + '/pendulum-labs/market/market/bookends/{coinA}/{coinB}/{orderType}/{rate}': get: - summary: Queries a ConsumerClientID by index. - operationId: TendermintSpnMonitoringpConsumerClientID + summary: Queries a list of Bookends items. + operationId: PendulumlabsMarketMarketBookends responses: '200': description: A successful response. schema: type: object properties: - ConsumerClientID: - type: object - properties: - clientID: - type: string + coinA: + type: string + coinB: + type: string + orderType: + type: string + rate: + type: array + items: + type: string + prev: + type: string + format: uint64 + next: + type: string + format: uint64 default: description: An unexpected error response. 
schema: @@ -29303,48 +29815,43 @@ paths: '@type': type: string additionalProperties: {} + parameters: + - name: coinA + in: path + required: true + type: string + - name: coinB + in: path + required: true + type: string + - name: orderType + in: path + required: true + type: string + - name: rate + in: path + required: true + type: array + items: + type: string + collectionFormat: csv + minItems: 1 tags: - Query - /tendermint/spn/monitoringp/monitoring_info: + /pendulum-labs/market/market/burned: get: - summary: Queries a MonitoringInfo by index. - operationId: TendermintSpnMonitoringpMonitoringInfo + summary: Queries total burned. + operationId: PendulumlabsMarketMarketBurned responses: '200': description: A successful response. schema: type: object properties: - MonitoringInfo: - type: object - properties: - transmitted: - type: boolean - signatureCounts: - type: object - properties: - blockCount: - type: string - format: uint64 - counts: - type: array - items: - type: object - properties: - opAddress: - type: string - RelativeSignatures: - type: string - title: >- - SignatureCount contains information of signature - reporting for one specific validator with consensus - address - - RelativeSignatures is the sum of all signatures - relative to the validator set size - title: >- - SignatureCounts contains information about signature - reporting for a number of blocks + denom: + type: string + amount: + type: string default: description: An unexpected error response. schema: @@ -29365,52 +29872,52 @@ paths: additionalProperties: {} tags: - Query - /tendermint/spn/monitoringp/params: + /pendulum-labs/market/market/burnings: get: - summary: Params queries the parameters of the module. - operationId: TendermintSpnMonitoringpParams + summary: Queries a list of Burnings items. + operationId: PendulumlabsMarketMarketBurningsAll responses: '200': description: A successful response. 
schema: type: object properties: - params: + burnings: + type: array + items: + type: object + properties: + denom: + type: string + amount: + type: string + pagination: type: object properties: - lastBlockHeight: + next_key: type: string - format: int64 - consumerChainID: + format: byte + title: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently + total: type: string - consumerConsensusState: - type: object - properties: - nextValidatorsHash: - type: string - timestamp: - type: string - root: - type: object - properties: - hash: - type: string - title: MerkleRoot represents a Merkle Root in ConsensusState + format: uint64 title: >- - ConsensusState represents a Consensus State + total is total number of results available if + PageRequest.count_total - it is compatible with the dumped state from `appd q ibc - client self-consensus-state` command - consumerUnbondingPeriod: - type: string - format: int64 - consumerRevisionHeight: - type: string - format: uint64 - description: Params defines the parameters for the module. - description: >- - QueryParamsResponse is response type for the Query/Params RPC - method. + was set, its value is undefined otherwise + description: >- + PageResponse is to be embedded in gRPC response messages where + the + + corresponding request message has used PageRequest. + + message SomeResponse { + repeated Bar results = 1; + PageResponse page = 2; + } default: description: An unexpected error response. 
schema: @@ -29429,87 +29936,2223 @@ paths: '@type': type: string additionalProperties: {} - tags: - - Query -definitions: - cosmos.auth.v1beta1.Params: - type: object - properties: - max_memo_characters: - type: string - format: uint64 - tx_sig_limit: - type: string - format: uint64 - tx_size_cost_per_byte: - type: string - format: uint64 - sig_verify_cost_ed25519: - type: string - format: uint64 - sig_verify_cost_secp256k1: - type: string - format: uint64 - description: Params defines the parameters for the auth module. - cosmos.auth.v1beta1.QueryAccountResponse: - type: object - properties: - account: - description: account defines the account of the corresponding address. - type: object - properties: - '@type': - type: string - description: >- - A URL/resource name that uniquely identifies the type of the - serialized + parameters: + - name: pagination.key + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. + in: query + required: false + type: string + format: byte + - name: pagination.offset + description: >- + offset is a numeric offset that can be used when key is unavailable. - protocol buffer message. This string must contain at least + It is less efficient than using key. Only one of offset or key + should - one "/" character. The last segment of the URL's path must - represent + be set. + in: query + required: false + type: string + format: uint64 + - name: pagination.limit + description: >- + limit is the total number of results to be returned in the result + page. - the fully qualified name of the type (as in + If left empty it will default to a value to be set by each app. + in: query + required: false + type: string + format: uint64 + - name: pagination.count_total + description: >- + count_total is set to true to indicate that the result set should + include - `path/google.protobuf.Duration`). 
The name should be in a - canonical form + a count of the total number of items available for pagination in + UIs. - (e.g., leading "." is not accepted). + count_total is only respected when offset is used. It is ignored + when key + is set. + in: query + required: false + type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. - In practice, teams usually precompile into the binary all types - that they - expect it to use in the context of Any. However, for URLs which - use the + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean + tags: + - Query + '/pendulum-labs/market/market/burnings/{denom}': + get: + summary: Queries a Burnings by index. + operationId: PendulumlabsMarketMarketBurnings + responses: + '200': + description: A successful response. + schema: + type: object + properties: + burnings: + type: object + properties: + denom: + type: string + amount: + type: string + default: + description: An unexpected error response. + schema: + type: object + properties: + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + '@type': + type: string + additionalProperties: {} + parameters: + - name: denom + in: path + required: true + type: string + tags: + - Query + /pendulum-labs/market/market/drop: + get: + summary: Queries a list of Drop items. + operationId: PendulumlabsMarketMarketDropAll + responses: + '200': + description: A successful response. 
+ schema: + type: object + properties: + drops: + type: array + items: + type: object + properties: + uid: + type: string + format: uint64 + owner: + type: string + pair: + type: string + drops: + type: string + product: + type: string + active: + type: boolean + pagination: + type: object + properties: + next_key: + type: string + format: byte + title: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total - scheme `http`, `https`, or no scheme, one can optionally set up a - type + was set, its value is undefined otherwise + description: >- + PageResponse is to be embedded in gRPC response messages where + the - server that maps type URLs to message definitions as follows: + corresponding request message has used PageRequest. + message SomeResponse { + repeated Bar results = 1; + PageResponse page = 2; + } + default: + description: An unexpected error response. + schema: + type: object + properties: + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + '@type': + type: string + additionalProperties: {} + parameters: + - name: pagination.key + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. + in: query + required: false + type: string + format: byte + - name: pagination.offset + description: >- + offset is a numeric offset that can be used when key is unavailable. - * If no scheme is provided, `https` is assumed. + It is less efficient than using key. Only one of offset or key + should - * An HTTP GET on the URL must yield a [google.protobuf.Type][] - value in binary format, or produce an error. 
- * Applications are allowed to cache lookup results based on the - URL, or have them precompiled into a binary to avoid any - lookup. Therefore, binary compatibility needs to be preserved - on changes to types. (Use versioned type names to manage - breaking changes.) + be set. + in: query + required: false + type: string + format: uint64 + - name: pagination.limit + description: >- + limit is the total number of results to be returned in the result + page. - Note: this functionality is not currently available in the - official + If left empty it will default to a value to be set by each app. + in: query + required: false + type: string + format: uint64 + - name: pagination.count_total + description: >- + count_total is set to true to indicate that the result set should + include - protobuf release, and it is not used for type URLs beginning with + a count of the total number of items available for pagination in + UIs. - type.googleapis.com. + count_total is only respected when offset is used. It is ignored + when key + is set. + in: query + required: false + type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. - Schemes other than `http`, `https` (or the empty scheme) might be - used with implementation specific semantics. + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean + tags: + - Query + '/pendulum-labs/market/market/drop/amounts/{uid}': + get: + summary: Queries a Drop by index. + operationId: PendulumlabsMarketMarketDropAmounts + responses: + '200': + description: A successful response. + schema: + type: object + properties: + denom1: + type: string + denom2: + type: string + amount1: + type: string + amount2: + type: string + default: + description: An unexpected error response. 
+ schema: + type: object + properties: + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + '@type': + type: string + additionalProperties: {} + parameters: + - name: uid + in: path + required: true + type: string + format: uint64 + tags: + - Query + '/pendulum-labs/market/market/drop/coin/{denomA}/{denomB}/{amountA}': + get: + summary: Queries a Drop by index. + operationId: PendulumlabsMarketMarketDropCoin + responses: + '200': + description: A successful response. + schema: + type: object + properties: + drops: + type: string + amountB: + type: string + default: + description: An unexpected error response. + schema: + type: object + properties: + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + '@type': + type: string + additionalProperties: {} + parameters: + - name: denomA + in: path + required: true + type: string + - name: denomB + in: path + required: true + type: string + - name: amountA + in: path + required: true + type: string + tags: + - Query + '/pendulum-labs/market/market/drop/coins/{pair}/{drops}': + get: + summary: Converts drops to coin amounts + operationId: PendulumlabsMarketMarketDropsToCoins + responses: + '200': + description: A successful response. + schema: + type: object + properties: + denom1: + type: string + denom2: + type: string + amount1: + type: string + amount2: + type: string + default: + description: An unexpected error response. 
+ schema: + type: object + properties: + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + '@type': + type: string + additionalProperties: {} + parameters: + - name: pair + in: path + required: true + type: string + - name: drops + in: path + required: true + type: string + tags: + - Query + '/pendulum-labs/market/market/drop/pairs/{address}': + get: + summary: Queries a Drop by index. + operationId: PendulumlabsMarketMarketDropPairs + responses: + '200': + description: A successful response. + schema: + type: object + properties: + pairs: + type: array + items: + type: string + default: + description: An unexpected error response. + schema: + type: object + properties: + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + '@type': + type: string + additionalProperties: {} + parameters: + - name: address + in: path + required: true + type: string + tags: + - Query + '/pendulum-labs/market/market/drop/{address}/{pair}': + get: + summary: Queries a Drop by index. + operationId: PendulumlabsMarketMarketDropOwnerPair + responses: + '200': + description: A successful response. 
+ schema: + type: object + properties: + drops: + type: array + items: + type: object + properties: + uid: + type: string + format: uint64 + owner: + type: string + pair: + type: string + drops: + type: string + product: + type: string + active: + type: boolean + pagination: + type: object + properties: + next_key: + type: string + format: byte + title: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: >- + PageResponse is to be embedded in gRPC response messages where + the + + corresponding request message has used PageRequest. + + message SomeResponse { + repeated Bar results = 1; + PageResponse page = 2; + } + default: + description: An unexpected error response. + schema: + type: object + properties: + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + '@type': + type: string + additionalProperties: {} + parameters: + - name: address + in: path + required: true + type: string + - name: pair + in: path + required: true + type: string + - name: pagination.key + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. + in: query + required: false + type: string + format: byte + - name: pagination.offset + description: >- + offset is a numeric offset that can be used when key is unavailable. + + It is less efficient than using key. Only one of offset or key + should + + be set. + in: query + required: false + type: string + format: uint64 + - name: pagination.limit + description: >- + limit is the total number of results to be returned in the result + page. + + If left empty it will default to a value to be set by each app. 
+ in: query + required: false + type: string + format: uint64 + - name: pagination.count_total + description: >- + count_total is set to true to indicate that the result set should + include + + a count of the total number of items available for pagination in + UIs. + + count_total is only respected when offset is used. It is ignored + when key + + is set. + in: query + required: false + type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. + + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean + tags: + - Query + '/pendulum-labs/market/market/drop/{uid}': + get: + summary: Queries a Drop by index. + operationId: PendulumlabsMarketMarketDrop + responses: + '200': + description: A successful response. + schema: + type: object + properties: + drop: + type: object + properties: + uid: + type: string + format: uint64 + owner: + type: string + pair: + type: string + drops: + type: string + product: + type: string + active: + type: boolean + default: + description: An unexpected error response. + schema: + type: object + properties: + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + '@type': + type: string + additionalProperties: {} + parameters: + - name: uid + in: path + required: true + type: string + format: uint64 + tags: + - Query + '/pendulum-labs/market/market/history/{pair}': + get: + summary: Queries pool trade history. + operationId: PendulumlabsMarketMarketHistory + responses: + '200': + description: A successful response. 
+ schema: + type: object + properties: + history: + type: array + items: + type: object + properties: + uid: + type: string + format: uint64 + owner: + type: string + status: + type: string + orderType: + type: string + denomAsk: + type: string + denomBid: + type: string + amount: + type: string + rate: + type: array + items: + type: string + prev: + type: string + format: uint64 + next: + type: string + format: uint64 + beg_time: + type: string + format: int64 + upd_time: + type: string + format: int64 + pagination: + type: object + properties: + next_key: + type: string + format: byte + title: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: >- + PageResponse is to be embedded in gRPC response messages where + the + + corresponding request message has used PageRequest. + + message SomeResponse { + repeated Bar results = 1; + PageResponse page = 2; + } + default: + description: An unexpected error response. + schema: + type: object + properties: + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + '@type': + type: string + additionalProperties: {} + parameters: + - name: pair + in: path + required: true + type: string + - name: length + in: query + required: false + type: string + - name: pagination.key + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. + in: query + required: false + type: string + format: byte + - name: pagination.offset + description: >- + offset is a numeric offset that can be used when key is unavailable. + + It is less efficient than using key. Only one of offset or key + should + + be set. 
+ in: query + required: false + type: string + format: uint64 + - name: pagination.limit + description: >- + limit is the total number of results to be returned in the result + page. + + If left empty it will default to a value to be set by each app. + in: query + required: false + type: string + format: uint64 + - name: pagination.count_total + description: >- + count_total is set to true to indicate that the result set should + include + + a count of the total number of items available for pagination in + UIs. + + count_total is only respected when offset is used. It is ignored + when key + + is set. + in: query + required: false + type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. + + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean + tags: + - Query + /pendulum-labs/market/market/member: + get: + summary: Queries a list of Member items. + operationId: PendulumlabsMarketMarketMemberAll + responses: + '200': + description: A successful response. + schema: + type: object + properties: + member: + type: array + items: + type: object + properties: + pair: + type: string + denomA: + type: string + denomB: + type: string + balance: + type: string + previous: + type: string + limit: + type: string + format: uint64 + stop: + type: string + format: uint64 + pagination: + type: object + properties: + next_key: + type: string + format: byte + title: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: >- + PageResponse is to be embedded in gRPC response messages where + the + + corresponding request message has used PageRequest. 
+ + message SomeResponse { + repeated Bar results = 1; + PageResponse page = 2; + } + default: + description: An unexpected error response. + schema: + type: object + properties: + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + '@type': + type: string + additionalProperties: {} + parameters: + - name: pagination.key + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. + in: query + required: false + type: string + format: byte + - name: pagination.offset + description: >- + offset is a numeric offset that can be used when key is unavailable. + + It is less efficient than using key. Only one of offset or key + should + + be set. + in: query + required: false + type: string + format: uint64 + - name: pagination.limit + description: >- + limit is the total number of results to be returned in the result + page. + + If left empty it will default to a value to be set by each app. + in: query + required: false + type: string + format: uint64 + - name: pagination.count_total + description: >- + count_total is set to true to indicate that the result set should + include + + a count of the total number of items available for pagination in + UIs. + + count_total is only respected when offset is used. It is ignored + when key + + is set. + in: query + required: false + type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. + + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean + tags: + - Query + '/pendulum-labs/market/market/member/{denomA}/{denomB}': + get: + summary: Queries a Member by index. + operationId: PendulumlabsMarketMarketMember + responses: + '200': + description: A successful response. 
+ schema: + type: object + properties: + member: + type: object + properties: + pair: + type: string + denomA: + type: string + denomB: + type: string + balance: + type: string + previous: + type: string + limit: + type: string + format: uint64 + stop: + type: string + format: uint64 + default: + description: An unexpected error response. + schema: + type: object + properties: + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + '@type': + type: string + additionalProperties: {} + parameters: + - name: denomA + in: path + required: true + type: string + - name: denomB + in: path + required: true + type: string + tags: + - Query + /pendulum-labs/market/market/order: + get: + summary: Queries a list of Order items. + operationId: PendulumlabsMarketMarketOrderAll + responses: + '200': + description: A successful response. + schema: + type: object + properties: + orders: + type: array + items: + type: object + properties: + uid: + type: string + format: uint64 + owner: + type: string + status: + type: string + orderType: + type: string + denomAsk: + type: string + denomBid: + type: string + amount: + type: string + rate: + type: array + items: + type: string + prev: + type: string + format: uint64 + next: + type: string + format: uint64 + beg_time: + type: string + format: int64 + upd_time: + type: string + format: int64 + pagination: + type: object + properties: + next_key: + type: string + format: byte + title: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: >- + PageResponse is to be embedded in gRPC response messages where + the + + corresponding request message has used PageRequest. 
+ + message SomeResponse { + repeated Bar results = 1; + PageResponse page = 2; + } + default: + description: An unexpected error response. + schema: + type: object + properties: + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + '@type': + type: string + additionalProperties: {} + parameters: + - name: pagination.key + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. + in: query + required: false + type: string + format: byte + - name: pagination.offset + description: >- + offset is a numeric offset that can be used when key is unavailable. + + It is less efficient than using key. Only one of offset or key + should + + be set. + in: query + required: false + type: string + format: uint64 + - name: pagination.limit + description: >- + limit is the total number of results to be returned in the result + page. + + If left empty it will default to a value to be set by each app. + in: query + required: false + type: string + format: uint64 + - name: pagination.count_total + description: >- + count_total is set to true to indicate that the result set should + include + + a count of the total number of items available for pagination in + UIs. + + count_total is only respected when offset is used. It is ignored + when key + + is set. + in: query + required: false + type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. + + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean + tags: + - Query + '/pendulum-labs/market/market/order/uids/{address}': + get: + summary: Queries a list of Order items. + operationId: PendulumlabsMarketMarketOrderOwnerUids + responses: + '200': + description: A successful response. 
+ schema: + type: object + properties: + orders: + type: object + properties: + uids: + type: array + items: + type: string + format: uint64 + pagination: + type: object + properties: + next_key: + type: string + format: byte + title: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: >- + PageResponse is to be embedded in gRPC response messages where + the + + corresponding request message has used PageRequest. + + message SomeResponse { + repeated Bar results = 1; + PageResponse page = 2; + } + default: + description: An unexpected error response. + schema: + type: object + properties: + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + '@type': + type: string + additionalProperties: {} + parameters: + - name: address + in: path + required: true + type: string + - name: pagination.key + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. + in: query + required: false + type: string + format: byte + - name: pagination.offset + description: >- + offset is a numeric offset that can be used when key is unavailable. + + It is less efficient than using key. Only one of offset or key + should + + be set. + in: query + required: false + type: string + format: uint64 + - name: pagination.limit + description: >- + limit is the total number of results to be returned in the result + page. + + If left empty it will default to a value to be set by each app. 
+ in: query + required: false + type: string + format: uint64 + - name: pagination.count_total + description: >- + count_total is set to true to indicate that the result set should + include + + a count of the total number of items available for pagination in + UIs. + + count_total is only respected when offset is used. It is ignored + when key + + is set. + in: query + required: false + type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. + + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean + tags: + - Query + '/pendulum-labs/market/market/order/{address}': + get: + summary: Queries a list of Order items. + operationId: PendulumlabsMarketMarketOrderOwner + responses: + '200': + description: A successful response. + schema: + type: object + properties: + orders: + type: array + items: + type: object + properties: + uid: + type: string + format: uint64 + owner: + type: string + status: + type: string + orderType: + type: string + denomAsk: + type: string + denomBid: + type: string + amount: + type: string + rate: + type: array + items: + type: string + prev: + type: string + format: uint64 + next: + type: string + format: uint64 + beg_time: + type: string + format: int64 + upd_time: + type: string + format: int64 + pagination: + type: object + properties: + next_key: + type: string + format: byte + title: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: >- + PageResponse is to be embedded in gRPC response messages where + the + + corresponding request message has used PageRequest. + + message SomeResponse { + repeated Bar results = 1; + PageResponse page = 2; + } + default: + description: An unexpected error response. 
+ schema: + type: object + properties: + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + '@type': + type: string + additionalProperties: {} + parameters: + - name: address + in: path + required: true + type: string + - name: pagination.key + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. + in: query + required: false + type: string + format: byte + - name: pagination.offset + description: >- + offset is a numeric offset that can be used when key is unavailable. + + It is less efficient than using key. Only one of offset or key + should + + be set. + in: query + required: false + type: string + format: uint64 + - name: pagination.limit + description: >- + limit is the total number of results to be returned in the result + page. + + If left empty it will default to a value to be set by each app. + in: query + required: false + type: string + format: uint64 + - name: pagination.count_total + description: >- + count_total is set to true to indicate that the result set should + include + + a count of the total number of items available for pagination in + UIs. + + count_total is only respected when offset is used. It is ignored + when key + + is set. + in: query + required: false + type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. + + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean + tags: + - Query + '/pendulum-labs/market/market/order/{uid}': + get: + summary: Queries a Order by index. + operationId: PendulumlabsMarketMarketOrder + responses: + '200': + description: A successful response. 
+ schema: + type: object + properties: + order: + type: object + properties: + uid: + type: string + format: uint64 + owner: + type: string + status: + type: string + orderType: + type: string + denomAsk: + type: string + denomBid: + type: string + amount: + type: string + rate: + type: array + items: + type: string + prev: + type: string + format: uint64 + next: + type: string + format: uint64 + beg_time: + type: string + format: int64 + upd_time: + type: string + format: int64 + default: + description: An unexpected error response. + schema: + type: object + properties: + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + '@type': + type: string + additionalProperties: {} + parameters: + - name: uid + in: path + required: true + type: string + format: uint64 + tags: + - Query + /pendulum-labs/market/market/params: + get: + summary: Parameters queries the parameters of the module. + operationId: PendulumlabsMarketMarketParams + responses: + '200': + description: A successful response. + schema: + type: object + properties: + params: + description: params holds all the parameters of this module. + type: object + properties: + earn_rates: + type: string + title: |- + leader earnings rates + 1,2,3 Comma separated, no space + burn_rate: + type: string + title: pool burning rate + burn_coin: + type: string + title: burn coin + market_fee: + type: string + title: >- + market_fee (parameter / 10000), 9999 representing as + 99.99% + description: >- + QueryParamsResponse is response type for the Query/Params RPC + method. + default: + description: An unexpected error response. + schema: + type: object + properties: + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + '@type': + type: string + additionalProperties: {} + tags: + - Query + /pendulum-labs/market/market/pool: + get: + summary: Queries a list of Pool items. 
+ operationId: PendulumlabsMarketMarketPoolAll + responses: + '200': + description: A successful response. + schema: + type: object + properties: + pool: + type: array + items: + type: object + properties: + pair: + type: string + denom1: + type: string + denom2: + type: string + volume1: + type: object + properties: + denom: + type: string + amount: + type: string + volume2: + type: object + properties: + denom: + type: string + amount: + type: string + leaders: + type: array + items: + type: object + properties: + address: + type: string + drops: + type: string + drops: + type: string + history: + type: string + format: uint64 + pagination: + type: object + properties: + next_key: + type: string + format: byte + title: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: >- + PageResponse is to be embedded in gRPC response messages where + the + + corresponding request message has used PageRequest. + + message SomeResponse { + repeated Bar results = 1; + PageResponse page = 2; + } + default: + description: An unexpected error response. + schema: + type: object + properties: + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + '@type': + type: string + additionalProperties: {} + parameters: + - name: pagination.key + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. + in: query + required: false + type: string + format: byte + - name: pagination.offset + description: >- + offset is a numeric offset that can be used when key is unavailable. + + It is less efficient than using key. Only one of offset or key + should + + be set. 
+ in: query + required: false + type: string + format: uint64 + - name: pagination.limit + description: >- + limit is the total number of results to be returned in the result + page. + + If left empty it will default to a value to be set by each app. + in: query + required: false + type: string + format: uint64 + - name: pagination.count_total + description: >- + count_total is set to true to indicate that the result set should + include + + a count of the total number of items available for pagination in + UIs. + + count_total is only respected when offset is used. It is ignored + when key + + is set. + in: query + required: false + type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. + + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean + tags: + - Query + '/pendulum-labs/market/market/pool/{pair}': + get: + summary: Queries a Pool by index. + operationId: PendulumlabsMarketMarketPool + responses: + '200': + description: A successful response. + schema: + type: object + properties: + pool: + type: object + properties: + pair: + type: string + denom1: + type: string + denom2: + type: string + volume1: + type: object + properties: + denom: + type: string + amount: + type: string + volume2: + type: object + properties: + denom: + type: string + amount: + type: string + leaders: + type: array + items: + type: object + properties: + address: + type: string + drops: + type: string + drops: + type: string + history: + type: string + format: uint64 + default: + description: An unexpected error response. 
+ schema: + type: object + properties: + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + '@type': + type: string + additionalProperties: {} + parameters: + - name: pair + in: path + required: true + type: string + tags: + - Query + '/pendulum-labs/market/market/quote/{denomBid}/{denomAsk}/{denomAmount}/{amount}': + get: + summary: Queries pool trade history. + operationId: PendulumlabsMarketMarketQuote + responses: + '200': + description: A successful response. + schema: + type: object + properties: + denom: + type: string + amount: + type: string + default: + description: An unexpected error response. + schema: + type: object + properties: + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + '@type': + type: string + additionalProperties: {} + parameters: + - name: denomBid + in: path + required: true + type: string + - name: denomAsk + in: path + required: true + type: string + - name: denomAmount + in: path + required: true + type: string + - name: amount + in: path + required: true + type: string + tags: + - Query + /pendulum-labs/market/market/volume: + get: + summary: Queries all Volumes. + operationId: PendulumlabsMarketMarketVolumeAll + responses: + '200': + description: A successful response. 
+ schema: + type: object + properties: + volumes: + type: array + items: + type: object + properties: + denom: + type: string + amount: + type: string + pagination: + type: object + properties: + next_key: + type: string + format: byte + title: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: >- + PageResponse is to be embedded in gRPC response messages where + the + + corresponding request message has used PageRequest. + + message SomeResponse { + repeated Bar results = 1; + PageResponse page = 2; + } + default: + description: An unexpected error response. + schema: + type: object + properties: + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + '@type': + type: string + additionalProperties: {} + parameters: + - name: pagination.key + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. + in: query + required: false + type: string + format: byte + - name: pagination.offset + description: >- + offset is a numeric offset that can be used when key is unavailable. + + It is less efficient than using key. Only one of offset or key + should + + be set. + in: query + required: false + type: string + format: uint64 + - name: pagination.limit + description: >- + limit is the total number of results to be returned in the result + page. + + If left empty it will default to a value to be set by each app. 
+ in: query + required: false + type: string + format: uint64 + - name: pagination.count_total + description: >- + count_total is set to true to indicate that the result set should + include + + a count of the total number of items available for pagination in + UIs. + + count_total is only respected when offset is used. It is ignored + when key + + is set. + in: query + required: false + type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. + + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean + tags: + - Query + '/pendulum-labs/market/market/volume/{denom}': + get: + summary: Queries a Volume by index. + operationId: PendulumlabsMarketMarketVolume + responses: + '200': + description: A successful response. + schema: + type: object + properties: + amount: + type: string + default: + description: An unexpected error response. + schema: + type: object + properties: + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + '@type': + type: string + additionalProperties: {} + parameters: + - name: denom + in: path + required: true + type: string + tags: + - Query + /tendermint/spn/monitoringp/connection_channel_id: + get: + summary: Queries a ConnectionChannelID by index. + operationId: TendermintSpnMonitoringpConnectionChannelID + responses: + '200': + description: A successful response. + schema: + type: object + properties: + ConnectionChannelID: + type: object + properties: + channelID: + type: string + default: + description: An unexpected error response. 
+ schema: + type: object + properties: + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + '@type': + type: string + additionalProperties: {} + tags: + - Query + /tendermint/spn/monitoringp/consumer_client_id: + get: + summary: Queries a ConsumerClientID by index. + operationId: TendermintSpnMonitoringpConsumerClientID + responses: + '200': + description: A successful response. + schema: + type: object + properties: + ConsumerClientID: + type: object + properties: + clientID: + type: string + default: + description: An unexpected error response. + schema: + type: object + properties: + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + '@type': + type: string + additionalProperties: {} + tags: + - Query + /tendermint/spn/monitoringp/monitoring_info: + get: + summary: Queries a MonitoringInfo by index. + operationId: TendermintSpnMonitoringpMonitoringInfo + responses: + '200': + description: A successful response. + schema: + type: object + properties: + MonitoringInfo: + type: object + properties: + transmitted: + type: boolean + signatureCounts: + type: object + properties: + blockCount: + type: string + format: uint64 + counts: + type: array + items: + type: object + properties: + opAddress: + type: string + RelativeSignatures: + type: string + title: >- + SignatureCount contains information of signature + reporting for one specific validator with consensus + address + + RelativeSignatures is the sum of all signatures + relative to the validator set size + title: >- + SignatureCounts contains information about signature + reporting for a number of blocks + default: + description: An unexpected error response. 
+ schema: + type: object + properties: + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + '@type': + type: string + additionalProperties: {} + tags: + - Query + /tendermint/spn/monitoringp/params: + get: + summary: Params queries the parameters of the module. + operationId: TendermintSpnMonitoringpParams + responses: + '200': + description: A successful response. + schema: + type: object + properties: + params: + type: object + properties: + lastBlockHeight: + type: string + format: int64 + consumerChainID: + type: string + consumerConsensusState: + type: object + properties: + nextValidatorsHash: + type: string + timestamp: + type: string + root: + type: object + properties: + hash: + type: string + title: MerkleRoot represents a Merkle Root in ConsensusState + title: >- + ConsensusState represents a Consensus State + + it is compatible with the dumped state from `appd q ibc + client self-consensus-state` command + consumerUnbondingPeriod: + type: string + format: int64 + consumerRevisionHeight: + type: string + format: uint64 + description: Params defines the parameters for the module. + description: >- + QueryParamsResponse is response type for the Query/Params RPC + method. + default: + description: An unexpected error response. + schema: + type: object + properties: + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + '@type': + type: string + additionalProperties: {} + tags: + - Query +definitions: + cosmos.auth.v1beta1.Params: + type: object + properties: + max_memo_characters: + type: string + format: uint64 + tx_sig_limit: + type: string + format: uint64 + tx_size_cost_per_byte: + type: string + format: uint64 + sig_verify_cost_ed25519: + type: string + format: uint64 + sig_verify_cost_secp256k1: + type: string + format: uint64 + description: Params defines the parameters for the auth module. 
+ cosmos.auth.v1beta1.QueryAccountResponse: + type: object + properties: + account: + description: account defines the account of the corresponding address. + type: object + properties: + '@type': + type: string + description: >- + A URL/resource name that uniquely identifies the type of the + serialized + + protocol buffer message. This string must contain at least + + one "/" character. The last segment of the URL's path must + represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in a + canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary all types + that they + + expect it to use in the context of Any. However, for URLs which + use the + + scheme `http`, `https`, or no scheme, one can optionally set up a + type + + server that maps type URLs to message definitions as follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in the + official + + protobuf release, and it is not used for type URLs beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) might be + + used with implementation specific semantics. 
additionalProperties: {} description: >- QueryAccountResponse is the response type for the Query/Account RPC @@ -29704,6 +32347,169 @@ definitions: Since: cosmos-sdk 0.43 + cosmos.auth.v1beta1.QueryModuleAccountByNameResponse: + type: object + properties: + account: + type: object + properties: + '@type': + type: string + description: >- + A URL/resource name that uniquely identifies the type of the + serialized + + protocol buffer message. This string must contain at least + + one "/" character. The last segment of the URL's path must + represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in a + canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary all types + that they + + expect it to use in the context of Any. However, for URLs which + use the + + scheme `http`, `https`, or no scheme, one can optionally set up a + type + + server that maps type URLs to message definitions as follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in the + official + + protobuf release, and it is not used for type URLs beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) might be + + used with implementation specific semantics. + additionalProperties: {} + description: >- + `Any` contains an arbitrary serialized protocol buffer message along + with a + + URL that describes the type of the serialized message. 
+ + + Protobuf library provides support to pack/unpack Any values in the + form + + of utility functions or additional generated methods of the Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default use + + 'type.googleapis.com/full.type.name' as the type URL and the unpack + + methods only use the fully qualified type name after the last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom JSON + + representation, that representation will be embedded adding a field + + `value` which holds the custom JSON in addition to the `@type` + + field. 
Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + description: >- + QueryModuleAccountByNameResponse is the response type for the + Query/ModuleAccountByName RPC method. cosmos.auth.v1beta1.QueryParamsResponse: type: object properties: @@ -31744,7 +34550,7 @@ definitions: type: string format: byte title: original proposer of the block - description: Header defines the structure of a Tendermint block header. + description: Header defines the structure of a block header. data: type: object properties: @@ -32002,8 +34808,8 @@ definitions: format: byte title: original proposer of the block description: >- - Header defines the structure of a Tendermint - block header. + Header defines the structure of a block + header. commit: type: object properties: @@ -32083,7 +34889,7 @@ definitions: format: byte title: >- PublicKey defines the keys available for - use with Tendermint Validators + use with Validators voting_power: type: string format: int64 @@ -32107,7 +34913,7 @@ definitions: format: byte title: >- PublicKey defines the keys available for - use with Tendermint Validators + use with Validators voting_power: type: string format: int64 @@ -32139,7 +34945,7 @@ definitions: format: byte title: >- PublicKey defines the keys available for use - with Tendermint Validators + with Validators voting_power: type: string format: int64 @@ -32319,7 +35125,7 @@ definitions: type: string format: byte title: original proposer of the block - description: Header defines the structure of a Tendermint block header. + description: Header defines the structure of a block header. data: type: object properties: @@ -32577,8 +35383,8 @@ definitions: format: byte title: original proposer of the block description: >- - Header defines the structure of a Tendermint - block header. + Header defines the structure of a block + header. 
commit: type: object properties: @@ -32658,7 +35464,7 @@ definitions: format: byte title: >- PublicKey defines the keys available for - use with Tendermint Validators + use with Validators voting_power: type: string format: int64 @@ -32682,7 +35488,7 @@ definitions: format: byte title: >- PublicKey defines the keys available for - use with Tendermint Validators + use with Validators voting_power: type: string format: int64 @@ -32714,7 +35520,7 @@ definitions: format: byte title: >- PublicKey defines the keys available for use - with Tendermint Validators + with Validators voting_power: type: string format: int64 @@ -33510,7 +36316,7 @@ definitions: secp256k1: type: string format: byte - title: PublicKey defines the keys available for use with Tendermint Validators + title: PublicKey defines the keys available for use with Validators tendermint.p2p.DefaultNodeInfo: type: object properties: @@ -33653,7 +36459,7 @@ definitions: type: string format: byte title: original proposer of the block - description: Header defines the structure of a Tendermint block header. + description: Header defines the structure of a block header. data: type: object properties: @@ -33907,9 +36713,7 @@ definitions: type: string format: byte title: original proposer of the block - description: >- - Header defines the structure of a Tendermint - block header. + description: Header defines the structure of a block header. 
commit: type: object properties: @@ -33989,7 +36793,7 @@ definitions: format: byte title: >- PublicKey defines the keys available for - use with Tendermint Validators + use with Validators voting_power: type: string format: int64 @@ -34013,7 +36817,7 @@ definitions: format: byte title: >- PublicKey defines the keys available for use - with Tendermint Validators + with Validators voting_power: type: string format: int64 @@ -34045,7 +36849,7 @@ definitions: format: byte title: >- PublicKey defines the keys available for use with - Tendermint Validators + Validators voting_power: type: string format: int64 @@ -34588,7 +37392,7 @@ definitions: type: string format: byte title: original proposer of the block - description: Header defines the structure of a Tendermint block header. + description: Header defines the structure of a block header. commit: type: object properties: @@ -34668,7 +37472,7 @@ definitions: format: byte title: >- PublicKey defines the keys available for use with - Tendermint Validators + Validators voting_power: type: string format: int64 @@ -34692,7 +37496,7 @@ definitions: format: byte title: >- PublicKey defines the keys available for use with - Tendermint Validators + Validators voting_power: type: string format: int64 @@ -34722,9 +37526,7 @@ definitions: secp256k1: type: string format: byte - title: >- - PublicKey defines the keys available for use with Tendermint - Validators + title: PublicKey defines the keys available for use with Validators voting_power: type: string format: int64 @@ -34975,9 +37777,7 @@ definitions: type: string format: byte title: original proposer of the block - description: >- - Header defines the structure of a Tendermint block - header. + description: Header defines the structure of a block header. 
commit: type: object properties: @@ -35057,7 +37857,7 @@ definitions: format: byte title: >- PublicKey defines the keys available for use - with Tendermint Validators + with Validators voting_power: type: string format: int64 @@ -35081,7 +37881,7 @@ definitions: format: byte title: >- PublicKey defines the keys available for use - with Tendermint Validators + with Validators voting_power: type: string format: int64 @@ -35113,7 +37913,7 @@ definitions: format: byte title: >- PublicKey defines the keys available for use with - Tendermint Validators + Validators voting_power: type: string format: int64 @@ -35214,7 +38014,7 @@ definitions: type: string format: byte title: original proposer of the block - description: Header defines the structure of a Tendermint block header. + description: Header defines the structure of a block header. tendermint.types.LightBlock: type: object properties: @@ -35306,7 +38106,7 @@ definitions: type: string format: byte title: original proposer of the block - description: Header defines the structure of a Tendermint block header. + description: Header defines the structure of a block header. commit: type: object properties: @@ -35380,9 +38180,7 @@ definitions: secp256k1: type: string format: byte - title: >- - PublicKey defines the keys available for use with Tendermint - Validators + title: PublicKey defines the keys available for use with Validators voting_power: type: string format: int64 @@ -35404,9 +38202,7 @@ definitions: secp256k1: type: string format: byte - title: >- - PublicKey defines the keys available for use with Tendermint - Validators + title: PublicKey defines the keys available for use with Validators voting_power: type: string format: int64 @@ -35512,7 +38308,7 @@ definitions: type: string format: byte title: original proposer of the block - description: Header defines the structure of a Tendermint block header. + description: Header defines the structure of a block header. 
commit: type: object properties: @@ -35590,7 +38386,7 @@ definitions: format: byte title: >- PublicKey defines the keys available for use with - Tendermint Validators + Validators voting_power: type: string format: int64 @@ -35614,7 +38410,7 @@ definitions: format: byte title: >- PublicKey defines the keys available for use with - Tendermint Validators + Validators voting_power: type: string format: int64 @@ -35644,9 +38440,7 @@ definitions: secp256k1: type: string format: byte - title: >- - PublicKey defines the keys available for use with Tendermint - Validators + title: PublicKey defines the keys available for use with Validators voting_power: type: string format: int64 @@ -35760,7 +38554,7 @@ definitions: type: string format: byte title: original proposer of the block - description: Header defines the structure of a Tendermint block header. + description: Header defines the structure of a block header. commit: type: object properties: @@ -35842,9 +38636,7 @@ definitions: secp256k1: type: string format: byte - title: >- - PublicKey defines the keys available for use with Tendermint - Validators + title: PublicKey defines the keys available for use with Validators voting_power: type: string format: int64 @@ -35871,9 +38663,7 @@ definitions: secp256k1: type: string format: byte - title: >- - PublicKey defines the keys available for use with Tendermint - Validators + title: PublicKey defines the keys available for use with Validators voting_power: type: string format: int64 @@ -35895,9 +38685,7 @@ definitions: secp256k1: type: string format: byte - title: >- - PublicKey defines the keys available for use with Tendermint - Validators + title: PublicKey defines the keys available for use with Validators voting_power: type: string format: int64 @@ -38787,7 +41575,7 @@ definitions: type: string format: byte title: original proposer of the block - description: Header defines the structure of a Tendermint block header. 
+ description: Header defines the structure of a block header. valset: type: array items: @@ -39714,7 +42502,7 @@ definitions: type: string format: byte title: original proposer of the block - description: Header defines the structure of a Tendermint block header. + description: Header defines the structure of a block header. valset: type: array items: @@ -42007,7 +44795,7 @@ definitions: type: string format: byte title: original proposer of the block - description: Header defines the structure of a Tendermint block header. + description: Header defines the structure of a block header. data: type: object properties: @@ -42265,8 +45053,8 @@ definitions: format: byte title: original proposer of the block description: >- - Header defines the structure of a Tendermint - block header. + Header defines the structure of a block + header. commit: type: object properties: @@ -42346,7 +45134,7 @@ definitions: format: byte title: >- PublicKey defines the keys available for - use with Tendermint Validators + use with Validators voting_power: type: string format: int64 @@ -42370,7 +45158,7 @@ definitions: format: byte title: >- PublicKey defines the keys available for - use with Tendermint Validators + use with Validators voting_power: type: string format: int64 @@ -42402,7 +45190,7 @@ definitions: format: byte title: >- PublicKey defines the keys available for use - with Tendermint Validators + with Validators voting_power: type: string format: int64 @@ -46767,204 +49555,632 @@ definitions: A URL/resource name that uniquely identifies the type of the serialized - protocol buffer message. This string must contain at least + protocol buffer message. This string must contain at least + + one "/" character. The last segment of the URL's path must + represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in a + canonical form + + (e.g., leading "." is not accepted). 
+ + + In practice, teams usually precompile into the binary all types + that they + + expect it to use in the context of Any. However, for URLs which + use the + + scheme `http`, `https`, or no scheme, one can optionally set up a + type + + server that maps type URLs to message definitions as follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in the + official + + protobuf release, and it is not used for type URLs beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) might be + + used with implementation specific semantics. + additionalProperties: {} + description: >- + `Any` contains an arbitrary serialized protocol buffer message along + with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in the + form + + of utility functions or additional generated methods of the Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... 
+ foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default use + + 'type.googleapis.com/full.type.name' as the type URL and the unpack + + methods only use the fully qualified type name after the last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom JSON + + representation, that representation will be embedded adding a field + + `value` which holds the custom JSON in addition to the `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + description: >- + ConsensusStateWithHeight defines a consensus state with an additional + height + + field. + ibc.core.client.v1.MsgCreateClientResponse: + type: object + description: MsgCreateClientResponse defines the Msg/CreateClient response type. + ibc.core.client.v1.MsgSubmitMisbehaviourResponse: + type: object + description: |- + MsgSubmitMisbehaviourResponse defines the Msg/SubmitMisbehaviour response + type. + ibc.core.client.v1.MsgUpdateClientResponse: + type: object + description: MsgUpdateClientResponse defines the Msg/UpdateClient response type. + ibc.core.client.v1.MsgUpgradeClientResponse: + type: object + description: MsgUpgradeClientResponse defines the Msg/UpgradeClient response type. 
+ ibc.core.client.v1.Params: + type: object + properties: + allowed_clients: + type: array + items: + type: string + description: allowed_clients defines the list of allowed client state types. + description: Params defines the set of IBC light client parameters. + ibc.core.client.v1.QueryClientParamsResponse: + type: object + properties: + params: + description: params defines the parameters of the module. + type: object + properties: + allowed_clients: + type: array + items: + type: string + description: allowed_clients defines the list of allowed client state types. + description: >- + QueryClientParamsResponse is the response type for the Query/ClientParams + RPC + + method. + ibc.core.client.v1.QueryClientStateResponse: + type: object + properties: + client_state: + title: client state associated with the request identifier + type: object + properties: + '@type': + type: string + description: >- + A URL/resource name that uniquely identifies the type of the + serialized + + protocol buffer message. This string must contain at least + + one "/" character. The last segment of the URL's path must + represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in a + canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary all types + that they + + expect it to use in the context of Any. However, for URLs which + use the + + scheme `http`, `https`, or no scheme, one can optionally set up a + type + + server that maps type URLs to message definitions as follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. 
(Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in the + official + + protobuf release, and it is not used for type URLs beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) might be + + used with implementation specific semantics. + additionalProperties: {} + description: >- + `Any` contains an arbitrary serialized protocol buffer message along + with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in the + form + + of utility functions or additional generated methods of the Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default use + + 'type.googleapis.com/full.type.name' as the type URL and the unpack + + methods only use the fully qualified type name after the last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. 
Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom JSON + + representation, that representation will be embedded adding a field + + `value` which holds the custom JSON in addition to the `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + proof: + type: string + format: byte + title: merkle proof of existence + proof_height: + title: height at which the proof was retrieved + type: object + properties: + revision_number: + type: string + format: uint64 + title: the revision that the client is currently on + revision_height: + type: string + format: uint64 + title: the height within the given revision + description: >- + Normally the RevisionHeight is incremented at each height while + keeping + + RevisionNumber the same. However some consensus algorithms may choose + to + + reset the height in certain conditions e.g. hard forks, state-machine + + breaking changes In these cases, the RevisionNumber is incremented so + that + + height continues to be monitonically increasing even as the + RevisionHeight + + gets reset + description: >- + QueryClientStateResponse is the response type for the Query/ClientState + RPC + + method. Besides the client state, it includes a proof and the height from + + which the proof was retrieved. + ibc.core.client.v1.QueryClientStatesResponse: + type: object + properties: + client_states: + type: array + items: + type: object + properties: + client_id: + type: string + title: client identifier + client_state: + title: client state + type: object + properties: + '@type': + type: string + description: >- + A URL/resource name that uniquely identifies the type of the + serialized + + protocol buffer message. 
This string must contain at least + + one "/" character. The last segment of the URL's path must + represent + + the fully qualified name of the type (as in - one "/" character. The last segment of the URL's path must - represent + `path/google.protobuf.Duration`). The name should be in a + canonical form - the fully qualified name of the type (as in + (e.g., leading "." is not accepted). - `path/google.protobuf.Duration`). The name should be in a - canonical form - (e.g., leading "." is not accepted). + In practice, teams usually precompile into the binary all + types that they + expect it to use in the context of Any. However, for URLs + which use the - In practice, teams usually precompile into the binary all types - that they + scheme `http`, `https`, or no scheme, one can optionally set + up a type - expect it to use in the context of Any. However, for URLs which - use the + server that maps type URLs to message definitions as + follows: - scheme `http`, `https`, or no scheme, one can optionally set up a - type - server that maps type URLs to message definitions as follows: + * If no scheme is provided, `https` is assumed. + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based on + the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) - * If no scheme is provided, `https` is assumed. + Note: this functionality is not currently available in the + official - * An HTTP GET on the URL must yield a [google.protobuf.Type][] - value in binary format, or produce an error. - * Applications are allowed to cache lookup results based on the - URL, or have them precompiled into a binary to avoid any - lookup. Therefore, binary compatibility needs to be preserved - on changes to types. 
(Use versioned type names to manage - breaking changes.) + protobuf release, and it is not used for type URLs beginning + with - Note: this functionality is not currently available in the - official + type.googleapis.com. - protobuf release, and it is not used for type URLs beginning with - type.googleapis.com. + Schemes other than `http`, `https` (or the empty scheme) + might be + used with implementation specific semantics. + additionalProperties: {} + description: >- + `Any` contains an arbitrary serialized protocol buffer message + along with a - Schemes other than `http`, `https` (or the empty scheme) might be + URL that describes the type of the serialized message. - used with implementation specific semantics. - additionalProperties: {} - description: >- - `Any` contains an arbitrary serialized protocol buffer message along - with a - URL that describes the type of the serialized message. + Protobuf library provides support to pack/unpack Any values in + the form + of utility functions or additional generated methods of the Any + type. - Protobuf library provides support to pack/unpack Any values in the - form - of utility functions or additional generated methods of the Any type. + Example 1: Pack and unpack a message in C++. + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } - Example 1: Pack and unpack a message in C++. + Example 2: Pack and unpack a message in Java. - Foo foo = ...; - Any any; - any.PackFrom(foo); - ... - if (any.UnpackTo(&foo)) { - ... - } + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } - Example 2: Pack and unpack a message in Java. + Example 3: Pack and unpack a message in Python. - Foo foo = ...; - Any any = Any.pack(foo); - ... - if (any.is(Foo.class)) { - foo = any.unpack(Foo.class); - } + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... 
- Example 3: Pack and unpack a message in Python. + Example 4: Pack and unpack a message in Go - foo = Foo(...) - any = Any() - any.Pack(foo) - ... - if any.Is(Foo.DESCRIPTOR): - any.Unpack(foo) - ... + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } - Example 4: Pack and unpack a message in Go + The pack methods provided by protobuf library will by default + use - foo := &pb.Foo{...} - any, err := anypb.New(foo) - if err != nil { - ... - } - ... - foo := &pb.Foo{} - if err := any.UnmarshalTo(foo); err != nil { - ... - } + 'type.googleapis.com/full.type.name' as the type URL and the + unpack - The pack methods provided by protobuf library will by default use + methods only use the fully qualified type name after the last + '/' - 'type.googleapis.com/full.type.name' as the type URL and the unpack + in the type URL, for example "foo.bar.com/x/y.z" will yield type - methods only use the fully qualified type name after the last '/' + name "y.z". - in the type URL, for example "foo.bar.com/x/y.z" will yield type - name "y.z". + JSON + ==== - JSON + The JSON representation of an `Any` value uses the regular - ==== + representation of the deserialized, embedded message, with an - The JSON representation of an `Any` value uses the regular + additional field `@type` which contains the type URL. Example: - representation of the deserialized, embedded message, with an + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } - additional field `@type` which contains the type URL. 
Example: + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } - package google.profile; - message Person { - string first_name = 1; - string last_name = 2; - } + If the embedded message type is well-known and has a custom JSON - { - "@type": "type.googleapis.com/google.profile.Person", - "firstName": , - "lastName": - } + representation, that representation will be embedded adding a + field - If the embedded message type is well-known and has a custom JSON + `value` which holds the custom JSON in addition to the `@type` - representation, that representation will be embedded adding a field + field. Example (for message [google.protobuf.Duration][]): - `value` which holds the custom JSON in addition to the `@type` + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + description: >- + IdentifiedClientState defines a client state with an additional + client - field. Example (for message [google.protobuf.Duration][]): + identifier field. + description: list of stored ClientStates of the chain. + pagination: + title: pagination response + type: object + properties: + next_key: + type: string + format: byte + title: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total - { - "@type": "type.googleapis.com/google.protobuf.Duration", - "value": "1.212s" - } + was set, its value is undefined otherwise + description: |- + PageResponse is to be embedded in gRPC response messages where the + corresponding request message has used PageRequest. + + message SomeResponse { + repeated Bar results = 1; + PageResponse page = 2; + } description: >- - ConsensusStateWithHeight defines a consensus state with an additional - height + QueryClientStatesResponse is the response type for the Query/ClientStates + RPC - field. 
- ibc.core.client.v1.MsgCreateClientResponse: - type: object - description: MsgCreateClientResponse defines the Msg/CreateClient response type. - ibc.core.client.v1.MsgSubmitMisbehaviourResponse: - type: object - description: |- - MsgSubmitMisbehaviourResponse defines the Msg/SubmitMisbehaviour response - type. - ibc.core.client.v1.MsgUpdateClientResponse: - type: object - description: MsgUpdateClientResponse defines the Msg/UpdateClient response type. - ibc.core.client.v1.MsgUpgradeClientResponse: - type: object - description: MsgUpgradeClientResponse defines the Msg/UpgradeClient response type. - ibc.core.client.v1.Params: - type: object - properties: - allowed_clients: - type: array - items: - type: string - description: allowed_clients defines the list of allowed client state types. - description: Params defines the set of IBC light client parameters. - ibc.core.client.v1.QueryClientParamsResponse: + method. + ibc.core.client.v1.QueryClientStatusResponse: type: object properties: - params: - description: params defines the parameters of the module. - type: object - properties: - allowed_clients: - type: array - items: - type: string - description: allowed_clients defines the list of allowed client state types. + status: + type: string description: >- - QueryClientParamsResponse is the response type for the Query/ClientParams + QueryClientStatusResponse is the response type for the Query/ClientStatus RPC - method. - ibc.core.client.v1.QueryClientStateResponse: + method. It returns the current status of the IBC client. 
+ ibc.core.client.v1.QueryConsensusStateResponse: type: object properties: - client_state: - title: client state associated with the request identifier + consensus_state: + title: >- + consensus state associated with the client identifier at the given + height type: object properties: '@type': @@ -47153,26 +50369,50 @@ definitions: RevisionHeight gets reset - description: >- - QueryClientStateResponse is the response type for the Query/ClientState - RPC - - method. Besides the client state, it includes a proof and the height from + title: >- + QueryConsensusStateResponse is the response type for the + Query/ConsensusState - which the proof was retrieved. - ibc.core.client.v1.QueryClientStatesResponse: + RPC method + ibc.core.client.v1.QueryConsensusStatesResponse: type: object properties: - client_states: + consensus_states: type: array items: type: object properties: - client_id: - type: string - title: client identifier - client_state: - title: client state + height: + title: consensus state height + type: object + properties: + revision_number: + type: string + format: uint64 + title: the revision that the client is currently on + revision_height: + type: string + format: uint64 + title: the height within the given revision + description: >- + Normally the RevisionHeight is incremented at each height while + keeping + + RevisionNumber the same. However some consensus algorithms may + choose to + + reset the height in certain conditions e.g. hard forks, + state-machine + + breaking changes In these cases, the RevisionNumber is + incremented so that + + height continues to be monitonically increasing even as the + RevisionHeight + + gets reset + consensus_state: + title: consensus state type: object properties: '@type': @@ -47278,121 +50518,271 @@ definitions: Example 4: Pack and unpack a message in Go - foo := &pb.Foo{...} - any, err := anypb.New(foo) - if err != nil { - ... - } - ... - foo := &pb.Foo{} - if err := any.UnmarshalTo(foo); err != nil { - ... 
- } + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default + use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the last + '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom JSON + + representation, that representation will be embedded adding a + field + + `value` which holds the custom JSON in addition to the `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + description: >- + ConsensusStateWithHeight defines a consensus state with an + additional height + + field. + title: consensus states associated with the identifier + pagination: + title: pagination response + type: object + properties: + next_key: + type: string + format: byte + title: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: |- + PageResponse is to be embedded in gRPC response messages where the + corresponding request message has used PageRequest. 
+ + message SomeResponse { + repeated Bar results = 1; + PageResponse page = 2; + } + title: |- + QueryConsensusStatesResponse is the response type for the + Query/ConsensusStates RPC method + ibc.core.client.v1.QueryUpgradedClientStateResponse: + type: object + properties: + upgraded_client_state: + title: client state associated with the request identifier + type: object + properties: + '@type': + type: string + description: >- + A URL/resource name that uniquely identifies the type of the + serialized + + protocol buffer message. This string must contain at least + + one "/" character. The last segment of the URL's path must + represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in a + canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary all types + that they + + expect it to use in the context of Any. However, for URLs which + use the + + scheme `http`, `https`, or no scheme, one can optionally set up a + type + + server that maps type URLs to message definitions as follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in the + official + + protobuf release, and it is not used for type URLs beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) might be + + used with implementation specific semantics. 
+ additionalProperties: {} + description: >- + `Any` contains an arbitrary serialized protocol buffer message along + with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in the + form + + of utility functions or additional generated methods of the Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. - The pack methods provided by protobuf library will by default - use + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } - 'type.googleapis.com/full.type.name' as the type URL and the - unpack + Example 3: Pack and unpack a message in Python. - methods only use the fully qualified type name after the last - '/' + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... - in the type URL, for example "foo.bar.com/x/y.z" will yield type + Example 4: Pack and unpack a message in Go - name "y.z". + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + The pack methods provided by protobuf library will by default use + 'type.googleapis.com/full.type.name' as the type URL and the unpack - JSON + methods only use the fully qualified type name after the last '/' - ==== + in the type URL, for example "foo.bar.com/x/y.z" will yield type - The JSON representation of an `Any` value uses the regular + name "y.z". - representation of the deserialized, embedded message, with an - additional field `@type` which contains the type URL. 
Example: - package google.profile; - message Person { - string first_name = 1; - string last_name = 2; - } + JSON - { - "@type": "type.googleapis.com/google.profile.Person", - "firstName": , - "lastName": - } + ==== - If the embedded message type is well-known and has a custom JSON + The JSON representation of an `Any` value uses the regular - representation, that representation will be embedded adding a - field + representation of the deserialized, embedded message, with an - `value` which holds the custom JSON in addition to the `@type` + additional field `@type` which contains the type URL. Example: - field. Example (for message [google.protobuf.Duration][]): + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } - { - "@type": "type.googleapis.com/google.protobuf.Duration", - "value": "1.212s" - } - description: >- - IdentifiedClientState defines a client state with an additional - client + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } - identifier field. - description: list of stored ClientStates of the chain. - pagination: - title: pagination response - type: object - properties: - next_key: - type: string - format: byte - title: |- - next_key is the key to be passed to PageRequest.key to - query the next page most efficiently - total: - type: string - format: uint64 - title: >- - total is total number of results available if - PageRequest.count_total + If the embedded message type is well-known and has a custom JSON - was set, its value is undefined otherwise - description: |- - PageResponse is to be embedded in gRPC response messages where the - corresponding request message has used PageRequest. 
+ representation, that representation will be embedded adding a field - message SomeResponse { - repeated Bar results = 1; - PageResponse page = 2; - } - description: >- - QueryClientStatesResponse is the response type for the Query/ClientStates - RPC + `value` which holds the custom JSON in addition to the `@type` - method. - ibc.core.client.v1.QueryClientStatusResponse: - type: object - properties: - status: - type: string - description: >- - QueryClientStatusResponse is the response type for the Query/ClientStatus - RPC + field. Example (for message [google.protobuf.Duration][]): - method. It returns the current status of the IBC client. - ibc.core.client.v1.QueryConsensusStateResponse: + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + description: |- + QueryUpgradedClientStateResponse is the response type for the + Query/UpgradedClientState RPC method. + ibc.core.client.v1.QueryUpgradedConsensusStateResponse: type: object properties: - consensus_state: - title: >- - consensus state associated with the client identifier at the given - height + upgraded_consensus_state: + title: Consensus state associated with the request identifier type: object properties: '@type': @@ -47549,12 +50939,252 @@ definitions: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } + description: |- + QueryUpgradedConsensusStateResponse is the response type for the + Query/UpgradedConsensusState RPC method. + ibc.core.commitment.v1.MerklePrefix: + type: object + properties: + key_prefix: + type: string + format: byte + title: |- + MerklePrefix is merkle path prefixed to the key. + The constructed key from the Path and the key will be append(Path.KeyPath, + append(Path.KeyPrefix, key...)) + ibc.core.connection.v1.ConnectionEnd: + type: object + properties: + client_id: + type: string + description: client associated with this connection. 
+ versions: + type: array + items: + type: object + properties: + identifier: + type: string + title: unique version identifier + features: + type: array + items: + type: string + title: list of features compatible with the specified identifier + description: >- + Version defines the versioning scheme used to negotiate the IBC + version in + + the connection handshake. + description: >- + IBC version which can be utilised to determine encodings or protocols + for + + channels or packets utilising this connection. + state: + description: current state of the connection end. + type: string + enum: + - STATE_UNINITIALIZED_UNSPECIFIED + - STATE_INIT + - STATE_TRYOPEN + - STATE_OPEN + default: STATE_UNINITIALIZED_UNSPECIFIED + counterparty: + description: counterparty chain associated with this connection. + type: object + properties: + client_id: + type: string + description: >- + identifies the client on the counterparty chain associated with a + given + + connection. + connection_id: + type: string + description: >- + identifies the connection end on the counterparty chain associated + with a + + given connection. + prefix: + description: commitment merkle prefix of the counterparty chain. + type: object + properties: + key_prefix: + type: string + format: byte + title: >- + MerklePrefix is merkle path prefixed to the key. + + The constructed key from the Path and the key will be + append(Path.KeyPath, + + append(Path.KeyPrefix, key...)) + delay_period: + type: string + format: uint64 + description: >- + delay period that must pass before a consensus state can be used for + + packet-verification NOTE: delay period logic is only implemented by + some + + clients. + description: |- + ConnectionEnd defines a stateful object on a chain connected to another + separate one. + NOTE: there must only be 2 defined ConnectionEnds to establish + a connection between two chains.
+ ibc.core.connection.v1.Counterparty: + type: object + properties: + client_id: + type: string + description: >- + identifies the client on the counterparty chain associated with a + given + + connection. + connection_id: + type: string + description: >- + identifies the connection end on the counterparty chain associated + with a + + given connection. + prefix: + description: commitment merkle prefix of the counterparty chain. + type: object + properties: + key_prefix: + type: string + format: byte + title: >- + MerklePrefix is merkle path prefixed to the key. + + The constructed key from the Path and the key will be + append(Path.KeyPath, + + append(Path.KeyPrefix, key...)) + description: >- + Counterparty defines the counterparty chain associated with a connection + end. + ibc.core.connection.v1.IdentifiedConnection: + type: object + properties: + id: + type: string + description: connection identifier. + client_id: + type: string + description: client associated with this connection. + versions: + type: array + items: + type: object + properties: + identifier: + type: string + title: unique version identifier + features: + type: array + items: + type: string + title: list of features compatible with the specified identifier + description: >- + Version defines the versioning scheme used to negotiate the IBC + version in + + the connection handshake. + title: >- + IBC version which can be utilised to determine encodings or protocols + for + + channels or packets utilising this connection + state: + description: current state of the connection end. + type: string + enum: + - STATE_UNINITIALIZED_UNSPECIFIED + - STATE_INIT + - STATE_TRYOPEN + - STATE_OPEN + default: STATE_UNINITIALIZED_UNSPECIFIED + counterparty: + description: counterparty chain associated with this connection. + type: object + properties: + client_id: + type: string + description: >- + identifies the client on the counterparty chain associated with a + given + + connection.
+ connection_id: + type: string + description: >- + identifies the connection end on the counterparty chain associated + with a + + given connection. + prefix: + description: commitment merkle prefix of the counterparty chain. + type: object + properties: + key_prefix: + type: string + format: byte + title: >- + MerklePrefix is merkle path prefixed to the key. + + The constructed key from the Path and the key will be + append(Path.KeyPath, + + append(Path.KeyPrefix, key...)) + delay_period: + type: string + format: uint64 + description: delay period associated with this connection. + description: |- + IdentifiedConnection defines a connection with additional connection + identifier field. + ibc.core.connection.v1.MsgConnectionOpenAckResponse: + type: object + description: >- + MsgConnectionOpenAckResponse defines the Msg/ConnectionOpenAck response + type. + ibc.core.connection.v1.MsgConnectionOpenConfirmResponse: + type: object + description: |- + MsgConnectionOpenConfirmResponse defines the Msg/ConnectionOpenConfirm + response type. + ibc.core.connection.v1.MsgConnectionOpenInitResponse: + type: object + description: |- + MsgConnectionOpenInitResponse defines the Msg/ConnectionOpenInit response + type. + ibc.core.connection.v1.MsgConnectionOpenTryResponse: + type: object + description: >- + MsgConnectionOpenTryResponse defines the Msg/ConnectionOpenTry response + type. + ibc.core.connection.v1.QueryClientConnectionsResponse: + type: object + properties: + connection_paths: + type: array + items: + type: string + description: slice of all the connection paths associated with a client. 
proof: type: string format: byte title: merkle proof of existence proof_height: - title: height at which the proof was retrieved + title: height at which the proof was generated type: object properties: revision_number: @@ -47581,256 +51211,226 @@ definitions: RevisionHeight gets reset - title: >- - QueryConsensusStateResponse is the response type for the - Query/ConsensusState - - RPC method - ibc.core.client.v1.QueryConsensusStatesResponse: + title: |- + QueryClientConnectionsResponse is the response type for the + Query/ClientConnections RPC method + ibc.core.connection.v1.QueryConnectionClientStateResponse: type: object properties: - consensus_states: - type: array - items: - type: object - properties: - height: - title: consensus state height - type: object - properties: - revision_number: - type: string - format: uint64 - title: the revision that the client is currently on - revision_height: - type: string - format: uint64 - title: the height within the given revision - description: >- - Normally the RevisionHeight is incremented at each height while - keeping - - RevisionNumber the same. However some consensus algorithms may - choose to - - reset the height in certain conditions e.g. hard forks, - state-machine - - breaking changes In these cases, the RevisionNumber is - incremented so that - - height continues to be monitonically increasing even as the - RevisionHeight - - gets reset - consensus_state: - title: consensus state - type: object - properties: - '@type': - type: string - description: >- - A URL/resource name that uniquely identifies the type of the - serialized + identified_client_state: + title: client state associated with the channel + type: object + properties: + client_id: + type: string + title: client identifier + client_state: + title: client state + type: object + properties: + '@type': + type: string + description: >- + A URL/resource name that uniquely identifies the type of the + serialized - protocol buffer message. 
This string must contain at least + protocol buffer message. This string must contain at least - one "/" character. The last segment of the URL's path must - represent + one "/" character. The last segment of the URL's path must + represent - the fully qualified name of the type (as in + the fully qualified name of the type (as in - `path/google.protobuf.Duration`). The name should be in a - canonical form + `path/google.protobuf.Duration`). The name should be in a + canonical form - (e.g., leading "." is not accepted). + (e.g., leading "." is not accepted). - In practice, teams usually precompile into the binary all - types that they + In practice, teams usually precompile into the binary all + types that they - expect it to use in the context of Any. However, for URLs - which use the + expect it to use in the context of Any. However, for URLs + which use the - scheme `http`, `https`, or no scheme, one can optionally set - up a type + scheme `http`, `https`, or no scheme, one can optionally set + up a type - server that maps type URLs to message definitions as - follows: + server that maps type URLs to message definitions as follows: - * If no scheme is provided, `https` is assumed. + * If no scheme is provided, `https` is assumed. - * An HTTP GET on the URL must yield a - [google.protobuf.Type][] - value in binary format, or produce an error. - * Applications are allowed to cache lookup results based on - the - URL, or have them precompiled into a binary to avoid any - lookup. Therefore, binary compatibility needs to be preserved - on changes to types. (Use versioned type names to manage - breaking changes.) + * An HTTP GET on the URL must yield a [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based on + the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. 
(Use versioned type names to manage + breaking changes.) - Note: this functionality is not currently available in the - official + Note: this functionality is not currently available in the + official - protobuf release, and it is not used for type URLs beginning - with + protobuf release, and it is not used for type URLs beginning + with - type.googleapis.com. + type.googleapis.com. - Schemes other than `http`, `https` (or the empty scheme) - might be + Schemes other than `http`, `https` (or the empty scheme) might + be - used with implementation specific semantics. - additionalProperties: {} - description: >- - `Any` contains an arbitrary serialized protocol buffer message - along with a + used with implementation specific semantics. + additionalProperties: {} + description: >- + `Any` contains an arbitrary serialized protocol buffer message + along with a - URL that describes the type of the serialized message. + URL that describes the type of the serialized message. - Protobuf library provides support to pack/unpack Any values in - the form + Protobuf library provides support to pack/unpack Any values in the + form - of utility functions or additional generated methods of the Any - type. + of utility functions or additional generated methods of the Any + type. - Example 1: Pack and unpack a message in C++. + Example 1: Pack and unpack a message in C++. - Foo foo = ...; - Any any; - any.PackFrom(foo); + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { ... - if (any.UnpackTo(&foo)) { - ... - } + } - Example 2: Pack and unpack a message in Java. + Example 2: Pack and unpack a message in Java. - Foo foo = ...; - Any any = Any.pack(foo); - ... - if (any.is(Foo.class)) { - foo = any.unpack(Foo.class); - } + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. - foo = Foo(...) 
- any = Any() - any.Pack(foo) + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) ... - if any.Is(Foo.DESCRIPTOR): - any.Unpack(foo) - ... - Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go - foo := &pb.Foo{...} - any, err := anypb.New(foo) - if err != nil { - ... - } + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { ... - foo := &pb.Foo{} - if err := any.UnmarshalTo(foo); err != nil { - ... - } - - The pack methods provided by protobuf library will by default - use + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } - 'type.googleapis.com/full.type.name' as the type URL and the - unpack + The pack methods provided by protobuf library will by default use - methods only use the fully qualified type name after the last - '/' + 'type.googleapis.com/full.type.name' as the type URL and the + unpack - in the type URL, for example "foo.bar.com/x/y.z" will yield type + methods only use the fully qualified type name after the last '/' - name "y.z". + in the type URL, for example "foo.bar.com/x/y.z" will yield type + name "y.z". - JSON - ==== + JSON - The JSON representation of an `Any` value uses the regular + ==== - representation of the deserialized, embedded message, with an + The JSON representation of an `Any` value uses the regular - additional field `@type` which contains the type URL. Example: + representation of the deserialized, embedded message, with an - package google.profile; - message Person { - string first_name = 1; - string last_name = 2; - } + additional field `@type` which contains the type URL. 
Example: - { - "@type": "type.googleapis.com/google.profile.Person", - "firstName": , - "lastName": - } + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } - If the embedded message type is well-known and has a custom JSON + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } - representation, that representation will be embedded adding a - field + If the embedded message type is well-known and has a custom JSON - `value` which holds the custom JSON in addition to the `@type` + representation, that representation will be embedded adding a + field - field. Example (for message [google.protobuf.Duration][]): + `value` which holds the custom JSON in addition to the `@type` - { - "@type": "type.googleapis.com/google.protobuf.Duration", - "value": "1.212s" - } - description: >- - ConsensusStateWithHeight defines a consensus state with an - additional height + field. Example (for message [google.protobuf.Duration][]): - field. - title: consensus states associated with the identifier - pagination: - title: pagination response + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + description: |- + IdentifiedClientState defines a client state with an additional client + identifier field. 
+ proof: + type: string + format: byte + title: merkle proof of existence + proof_height: + title: height at which the proof was retrieved type: object properties: - next_key: + revision_number: type: string - format: byte - title: |- - next_key is the key to be passed to PageRequest.key to - query the next page most efficiently - total: + format: uint64 + title: the revision that the client is currently on + revision_height: type: string format: uint64 - title: >- - total is total number of results available if - PageRequest.count_total + title: the height within the given revision + description: >- + Normally the RevisionHeight is incremented at each height while + keeping - was set, its value is undefined otherwise - description: |- - PageResponse is to be embedded in gRPC response messages where the - corresponding request message has used PageRequest. + RevisionNumber the same. However some consensus algorithms may choose + to - message SomeResponse { - repeated Bar results = 1; - PageResponse page = 2; - } + reset the height in certain conditions e.g. 
hard forks, state-machine + + breaking changes In these cases, the RevisionNumber is incremented so + that + + height continues to be monitonically increasing even as the + RevisionHeight + + gets reset title: |- - QueryConsensusStatesResponse is the response type for the - Query/ConsensusStates RPC method - ibc.core.client.v1.QueryUpgradedClientStateResponse: + QueryConnectionClientStateResponse is the response type for the + Query/ConnectionClientState RPC method + ibc.core.connection.v1.QueryConnectionConsensusStateResponse: type: object properties: - upgraded_client_state: - title: client state associated with the request identifier + consensus_state: + title: consensus state associated with the channel type: object properties: '@type': @@ -47987,1077 +51587,1185 @@ definitions: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } - description: |- - QueryUpgradedClientStateResponse is the response type for the - Query/UpgradedClientState RPC method. - ibc.core.client.v1.QueryUpgradedConsensusStateResponse: - type: object - properties: - upgraded_consensus_state: - title: Consensus state associated with the request identifier + client_id: + type: string + title: client ID associated with the consensus state + proof: + type: string + format: byte + title: merkle proof of existence + proof_height: + title: height at which the proof was retrieved type: object properties: - '@type': + revision_number: type: string - description: >- - A URL/resource name that uniquely identifies the type of the - serialized - - protocol buffer message. This string must contain at least - - one "/" character. The last segment of the URL's path must - represent - - the fully qualified name of the type (as in - - `path/google.protobuf.Duration`). The name should be in a - canonical form - - (e.g., leading "." is not accepted). 
- + format: uint64 + title: the revision that the client is currently on + revision_height: + type: string + format: uint64 + title: the height within the given revision + description: >- + Normally the RevisionHeight is incremented at each height while + keeping - In practice, teams usually precompile into the binary all types - that they + RevisionNumber the same. However some consensus algorithms may choose + to - expect it to use in the context of Any. However, for URLs which - use the + reset the height in certain conditions e.g. hard forks, state-machine - scheme `http`, `https`, or no scheme, one can optionally set up a - type + breaking changes In these cases, the RevisionNumber is incremented so + that - server that maps type URLs to message definitions as follows: + height continues to be monitonically increasing even as the + RevisionHeight + gets reset + title: |- + QueryConnectionConsensusStateResponse is the response type for the + Query/ConnectionConsensusState RPC method + ibc.core.connection.v1.QueryConnectionResponse: + type: object + properties: + connection: + title: connection associated with the request identifier + type: object + properties: + client_id: + type: string + description: client associated with this connection. + versions: + type: array + items: + type: object + properties: + identifier: + type: string + title: unique version identifier + features: + type: array + items: + type: string + title: list of features compatible with the specified identifier + description: >- + Version defines the versioning scheme used to negotiate the IBC + verison in - * If no scheme is provided, `https` is assumed. + the connection handshake. + description: >- + IBC version which can be utilised to determine encodings or + protocols for - * An HTTP GET on the URL must yield a [google.protobuf.Type][] - value in binary format, or produce an error. 
- * Applications are allowed to cache lookup results based on the - URL, or have them precompiled into a binary to avoid any - lookup. Therefore, binary compatibility needs to be preserved - on changes to types. (Use versioned type names to manage - breaking changes.) + channels or packets utilising this connection. + state: + description: current state of the connection end. + type: string + enum: + - STATE_UNINITIALIZED_UNSPECIFIED + - STATE_INIT + - STATE_TRYOPEN + - STATE_OPEN + default: STATE_UNINITIALIZED_UNSPECIFIED + counterparty: + description: counterparty chain associated with this connection. + type: object + properties: + client_id: + type: string + description: >- + identifies the client on the counterparty chain associated + with a given - Note: this functionality is not currently available in the - official + connection. + connection_id: + type: string + description: >- + identifies the connection end on the counterparty chain + associated with a - protobuf release, and it is not used for type URLs beginning with + given connection. + prefix: + description: commitment merkle prefix of the counterparty chain. + type: object + properties: + key_prefix: + type: string + format: byte + title: >- + MerklePrefix is merkle path prefixed to the key. - type.googleapis.com. + The constructed key from the Path and the key will be + append(Path.KeyPath, + append(Path.KeyPrefix, key...)) + delay_period: + type: string + format: uint64 + description: >- + delay period that must pass before a consensus state can be used + for - Schemes other than `http`, `https` (or the empty scheme) might be + packet-verification NOTE: delay period logic is only implemented + by some - used with implementation specific semantics. - additionalProperties: {} + clients. description: >- - `Any` contains an arbitrary serialized protocol buffer message along - with a - - URL that describes the type of the serialized message. 
- - - Protobuf library provides support to pack/unpack Any values in the - form - - of utility functions or additional generated methods of the Any type. - - - Example 1: Pack and unpack a message in C++. - - Foo foo = ...; - Any any; - any.PackFrom(foo); - ... - if (any.UnpackTo(&foo)) { - ... - } + ConnectionEnd defines a stateful object on a chain connected to + another - Example 2: Pack and unpack a message in Java. + separate one. - Foo foo = ...; - Any any = Any.pack(foo); - ... - if (any.is(Foo.class)) { - foo = any.unpack(Foo.class); - } + NOTE: there must only be 2 defined ConnectionEnds to establish - Example 3: Pack and unpack a message in Python. + a connection between two chains. + proof: + type: string + format: byte + title: merkle proof of existence + proof_height: + title: height at which the proof was retrieved + type: object + properties: + revision_number: + type: string + format: uint64 + title: the revision that the client is currently on + revision_height: + type: string + format: uint64 + title: the height within the given revision + description: >- + Normally the RevisionHeight is incremented at each height while + keeping - foo = Foo(...) - any = Any() - any.Pack(foo) - ... - if any.Is(Foo.DESCRIPTOR): - any.Unpack(foo) - ... + RevisionNumber the same. However some consensus algorithms may choose + to - Example 4: Pack and unpack a message in Go + reset the height in certain conditions e.g. hard forks, state-machine - foo := &pb.Foo{...} - any, err := anypb.New(foo) - if err != nil { - ... - } - ... - foo := &pb.Foo{} - if err := any.UnmarshalTo(foo); err != nil { - ... 
- } + breaking changes In these cases, the RevisionNumber is incremented so + that - The pack methods provided by protobuf library will by default use + height continues to be monitonically increasing even as the + RevisionHeight - 'type.googleapis.com/full.type.name' as the type URL and the unpack + gets reset + description: >- + QueryConnectionResponse is the response type for the Query/Connection RPC - methods only use the fully qualified type name after the last '/' + method. Besides the connection end, it includes a proof and the height + from - in the type URL, for example "foo.bar.com/x/y.z" will yield type + which the proof was retrieved. + ibc.core.connection.v1.QueryConnectionsResponse: + type: object + properties: + connections: + type: array + items: + type: object + properties: + id: + type: string + description: connection identifier. + client_id: + type: string + description: client associated with this connection. + versions: + type: array + items: + type: object + properties: + identifier: + type: string + title: unique version identifier + features: + type: array + items: + type: string + title: list of features compatible with the specified identifier + description: >- + Version defines the versioning scheme used to negotiate the + IBC verison in - name "y.z". + the connection handshake. + title: >- + IBC version which can be utilised to determine encodings or + protocols for + channels or packets utilising this connection + state: + description: current state of the connection end. + type: string + enum: + - STATE_UNINITIALIZED_UNSPECIFIED + - STATE_INIT + - STATE_TRYOPEN + - STATE_OPEN + default: STATE_UNINITIALIZED_UNSPECIFIED + counterparty: + description: counterparty chain associated with this connection. + type: object + properties: + client_id: + type: string + description: >- + identifies the client on the counterparty chain associated + with a given + connection. 
+ connection_id: + type: string + description: >- + identifies the connection end on the counterparty chain + associated with a - JSON + given connection. + prefix: + description: commitment merkle prefix of the counterparty chain. + type: object + properties: + key_prefix: + type: string + format: byte + title: >- + MerklePrefix is merkle path prefixed to the key. - ==== + The constructed key from the Path and the key will be + append(Path.KeyPath, - The JSON representation of an `Any` value uses the regular + append(Path.KeyPrefix, key...)) + delay_period: + type: string + format: uint64 + description: delay period associated with this connection. + description: |- + IdentifiedConnection defines a connection with additional connection + identifier field. + description: list of stored connections of the chain. + pagination: + title: pagination response + type: object + properties: + next_key: + type: string + format: byte + title: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total - representation of the deserialized, embedded message, with an + was set, its value is undefined otherwise + description: |- + PageResponse is to be embedded in gRPC response messages where the + corresponding request message has used PageRequest. - additional field `@type` which contains the type URL. 
Example: + message SomeResponse { + repeated Bar results = 1; + PageResponse page = 2; + } + height: + title: query block height + type: object + properties: + revision_number: + type: string + format: uint64 + title: the revision that the client is currently on + revision_height: + type: string + format: uint64 + title: the height within the given revision + description: >- + Normally the RevisionHeight is incremented at each height while + keeping - package google.profile; - message Person { - string first_name = 1; - string last_name = 2; - } + RevisionNumber the same. However some consensus algorithms may choose + to - { - "@type": "type.googleapis.com/google.profile.Person", - "firstName": , - "lastName": - } + reset the height in certain conditions e.g. hard forks, state-machine - If the embedded message type is well-known and has a custom JSON + breaking changes In these cases, the RevisionNumber is incremented so + that - representation, that representation will be embedded adding a field + height continues to be monitonically increasing even as the + RevisionHeight - `value` which holds the custom JSON in addition to the `@type` + gets reset + description: >- + QueryConnectionsResponse is the response type for the Query/Connections + RPC - field. Example (for message [google.protobuf.Duration][]): + method. + ibc.core.connection.v1.State: + type: string + enum: + - STATE_UNINITIALIZED_UNSPECIFIED + - STATE_INIT + - STATE_TRYOPEN + - STATE_OPEN + default: STATE_UNINITIALIZED_UNSPECIFIED + description: |- + State defines if a connection is in one of the following states: + INIT, TRYOPEN, OPEN or UNINITIALIZED. - { - "@type": "type.googleapis.com/google.protobuf.Duration", - "value": "1.212s" - } + - STATE_UNINITIALIZED_UNSPECIFIED: Default State + - STATE_INIT: A connection end has just started the opening handshake. + - STATE_TRYOPEN: A connection end has acknowledged the handshake step on the counterparty + chain. 
+ - STATE_OPEN: A connection end has completed the handshake. + ibc.core.connection.v1.Version: + type: object + properties: + identifier: + type: string + title: unique version identifier + features: + type: array + items: + type: string + title: list of features compatible with the specified identifier description: |- - QueryUpgradedConsensusStateResponse is the response type for the - Query/UpgradedConsensusState RPC method. - ibc.core.commitment.v1.MerklePrefix: + Version defines the versioning scheme used to negotiate the IBC verison in + the connection handshake. + market.portal.Params: + type: object + description: Params defines the parameters for the module. + market.portal.QueryParamsResponse: type: object properties: - key_prefix: + params: + description: params holds all the parameters of this module. + type: object + description: QueryParamsResponse is response type for the Query/Params RPC method. + pendulumlabs.market.market.Burnings: + type: object + properties: + denom: type: string - format: byte - title: |- - MerklePrefix is merkle path prefixed to the key. - The constructed key from the Path and the key will be append(Path.KeyPath, - append(Path.KeyPrefix, key...)) - ibc.core.connection.v1.ConnectionEnd: + amount: + type: string + pendulumlabs.market.market.Drop: type: object properties: - client_id: + uid: type: string - description: client associated with this connection. 
- versions: + format: uint64 + owner: + type: string + pair: + type: string + drops: + type: string + product: + type: string + active: + type: boolean + pendulumlabs.market.market.Leader: + type: object + properties: + address: + type: string + drops: + type: string + pendulumlabs.market.market.Member: + type: object + properties: + pair: + type: string + denomA: + type: string + denomB: + type: string + balance: + type: string + previous: + type: string + limit: + type: string + format: uint64 + stop: + type: string + format: uint64 + pendulumlabs.market.market.MsgCancelOrderResponse: + type: object + pendulumlabs.market.market.MsgCreateDropResponse: + type: object + pendulumlabs.market.market.MsgCreateOrderResponse: + type: object + properties: + uid: + type: string + format: uint64 + pendulumlabs.market.market.MsgCreatePoolResponse: + type: object + pendulumlabs.market.market.MsgMarketOrderResponse: + type: object + properties: + amountBid: + type: string + amountAsk: + type: string + slippage: + type: string + pendulumlabs.market.market.MsgRedeemDropResponse: + type: object + pendulumlabs.market.market.Order: + type: object + properties: + uid: + type: string + format: uint64 + owner: + type: string + status: + type: string + orderType: + type: string + denomAsk: + type: string + denomBid: + type: string + amount: + type: string + rate: type: array items: - type: object - properties: - identifier: - type: string - title: unique version identifier - features: - type: array - items: - type: string - title: list of features compatible with the specified identifier - description: >- - Version defines the versioning scheme used to negotiate the IBC - verison in - - the connection handshake. - description: >- - IBC version which can be utilised to determine encodings or protocols - for - - channels or packets utilising this connection. - state: - description: current state of the connection end. 
+ type: string + prev: type: string - enum: - - STATE_UNINITIALIZED_UNSPECIFIED - - STATE_INIT - - STATE_TRYOPEN - - STATE_OPEN - default: STATE_UNINITIALIZED_UNSPECIFIED - counterparty: - description: counterparty chain associated with this connection. + format: uint64 + next: + type: string + format: uint64 + beg_time: + type: string + format: int64 + upd_time: + type: string + format: int64 + pendulumlabs.market.market.OrderResponse: + type: object + properties: + uid: + type: string + format: uint64 + owner: + type: string + status: + type: string + orderType: + type: string + denomAsk: + type: string + denomBid: + type: string + amount: + type: string + rate: + type: array + items: + type: string + prev: + type: string + format: uint64 + next: + type: string + format: uint64 + beg_time: + type: string + format: int64 + upd_time: + type: string + format: int64 + pendulumlabs.market.market.Orders: + type: object + properties: + uids: + type: array + items: + type: string + format: uint64 + pendulumlabs.market.market.Params: + type: object + properties: + earn_rates: + type: string + title: |- + leader earnings rates + 1,2,3 Comma separated, no space + burn_rate: + type: string + title: pool burning rate + burn_coin: + type: string + title: burn coin + market_fee: + type: string + title: 'market_fee (parameter / 10000), 9999 representing as 99.99%' + description: Params defines the parameters for the module. + pendulumlabs.market.market.Pool: + type: object + properties: + pair: + type: string + denom1: + type: string + denom2: + type: string + volume1: type: object properties: - client_id: + denom: type: string - description: >- - identifies the client on the counterparty chain associated with a - given - - connection. - connection_id: + amount: type: string - description: >- - identifies the connection end on the counterparty chain associated - with a - - given connection. - prefix: - description: commitment merkle prefix of the counterparty chain. 
- type: object - properties: - key_prefix: - type: string - format: byte - title: >- - MerklePrefix is merkle path prefixed to the key. - - The constructed key from the Path and the key will be - append(Path.KeyPath, - - append(Path.KeyPrefix, key...)) - delay_period: + volume2: + type: object + properties: + denom: + type: string + amount: + type: string + leaders: + type: array + items: + type: object + properties: + address: + type: string + drops: + type: string + drops: + type: string + history: type: string format: uint64 - description: >- - delay period that must pass before a consensus state can be used for - - packet-verification NOTE: delay period logic is only implemented by - some - - clients. - description: |- - ConnectionEnd defines a stateful object on a chain connected to another - separate one. - NOTE: there must only be 2 defined ConnectionEnds to establish - a connection between two chains. - ibc.core.connection.v1.Counterparty: + pendulumlabs.market.market.QueryAllBurningsResponse: type: object properties: - client_id: - type: string - description: >- - identifies the client on the counterparty chain associated with a - given - - connection. - connection_id: - type: string - description: >- - identifies the connection end on the counterparty chain associated - with a - - given connection. - prefix: - description: commitment merkle prefix of the counterparty chain. + burnings: + type: array + items: + type: object + properties: + denom: + type: string + amount: + type: string + pagination: type: object properties: - key_prefix: + next_key: type: string format: byte - title: >- - MerklePrefix is merkle path prefixed to the key. 
+ title: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total - The constructed key from the Path and the key will be - append(Path.KeyPath, + was set, its value is undefined otherwise + description: |- + PageResponse is to be embedded in gRPC response messages where the + corresponding request message has used PageRequest. - append(Path.KeyPrefix, key...)) - description: >- - Counterparty defines the counterparty chain associated with a connection - end. - ibc.core.connection.v1.IdentifiedConnection: + message SomeResponse { + repeated Bar results = 1; + PageResponse page = 2; + } + pendulumlabs.market.market.QueryAllMemberResponse: type: object properties: - id: - type: string - description: connection identifier. - client_id: - type: string - description: client associated with this connection. - versions: + member: type: array items: type: object properties: - identifier: + pair: type: string - title: unique version identifier - features: - type: array - items: - type: string - title: list of features compatible with the specified identifier - description: >- - Version defines the versioning scheme used to negotiate the IBC - verison in - - the connection handshake. - title: >- - IBC version which can be utilised to determine encodings or protocols - for - - channels or packets utilising this connection - state: - description: current state of the connection end. - type: string - enum: - - STATE_UNINITIALIZED_UNSPECIFIED - - STATE_INIT - - STATE_TRYOPEN - - STATE_OPEN - default: STATE_UNINITIALIZED_UNSPECIFIED - counterparty: - description: counterparty chain associated with this connection. 
+ denomA: + type: string + denomB: + type: string + balance: + type: string + previous: + type: string + limit: + type: string + format: uint64 + stop: + type: string + format: uint64 + pagination: type: object properties: - client_id: + next_key: type: string - description: >- - identifies the client on the counterparty chain associated with a - given - - connection. - connection_id: + format: byte + title: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently + total: type: string - description: >- - identifies the connection end on the counterparty chain associated - with a - - given connection. - prefix: - description: commitment merkle prefix of the counterparty chain. - type: object - properties: - key_prefix: - type: string - format: byte + format: uint64 title: >- - MerklePrefix is merkle path prefixed to the key. + total is total number of results available if + PageRequest.count_total - The constructed key from the Path and the key will be - append(Path.KeyPath, + was set, its value is undefined otherwise + description: |- + PageResponse is to be embedded in gRPC response messages where the + corresponding request message has used PageRequest. - append(Path.KeyPrefix, key...)) - delay_period: - type: string - format: uint64 - description: delay period associated with this connection. - description: |- - IdentifiedConnection defines a connection with additional connection - identifier field. - ibc.core.connection.v1.MsgConnectionOpenAckResponse: - type: object - description: >- - MsgConnectionOpenAckResponse defines the Msg/ConnectionOpenAck response - type. - ibc.core.connection.v1.MsgConnectionOpenConfirmResponse: - type: object - description: |- - MsgConnectionOpenConfirmResponse defines the Msg/ConnectionOpenConfirm - response type. - ibc.core.connection.v1.MsgConnectionOpenInitResponse: - type: object - description: |- - MsgConnectionOpenInitResponse defines the Msg/ConnectionOpenInit response - type. 
- ibc.core.connection.v1.MsgConnectionOpenTryResponse: - type: object - description: >- - MsgConnectionOpenTryResponse defines the Msg/ConnectionOpenTry response - type. - ibc.core.connection.v1.QueryClientConnectionsResponse: + message SomeResponse { + repeated Bar results = 1; + PageResponse page = 2; + } + pendulumlabs.market.market.QueryAllPoolResponse: type: object properties: - connection_paths: + pool: type: array items: - type: string - description: slice of all the connection paths associated with a client. - proof: - type: string - format: byte - title: merkle proof of existence - proof_height: - title: height at which the proof was generated + type: object + properties: + pair: + type: string + denom1: + type: string + denom2: + type: string + volume1: + type: object + properties: + denom: + type: string + amount: + type: string + volume2: + type: object + properties: + denom: + type: string + amount: + type: string + leaders: + type: array + items: + type: object + properties: + address: + type: string + drops: + type: string + drops: + type: string + history: + type: string + format: uint64 + pagination: type: object properties: - revision_number: + next_key: type: string - format: uint64 - title: the revision that the client is currently on - revision_height: + format: byte + title: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently + total: type: string format: uint64 - title: the height within the given revision - description: >- - Normally the RevisionHeight is incremented at each height while - keeping - - RevisionNumber the same. However some consensus algorithms may choose - to - - reset the height in certain conditions e.g. 
hard forks, state-machine - - breaking changes In these cases, the RevisionNumber is incremented so - that + title: >- + total is total number of results available if + PageRequest.count_total - height continues to be monitonically increasing even as the - RevisionHeight + was set, its value is undefined otherwise + description: |- + PageResponse is to be embedded in gRPC response messages where the + corresponding request message has used PageRequest. - gets reset - title: |- - QueryClientConnectionsResponse is the response type for the - Query/ClientConnections RPC method - ibc.core.connection.v1.QueryConnectionClientStateResponse: + message SomeResponse { + repeated Bar results = 1; + PageResponse page = 2; + } + pendulumlabs.market.market.QueryAllVolumeResponse: type: object properties: - identified_client_state: - title: client state associated with the channel - type: object - properties: - client_id: - type: string - title: client identifier - client_state: - title: client state - type: object - properties: - '@type': - type: string - description: >- - A URL/resource name that uniquely identifies the type of the - serialized - - protocol buffer message. This string must contain at least - - one "/" character. The last segment of the URL's path must - represent - - the fully qualified name of the type (as in - - `path/google.protobuf.Duration`). The name should be in a - canonical form - - (e.g., leading "." is not accepted). - - - In practice, teams usually precompile into the binary all - types that they - - expect it to use in the context of Any. However, for URLs - which use the - - scheme `http`, `https`, or no scheme, one can optionally set - up a type - - server that maps type URLs to message definitions as follows: - - - * If no scheme is provided, `https` is assumed. - - * An HTTP GET on the URL must yield a [google.protobuf.Type][] - value in binary format, or produce an error. 
- * Applications are allowed to cache lookup results based on - the - URL, or have them precompiled into a binary to avoid any - lookup. Therefore, binary compatibility needs to be preserved - on changes to types. (Use versioned type names to manage - breaking changes.) - - Note: this functionality is not currently available in the - official - - protobuf release, and it is not used for type URLs beginning - with - - type.googleapis.com. - - - Schemes other than `http`, `https` (or the empty scheme) might - be - - used with implementation specific semantics. - additionalProperties: {} - description: >- - `Any` contains an arbitrary serialized protocol buffer message - along with a - - URL that describes the type of the serialized message. - - - Protobuf library provides support to pack/unpack Any values in the - form - - of utility functions or additional generated methods of the Any - type. - - - Example 1: Pack and unpack a message in C++. - - Foo foo = ...; - Any any; - any.PackFrom(foo); - ... - if (any.UnpackTo(&foo)) { - ... - } - - Example 2: Pack and unpack a message in Java. - - Foo foo = ...; - Any any = Any.pack(foo); - ... - if (any.is(Foo.class)) { - foo = any.unpack(Foo.class); - } - - Example 3: Pack and unpack a message in Python. - - foo = Foo(...) - any = Any() - any.Pack(foo) - ... - if any.Is(Foo.DESCRIPTOR): - any.Unpack(foo) - ... - - Example 4: Pack and unpack a message in Go - - foo := &pb.Foo{...} - any, err := anypb.New(foo) - if err != nil { - ... - } - ... - foo := &pb.Foo{} - if err := any.UnmarshalTo(foo); err != nil { - ... - } - - The pack methods provided by protobuf library will by default use - - 'type.googleapis.com/full.type.name' as the type URL and the - unpack - - methods only use the fully qualified type name after the last '/' - - in the type URL, for example "foo.bar.com/x/y.z" will yield type - - name "y.z". 
- - - - JSON - - ==== - - The JSON representation of an `Any` value uses the regular - - representation of the deserialized, embedded message, with an - - additional field `@type` which contains the type URL. Example: - - package google.profile; - message Person { - string first_name = 1; - string last_name = 2; - } - - { - "@type": "type.googleapis.com/google.profile.Person", - "firstName": , - "lastName": - } - - If the embedded message type is well-known and has a custom JSON - - representation, that representation will be embedded adding a - field - - `value` which holds the custom JSON in addition to the `@type` - - field. Example (for message [google.protobuf.Duration][]): - - { - "@type": "type.googleapis.com/google.protobuf.Duration", - "value": "1.212s" - } - description: |- - IdentifiedClientState defines a client state with an additional client - identifier field. - proof: - type: string - format: byte - title: merkle proof of existence - proof_height: - title: height at which the proof was retrieved + volumes: + type: array + items: + type: object + properties: + denom: + type: string + amount: + type: string + pagination: type: object properties: - revision_number: + next_key: type: string - format: uint64 - title: the revision that the client is currently on - revision_height: + format: byte + title: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently + total: type: string format: uint64 - title: the height within the given revision - description: >- - Normally the RevisionHeight is incremented at each height while - keeping - - RevisionNumber the same. However some consensus algorithms may choose - to - - reset the height in certain conditions e.g. 
hard forks, state-machine - - breaking changes In these cases, the RevisionNumber is incremented so - that + title: >- + total is total number of results available if + PageRequest.count_total - height continues to be monitonically increasing even as the - RevisionHeight + was set, its value is undefined otherwise + description: |- + PageResponse is to be embedded in gRPC response messages where the + corresponding request message has used PageRequest. - gets reset - title: |- - QueryConnectionClientStateResponse is the response type for the - Query/ConnectionClientState RPC method - ibc.core.connection.v1.QueryConnectionConsensusStateResponse: + message SomeResponse { + repeated Bar results = 1; + PageResponse page = 2; + } + pendulumlabs.market.market.QueryBookResponse: type: object properties: - consensus_state: - title: consensus state associated with the channel + book: + type: array + items: + type: object + properties: + uid: + type: string + format: uint64 + owner: + type: string + status: + type: string + orderType: + type: string + denomAsk: + type: string + denomBid: + type: string + amount: + type: string + rate: + type: array + items: + type: string + prev: + type: string + format: uint64 + next: + type: string + format: uint64 + beg_time: + type: string + format: int64 + upd_time: + type: string + format: int64 + pagination: type: object properties: - '@type': + next_key: type: string - description: >- - A URL/resource name that uniquely identifies the type of the - serialized - - protocol buffer message. This string must contain at least - - one "/" character. The last segment of the URL's path must - represent - - the fully qualified name of the type (as in - - `path/google.protobuf.Duration`). The name should be in a - canonical form - - (e.g., leading "." is not accepted). - - - In practice, teams usually precompile into the binary all types - that they - - expect it to use in the context of Any. 
However, for URLs which - use the - - scheme `http`, `https`, or no scheme, one can optionally set up a - type - - server that maps type URLs to message definitions as follows: - - - * If no scheme is provided, `https` is assumed. - - * An HTTP GET on the URL must yield a [google.protobuf.Type][] - value in binary format, or produce an error. - * Applications are allowed to cache lookup results based on the - URL, or have them precompiled into a binary to avoid any - lookup. Therefore, binary compatibility needs to be preserved - on changes to types. (Use versioned type names to manage - breaking changes.) - - Note: this functionality is not currently available in the - official - - protobuf release, and it is not used for type URLs beginning with - - type.googleapis.com. - - - Schemes other than `http`, `https` (or the empty scheme) might be - - used with implementation specific semantics. - additionalProperties: {} - description: >- - `Any` contains an arbitrary serialized protocol buffer message along - with a - - URL that describes the type of the serialized message. - - - Protobuf library provides support to pack/unpack Any values in the - form - - of utility functions or additional generated methods of the Any type. - - - Example 1: Pack and unpack a message in C++. - - Foo foo = ...; - Any any; - any.PackFrom(foo); - ... - if (any.UnpackTo(&foo)) { - ... - } - - Example 2: Pack and unpack a message in Java. - - Foo foo = ...; - Any any = Any.pack(foo); - ... - if (any.is(Foo.class)) { - foo = any.unpack(Foo.class); - } - - Example 3: Pack and unpack a message in Python. - - foo = Foo(...) - any = Any() - any.Pack(foo) - ... - if any.Is(Foo.DESCRIPTOR): - any.Unpack(foo) - ... - - Example 4: Pack and unpack a message in Go - - foo := &pb.Foo{...} - any, err := anypb.New(foo) - if err != nil { - ... - } - ... - foo := &pb.Foo{} - if err := any.UnmarshalTo(foo); err != nil { - ... 
- } - - The pack methods provided by protobuf library will by default use - - 'type.googleapis.com/full.type.name' as the type URL and the unpack - - methods only use the fully qualified type name after the last '/' - - in the type URL, for example "foo.bar.com/x/y.z" will yield type - - name "y.z". - - - - JSON - - ==== - - The JSON representation of an `Any` value uses the regular - - representation of the deserialized, embedded message, with an - - additional field `@type` which contains the type URL. Example: - - package google.profile; - message Person { - string first_name = 1; - string last_name = 2; - } - - { - "@type": "type.googleapis.com/google.profile.Person", - "firstName": , - "lastName": - } - - If the embedded message type is well-known and has a custom JSON - - representation, that representation will be embedded adding a field - - `value` which holds the custom JSON in addition to the `@type` + format: byte + title: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total - field. Example (for message [google.protobuf.Duration][]): + was set, its value is undefined otherwise + description: |- + PageResponse is to be embedded in gRPC response messages where the + corresponding request message has used PageRequest. 
- { - "@type": "type.googleapis.com/google.protobuf.Duration", - "value": "1.212s" - } - client_id: + message SomeResponse { + repeated Bar results = 1; + PageResponse page = 2; + } + pendulumlabs.market.market.QueryBookendsResponse: + type: object + properties: + coinA: + type: string + coinB: + type: string + orderType: type: string - title: client ID associated with the consensus state - proof: + rate: + type: array + items: + type: string + prev: type: string - format: byte - title: merkle proof of existence - proof_height: - title: height at which the proof was retrieved + format: uint64 + next: + type: string + format: uint64 + pendulumlabs.market.market.QueryBurnedResponse: + type: object + properties: + denom: + type: string + amount: + type: string + pendulumlabs.market.market.QueryDropAmountsResponse: + type: object + properties: + denom1: + type: string + denom2: + type: string + amount1: + type: string + amount2: + type: string + pendulumlabs.market.market.QueryDropCoinResponse: + type: object + properties: + drops: + type: string + amountB: + type: string + pendulumlabs.market.market.QueryDropPairsResponse: + type: object + properties: + pairs: + type: array + items: + type: string + pendulumlabs.market.market.QueryDropResponse: + type: object + properties: + drop: type: object properties: - revision_number: + uid: type: string format: uint64 - title: the revision that the client is currently on - revision_height: + owner: + type: string + pair: + type: string + drops: + type: string + product: + type: string + active: + type: boolean + pendulumlabs.market.market.QueryDropsResponse: + type: object + properties: + drops: + type: array + items: + type: object + properties: + uid: + type: string + format: uint64 + owner: + type: string + pair: + type: string + drops: + type: string + product: + type: string + active: + type: boolean + pagination: + type: object + properties: + next_key: + type: string + format: byte + title: |- + next_key is the key to be 
passed to PageRequest.key to + query the next page most efficiently + total: type: string format: uint64 - title: the height within the given revision - description: >- - Normally the RevisionHeight is incremented at each height while - keeping - - RevisionNumber the same. However some consensus algorithms may choose - to - - reset the height in certain conditions e.g. hard forks, state-machine - - breaking changes In these cases, the RevisionNumber is incremented so - that + title: >- + total is total number of results available if + PageRequest.count_total - height continues to be monitonically increasing even as the - RevisionHeight + was set, its value is undefined otherwise + description: |- + PageResponse is to be embedded in gRPC response messages where the + corresponding request message has used PageRequest. - gets reset - title: |- - QueryConnectionConsensusStateResponse is the response type for the - Query/ConnectionConsensusState RPC method - ibc.core.connection.v1.QueryConnectionResponse: + message SomeResponse { + repeated Bar results = 1; + PageResponse page = 2; + } + pendulumlabs.market.market.QueryGetBurningsResponse: type: object properties: - connection: - title: connection associated with the request identifier + burnings: type: object properties: - client_id: + denom: type: string - description: client associated with this connection. 
- versions: + amount: + type: string + pendulumlabs.market.market.QueryGetMemberResponse: + type: object + properties: + member: + type: object + properties: + pair: + type: string + denomA: + type: string + denomB: + type: string + balance: + type: string + previous: + type: string + limit: + type: string + format: uint64 + stop: + type: string + format: uint64 + pendulumlabs.market.market.QueryGetPoolResponse: + type: object + properties: + pool: + type: object + properties: + pair: + type: string + denom1: + type: string + denom2: + type: string + volume1: + type: object + properties: + denom: + type: string + amount: + type: string + volume2: + type: object + properties: + denom: + type: string + amount: + type: string + leaders: type: array items: type: object properties: - identifier: + address: type: string - title: unique version identifier - features: - type: array - items: - type: string - title: list of features compatible with the specified identifier - description: >- - Version defines the versioning scheme used to negotiate the IBC - verison in - - the connection handshake. - description: >- - IBC version which can be utilised to determine encodings or - protocols for - - channels or packets utilising this connection. - state: - description: current state of the connection end. + drops: + type: string + drops: type: string - enum: - - STATE_UNINITIALIZED_UNSPECIFIED - - STATE_INIT - - STATE_TRYOPEN - - STATE_OPEN - default: STATE_UNINITIALIZED_UNSPECIFIED - counterparty: - description: counterparty chain associated with this connection. - type: object - properties: - client_id: - type: string - description: >- - identifies the client on the counterparty chain associated - with a given - - connection. 
- connection_id: + history: + type: string + format: uint64 + pendulumlabs.market.market.QueryHistoryResponse: + type: object + properties: + history: + type: array + items: + type: object + properties: + uid: + type: string + format: uint64 + owner: + type: string + status: + type: string + orderType: + type: string + denomAsk: + type: string + denomBid: + type: string + amount: + type: string + rate: + type: array + items: type: string - description: >- - identifies the connection end on the counterparty chain - associated with a - - given connection. - prefix: - description: commitment merkle prefix of the counterparty chain. - type: object - properties: - key_prefix: - type: string - format: byte - title: >- - MerklePrefix is merkle path prefixed to the key. - - The constructed key from the Path and the key will be - append(Path.KeyPath, - - append(Path.KeyPrefix, key...)) - delay_period: + prev: + type: string + format: uint64 + next: + type: string + format: uint64 + beg_time: + type: string + format: int64 + upd_time: + type: string + format: int64 + pagination: + type: object + properties: + next_key: + type: string + format: byte + title: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently + total: type: string format: uint64 - description: >- - delay period that must pass before a consensus state can be used - for - - packet-verification NOTE: delay period logic is only implemented - by some + title: >- + total is total number of results available if + PageRequest.count_total - clients. - description: >- - ConnectionEnd defines a stateful object on a chain connected to - another + was set, its value is undefined otherwise + description: |- + PageResponse is to be embedded in gRPC response messages where the + corresponding request message has used PageRequest. - separate one. 
+ message SomeResponse { + repeated Bar results = 1; + PageResponse page = 2; + } + pendulumlabs.market.market.QueryOrderOwnerUidsResponse: + type: object + properties: + orders: + type: object + properties: + uids: + type: array + items: + type: string + format: uint64 + pagination: + type: object + properties: + next_key: + type: string + format: byte + title: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total - NOTE: there must only be 2 defined ConnectionEnds to establish + was set, its value is undefined otherwise + description: |- + PageResponse is to be embedded in gRPC response messages where the + corresponding request message has used PageRequest. - a connection between two chains. - proof: - type: string - format: byte - title: merkle proof of existence - proof_height: - title: height at which the proof was retrieved + message SomeResponse { + repeated Bar results = 1; + PageResponse page = 2; + } + pendulumlabs.market.market.QueryOrderResponse: + type: object + properties: + order: type: object properties: - revision_number: + uid: type: string format: uint64 - title: the revision that the client is currently on - revision_height: + owner: + type: string + status: + type: string + orderType: + type: string + denomAsk: + type: string + denomBid: + type: string + amount: + type: string + rate: + type: array + items: + type: string + prev: type: string format: uint64 - title: the height within the given revision - description: >- - Normally the RevisionHeight is incremented at each height while - keeping - - RevisionNumber the same. However some consensus algorithms may choose - to - - reset the height in certain conditions e.g. 
hard forks, state-machine - - breaking changes In these cases, the RevisionNumber is incremented so - that - - height continues to be monitonically increasing even as the - RevisionHeight - - gets reset - description: >- - QueryConnectionResponse is the response type for the Query/Connection RPC - - method. Besides the connection end, it includes a proof and the height - from - - which the proof was retrieved. - ibc.core.connection.v1.QueryConnectionsResponse: + next: + type: string + format: uint64 + beg_time: + type: string + format: int64 + upd_time: + type: string + format: int64 + pendulumlabs.market.market.QueryOrdersResponse: type: object properties: - connections: + orders: type: array items: type: object properties: - id: + uid: type: string - description: connection identifier. - client_id: + format: uint64 + owner: type: string - description: client associated with this connection. - versions: + status: + type: string + orderType: + type: string + denomAsk: + type: string + denomBid: + type: string + amount: + type: string + rate: type: array items: - type: object - properties: - identifier: - type: string - title: unique version identifier - features: - type: array - items: - type: string - title: list of features compatible with the specified identifier - description: >- - Version defines the versioning scheme used to negotiate the - IBC verison in - - the connection handshake. - title: >- - IBC version which can be utilised to determine encodings or - protocols for - - channels or packets utilising this connection - state: - description: current state of the connection end. + type: string + prev: type: string - enum: - - STATE_UNINITIALIZED_UNSPECIFIED - - STATE_INIT - - STATE_TRYOPEN - - STATE_OPEN - default: STATE_UNINITIALIZED_UNSPECIFIED - counterparty: - description: counterparty chain associated with this connection. 
- type: object - properties: - client_id: - type: string - description: >- - identifies the client on the counterparty chain associated - with a given - - connection. - connection_id: - type: string - description: >- - identifies the connection end on the counterparty chain - associated with a - - given connection. - prefix: - description: commitment merkle prefix of the counterparty chain. - type: object - properties: - key_prefix: - type: string - format: byte - title: >- - MerklePrefix is merkle path prefixed to the key. - - The constructed key from the Path and the key will be - append(Path.KeyPath, - - append(Path.KeyPrefix, key...)) - delay_period: + format: uint64 + next: type: string format: uint64 - description: delay period associated with this connection. - description: |- - IdentifiedConnection defines a connection with additional connection - identifier field. - description: list of stored connections of the chain. + beg_time: + type: string + format: int64 + upd_time: + type: string + format: int64 pagination: - title: pagination response type: object properties: next_key: @@ -49082,80 +52790,47 @@ definitions: repeated Bar results = 1; PageResponse page = 2; } - height: - title: query block height + pendulumlabs.market.market.QueryParamsResponse: + type: object + properties: + params: + description: params holds all the parameters of this module. type: object properties: - revision_number: + earn_rates: type: string - format: uint64 - title: the revision that the client is currently on - revision_height: + title: |- + leader earnings rates + 1,2,3 Comma separated, no space + burn_rate: type: string - format: uint64 - title: the height within the given revision - description: >- - Normally the RevisionHeight is incremented at each height while - keeping - - RevisionNumber the same. However some consensus algorithms may choose - to - - reset the height in certain conditions e.g. 
hard forks, state-machine - - breaking changes In these cases, the RevisionNumber is incremented so - that - - height continues to be monitonically increasing even as the - RevisionHeight - - gets reset - description: >- - QueryConnectionsResponse is the response type for the Query/Connections - RPC - - method. - ibc.core.connection.v1.State: - type: string - enum: - - STATE_UNINITIALIZED_UNSPECIFIED - - STATE_INIT - - STATE_TRYOPEN - - STATE_OPEN - default: STATE_UNINITIALIZED_UNSPECIFIED - description: |- - State defines if a connection is in one of the following states: - INIT, TRYOPEN, OPEN or UNINITIALIZED. - - - STATE_UNINITIALIZED_UNSPECIFIED: Default State - - STATE_INIT: A connection end has just started the opening handshake. - - STATE_TRYOPEN: A connection end has acknowledged the handshake step on the counterparty - chain. - - STATE_OPEN: A connection end has completed the handshake. - ibc.core.connection.v1.Version: + title: pool burning rate + burn_coin: + type: string + title: burn coin + market_fee: + type: string + title: 'market_fee (parameter / 10000), 9999 representing as 99.99%' + description: QueryParamsResponse is response type for the Query/Params RPC method. + pendulumlabs.market.market.QueryQuoteResponse: type: object properties: - identifier: + denom: type: string - title: unique version identifier - features: - type: array - items: - type: string - title: list of features compatible with the specified identifier - description: |- - Version defines the versioning scheme used to negotiate the IBC verison in - the connection handshake. - market.market.Params: + amount: + type: string + pendulumlabs.market.market.QueryVolumeResponse: type: object - description: Params defines the parameters for the module. - market.market.QueryParamsResponse: + properties: + amount: + type: string + pendulumlabs.market.market.Volume: type: object properties: - params: - description: params holds all the parameters of this module. 
- type: object - description: QueryParamsResponse is response type for the Query/Params RPC method. + denom: + type: string + amount: + type: string tendermint.spn.monitoringp.ConnectionChannelID: type: object properties: diff --git a/go.mod b/go.mod index ec5e39c6..a15a6412 100644 --- a/go.mod +++ b/go.mod @@ -10,9 +10,9 @@ require ( github.com/gorilla/mux v1.8.0 github.com/grpc-ecosystem/grpc-gateway v1.16.0 github.com/ignite/cli v0.23.0 - github.com/spf13/cast v1.5.0 - github.com/spf13/cobra v1.6.1 - github.com/stretchr/testify v1.8.1 + github.com/spf13/cast v1.5.1 + github.com/spf13/cobra v1.7.0 + github.com/stretchr/testify v1.8.3 github.com/tendermint/spn v0.2.1-0.20220708132853-26a17f03c072 github.com/tendermint/tendermint v0.34.27 github.com/tendermint/tm-db v0.6.7 @@ -111,7 +111,7 @@ require ( github.com/iancoleman/strcase v0.2.0 // indirect github.com/imdario/mergo v0.3.13 // indirect github.com/improbable-eng/grpc-web v0.14.1 // indirect - github.com/inconshreveable/mousetrap v1.0.1 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect github.com/jmhodges/levigo v1.0.0 // indirect github.com/jpillora/ansi v1.0.2 // indirect @@ -127,6 +127,7 @@ require ( github.com/libp2p/go-buffer-pool v0.1.0 // indirect github.com/linxGnu/grocksdb v1.7.10 // indirect github.com/magiconair/properties v1.8.6 // indirect + github.com/matryer/is v1.4.0 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.16 // indirect github.com/mattn/go-zglob v0.0.3 // indirect @@ -137,6 +138,8 @@ require ( github.com/moby/sys/mount v0.3.1 // indirect github.com/moby/sys/mountinfo v0.6.0 // indirect github.com/mtibben/percent v0.2.1 // indirect + github.com/onsi/ginkgo v1.16.4 // indirect + github.com/onsi/gomega v1.20.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.0-rc2 // indirect 
github.com/opencontainers/runc v1.1.3 // indirect diff --git a/go.sum b/go.sum index 81aed7e5..3fa8a093 100644 --- a/go.sum +++ b/go.sum @@ -539,7 +539,7 @@ github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8 github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= -github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= +github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= @@ -625,6 +625,7 @@ github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5Nq github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee h1:s+21KNqlpePfkah2I+gwHF8xmJWRjooY+5248k6m4A0= github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= github.com/gobwas/pool v0.2.0 h1:QEmUOlnSjWtnpRGHF3SauEiOsy82Cup83Vf2LcMlnc8= @@ -845,8 +846,8 @@ github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/C github.com/improbable-eng/grpc-web v0.14.1 h1:NrN4PY71A6tAz2sKDvC5JCauENWp0ykG8Oq1H3cpFvw= 
github.com/improbable-eng/grpc-web v0.14.1/go.mod h1:zEjGHa8DAlkoOXmswrNvhUGEYQA9UI7DhrGeHR1DMGU= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc= -github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7mYlOw2LqY= github.com/influxdata/influxdb v1.8.3/go.mod h1:JugdFhsvvI8gadxOI6noqNeeBHvWNTbfYGtiAn+2jhI= github.com/influxdata/influxdb-client-go/v2 v2.4.0/go.mod h1:vLNHdxTJkIf2mSLvGrpj8TCcISApPoXkaxP8g9uRlW8= @@ -987,8 +988,9 @@ github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= -github.com/matryer/is v1.2.0 h1:92UTHpy8CDwaJ08GqLDzhhuixiBUUD1p3AU6PHddz4A= github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA= +github.com/matryer/is v1.4.0 h1:sosSmIWwkYITGrxZ25ULNDeKiMNzFSr4V/eqBQP0PeE= +github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU= github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= @@ -1093,8 +1095,9 @@ github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OS 
github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= github.com/neilotoole/errgroup v0.1.6/go.mod h1:Q2nLGf+594h0CLBs/Mbg6qOr7GtqDK7C2S41udRnToE= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= @@ -1111,8 +1114,9 @@ github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+ github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0= -github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= @@ -1122,8 +1126,9 @@ github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1Cpa 
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.10.3 h1:gph6h/qe9GSUw1NhH1gp+qb+h8rXD8Cy60Z32Qw3ELA= github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= +github.com/onsi/gomega v1.20.0 h1:8W0cWlwFkflGPLltQvLRB7ZVD5HuP6ng320w2IS245Q= +github.com/onsi/gomega v1.20.0/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= @@ -1340,14 +1345,14 @@ github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTd github.com/spf13/afero v1.9.2 h1:j49Hj62F0n+DaZ1dDCvhABaPNSGNkt32oRFxI33IEMw= github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= -github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= +github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= +github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v1.0.0/go.mod 
h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= -github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA= -github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= +github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= @@ -1372,7 +1377,6 @@ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= @@ -1384,8 +1388,8 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.3 
h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY= +github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/subosito/gotenv v1.4.1 h1:jyEFiXpy21Wm81FBN71l9VoMMV8H8jG+qIK3GCpY6Qs= github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= @@ -1765,6 +1769,7 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1892,6 +1897,7 @@ golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= diff --git a/proto/market/burnings.proto b/proto/market/burnings.proto index 62dfda77..886bd8ce 100644 --- a/proto/market/burnings.proto +++ b/proto/market/burnings.proto @@ -3,7 +3,7 @@ package pendulumlabs.market.market; import "gogoproto/gogo.proto"; -option go_package = "github.com/pendulum-labs/market/x/market/types"; +option go_package = "market/x/market/types"; option (gogoproto.goproto_stringer_all) = true; option (gogoproto.goproto_getters_all) = false; diff --git a/proto/market/drop.proto b/proto/market/drop.proto index d60b0176..5d4205cc 100644 --- a/proto/market/drop.proto +++ b/proto/market/drop.proto @@ -3,7 +3,7 @@ package pendulumlabs.market.market; import "gogoproto/gogo.proto"; -option go_package = "github.com/pendulum-labs/market/x/market/types"; +option go_package = "market/x/market/types"; option (gogoproto.goproto_stringer_all) = true; option (gogoproto.goproto_getters_all) = false; diff --git a/proto/market/genesis.proto b/proto/market/genesis.proto index 7e91b625..330aa058 100644 --- a/proto/market/genesis.proto +++ b/proto/market/genesis.proto @@ -10,7 +10,7 @@ import "market/burnings.proto"; import "market/order.proto"; // this line is used by starport scaffolding # genesis/proto/import -option go_package = "github.com/pendulum-labs/market/x/market/types"; +option go_package = "market/x/market/types"; // GenesisState defines the market module's genesis state. 
message GenesisState { diff --git a/proto/market/member.proto b/proto/market/member.proto index bd8462d7..acc84648 100644 --- a/proto/market/member.proto +++ b/proto/market/member.proto @@ -3,7 +3,7 @@ package pendulumlabs.market.market; import "gogoproto/gogo.proto"; -option go_package = "github.com/pendulum-labs/market/x/market/types"; +option go_package = "market/x/market/types"; option (gogoproto.goproto_stringer_all) = true; option (gogoproto.goproto_getters_all) = false; diff --git a/proto/market/order.proto b/proto/market/order.proto index 942bedbe..851f1e77 100644 --- a/proto/market/order.proto +++ b/proto/market/order.proto @@ -3,7 +3,7 @@ package pendulumlabs.market.market; import "gogoproto/gogo.proto"; -option go_package = "github.com/pendulum-labs/market/x/market/types"; +option go_package = "market/x/market/types"; option (gogoproto.goproto_stringer_all) = true; option (gogoproto.goproto_getters_all) = false; diff --git a/proto/market/params.proto b/proto/market/params.proto index 2337171a..a87963fa 100644 --- a/proto/market/params.proto +++ b/proto/market/params.proto @@ -3,7 +3,7 @@ package pendulumlabs.market.market; import "gogoproto/gogo.proto"; -option go_package = "github.com/pendulum-labs/market/x/market/types"; +option go_package = "market/x/market/types"; // Params defines the parameters for the module. 
message Params { diff --git a/proto/market/pool.proto b/proto/market/pool.proto index 8c828462..a7cb8012 100644 --- a/proto/market/pool.proto +++ b/proto/market/pool.proto @@ -3,7 +3,7 @@ package pendulumlabs.market.market; import "gogoproto/gogo.proto"; -option go_package = "github.com/pendulum-labs/market/x/market/types"; +option go_package = "market/x/market/types"; option (gogoproto.goproto_stringer_all) = true; option (gogoproto.goproto_getters_all) = false; diff --git a/proto/market/query.proto b/proto/market/query.proto index 2040a183..d22fb8a1 100644 --- a/proto/market/query.proto +++ b/proto/market/query.proto @@ -12,7 +12,7 @@ import "market/burnings.proto"; import "market/order.proto"; // this line is used by starport scaffolding # 1 -option go_package = "github.com/pendulum-labs/market/x/market/types"; +option go_package = "market/x/market/types"; // Query defines the gRPC querier service. service Query { diff --git a/proto/market/tx.proto b/proto/market/tx.proto index b78563d1..ac0691ec 100644 --- a/proto/market/tx.proto +++ b/proto/market/tx.proto @@ -3,7 +3,7 @@ package pendulumlabs.market.market; // this line is used by starport scaffolding # proto/tx/import -option go_package = "github.com/pendulum-labs/market/x/market/types"; +option go_package = "market/x/market/types"; // Msg defines the Msg service. 
service Msg { diff --git a/testutil/keeper/market.go b/testutil/keeper/market.go deleted file mode 100644 index 235bfc89..00000000 --- a/testutil/keeper/market.go +++ /dev/null @@ -1,52 +0,0 @@ -package keeper - -import ( - "testing" - - "github.com/cosmos/cosmos-sdk/codec" - codectypes "github.com/cosmos/cosmos-sdk/codec/types" - "github.com/cosmos/cosmos-sdk/store" - storetypes "github.com/cosmos/cosmos-sdk/store/types" - sdk "github.com/cosmos/cosmos-sdk/types" - typesparams "github.com/cosmos/cosmos-sdk/x/params/types" - "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/libs/log" - tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - tmdb "github.com/tendermint/tm-db" - "market/x/market/keeper" - "market/x/market/types" -) - -func MarketKeeper(t testing.TB) (*keeper.Keeper, sdk.Context) { - storeKey := sdk.NewKVStoreKey(types.StoreKey) - memStoreKey := storetypes.NewMemoryStoreKey(types.MemStoreKey) - - db := tmdb.NewMemDB() - stateStore := store.NewCommitMultiStore(db) - stateStore.MountStoreWithDB(storeKey, sdk.StoreTypeIAVL, db) - stateStore.MountStoreWithDB(memStoreKey, sdk.StoreTypeMemory, nil) - require.NoError(t, stateStore.LoadLatestVersion()) - - registry := codectypes.NewInterfaceRegistry() - cdc := codec.NewProtoCodec(registry) - - paramsSubspace := typesparams.NewSubspace(cdc, - types.Amino, - storeKey, - memStoreKey, - "MarketParams", - ) - k := keeper.NewKeeper( - cdc, - storeKey, - memStoreKey, - paramsSubspace, - ) - - ctx := sdk.NewContext(stateStore, tmproto.Header{}, false, log.NewNopLogger()) - - // Initialize params - k.SetParams(ctx, types.DefaultParams()) - - return k, ctx -} diff --git a/testutil/keeper/test_common.go b/testutil/keeper/test_common.go new file mode 100644 index 00000000..dc3f8026 --- /dev/null +++ b/testutil/keeper/test_common.go @@ -0,0 +1,235 @@ +package keeper + +import ( + "fmt" + "testing" + "time" + + "github.com/cosmos/cosmos-sdk/codec" + codectypes 
"github.com/cosmos/cosmos-sdk/codec/types" + ccodec "github.com/cosmos/cosmos-sdk/crypto/codec" + "github.com/cosmos/cosmos-sdk/std" + "github.com/cosmos/cosmos-sdk/store" + storetypes "github.com/cosmos/cosmos-sdk/store/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/module" + "github.com/cosmos/cosmos-sdk/x/auth" + authkeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper" + authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" + "github.com/cosmos/cosmos-sdk/x/bank" + bankkeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper" + banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" + "github.com/cosmos/cosmos-sdk/x/mint" + mintkeeper "github.com/cosmos/cosmos-sdk/x/mint/keeper" + minttypes "github.com/cosmos/cosmos-sdk/x/mint/types" + "github.com/cosmos/cosmos-sdk/x/params" + paramskeeper "github.com/cosmos/cosmos-sdk/x/params/keeper" + paramstypes "github.com/cosmos/cosmos-sdk/x/params/types" + stakingkeeper "github.com/cosmos/cosmos-sdk/x/staking/keeper" + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" + + "market/x/market/keeper" + markettypes "market/x/market/types" + + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/libs/log" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + tmversion "github.com/tendermint/tendermint/proto/tendermint/version" + tmdb "github.com/tendermint/tm-db" +) + +var ( + // ModuleBasics is a mock module basic manager for testing + ModuleBasics = module.NewBasicManager( + auth.AppModuleBasic{}, + bank.AppModuleBasic{}, + mint.AppModuleBasic{}, + params.AppModuleBasic{}, + ) +) + +// TestInput stores the various keepers required to test the exchange +type TestInput struct { + AccountKeeper authkeeper.AccountKeeper + BankKeeper bankkeeper.BaseKeeper + MintKeeper mintkeeper.Keeper + Context sdk.Context + Marshaler codec.Codec + MarketKeeper *keeper.Keeper + LegacyAmino *codec.LegacyAmino +} + +// MakeTestLegacyCodec creates a legacy codec for use 
in testing +func MakeTestLegacyCodec() *codec.LegacyAmino { + var cdc = codec.NewLegacyAmino() + auth.AppModuleBasic{}.RegisterLegacyAminoCodec(cdc) + bank.AppModuleBasic{}.RegisterLegacyAminoCodec(cdc) + + sdk.RegisterLegacyAminoCodec(cdc) + ccodec.RegisterCrypto(cdc) + params.AppModuleBasic{}.RegisterLegacyAminoCodec(cdc) + markettypes.RegisterCodec(cdc) + return cdc +} + +// MakeTestCodec creates a proto codec for use in testing +func MakeTestCodec() codec.Codec { + interfaceRegistry := codectypes.NewInterfaceRegistry() + std.RegisterInterfaces(interfaceRegistry) + ModuleBasics.RegisterInterfaces(interfaceRegistry) + markettypes.RegisterInterfaces(interfaceRegistry) + return codec.NewProtoCodec(interfaceRegistry) +} + +func CreateTestEnvironment(t testing.TB) TestInput { + //poolKey := sdk.NewKVStoreKey(markettypes.PoolKeyPrefix) + storeKey := sdk.NewKVStoreKey(markettypes.StoreKey) + keyAuth := sdk.NewKVStoreKey(authtypes.StoreKey) + keyBank := sdk.NewKVStoreKey(banktypes.StoreKey) + keyStake := sdk.NewKVStoreKey(stakingtypes.StoreKey) + keyMint := sdk.NewKVStoreKey(minttypes.StoreKey) + keyParams := sdk.NewKVStoreKey(paramstypes.StoreKey) + memStoreKey := storetypes.NewMemoryStoreKey(markettypes.MemStoreKey) + tkeyParams := sdk.NewTransientStoreKey(paramstypes.TStoreKey) + + db := tmdb.NewMemDB() + + stateStore := store.NewCommitMultiStore(db) + + //stateStore.MountStoreWithDB(poolKey, sdk.StoreTypeIAVL, db) + stateStore.MountStoreWithDB(storeKey, sdk.StoreTypeIAVL, db) + stateStore.MountStoreWithDB(keyAuth, sdk.StoreTypeIAVL, db) + stateStore.MountStoreWithDB(keyBank, sdk.StoreTypeIAVL, db) + stateStore.MountStoreWithDB(keyStake, sdk.StoreTypeIAVL, db) + stateStore.MountStoreWithDB(keyMint, sdk.StoreTypeIAVL, db) + stateStore.MountStoreWithDB(keyParams, sdk.StoreTypeIAVL, db) + stateStore.MountStoreWithDB(tkeyParams, sdk.StoreTypeTransient, db) + stateStore.MountStoreWithDB(memStoreKey, sdk.StoreTypeMemory, nil) + require.NoError(t, 
stateStore.LoadLatestVersion()) + + //ctx := sdk.NewContext(stateStore, tmproto.Header{}, false, log.NewNopLogger()) + ctx := sdk.NewContext(stateStore, tmproto.Header{ + Version: tmversion.Consensus{ + Block: 0, + App: 0, + }, + ChainID: "", + Height: 1234567, + Time: time.Date(2020, time.April, 22, 12, 0, 0, 0, time.UTC), + LastBlockId: tmproto.BlockID{ + Hash: []byte{}, + PartSetHeader: tmproto.PartSetHeader{ + Total: 0, + Hash: []byte{}, + }, + }, + LastCommitHash: []byte{}, + DataHash: []byte{}, + ValidatorsHash: []byte{}, + NextValidatorsHash: []byte{}, + ConsensusHash: []byte{}, + AppHash: []byte{}, + LastResultsHash: []byte{}, + EvidenceHash: []byte{}, + ProposerAddress: []byte{}, + }, false, log.TestingLogger()) + + cdc := MakeTestCodec() + legacyCodec := MakeTestLegacyCodec() + + paramsKeeper := paramskeeper.NewKeeper(cdc, legacyCodec, keyParams, tkeyParams) + paramsKeeper.Subspace(authtypes.ModuleName) + paramsKeeper.Subspace(banktypes.ModuleName) + paramsKeeper.Subspace(stakingtypes.ModuleName) + paramsKeeper.Subspace(minttypes.ModuleName) + paramsKeeper.Subspace(markettypes.ModuleName) + + paramsSubspace := paramstypes.NewSubspace(cdc, + markettypes.Amino, + storeKey, + memStoreKey, + "MarketParams", + ) + // this is also used to initialize module accounts for all the map keys + maccPerms := map[string][]string{ + markettypes.ModuleName: {authtypes.Minter, authtypes.Burner}, + authtypes.FeeCollectorName: nil, + minttypes.ModuleName: {authtypes.Minter}, + stakingtypes.BondedPoolName: {authtypes.Burner, authtypes.Staking}, + stakingtypes.NotBondedPoolName: {authtypes.Burner, authtypes.Staking}, + } + + accountKeeper := authkeeper.NewAccountKeeper( + cdc, + keyAuth, // target store + getSubspace(paramsKeeper, authtypes.ModuleName), + authtypes.ProtoBaseAccount, // prototype + maccPerms, + ) + + blockedAddr := make(map[string]bool, len(maccPerms)) + for acc := range maccPerms { + blockedAddr[authtypes.NewModuleAddress(acc).String()] = true + } + bankKeeper 
:= bankkeeper.NewBaseKeeper( + cdc, + keyBank, + accountKeeper, + getSubspace(paramsKeeper, banktypes.ModuleName), + blockedAddr, + ) + bankKeeper.SetParams(ctx, banktypes.Params{ + SendEnabled: []*banktypes.SendEnabled{}, + DefaultSendEnabled: true, + }) + stakingKeeper := stakingkeeper.NewKeeper( + cdc, keyStake, accountKeeper, bankKeeper, getSubspace(paramsKeeper, stakingtypes.ModuleName), + ) + mintKeeper := mintkeeper.NewKeeper( + cdc, keyMint, getSubspace(paramsKeeper, minttypes.ModuleName), &stakingKeeper, + accountKeeper, bankKeeper, authtypes.FeeCollectorName, + ) + marketKeeper := keeper.NewKeeper( + cdc, + storeKey, + memStoreKey, + paramsSubspace, + bankKeeper, + ) + // Initialize params + //marketKeeper.setID + marketKeeper.SetParams(ctx, markettypes.DefaultParams()) + + return TestInput{ + AccountKeeper: accountKeeper, + BankKeeper: bankKeeper, + MintKeeper: mintKeeper, + Context: ctx, + Marshaler: cdc, + LegacyAmino: legacyCodec, + MarketKeeper: marketKeeper, + } +} + +// getSubspace returns a param subspace for a given module name. 
+func getSubspace(k paramskeeper.Keeper, moduleName string) paramstypes.Subspace { + subspace, _ := k.GetSubspace(moduleName) + return subspace +} + +// Returns an amount postfixed by `denom` that represents approximately +// the max amount in a single action that we want to support with the market module +func MaxSupportedCoin(denom string) string { + // 2^(128 - 16) - 1 + return fmt.Sprintf("5192296858534827628530496329220095%s", denom) +} + +// Equal to `MaxSupportedCoin` squared +func MaxSupportedDrop(denom string) string { + return fmt.Sprintf("26959946667150639794667015087019620289043427352885315420110951809025%s", denom) +} + +// This is for funding an account capable of `MaxSupportedCoin` +func FundMaxSupported(denom string) string { + return fmt.Sprintf("5192296858534827628530496329220095000000%s", denom) +} diff --git a/testutil/sample/sample.go b/testutil/sample/sample.go index 98f2153e..c2d85bd8 100644 --- a/testutil/sample/sample.go +++ b/testutil/sample/sample.go @@ -11,3 +11,26 @@ func AccAddress() string { addr := pk.Address() return sdk.AccAddress(addr).String() } + +// SampleCoins returns the required NewCoins +func SampleCoins(coina string, coinb string) (Coins sdk.Coins, err error) { + + coinA, err := sdk.ParseCoinNormalized(coina) + if err != nil { + return sdk.Coins{}, err + } + + coinB, err := sdk.ParseCoinNormalized(coinb) + if err != nil { + return sdk.Coins{}, err + } + + return sdk.NewCoins(coinA, coinB), nil +} + +// SampleDenoms returns the required denoms values +func SampleDenoms(coins sdk.Coins) (denomA string, denomB string) { + denom1 := coins.GetDenomByIndex(0) + denom2 := coins.GetDenomByIndex(1) + return denom1, denom2 +} diff --git a/vue/src/store/generated/index.ts b/vue/src/store/generated/index.ts index 9f316395..04904c31 100644 --- a/vue/src/store/generated/index.ts +++ b/vue/src/store/generated/index.ts @@ -22,6 +22,7 @@ import CosmosIbcGoIbcCoreConnectionV1 from './cosmos/ibc-go/ibc.core.connection. 
import CosmosIbcGoIbcCorePortV1 from './cosmos/ibc-go/ibc.core.port.v1' import MarketMarketMarket from './market/market.market' import MarketMarketPortal from './market/market.portal' +import MarketPendulumlabsMarketMarket from './market/pendulumlabs.market.market' export default { @@ -47,6 +48,7 @@ export default { CosmosIbcGoIbcCorePortV1: load(CosmosIbcGoIbcCorePortV1, 'ibc.core.port.v1'), MarketMarketMarket: load(MarketMarketMarket, 'market.market'), MarketMarketPortal: load(MarketMarketPortal, 'market.portal'), + MarketPendulumlabsMarketMarket: load(MarketPendulumlabsMarketMarket, 'pendulumlabs.market.market'), } diff --git a/vue/src/store/generated/market/pendulumlabs.market.market/index.ts b/vue/src/store/generated/market/pendulumlabs.market.market/index.ts new file mode 100755 index 00000000..b29d6302 --- /dev/null +++ b/vue/src/store/generated/market/pendulumlabs.market.market/index.ts @@ -0,0 +1,1087 @@ +import { txClient, queryClient, MissingWalletError , registry} from './module' + +import { Burnings } from "./module/types/market/burnings" +import { Burned } from "./module/types/market/burnings" +import { Drop } from "./module/types/market/drop" +import { Drops } from "./module/types/market/drop" +import { DropPairs } from "./module/types/market/drop" +import { Member } from "./module/types/market/member" +import { Order } from "./module/types/market/order" +import { Orders } from "./module/types/market/order" +import { OrderResponse } from "./module/types/market/order" +import { Params } from "./module/types/market/params" +import { Pool } from "./module/types/market/pool" +import { Leader } from "./module/types/market/pool" +import { Volume } from "./module/types/market/pool" +import { QueryDropOwnerPairSumRequest } from "./module/types/market/query" +import { QueryDropOwnerPairSumResponse } from "./module/types/market/query" +import { QueryDropOwnerPairUidsRequest } from "./module/types/market/query" +import { QueryUidsResponse } from 
"./module/types/market/query" +import { QueryDropOwnerPairDetailRequest } from "./module/types/market/query" +import { QueryOrderOwnerPairRequest } from "./module/types/market/query" +import { QueryOrderOwnerPairResponse } from "./module/types/market/query" + + +export { Burnings, Burned, Drop, Drops, DropPairs, Member, Order, Orders, OrderResponse, Params, Pool, Leader, Volume, QueryDropOwnerPairSumRequest, QueryDropOwnerPairSumResponse, QueryDropOwnerPairUidsRequest, QueryUidsResponse, QueryDropOwnerPairDetailRequest, QueryOrderOwnerPairRequest, QueryOrderOwnerPairResponse }; + +async function initTxClient(vuexGetters) { + return await txClient(vuexGetters['common/wallet/signer'], { + addr: vuexGetters['common/env/apiTendermint'] + }) +} + +async function initQueryClient(vuexGetters) { + return await queryClient({ + addr: vuexGetters['common/env/apiCosmos'] + }) +} + +function mergeResults(value, next_values) { + for (let prop of Object.keys(next_values)) { + if (Array.isArray(next_values[prop])) { + value[prop]=[...value[prop], ...next_values[prop]] + }else{ + value[prop]=next_values[prop] + } + } + return value +} + +function getStructure(template) { + let structure = { fields: [] } + for (const [key, value] of Object.entries(template)) { + let field: any = {} + field.name = key + field.type = typeof value + structure.fields.push(field) + } + return structure +} + +const getDefaultState = () => { + return { + Params: {}, + Burned: {}, + Pool: {}, + PoolAll: {}, + Volume: {}, + VolumeAll: {}, + Drop: {}, + DropAmounts: {}, + DropCoin: {}, + DropsToCoins: {}, + DropPairs: {}, + DropOwnerPair: {}, + DropAll: {}, + Member: {}, + MemberAll: {}, + Burnings: {}, + BurningsAll: {}, + Order: {}, + OrderAll: {}, + OrderOwner: {}, + OrderOwnerUids: {}, + Book: {}, + Bookends: {}, + History: {}, + Quote: {}, + + _Structure: { + Burnings: getStructure(Burnings.fromPartial({})), + Burned: getStructure(Burned.fromPartial({})), + Drop: getStructure(Drop.fromPartial({})), + 
Drops: getStructure(Drops.fromPartial({})), + DropPairs: getStructure(DropPairs.fromPartial({})), + Member: getStructure(Member.fromPartial({})), + Order: getStructure(Order.fromPartial({})), + Orders: getStructure(Orders.fromPartial({})), + OrderResponse: getStructure(OrderResponse.fromPartial({})), + Params: getStructure(Params.fromPartial({})), + Pool: getStructure(Pool.fromPartial({})), + Leader: getStructure(Leader.fromPartial({})), + Volume: getStructure(Volume.fromPartial({})), + QueryDropOwnerPairSumRequest: getStructure(QueryDropOwnerPairSumRequest.fromPartial({})), + QueryDropOwnerPairSumResponse: getStructure(QueryDropOwnerPairSumResponse.fromPartial({})), + QueryDropOwnerPairUidsRequest: getStructure(QueryDropOwnerPairUidsRequest.fromPartial({})), + QueryUidsResponse: getStructure(QueryUidsResponse.fromPartial({})), + QueryDropOwnerPairDetailRequest: getStructure(QueryDropOwnerPairDetailRequest.fromPartial({})), + QueryOrderOwnerPairRequest: getStructure(QueryOrderOwnerPairRequest.fromPartial({})), + QueryOrderOwnerPairResponse: getStructure(QueryOrderOwnerPairResponse.fromPartial({})), + + }, + _Registry: registry, + _Subscriptions: new Set(), + } +} + +// initial state +const state = getDefaultState() + +export default { + namespaced: true, + state, + mutations: { + RESET_STATE(state) { + Object.assign(state, getDefaultState()) + }, + QUERY(state, { query, key, value }) { + state[query][JSON.stringify(key)] = value + }, + SUBSCRIBE(state, subscription) { + state._Subscriptions.add(JSON.stringify(subscription)) + }, + UNSUBSCRIBE(state, subscription) { + state._Subscriptions.delete(JSON.stringify(subscription)) + } + }, + getters: { + getParams: (state) => (params = { params: {}}) => { + if (!( params).query) { + ( params).query=null + } + return state.Params[JSON.stringify(params)] ?? {} + }, + getBurned: (state) => (params = { params: {}}) => { + if (!( params).query) { + ( params).query=null + } + return state.Burned[JSON.stringify(params)] ?? 
{} + }, + getPool: (state) => (params = { params: {}}) => { + if (!( params).query) { + ( params).query=null + } + return state.Pool[JSON.stringify(params)] ?? {} + }, + getPoolAll: (state) => (params = { params: {}}) => { + if (!( params).query) { + ( params).query=null + } + return state.PoolAll[JSON.stringify(params)] ?? {} + }, + getVolume: (state) => (params = { params: {}}) => { + if (!( params).query) { + ( params).query=null + } + return state.Volume[JSON.stringify(params)] ?? {} + }, + getVolumeAll: (state) => (params = { params: {}}) => { + if (!( params).query) { + ( params).query=null + } + return state.VolumeAll[JSON.stringify(params)] ?? {} + }, + getDrop: (state) => (params = { params: {}}) => { + if (!( params).query) { + ( params).query=null + } + return state.Drop[JSON.stringify(params)] ?? {} + }, + getDropAmounts: (state) => (params = { params: {}}) => { + if (!( params).query) { + ( params).query=null + } + return state.DropAmounts[JSON.stringify(params)] ?? {} + }, + getDropCoin: (state) => (params = { params: {}}) => { + if (!( params).query) { + ( params).query=null + } + return state.DropCoin[JSON.stringify(params)] ?? {} + }, + getDropsToCoins: (state) => (params = { params: {}}) => { + if (!( params).query) { + ( params).query=null + } + return state.DropsToCoins[JSON.stringify(params)] ?? {} + }, + getDropPairs: (state) => (params = { params: {}}) => { + if (!( params).query) { + ( params).query=null + } + return state.DropPairs[JSON.stringify(params)] ?? {} + }, + getDropOwnerPair: (state) => (params = { params: {}}) => { + if (!( params).query) { + ( params).query=null + } + return state.DropOwnerPair[JSON.stringify(params)] ?? {} + }, + getDropAll: (state) => (params = { params: {}}) => { + if (!( params).query) { + ( params).query=null + } + return state.DropAll[JSON.stringify(params)] ?? 
{} + }, + getMember: (state) => (params = { params: {}}) => { + if (!( params).query) { + ( params).query=null + } + return state.Member[JSON.stringify(params)] ?? {} + }, + getMemberAll: (state) => (params = { params: {}}) => { + if (!( params).query) { + ( params).query=null + } + return state.MemberAll[JSON.stringify(params)] ?? {} + }, + getBurnings: (state) => (params = { params: {}}) => { + if (!( params).query) { + ( params).query=null + } + return state.Burnings[JSON.stringify(params)] ?? {} + }, + getBurningsAll: (state) => (params = { params: {}}) => { + if (!( params).query) { + ( params).query=null + } + return state.BurningsAll[JSON.stringify(params)] ?? {} + }, + getOrder: (state) => (params = { params: {}}) => { + if (!( params).query) { + ( params).query=null + } + return state.Order[JSON.stringify(params)] ?? {} + }, + getOrderAll: (state) => (params = { params: {}}) => { + if (!( params).query) { + ( params).query=null + } + return state.OrderAll[JSON.stringify(params)] ?? {} + }, + getOrderOwner: (state) => (params = { params: {}}) => { + if (!( params).query) { + ( params).query=null + } + return state.OrderOwner[JSON.stringify(params)] ?? {} + }, + getOrderOwnerUids: (state) => (params = { params: {}}) => { + if (!( params).query) { + ( params).query=null + } + return state.OrderOwnerUids[JSON.stringify(params)] ?? {} + }, + getBook: (state) => (params = { params: {}}) => { + if (!( params).query) { + ( params).query=null + } + return state.Book[JSON.stringify(params)] ?? {} + }, + getBookends: (state) => (params = { params: {}}) => { + if (!( params).query) { + ( params).query=null + } + return state.Bookends[JSON.stringify(params)] ?? {} + }, + getHistory: (state) => (params = { params: {}}) => { + if (!( params).query) { + ( params).query=null + } + return state.History[JSON.stringify(params)] ?? 
{} + }, + getQuote: (state) => (params = { params: {}}) => { + if (!( params).query) { + ( params).query=null + } + return state.Quote[JSON.stringify(params)] ?? {} + }, + + getTypeStructure: (state) => (type) => { + return state._Structure[type].fields + }, + getRegistry: (state) => { + return state._Registry + } + }, + actions: { + init({ dispatch, rootGetters }) { + console.log('Vuex module: pendulumlabs.market.market initialized!') + if (rootGetters['common/env/client']) { + rootGetters['common/env/client'].on('newblock', () => { + dispatch('StoreUpdate') + }) + } + }, + resetState({ commit }) { + commit('RESET_STATE') + }, + unsubscribe({ commit }, subscription) { + commit('UNSUBSCRIBE', subscription) + }, + async StoreUpdate({ state, dispatch }) { + state._Subscriptions.forEach(async (subscription) => { + try { + const sub=JSON.parse(subscription) + await dispatch(sub.action, sub.payload) + }catch(e) { + throw new Error('Subscriptions: ' + e.message) + } + }) + }, + + + + + + + async QueryParams({ commit, rootGetters, getters }, { options: { subscribe, all} = { subscribe:false, all:false}, params, query=null }) { + try { + const key = params ?? {}; + const queryClient=await initQueryClient(rootGetters) + let value= (await queryClient.queryParams()).data + + + commit('QUERY', { query: 'Params', key: { params: {...key}, query}, value }) + if (subscribe) commit('SUBSCRIBE', { action: 'QueryParams', payload: { options: { all }, params: {...key},query }}) + return getters['getParams']( { params: {...key}, query}) ?? {} + } catch (e) { + throw new Error('QueryClient:QueryParams API Node Unavailable. Could not perform query: ' + e.message) + + } + }, + + + + + + + + async QueryBurned({ commit, rootGetters, getters }, { options: { subscribe, all} = { subscribe:false, all:false}, params, query=null }) { + try { + const key = params ?? 
{}; + const queryClient=await initQueryClient(rootGetters) + let value= (await queryClient.queryBurned()).data + + + commit('QUERY', { query: 'Burned', key: { params: {...key}, query}, value }) + if (subscribe) commit('SUBSCRIBE', { action: 'QueryBurned', payload: { options: { all }, params: {...key},query }}) + return getters['getBurned']( { params: {...key}, query}) ?? {} + } catch (e) { + throw new Error('QueryClient:QueryBurned API Node Unavailable. Could not perform query: ' + e.message) + + } + }, + + + + + + + + async QueryPool({ commit, rootGetters, getters }, { options: { subscribe, all} = { subscribe:false, all:false}, params, query=null }) { + try { + const key = params ?? {}; + const queryClient=await initQueryClient(rootGetters) + let value= (await queryClient.queryPool( key.pair)).data + + + commit('QUERY', { query: 'Pool', key: { params: {...key}, query}, value }) + if (subscribe) commit('SUBSCRIBE', { action: 'QueryPool', payload: { options: { all }, params: {...key},query }}) + return getters['getPool']( { params: {...key}, query}) ?? {} + } catch (e) { + throw new Error('QueryClient:QueryPool API Node Unavailable. Could not perform query: ' + e.message) + + } + }, + + + + + + + + async QueryPoolAll({ commit, rootGetters, getters }, { options: { subscribe, all} = { subscribe:false, all:false}, params, query=null }) { + try { + const key = params ?? 
{}; + const queryClient=await initQueryClient(rootGetters) + let value= (await queryClient.queryPoolAll(query)).data + + + while (all && ( value).pagination && ( value).pagination.next_key!=null) { + let next_values=(await queryClient.queryPoolAll({...query, 'pagination.key':( value).pagination.next_key})).data + value = mergeResults(value, next_values); + } + commit('QUERY', { query: 'PoolAll', key: { params: {...key}, query}, value }) + if (subscribe) commit('SUBSCRIBE', { action: 'QueryPoolAll', payload: { options: { all }, params: {...key},query }}) + return getters['getPoolAll']( { params: {...key}, query}) ?? {} + } catch (e) { + throw new Error('QueryClient:QueryPoolAll API Node Unavailable. Could not perform query: ' + e.message) + + } + }, + + + + + + + + async QueryVolume({ commit, rootGetters, getters }, { options: { subscribe, all} = { subscribe:false, all:false}, params, query=null }) { + try { + const key = params ?? {}; + const queryClient=await initQueryClient(rootGetters) + let value= (await queryClient.queryVolume( key.denom)).data + + + commit('QUERY', { query: 'Volume', key: { params: {...key}, query}, value }) + if (subscribe) commit('SUBSCRIBE', { action: 'QueryVolume', payload: { options: { all }, params: {...key},query }}) + return getters['getVolume']( { params: {...key}, query}) ?? {} + } catch (e) { + throw new Error('QueryClient:QueryVolume API Node Unavailable. Could not perform query: ' + e.message) + + } + }, + + + + + + + + async QueryVolumeAll({ commit, rootGetters, getters }, { options: { subscribe, all} = { subscribe:false, all:false}, params, query=null }) { + try { + const key = params ?? 
{}; + const queryClient=await initQueryClient(rootGetters) + let value= (await queryClient.queryVolumeAll(query)).data + + + while (all && ( value).pagination && ( value).pagination.next_key!=null) { + let next_values=(await queryClient.queryVolumeAll({...query, 'pagination.key':( value).pagination.next_key})).data + value = mergeResults(value, next_values); + } + commit('QUERY', { query: 'VolumeAll', key: { params: {...key}, query}, value }) + if (subscribe) commit('SUBSCRIBE', { action: 'QueryVolumeAll', payload: { options: { all }, params: {...key},query }}) + return getters['getVolumeAll']( { params: {...key}, query}) ?? {} + } catch (e) { + throw new Error('QueryClient:QueryVolumeAll API Node Unavailable. Could not perform query: ' + e.message) + + } + }, + + + + + + + + async QueryDrop({ commit, rootGetters, getters }, { options: { subscribe, all} = { subscribe:false, all:false}, params, query=null }) { + try { + const key = params ?? {}; + const queryClient=await initQueryClient(rootGetters) + let value= (await queryClient.queryDrop( key.uid)).data + + + commit('QUERY', { query: 'Drop', key: { params: {...key}, query}, value }) + if (subscribe) commit('SUBSCRIBE', { action: 'QueryDrop', payload: { options: { all }, params: {...key},query }}) + return getters['getDrop']( { params: {...key}, query}) ?? {} + } catch (e) { + throw new Error('QueryClient:QueryDrop API Node Unavailable. Could not perform query: ' + e.message) + + } + }, + + + + + + + + async QueryDropAmounts({ commit, rootGetters, getters }, { options: { subscribe, all} = { subscribe:false, all:false}, params, query=null }) { + try { + const key = params ?? 
{}; + const queryClient=await initQueryClient(rootGetters) + let value= (await queryClient.queryDropAmounts( key.uid)).data + + + commit('QUERY', { query: 'DropAmounts', key: { params: {...key}, query}, value }) + if (subscribe) commit('SUBSCRIBE', { action: 'QueryDropAmounts', payload: { options: { all }, params: {...key},query }}) + return getters['getDropAmounts']( { params: {...key}, query}) ?? {} + } catch (e) { + throw new Error('QueryClient:QueryDropAmounts API Node Unavailable. Could not perform query: ' + e.message) + + } + }, + + + + + + + + async QueryDropCoin({ commit, rootGetters, getters }, { options: { subscribe, all} = { subscribe:false, all:false}, params, query=null }) { + try { + const key = params ?? {}; + const queryClient=await initQueryClient(rootGetters) + let value= (await queryClient.queryDropCoin( key.denomA, key.denomB, key.amountA)).data + + + commit('QUERY', { query: 'DropCoin', key: { params: {...key}, query}, value }) + if (subscribe) commit('SUBSCRIBE', { action: 'QueryDropCoin', payload: { options: { all }, params: {...key},query }}) + return getters['getDropCoin']( { params: {...key}, query}) ?? {} + } catch (e) { + throw new Error('QueryClient:QueryDropCoin API Node Unavailable. Could not perform query: ' + e.message) + + } + }, + + + + + + + + async QueryDropsToCoins({ commit, rootGetters, getters }, { options: { subscribe, all} = { subscribe:false, all:false}, params, query=null }) { + try { + const key = params ?? {}; + const queryClient=await initQueryClient(rootGetters) + let value= (await queryClient.queryDropsToCoins( key.pair, key.drops)).data + + + commit('QUERY', { query: 'DropsToCoins', key: { params: {...key}, query}, value }) + if (subscribe) commit('SUBSCRIBE', { action: 'QueryDropsToCoins', payload: { options: { all }, params: {...key},query }}) + return getters['getDropsToCoins']( { params: {...key}, query}) ?? {} + } catch (e) { + throw new Error('QueryClient:QueryDropsToCoins API Node Unavailable. 
Could not perform query: ' + e.message) + + } + }, + + + + + + + + async QueryDropPairs({ commit, rootGetters, getters }, { options: { subscribe, all} = { subscribe:false, all:false}, params, query=null }) { + try { + const key = params ?? {}; + const queryClient=await initQueryClient(rootGetters) + let value= (await queryClient.queryDropPairs( key.address)).data + + + commit('QUERY', { query: 'DropPairs', key: { params: {...key}, query}, value }) + if (subscribe) commit('SUBSCRIBE', { action: 'QueryDropPairs', payload: { options: { all }, params: {...key},query }}) + return getters['getDropPairs']( { params: {...key}, query}) ?? {} + } catch (e) { + throw new Error('QueryClient:QueryDropPairs API Node Unavailable. Could not perform query: ' + e.message) + + } + }, + + + + + + + + async QueryDropOwnerPair({ commit, rootGetters, getters }, { options: { subscribe, all} = { subscribe:false, all:false}, params, query=null }) { + try { + const key = params ?? {}; + const queryClient=await initQueryClient(rootGetters) + let value= (await queryClient.queryDropOwnerPair( key.address, key.pair, query)).data + + + while (all && ( value).pagination && ( value).pagination.next_key!=null) { + let next_values=(await queryClient.queryDropOwnerPair( key.address, key.pair, {...query, 'pagination.key':( value).pagination.next_key})).data + value = mergeResults(value, next_values); + } + commit('QUERY', { query: 'DropOwnerPair', key: { params: {...key}, query}, value }) + if (subscribe) commit('SUBSCRIBE', { action: 'QueryDropOwnerPair', payload: { options: { all }, params: {...key},query }}) + return getters['getDropOwnerPair']( { params: {...key}, query}) ?? {} + } catch (e) { + throw new Error('QueryClient:QueryDropOwnerPair API Node Unavailable. Could not perform query: ' + e.message) + + } + }, + + + + + + + + async QueryDropAll({ commit, rootGetters, getters }, { options: { subscribe, all} = { subscribe:false, all:false}, params, query=null }) { + try { + const key = params ?? 
{}; + const queryClient=await initQueryClient(rootGetters) + let value= (await queryClient.queryDropAll(query)).data + + + while (all && ( value).pagination && ( value).pagination.next_key!=null) { + let next_values=(await queryClient.queryDropAll({...query, 'pagination.key':( value).pagination.next_key})).data + value = mergeResults(value, next_values); + } + commit('QUERY', { query: 'DropAll', key: { params: {...key}, query}, value }) + if (subscribe) commit('SUBSCRIBE', { action: 'QueryDropAll', payload: { options: { all }, params: {...key},query }}) + return getters['getDropAll']( { params: {...key}, query}) ?? {} + } catch (e) { + throw new Error('QueryClient:QueryDropAll API Node Unavailable. Could not perform query: ' + e.message) + + } + }, + + + + + + + + async QueryMember({ commit, rootGetters, getters }, { options: { subscribe, all} = { subscribe:false, all:false}, params, query=null }) { + try { + const key = params ?? {}; + const queryClient=await initQueryClient(rootGetters) + let value= (await queryClient.queryMember( key.denomA, key.denomB)).data + + + commit('QUERY', { query: 'Member', key: { params: {...key}, query}, value }) + if (subscribe) commit('SUBSCRIBE', { action: 'QueryMember', payload: { options: { all }, params: {...key},query }}) + return getters['getMember']( { params: {...key}, query}) ?? {} + } catch (e) { + throw new Error('QueryClient:QueryMember API Node Unavailable. Could not perform query: ' + e.message) + + } + }, + + + + + + + + async QueryMemberAll({ commit, rootGetters, getters }, { options: { subscribe, all} = { subscribe:false, all:false}, params, query=null }) { + try { + const key = params ?? 
{}; + const queryClient=await initQueryClient(rootGetters) + let value= (await queryClient.queryMemberAll(query)).data + + + while (all && ( value).pagination && ( value).pagination.next_key!=null) { + let next_values=(await queryClient.queryMemberAll({...query, 'pagination.key':( value).pagination.next_key})).data + value = mergeResults(value, next_values); + } + commit('QUERY', { query: 'MemberAll', key: { params: {...key}, query}, value }) + if (subscribe) commit('SUBSCRIBE', { action: 'QueryMemberAll', payload: { options: { all }, params: {...key},query }}) + return getters['getMemberAll']( { params: {...key}, query}) ?? {} + } catch (e) { + throw new Error('QueryClient:QueryMemberAll API Node Unavailable. Could not perform query: ' + e.message) + + } + }, + + + + + + + + async QueryBurnings({ commit, rootGetters, getters }, { options: { subscribe, all} = { subscribe:false, all:false}, params, query=null }) { + try { + const key = params ?? {}; + const queryClient=await initQueryClient(rootGetters) + let value= (await queryClient.queryBurnings( key.denom)).data + + + commit('QUERY', { query: 'Burnings', key: { params: {...key}, query}, value }) + if (subscribe) commit('SUBSCRIBE', { action: 'QueryBurnings', payload: { options: { all }, params: {...key},query }}) + return getters['getBurnings']( { params: {...key}, query}) ?? {} + } catch (e) { + throw new Error('QueryClient:QueryBurnings API Node Unavailable. Could not perform query: ' + e.message) + + } + }, + + + + + + + + async QueryBurningsAll({ commit, rootGetters, getters }, { options: { subscribe, all} = { subscribe:false, all:false}, params, query=null }) { + try { + const key = params ?? 
{}; + const queryClient=await initQueryClient(rootGetters) + let value= (await queryClient.queryBurningsAll(query)).data + + + while (all && ( value).pagination && ( value).pagination.next_key!=null) { + let next_values=(await queryClient.queryBurningsAll({...query, 'pagination.key':( value).pagination.next_key})).data + value = mergeResults(value, next_values); + } + commit('QUERY', { query: 'BurningsAll', key: { params: {...key}, query}, value }) + if (subscribe) commit('SUBSCRIBE', { action: 'QueryBurningsAll', payload: { options: { all }, params: {...key},query }}) + return getters['getBurningsAll']( { params: {...key}, query}) ?? {} + } catch (e) { + throw new Error('QueryClient:QueryBurningsAll API Node Unavailable. Could not perform query: ' + e.message) + + } + }, + + + + + + + + async QueryOrder({ commit, rootGetters, getters }, { options: { subscribe, all} = { subscribe:false, all:false}, params, query=null }) { + try { + const key = params ?? {}; + const queryClient=await initQueryClient(rootGetters) + let value= (await queryClient.queryOrder( key.uid)).data + + + commit('QUERY', { query: 'Order', key: { params: {...key}, query}, value }) + if (subscribe) commit('SUBSCRIBE', { action: 'QueryOrder', payload: { options: { all }, params: {...key},query }}) + return getters['getOrder']( { params: {...key}, query}) ?? {} + } catch (e) { + throw new Error('QueryClient:QueryOrder API Node Unavailable. Could not perform query: ' + e.message) + + } + }, + + + + + + + + async QueryOrderAll({ commit, rootGetters, getters }, { options: { subscribe, all} = { subscribe:false, all:false}, params, query=null }) { + try { + const key = params ?? 
{}; + const queryClient=await initQueryClient(rootGetters) + let value= (await queryClient.queryOrderAll(query)).data + + + while (all && ( value).pagination && ( value).pagination.next_key!=null) { + let next_values=(await queryClient.queryOrderAll({...query, 'pagination.key':( value).pagination.next_key})).data + value = mergeResults(value, next_values); + } + commit('QUERY', { query: 'OrderAll', key: { params: {...key}, query}, value }) + if (subscribe) commit('SUBSCRIBE', { action: 'QueryOrderAll', payload: { options: { all }, params: {...key},query }}) + return getters['getOrderAll']( { params: {...key}, query}) ?? {} + } catch (e) { + throw new Error('QueryClient:QueryOrderAll API Node Unavailable. Could not perform query: ' + e.message) + + } + }, + + + + + + + + async QueryOrderOwner({ commit, rootGetters, getters }, { options: { subscribe, all} = { subscribe:false, all:false}, params, query=null }) { + try { + const key = params ?? {}; + const queryClient=await initQueryClient(rootGetters) + let value= (await queryClient.queryOrderOwner( key.address, query)).data + + + while (all && ( value).pagination && ( value).pagination.next_key!=null) { + let next_values=(await queryClient.queryOrderOwner( key.address, {...query, 'pagination.key':( value).pagination.next_key})).data + value = mergeResults(value, next_values); + } + commit('QUERY', { query: 'OrderOwner', key: { params: {...key}, query}, value }) + if (subscribe) commit('SUBSCRIBE', { action: 'QueryOrderOwner', payload: { options: { all }, params: {...key},query }}) + return getters['getOrderOwner']( { params: {...key}, query}) ?? {} + } catch (e) { + throw new Error('QueryClient:QueryOrderOwner API Node Unavailable. Could not perform query: ' + e.message) + + } + }, + + + + + + + + async QueryOrderOwnerUids({ commit, rootGetters, getters }, { options: { subscribe, all} = { subscribe:false, all:false}, params, query=null }) { + try { + const key = params ?? 
{}; + const queryClient=await initQueryClient(rootGetters) + let value= (await queryClient.queryOrderOwnerUids( key.address, query)).data + + + while (all && ( value).pagination && ( value).pagination.next_key!=null) { + let next_values=(await queryClient.queryOrderOwnerUids( key.address, {...query, 'pagination.key':( value).pagination.next_key})).data + value = mergeResults(value, next_values); + } + commit('QUERY', { query: 'OrderOwnerUids', key: { params: {...key}, query}, value }) + if (subscribe) commit('SUBSCRIBE', { action: 'QueryOrderOwnerUids', payload: { options: { all }, params: {...key},query }}) + return getters['getOrderOwnerUids']( { params: {...key}, query}) ?? {} + } catch (e) { + throw new Error('QueryClient:QueryOrderOwnerUids API Node Unavailable. Could not perform query: ' + e.message) + + } + }, + + + + + + + + async QueryBook({ commit, rootGetters, getters }, { options: { subscribe, all} = { subscribe:false, all:false}, params, query=null }) { + try { + const key = params ?? {}; + const queryClient=await initQueryClient(rootGetters) + let value= (await queryClient.queryBook( key.denomA, key.denomB, key.orderType, query)).data + + + while (all && ( value).pagination && ( value).pagination.next_key!=null) { + let next_values=(await queryClient.queryBook( key.denomA, key.denomB, key.orderType, {...query, 'pagination.key':( value).pagination.next_key})).data + value = mergeResults(value, next_values); + } + commit('QUERY', { query: 'Book', key: { params: {...key}, query}, value }) + if (subscribe) commit('SUBSCRIBE', { action: 'QueryBook', payload: { options: { all }, params: {...key},query }}) + return getters['getBook']( { params: {...key}, query}) ?? {} + } catch (e) { + throw new Error('QueryClient:QueryBook API Node Unavailable. 
Could not perform query: ' + e.message) + + } + }, + + + + + + + + async QueryBookends({ commit, rootGetters, getters }, { options: { subscribe, all} = { subscribe:false, all:false}, params, query=null }) { + try { + const key = params ?? {}; + const queryClient=await initQueryClient(rootGetters) + let value= (await queryClient.queryBookends( key.coinA, key.coinB, key.orderType, key.rate)).data + + + commit('QUERY', { query: 'Bookends', key: { params: {...key}, query}, value }) + if (subscribe) commit('SUBSCRIBE', { action: 'QueryBookends', payload: { options: { all }, params: {...key},query }}) + return getters['getBookends']( { params: {...key}, query}) ?? {} + } catch (e) { + throw new Error('QueryClient:QueryBookends API Node Unavailable. Could not perform query: ' + e.message) + + } + }, + + + + + + + + async QueryHistory({ commit, rootGetters, getters }, { options: { subscribe, all} = { subscribe:false, all:false}, params, query=null }) { + try { + const key = params ?? {}; + const queryClient=await initQueryClient(rootGetters) + let value= (await queryClient.queryHistory( key.pair, query)).data + + + while (all && ( value).pagination && ( value).pagination.next_key!=null) { + let next_values=(await queryClient.queryHistory( key.pair, {...query, 'pagination.key':( value).pagination.next_key})).data + value = mergeResults(value, next_values); + } + commit('QUERY', { query: 'History', key: { params: {...key}, query}, value }) + if (subscribe) commit('SUBSCRIBE', { action: 'QueryHistory', payload: { options: { all }, params: {...key},query }}) + return getters['getHistory']( { params: {...key}, query}) ?? {} + } catch (e) { + throw new Error('QueryClient:QueryHistory API Node Unavailable. Could not perform query: ' + e.message) + + } + }, + + + + + + + + async QueryQuote({ commit, rootGetters, getters }, { options: { subscribe, all} = { subscribe:false, all:false}, params, query=null }) { + try { + const key = params ?? 
{}; + const queryClient=await initQueryClient(rootGetters) + let value= (await queryClient.queryQuote( key.denomBid, key.denomAsk, key.denomAmount, key.amount)).data + + + commit('QUERY', { query: 'Quote', key: { params: {...key}, query}, value }) + if (subscribe) commit('SUBSCRIBE', { action: 'QueryQuote', payload: { options: { all }, params: {...key},query }}) + return getters['getQuote']( { params: {...key}, query}) ?? {} + } catch (e) { + throw new Error('QueryClient:QueryQuote API Node Unavailable. Could not perform query: ' + e.message) + + } + }, + + + async sendMsgCreatePool({ rootGetters }, { value, fee = [], memo = '' }) { + try { + const txClient=await initTxClient(rootGetters) + const msg = await txClient.msgCreatePool(value) + const result = await txClient.signAndBroadcast([msg], {fee: { amount: fee, + gas: "200000" }, memo}) + return result + } catch (e) { + if (e == MissingWalletError) { + throw new Error('TxClient:MsgCreatePool:Init Could not initialize signing client. Wallet is required.') + }else{ + throw new Error('TxClient:MsgCreatePool:Send Could not broadcast Tx: '+ e.message) + } + } + }, + async sendMsgMarketOrder({ rootGetters }, { value, fee = [], memo = '' }) { + try { + const txClient=await initTxClient(rootGetters) + const msg = await txClient.msgMarketOrder(value) + const result = await txClient.signAndBroadcast([msg], {fee: { amount: fee, + gas: "200000" }, memo}) + return result + } catch (e) { + if (e == MissingWalletError) { + throw new Error('TxClient:MsgMarketOrder:Init Could not initialize signing client. 
Wallet is required.') + }else{ + throw new Error('TxClient:MsgMarketOrder:Send Could not broadcast Tx: '+ e.message) + } + } + }, + async sendMsgCreateOrder({ rootGetters }, { value, fee = [], memo = '' }) { + try { + const txClient=await initTxClient(rootGetters) + const msg = await txClient.msgCreateOrder(value) + const result = await txClient.signAndBroadcast([msg], {fee: { amount: fee, + gas: "200000" }, memo}) + return result + } catch (e) { + if (e == MissingWalletError) { + throw new Error('TxClient:MsgCreateOrder:Init Could not initialize signing client. Wallet is required.') + }else{ + throw new Error('TxClient:MsgCreateOrder:Send Could not broadcast Tx: '+ e.message) + } + } + }, + async sendMsgCreateDrop({ rootGetters }, { value, fee = [], memo = '' }) { + try { + const txClient=await initTxClient(rootGetters) + const msg = await txClient.msgCreateDrop(value) + const result = await txClient.signAndBroadcast([msg], {fee: { amount: fee, + gas: "200000" }, memo}) + return result + } catch (e) { + if (e == MissingWalletError) { + throw new Error('TxClient:MsgCreateDrop:Init Could not initialize signing client. Wallet is required.') + }else{ + throw new Error('TxClient:MsgCreateDrop:Send Could not broadcast Tx: '+ e.message) + } + } + }, + async sendMsgRedeemDrop({ rootGetters }, { value, fee = [], memo = '' }) { + try { + const txClient=await initTxClient(rootGetters) + const msg = await txClient.msgRedeemDrop(value) + const result = await txClient.signAndBroadcast([msg], {fee: { amount: fee, + gas: "200000" }, memo}) + return result + } catch (e) { + if (e == MissingWalletError) { + throw new Error('TxClient:MsgRedeemDrop:Init Could not initialize signing client. 
Wallet is required.') + }else{ + throw new Error('TxClient:MsgRedeemDrop:Send Could not broadcast Tx: '+ e.message) + } + } + }, + async sendMsgCancelOrder({ rootGetters }, { value, fee = [], memo = '' }) { + try { + const txClient=await initTxClient(rootGetters) + const msg = await txClient.msgCancelOrder(value) + const result = await txClient.signAndBroadcast([msg], {fee: { amount: fee, + gas: "200000" }, memo}) + return result + } catch (e) { + if (e == MissingWalletError) { + throw new Error('TxClient:MsgCancelOrder:Init Could not initialize signing client. Wallet is required.') + }else{ + throw new Error('TxClient:MsgCancelOrder:Send Could not broadcast Tx: '+ e.message) + } + } + }, + + async MsgCreatePool({ rootGetters }, { value }) { + try { + const txClient=await initTxClient(rootGetters) + const msg = await txClient.msgCreatePool(value) + return msg + } catch (e) { + if (e == MissingWalletError) { + throw new Error('TxClient:MsgCreatePool:Init Could not initialize signing client. Wallet is required.') + } else{ + throw new Error('TxClient:MsgCreatePool:Create Could not create message: ' + e.message) + } + } + }, + async MsgMarketOrder({ rootGetters }, { value }) { + try { + const txClient=await initTxClient(rootGetters) + const msg = await txClient.msgMarketOrder(value) + return msg + } catch (e) { + if (e == MissingWalletError) { + throw new Error('TxClient:MsgMarketOrder:Init Could not initialize signing client. Wallet is required.') + } else{ + throw new Error('TxClient:MsgMarketOrder:Create Could not create message: ' + e.message) + } + } + }, + async MsgCreateOrder({ rootGetters }, { value }) { + try { + const txClient=await initTxClient(rootGetters) + const msg = await txClient.msgCreateOrder(value) + return msg + } catch (e) { + if (e == MissingWalletError) { + throw new Error('TxClient:MsgCreateOrder:Init Could not initialize signing client. 
Wallet is required.') + } else{ + throw new Error('TxClient:MsgCreateOrder:Create Could not create message: ' + e.message) + } + } + }, + async MsgCreateDrop({ rootGetters }, { value }) { + try { + const txClient=await initTxClient(rootGetters) + const msg = await txClient.msgCreateDrop(value) + return msg + } catch (e) { + if (e == MissingWalletError) { + throw new Error('TxClient:MsgCreateDrop:Init Could not initialize signing client. Wallet is required.') + } else{ + throw new Error('TxClient:MsgCreateDrop:Create Could not create message: ' + e.message) + } + } + }, + async MsgRedeemDrop({ rootGetters }, { value }) { + try { + const txClient=await initTxClient(rootGetters) + const msg = await txClient.msgRedeemDrop(value) + return msg + } catch (e) { + if (e == MissingWalletError) { + throw new Error('TxClient:MsgRedeemDrop:Init Could not initialize signing client. Wallet is required.') + } else{ + throw new Error('TxClient:MsgRedeemDrop:Create Could not create message: ' + e.message) + } + } + }, + async MsgCancelOrder({ rootGetters }, { value }) { + try { + const txClient=await initTxClient(rootGetters) + const msg = await txClient.msgCancelOrder(value) + return msg + } catch (e) { + if (e == MissingWalletError) { + throw new Error('TxClient:MsgCancelOrder:Init Could not initialize signing client. Wallet is required.') + } else{ + throw new Error('TxClient:MsgCancelOrder:Create Could not create message: ' + e.message) + } + } + }, + + } +} diff --git a/vue/src/store/generated/market/pendulumlabs.market.market/module/index.ts b/vue/src/store/generated/market/pendulumlabs.market.market/module/index.ts new file mode 100755 index 00000000..8d9ba8e9 --- /dev/null +++ b/vue/src/store/generated/market/pendulumlabs.market.market/module/index.ts @@ -0,0 +1,75 @@ +// THIS FILE IS GENERATED AUTOMATICALLY. DO NOT MODIFY. 
+ +import { StdFee } from "@cosmjs/launchpad"; +import { SigningStargateClient } from "@cosmjs/stargate"; +import { Registry, OfflineSigner, EncodeObject, DirectSecp256k1HdWallet } from "@cosmjs/proto-signing"; +import { Api } from "./rest"; +import { MsgCreatePool } from "./types/market/tx"; +import { MsgMarketOrder } from "./types/market/tx"; +import { MsgCreateOrder } from "./types/market/tx"; +import { MsgCreateDrop } from "./types/market/tx"; +import { MsgRedeemDrop } from "./types/market/tx"; +import { MsgCancelOrder } from "./types/market/tx"; + + +const types = [ + ["/pendulumlabs.market.market.MsgCreatePool", MsgCreatePool], + ["/pendulumlabs.market.market.MsgMarketOrder", MsgMarketOrder], + ["/pendulumlabs.market.market.MsgCreateOrder", MsgCreateOrder], + ["/pendulumlabs.market.market.MsgCreateDrop", MsgCreateDrop], + ["/pendulumlabs.market.market.MsgRedeemDrop", MsgRedeemDrop], + ["/pendulumlabs.market.market.MsgCancelOrder", MsgCancelOrder], + +]; +export const MissingWalletError = new Error("wallet is required"); + +export const registry = new Registry(types); + +const defaultFee = { + amount: [], + gas: "200000", +}; + +interface TxClientOptions { + addr: string +} + +interface SignAndBroadcastOptions { + fee: StdFee, + memo?: string +} + +const txClient = async (wallet: OfflineSigner, { addr: addr }: TxClientOptions = { addr: "http://localhost:26657" }) => { + if (!wallet) throw MissingWalletError; + let client; + if (addr) { + client = await SigningStargateClient.connectWithSigner(addr, wallet, { registry }); + }else{ + client = await SigningStargateClient.offline( wallet, { registry }); + } + const { address } = (await wallet.getAccounts())[0]; + + return { + signAndBroadcast: (msgs: EncodeObject[], { fee, memo }: SignAndBroadcastOptions = {fee: defaultFee, memo: ""}) => client.signAndBroadcast(address, msgs, fee,memo), + msgCreatePool: (data: MsgCreatePool): EncodeObject => ({ typeUrl: "/pendulumlabs.market.market.MsgCreatePool", value: 
MsgCreatePool.fromPartial( data ) }), + msgMarketOrder: (data: MsgMarketOrder): EncodeObject => ({ typeUrl: "/pendulumlabs.market.market.MsgMarketOrder", value: MsgMarketOrder.fromPartial( data ) }), + msgCreateOrder: (data: MsgCreateOrder): EncodeObject => ({ typeUrl: "/pendulumlabs.market.market.MsgCreateOrder", value: MsgCreateOrder.fromPartial( data ) }), + msgCreateDrop: (data: MsgCreateDrop): EncodeObject => ({ typeUrl: "/pendulumlabs.market.market.MsgCreateDrop", value: MsgCreateDrop.fromPartial( data ) }), + msgRedeemDrop: (data: MsgRedeemDrop): EncodeObject => ({ typeUrl: "/pendulumlabs.market.market.MsgRedeemDrop", value: MsgRedeemDrop.fromPartial( data ) }), + msgCancelOrder: (data: MsgCancelOrder): EncodeObject => ({ typeUrl: "/pendulumlabs.market.market.MsgCancelOrder", value: MsgCancelOrder.fromPartial( data ) }), + + }; +}; + +interface QueryClientOptions { + addr: string +} + +const queryClient = async ({ addr: addr }: QueryClientOptions = { addr: "http://localhost:1317" }) => { + return new Api({ baseUrl: addr }); +}; + +export { + txClient, + queryClient, +}; diff --git a/vue/src/store/generated/market/pendulumlabs.market.market/module/rest.ts b/vue/src/store/generated/market/pendulumlabs.market.market/module/rest.ts new file mode 100644 index 00000000..8b46f29a --- /dev/null +++ b/vue/src/store/generated/market/pendulumlabs.market.market/module/rest.ts @@ -0,0 +1,1139 @@ +/* eslint-disable */ +/* tslint:disable */ +/* + * --------------------------------------------------------------- + * ## THIS FILE WAS GENERATED VIA SWAGGER-TYPESCRIPT-API ## + * ## ## + * ## AUTHOR: acacode ## + * ## SOURCE: https://github.com/acacode/swagger-typescript-api ## + * --------------------------------------------------------------- + */ + +export interface MarketBurnings { + denom?: string; + amount?: string; +} + +export interface MarketDrop { + /** @format uint64 */ + uid?: string; + owner?: string; + pair?: string; + drops?: string; + product?: string; + 
active?: boolean; +} + +export interface MarketLeader { + address?: string; + drops?: string; +} + +export interface MarketMember { + pair?: string; + denomA?: string; + denomB?: string; + balance?: string; + previous?: string; + + /** @format uint64 */ + limit?: string; + + /** @format uint64 */ + stop?: string; +} + +export type MarketMsgCancelOrderResponse = object; + +export type MarketMsgCreateDropResponse = object; + +export interface MarketMsgCreateOrderResponse { + /** @format uint64 */ + uid?: string; +} + +export type MarketMsgCreatePoolResponse = object; + +export interface MarketMsgMarketOrderResponse { + amountBid?: string; + amountAsk?: string; + slippage?: string; +} + +export type MarketMsgRedeemDropResponse = object; + +export interface MarketOrder { + /** @format uint64 */ + uid?: string; + owner?: string; + status?: string; + orderType?: string; + denomAsk?: string; + denomBid?: string; + amount?: string; + rate?: string[]; + + /** @format uint64 */ + prev?: string; + + /** @format uint64 */ + next?: string; + + /** @format int64 */ + beg_time?: string; + + /** @format int64 */ + upd_time?: string; +} + +export interface MarketOrderResponse { + /** @format uint64 */ + uid?: string; + owner?: string; + status?: string; + orderType?: string; + denomAsk?: string; + denomBid?: string; + amount?: string; + rate?: string[]; + + /** @format uint64 */ + prev?: string; + + /** @format uint64 */ + next?: string; + + /** @format int64 */ + beg_time?: string; + + /** @format int64 */ + upd_time?: string; +} + +export interface MarketOrders { + uids?: string[]; +} + +/** + * Params defines the parameters for the module. 
+ */ +export interface MarketParams { + earn_rates?: string; + burn_rate?: string; + burn_coin?: string; + market_fee?: string; +} + +export interface MarketPool { + pair?: string; + denom1?: string; + denom2?: string; + volume1?: MarketVolume; + volume2?: MarketVolume; + leaders?: MarketLeader[]; + drops?: string; + + /** @format uint64 */ + history?: string; +} + +export interface MarketQueryAllBurningsResponse { + burnings?: MarketBurnings[]; + + /** + * PageResponse is to be embedded in gRPC response messages where the + * corresponding request message has used PageRequest. + * + * message SomeResponse { + * repeated Bar results = 1; + * PageResponse page = 2; + * } + */ + pagination?: V1Beta1PageResponse; +} + +export interface MarketQueryAllMemberResponse { + member?: MarketMember[]; + + /** + * PageResponse is to be embedded in gRPC response messages where the + * corresponding request message has used PageRequest. + * + * message SomeResponse { + * repeated Bar results = 1; + * PageResponse page = 2; + * } + */ + pagination?: V1Beta1PageResponse; +} + +export interface MarketQueryAllPoolResponse { + pool?: MarketPool[]; + + /** + * PageResponse is to be embedded in gRPC response messages where the + * corresponding request message has used PageRequest. + * + * message SomeResponse { + * repeated Bar results = 1; + * PageResponse page = 2; + * } + */ + pagination?: V1Beta1PageResponse; +} + +export interface MarketQueryAllVolumeResponse { + volumes?: MarketVolume[]; + + /** + * PageResponse is to be embedded in gRPC response messages where the + * corresponding request message has used PageRequest. + * + * message SomeResponse { + * repeated Bar results = 1; + * PageResponse page = 2; + * } + */ + pagination?: V1Beta1PageResponse; +} + +export interface MarketQueryBookResponse { + book?: MarketOrderResponse[]; + + /** + * PageResponse is to be embedded in gRPC response messages where the + * corresponding request message has used PageRequest. 
+ * + * message SomeResponse { + * repeated Bar results = 1; + * PageResponse page = 2; + * } + */ + pagination?: V1Beta1PageResponse; +} + +export interface MarketQueryBookendsResponse { + coinA?: string; + coinB?: string; + orderType?: string; + rate?: string[]; + + /** @format uint64 */ + prev?: string; + + /** @format uint64 */ + next?: string; +} + +export interface MarketQueryBurnedResponse { + denom?: string; + amount?: string; +} + +export interface MarketQueryDropAmountsResponse { + denom1?: string; + denom2?: string; + amount1?: string; + amount2?: string; +} + +export interface MarketQueryDropCoinResponse { + drops?: string; + amountB?: string; +} + +export interface MarketQueryDropPairsResponse { + pairs?: string[]; +} + +export interface MarketQueryDropResponse { + drop?: MarketDrop; +} + +export interface MarketQueryDropsResponse { + drops?: MarketDrop[]; + + /** + * PageResponse is to be embedded in gRPC response messages where the + * corresponding request message has used PageRequest. + * + * message SomeResponse { + * repeated Bar results = 1; + * PageResponse page = 2; + * } + */ + pagination?: V1Beta1PageResponse; +} + +export interface MarketQueryGetBurningsResponse { + burnings?: MarketBurnings; +} + +export interface MarketQueryGetMemberResponse { + member?: MarketMember; +} + +export interface MarketQueryGetPoolResponse { + pool?: MarketPool; +} + +export interface MarketQueryHistoryResponse { + history?: MarketOrderResponse[]; + + /** + * PageResponse is to be embedded in gRPC response messages where the + * corresponding request message has used PageRequest. + * + * message SomeResponse { + * repeated Bar results = 1; + * PageResponse page = 2; + * } + */ + pagination?: V1Beta1PageResponse; +} + +export interface MarketQueryOrderOwnerUidsResponse { + orders?: MarketOrders; + + /** + * PageResponse is to be embedded in gRPC response messages where the + * corresponding request message has used PageRequest. 
+ * + * message SomeResponse { + * repeated Bar results = 1; + * PageResponse page = 2; + * } + */ + pagination?: V1Beta1PageResponse; +} + +export interface MarketQueryOrderResponse { + order?: MarketOrder; +} + +export interface MarketQueryOrdersResponse { + orders?: MarketOrder[]; + + /** + * PageResponse is to be embedded in gRPC response messages where the + * corresponding request message has used PageRequest. + * + * message SomeResponse { + * repeated Bar results = 1; + * PageResponse page = 2; + * } + */ + pagination?: V1Beta1PageResponse; +} + +/** + * QueryParamsResponse is response type for the Query/Params RPC method. + */ +export interface MarketQueryParamsResponse { + /** params holds all the parameters of this module. */ + params?: MarketParams; +} + +export interface MarketQueryQuoteResponse { + denom?: string; + amount?: string; +} + +export interface MarketQueryVolumeResponse { + amount?: string; +} + +export interface MarketVolume { + denom?: string; + amount?: string; +} + +export interface ProtobufAny { + "@type"?: string; +} + +export interface RpcStatus { + /** @format int32 */ + code?: number; + message?: string; + details?: ProtobufAny[]; +} + +/** +* message SomeRequest { + Foo some_parameter = 1; + PageRequest pagination = 2; + } +*/ +export interface V1Beta1PageRequest { + /** + * key is a value returned in PageResponse.next_key to begin + * querying the next page most efficiently. Only one of offset or key + * should be set. + * @format byte + */ + key?: string; + + /** + * offset is a numeric offset that can be used when key is unavailable. + * It is less efficient than using key. Only one of offset or key should + * be set. + * @format uint64 + */ + offset?: string; + + /** + * limit is the total number of results to be returned in the result page. + * If left empty it will default to a value to be set by each app. 
+ * @format uint64 + */ + limit?: string; + + /** + * count_total is set to true to indicate that the result set should include + * a count of the total number of items available for pagination in UIs. + * count_total is only respected when offset is used. It is ignored when key + * is set. + */ + count_total?: boolean; + + /** + * reverse is set to true if results are to be returned in the descending order. + * + * Since: cosmos-sdk 0.43 + */ + reverse?: boolean; +} + +/** +* PageResponse is to be embedded in gRPC response messages where the +corresponding request message has used PageRequest. + + message SomeResponse { + repeated Bar results = 1; + PageResponse page = 2; + } +*/ +export interface V1Beta1PageResponse { + /** @format byte */ + next_key?: string; + + /** @format uint64 */ + total?: string; +} + +export type QueryParamsType = Record; +export type ResponseFormat = keyof Omit; + +export interface FullRequestParams extends Omit { + /** set parameter to `true` for call `securityWorker` for this request */ + secure?: boolean; + /** request path */ + path: string; + /** content type of request body */ + type?: ContentType; + /** query params */ + query?: QueryParamsType; + /** format of response (i.e. 
response.json() -> format: "json") */ + format?: keyof Omit; + /** request body */ + body?: unknown; + /** base url */ + baseUrl?: string; + /** request cancellation token */ + cancelToken?: CancelToken; +} + +export type RequestParams = Omit; + +export interface ApiConfig { + baseUrl?: string; + baseApiParams?: Omit; + securityWorker?: (securityData: SecurityDataType) => RequestParams | void; +} + +export interface HttpResponse extends Response { + data: D; + error: E; +} + +type CancelToken = Symbol | string | number; + +export enum ContentType { + Json = "application/json", + FormData = "multipart/form-data", + UrlEncoded = "application/x-www-form-urlencoded", +} + +export class HttpClient { + public baseUrl: string = ""; + private securityData: SecurityDataType = null as any; + private securityWorker: null | ApiConfig["securityWorker"] = null; + private abortControllers = new Map(); + + private baseApiParams: RequestParams = { + credentials: "same-origin", + headers: {}, + redirect: "follow", + referrerPolicy: "no-referrer", + }; + + constructor(apiConfig: ApiConfig = {}) { + Object.assign(this, apiConfig); + } + + public setSecurityData = (data: SecurityDataType) => { + this.securityData = data; + }; + + private addQueryParam(query: QueryParamsType, key: string) { + const value = query[key]; + + return ( + encodeURIComponent(key) + + "=" + + encodeURIComponent(Array.isArray(value) ? value.join(",") : typeof value === "number" ? value : `${value}`) + ); + } + + protected toQueryString(rawQuery?: QueryParamsType): string { + const query = rawQuery || {}; + const keys = Object.keys(query).filter((key) => "undefined" !== typeof query[key]); + return keys + .map((key) => + typeof query[key] === "object" && !Array.isArray(query[key]) + ? 
this.toQueryString(query[key] as QueryParamsType) + : this.addQueryParam(query, key), + ) + .join("&"); + } + + protected addQueryParams(rawQuery?: QueryParamsType): string { + const queryString = this.toQueryString(rawQuery); + return queryString ? `?${queryString}` : ""; + } + + private contentFormatters: Record any> = { + [ContentType.Json]: (input: any) => + input !== null && (typeof input === "object" || typeof input === "string") ? JSON.stringify(input) : input, + [ContentType.FormData]: (input: any) => + Object.keys(input || {}).reduce((data, key) => { + data.append(key, input[key]); + return data; + }, new FormData()), + [ContentType.UrlEncoded]: (input: any) => this.toQueryString(input), + }; + + private mergeRequestParams(params1: RequestParams, params2?: RequestParams): RequestParams { + return { + ...this.baseApiParams, + ...params1, + ...(params2 || {}), + headers: { + ...(this.baseApiParams.headers || {}), + ...(params1.headers || {}), + ...((params2 && params2.headers) || {}), + }, + }; + } + + private createAbortSignal = (cancelToken: CancelToken): AbortSignal | undefined => { + if (this.abortControllers.has(cancelToken)) { + const abortController = this.abortControllers.get(cancelToken); + if (abortController) { + return abortController.signal; + } + return void 0; + } + + const abortController = new AbortController(); + this.abortControllers.set(cancelToken, abortController); + return abortController.signal; + }; + + public abortRequest = (cancelToken: CancelToken) => { + const abortController = this.abortControllers.get(cancelToken); + + if (abortController) { + abortController.abort(); + this.abortControllers.delete(cancelToken); + } + }; + + public request = ({ + body, + secure, + path, + type, + query, + format = "json", + baseUrl, + cancelToken, + ...params + }: FullRequestParams): Promise> => { + const secureParams = (secure && this.securityWorker && this.securityWorker(this.securityData)) || {}; + const requestParams = 
this.mergeRequestParams(params, secureParams); + const queryString = query && this.toQueryString(query); + const payloadFormatter = this.contentFormatters[type || ContentType.Json]; + + return fetch(`${baseUrl || this.baseUrl || ""}${path}${queryString ? `?${queryString}` : ""}`, { + ...requestParams, + headers: { + ...(type && type !== ContentType.FormData ? { "Content-Type": type } : {}), + ...(requestParams.headers || {}), + }, + signal: cancelToken ? this.createAbortSignal(cancelToken) : void 0, + body: typeof body === "undefined" || body === null ? null : payloadFormatter(body), + }).then(async (response) => { + const r = response as HttpResponse; + r.data = (null as unknown) as T; + r.error = (null as unknown) as E; + + const data = await response[format]() + .then((data) => { + if (r.ok) { + r.data = data; + } else { + r.error = data; + } + return r; + }) + .catch((e) => { + r.error = e; + return r; + }); + + if (cancelToken) { + this.abortControllers.delete(cancelToken); + } + + if (!response.ok) throw data; + return data; + }); + }; +} + +/** + * @title market/burnings.proto + * @version version not set + */ +export class Api extends HttpClient { + /** + * No description + * + * @tags Query + * @name QueryBook + * @summary Queries a list of Book items. + * @request GET:/pendulum-labs/market/market/book/{denomA}/{denomB}/{orderType} + */ + queryBook = ( + denomA: string, + denomB: string, + orderType: string, + query?: { + "pagination.key"?: string; + "pagination.offset"?: string; + "pagination.limit"?: string; + "pagination.count_total"?: boolean; + "pagination.reverse"?: boolean; + }, + params: RequestParams = {}, + ) => + this.request({ + path: `/pendulum-labs/market/market/book/${denomA}/${denomB}/${orderType}`, + method: "GET", + query: query, + format: "json", + ...params, + }); + + /** + * No description + * + * @tags Query + * @name QueryBookends + * @summary Queries a list of Bookends items. 
+ * @request GET:/pendulum-labs/market/market/bookends/{coinA}/{coinB}/{orderType}/{rate} + */ + queryBookends = (coinA: string, coinB: string, orderType: string, rate: string[], params: RequestParams = {}) => + this.request({ + path: `/pendulum-labs/market/market/bookends/${coinA}/${coinB}/${orderType}/${rate}`, + method: "GET", + format: "json", + ...params, + }); + + /** + * No description + * + * @tags Query + * @name QueryBurned + * @summary Queries total burned. + * @request GET:/pendulum-labs/market/market/burned + */ + queryBurned = (params: RequestParams = {}) => + this.request({ + path: `/pendulum-labs/market/market/burned`, + method: "GET", + format: "json", + ...params, + }); + + /** + * No description + * + * @tags Query + * @name QueryBurningsAll + * @summary Queries a list of Burnings items. + * @request GET:/pendulum-labs/market/market/burnings + */ + queryBurningsAll = ( + query?: { + "pagination.key"?: string; + "pagination.offset"?: string; + "pagination.limit"?: string; + "pagination.count_total"?: boolean; + "pagination.reverse"?: boolean; + }, + params: RequestParams = {}, + ) => + this.request({ + path: `/pendulum-labs/market/market/burnings`, + method: "GET", + query: query, + format: "json", + ...params, + }); + + /** + * No description + * + * @tags Query + * @name QueryBurnings + * @summary Queries a Burnings by index. + * @request GET:/pendulum-labs/market/market/burnings/{denom} + */ + queryBurnings = (denom: string, params: RequestParams = {}) => + this.request({ + path: `/pendulum-labs/market/market/burnings/${denom}`, + method: "GET", + format: "json", + ...params, + }); + + /** + * No description + * + * @tags Query + * @name QueryDropAll + * @summary Queries a list of Drop items. 
+ * @request GET:/pendulum-labs/market/market/drop + */ + queryDropAll = ( + query?: { + "pagination.key"?: string; + "pagination.offset"?: string; + "pagination.limit"?: string; + "pagination.count_total"?: boolean; + "pagination.reverse"?: boolean; + }, + params: RequestParams = {}, + ) => + this.request({ + path: `/pendulum-labs/market/market/drop`, + method: "GET", + query: query, + format: "json", + ...params, + }); + + /** + * No description + * + * @tags Query + * @name QueryDropAmounts + * @summary Queries a Drop by index. + * @request GET:/pendulum-labs/market/market/drop/amounts/{uid} + */ + queryDropAmounts = (uid: string, params: RequestParams = {}) => + this.request({ + path: `/pendulum-labs/market/market/drop/amounts/${uid}`, + method: "GET", + format: "json", + ...params, + }); + + /** + * No description + * + * @tags Query + * @name QueryDropCoin + * @summary Queries a Drop by index. + * @request GET:/pendulum-labs/market/market/drop/coin/{denomA}/{denomB}/{amountA} + */ + queryDropCoin = (denomA: string, denomB: string, amountA: string, params: RequestParams = {}) => + this.request({ + path: `/pendulum-labs/market/market/drop/coin/${denomA}/${denomB}/${amountA}`, + method: "GET", + format: "json", + ...params, + }); + + /** + * No description + * + * @tags Query + * @name QueryDropsToCoins + * @summary Converts drops to coin amounts + * @request GET:/pendulum-labs/market/market/drop/coins/{pair}/{drops} + */ + queryDropsToCoins = (pair: string, drops: string, params: RequestParams = {}) => + this.request({ + path: `/pendulum-labs/market/market/drop/coins/${pair}/${drops}`, + method: "GET", + format: "json", + ...params, + }); + + /** + * No description + * + * @tags Query + * @name QueryDropPairs + * @summary Queries a Drop by index. 
+ * @request GET:/pendulum-labs/market/market/drop/pairs/{address} + */ + queryDropPairs = (address: string, params: RequestParams = {}) => + this.request({ + path: `/pendulum-labs/market/market/drop/pairs/${address}`, + method: "GET", + format: "json", + ...params, + }); + + /** + * No description + * + * @tags Query + * @name QueryDropOwnerPair + * @summary Queries a Drop by index. + * @request GET:/pendulum-labs/market/market/drop/{address}/{pair} + */ + queryDropOwnerPair = ( + address: string, + pair: string, + query?: { + "pagination.key"?: string; + "pagination.offset"?: string; + "pagination.limit"?: string; + "pagination.count_total"?: boolean; + "pagination.reverse"?: boolean; + }, + params: RequestParams = {}, + ) => + this.request({ + path: `/pendulum-labs/market/market/drop/${address}/${pair}`, + method: "GET", + query: query, + format: "json", + ...params, + }); + + /** + * No description + * + * @tags Query + * @name QueryDrop + * @summary Queries a Drop by index. + * @request GET:/pendulum-labs/market/market/drop/{uid} + */ + queryDrop = (uid: string, params: RequestParams = {}) => + this.request({ + path: `/pendulum-labs/market/market/drop/${uid}`, + method: "GET", + format: "json", + ...params, + }); + + /** + * No description + * + * @tags Query + * @name QueryHistory + * @summary Queries pool trade history. + * @request GET:/pendulum-labs/market/market/history/{pair} + */ + queryHistory = ( + pair: string, + query?: { + length?: string; + "pagination.key"?: string; + "pagination.offset"?: string; + "pagination.limit"?: string; + "pagination.count_total"?: boolean; + "pagination.reverse"?: boolean; + }, + params: RequestParams = {}, + ) => + this.request({ + path: `/pendulum-labs/market/market/history/${pair}`, + method: "GET", + query: query, + format: "json", + ...params, + }); + + /** + * No description + * + * @tags Query + * @name QueryMemberAll + * @summary Queries a list of Member items. 
+ * @request GET:/pendulum-labs/market/market/member + */ + queryMemberAll = ( + query?: { + "pagination.key"?: string; + "pagination.offset"?: string; + "pagination.limit"?: string; + "pagination.count_total"?: boolean; + "pagination.reverse"?: boolean; + }, + params: RequestParams = {}, + ) => + this.request({ + path: `/pendulum-labs/market/market/member`, + method: "GET", + query: query, + format: "json", + ...params, + }); + + /** + * No description + * + * @tags Query + * @name QueryMember + * @summary Queries a Member by index. + * @request GET:/pendulum-labs/market/market/member/{denomA}/{denomB} + */ + queryMember = (denomA: string, denomB: string, params: RequestParams = {}) => + this.request({ + path: `/pendulum-labs/market/market/member/${denomA}/${denomB}`, + method: "GET", + format: "json", + ...params, + }); + + /** + * No description + * + * @tags Query + * @name QueryOrderAll + * @summary Queries a list of Order items. + * @request GET:/pendulum-labs/market/market/order + */ + queryOrderAll = ( + query?: { + "pagination.key"?: string; + "pagination.offset"?: string; + "pagination.limit"?: string; + "pagination.count_total"?: boolean; + "pagination.reverse"?: boolean; + }, + params: RequestParams = {}, + ) => + this.request({ + path: `/pendulum-labs/market/market/order`, + method: "GET", + query: query, + format: "json", + ...params, + }); + + /** + * No description + * + * @tags Query + * @name QueryOrderOwnerUids + * @summary Queries a list of Order items. 
+ * @request GET:/pendulum-labs/market/market/order/uids/{address} + */ + queryOrderOwnerUids = ( + address: string, + query?: { + "pagination.key"?: string; + "pagination.offset"?: string; + "pagination.limit"?: string; + "pagination.count_total"?: boolean; + "pagination.reverse"?: boolean; + }, + params: RequestParams = {}, + ) => + this.request({ + path: `/pendulum-labs/market/market/order/uids/${address}`, + method: "GET", + query: query, + format: "json", + ...params, + }); + + /** + * No description + * + * @tags Query + * @name QueryOrderOwner + * @summary Queries a list of Order items. + * @request GET:/pendulum-labs/market/market/order/{address} + */ + queryOrderOwner = ( + address: string, + query?: { + "pagination.key"?: string; + "pagination.offset"?: string; + "pagination.limit"?: string; + "pagination.count_total"?: boolean; + "pagination.reverse"?: boolean; + }, + params: RequestParams = {}, + ) => + this.request({ + path: `/pendulum-labs/market/market/order/${address}`, + method: "GET", + query: query, + format: "json", + ...params, + }); + + /** + * No description + * + * @tags Query + * @name QueryOrder + * @summary Queries a Order by index. + * @request GET:/pendulum-labs/market/market/order/{uid} + */ + queryOrder = (uid: string, params: RequestParams = {}) => + this.request({ + path: `/pendulum-labs/market/market/order/${uid}`, + method: "GET", + format: "json", + ...params, + }); + + /** + * No description + * + * @tags Query + * @name QueryParams + * @summary Parameters queries the parameters of the module. + * @request GET:/pendulum-labs/market/market/params + */ + queryParams = (params: RequestParams = {}) => + this.request({ + path: `/pendulum-labs/market/market/params`, + method: "GET", + format: "json", + ...params, + }); + + /** + * No description + * + * @tags Query + * @name QueryPoolAll + * @summary Queries a list of Pool items. 
+ * @request GET:/pendulum-labs/market/market/pool + */ + queryPoolAll = ( + query?: { + "pagination.key"?: string; + "pagination.offset"?: string; + "pagination.limit"?: string; + "pagination.count_total"?: boolean; + "pagination.reverse"?: boolean; + }, + params: RequestParams = {}, + ) => + this.request({ + path: `/pendulum-labs/market/market/pool`, + method: "GET", + query: query, + format: "json", + ...params, + }); + + /** + * No description + * + * @tags Query + * @name QueryPool + * @summary Queries a Pool by index. + * @request GET:/pendulum-labs/market/market/pool/{pair} + */ + queryPool = (pair: string, params: RequestParams = {}) => + this.request({ + path: `/pendulum-labs/market/market/pool/${pair}`, + method: "GET", + format: "json", + ...params, + }); + + /** + * No description + * + * @tags Query + * @name QueryQuote + * @summary Queries pool trade history. + * @request GET:/pendulum-labs/market/market/quote/{denomBid}/{denomAsk}/{denomAmount}/{amount} + */ + queryQuote = (denomBid: string, denomAsk: string, denomAmount: string, amount: string, params: RequestParams = {}) => + this.request({ + path: `/pendulum-labs/market/market/quote/${denomBid}/${denomAsk}/${denomAmount}/${amount}`, + method: "GET", + format: "json", + ...params, + }); + + /** + * No description + * + * @tags Query + * @name QueryVolumeAll + * @summary Queries all Volumes. + * @request GET:/pendulum-labs/market/market/volume + */ + queryVolumeAll = ( + query?: { + "pagination.key"?: string; + "pagination.offset"?: string; + "pagination.limit"?: string; + "pagination.count_total"?: boolean; + "pagination.reverse"?: boolean; + }, + params: RequestParams = {}, + ) => + this.request({ + path: `/pendulum-labs/market/market/volume`, + method: "GET", + query: query, + format: "json", + ...params, + }); + + /** + * No description + * + * @tags Query + * @name QueryVolume + * @summary Queries a Volume by index. 
+ * @request GET:/pendulum-labs/market/market/volume/{denom} + */ + queryVolume = (denom: string, params: RequestParams = {}) => + this.request({ + path: `/pendulum-labs/market/market/volume/${denom}`, + method: "GET", + format: "json", + ...params, + }); +} diff --git a/vue/src/store/generated/market/pendulumlabs.market.market/module/types/cosmos/base/query/v1beta1/pagination.ts b/vue/src/store/generated/market/pendulumlabs.market.market/module/types/cosmos/base/query/v1beta1/pagination.ts new file mode 100644 index 00000000..9c87ac0c --- /dev/null +++ b/vue/src/store/generated/market/pendulumlabs.market.market/module/types/cosmos/base/query/v1beta1/pagination.ts @@ -0,0 +1,328 @@ +/* eslint-disable */ +import * as Long from "long"; +import { util, configure, Writer, Reader } from "protobufjs/minimal"; + +export const protobufPackage = "cosmos.base.query.v1beta1"; + +/** + * PageRequest is to be embedded in gRPC request messages for efficient + * pagination. Ex: + * + * message SomeRequest { + * Foo some_parameter = 1; + * PageRequest pagination = 2; + * } + */ +export interface PageRequest { + /** + * key is a value returned in PageResponse.next_key to begin + * querying the next page most efficiently. Only one of offset or key + * should be set. + */ + key: Uint8Array; + /** + * offset is a numeric offset that can be used when key is unavailable. + * It is less efficient than using key. Only one of offset or key should + * be set. + */ + offset: number; + /** + * limit is the total number of results to be returned in the result page. + * If left empty it will default to a value to be set by each app. + */ + limit: number; + /** + * count_total is set to true to indicate that the result set should include + * a count of the total number of items available for pagination in UIs. + * count_total is only respected when offset is used. It is ignored when key + * is set. 
+ */ + count_total: boolean; + /** + * reverse is set to true if results are to be returned in the descending order. + * + * Since: cosmos-sdk 0.43 + */ + reverse: boolean; +} + +/** + * PageResponse is to be embedded in gRPC response messages where the + * corresponding request message has used PageRequest. + * + * message SomeResponse { + * repeated Bar results = 1; + * PageResponse page = 2; + * } + */ +export interface PageResponse { + /** + * next_key is the key to be passed to PageRequest.key to + * query the next page most efficiently + */ + next_key: Uint8Array; + /** + * total is total number of results available if PageRequest.count_total + * was set, its value is undefined otherwise + */ + total: number; +} + +const basePageRequest: object = { + offset: 0, + limit: 0, + count_total: false, + reverse: false, +}; + +export const PageRequest = { + encode(message: PageRequest, writer: Writer = Writer.create()): Writer { + if (message.key.length !== 0) { + writer.uint32(10).bytes(message.key); + } + if (message.offset !== 0) { + writer.uint32(16).uint64(message.offset); + } + if (message.limit !== 0) { + writer.uint32(24).uint64(message.limit); + } + if (message.count_total === true) { + writer.uint32(32).bool(message.count_total); + } + if (message.reverse === true) { + writer.uint32(40).bool(message.reverse); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): PageRequest { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...basePageRequest } as PageRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.bytes(); + break; + case 2: + message.offset = longToNumber(reader.uint64() as Long); + break; + case 3: + message.limit = longToNumber(reader.uint64() as Long); + break; + case 4: + message.count_total = reader.bool(); + break; + case 5: + message.reverse = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): PageRequest { + const message = { ...basePageRequest } as PageRequest; + if (object.key !== undefined && object.key !== null) { + message.key = bytesFromBase64(object.key); + } + if (object.offset !== undefined && object.offset !== null) { + message.offset = Number(object.offset); + } else { + message.offset = 0; + } + if (object.limit !== undefined && object.limit !== null) { + message.limit = Number(object.limit); + } else { + message.limit = 0; + } + if (object.count_total !== undefined && object.count_total !== null) { + message.count_total = Boolean(object.count_total); + } else { + message.count_total = false; + } + if (object.reverse !== undefined && object.reverse !== null) { + message.reverse = Boolean(object.reverse); + } else { + message.reverse = false; + } + return message; + }, + + toJSON(message: PageRequest): unknown { + const obj: any = {}; + message.key !== undefined && + (obj.key = base64FromBytes( + message.key !== undefined ? 
message.key : new Uint8Array() + )); + message.offset !== undefined && (obj.offset = message.offset); + message.limit !== undefined && (obj.limit = message.limit); + message.count_total !== undefined && + (obj.count_total = message.count_total); + message.reverse !== undefined && (obj.reverse = message.reverse); + return obj; + }, + + fromPartial(object: DeepPartial): PageRequest { + const message = { ...basePageRequest } as PageRequest; + if (object.key !== undefined && object.key !== null) { + message.key = object.key; + } else { + message.key = new Uint8Array(); + } + if (object.offset !== undefined && object.offset !== null) { + message.offset = object.offset; + } else { + message.offset = 0; + } + if (object.limit !== undefined && object.limit !== null) { + message.limit = object.limit; + } else { + message.limit = 0; + } + if (object.count_total !== undefined && object.count_total !== null) { + message.count_total = object.count_total; + } else { + message.count_total = false; + } + if (object.reverse !== undefined && object.reverse !== null) { + message.reverse = object.reverse; + } else { + message.reverse = false; + } + return message; + }, +}; + +const basePageResponse: object = { total: 0 }; + +export const PageResponse = { + encode(message: PageResponse, writer: Writer = Writer.create()): Writer { + if (message.next_key.length !== 0) { + writer.uint32(10).bytes(message.next_key); + } + if (message.total !== 0) { + writer.uint32(16).uint64(message.total); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): PageResponse { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...basePageResponse } as PageResponse; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.next_key = reader.bytes(); + break; + case 2: + message.total = longToNumber(reader.uint64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): PageResponse { + const message = { ...basePageResponse } as PageResponse; + if (object.next_key !== undefined && object.next_key !== null) { + message.next_key = bytesFromBase64(object.next_key); + } + if (object.total !== undefined && object.total !== null) { + message.total = Number(object.total); + } else { + message.total = 0; + } + return message; + }, + + toJSON(message: PageResponse): unknown { + const obj: any = {}; + message.next_key !== undefined && + (obj.next_key = base64FromBytes( + message.next_key !== undefined ? message.next_key : new Uint8Array() + )); + message.total !== undefined && (obj.total = message.total); + return obj; + }, + + fromPartial(object: DeepPartial): PageResponse { + const message = { ...basePageResponse } as PageResponse; + if (object.next_key !== undefined && object.next_key !== null) { + message.next_key = object.next_key; + } else { + message.next_key = new Uint8Array(); + } + if (object.total !== undefined && object.total !== null) { + message.total = object.total; + } else { + message.total = 0; + } + return message; + }, +}; + +declare var self: any | undefined; +declare var window: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +const atob: (b64: string) => string = + globalThis.atob || + ((b64) => globalThis.Buffer.from(b64, "base64").toString("binary")); 
+function bytesFromBase64(b64: string): Uint8Array { + const bin = atob(b64); + const arr = new Uint8Array(bin.length); + for (let i = 0; i < bin.length; ++i) { + arr[i] = bin.charCodeAt(i); + } + return arr; +} + +const btoa: (bin: string) => string = + globalThis.btoa || + ((bin) => globalThis.Buffer.from(bin, "binary").toString("base64")); +function base64FromBytes(arr: Uint8Array): string { + const bin: string[] = []; + for (let i = 0; i < arr.byteLength; ++i) { + bin.push(String.fromCharCode(arr[i])); + } + return btoa(bin.join("")); +} + +type Builtin = Date | Function | Uint8Array | string | number | undefined; +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in keyof T]?: DeepPartial } + : Partial; + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (util.Long !== Long) { + util.Long = Long as any; + configure(); +} diff --git a/vue/src/store/generated/market/pendulumlabs.market.market/module/types/gogoproto/gogo.ts b/vue/src/store/generated/market/pendulumlabs.market.market/module/types/gogoproto/gogo.ts new file mode 100644 index 00000000..3f41a047 --- /dev/null +++ b/vue/src/store/generated/market/pendulumlabs.market.market/module/types/gogoproto/gogo.ts @@ -0,0 +1,2 @@ +/* eslint-disable */ +export const protobufPackage = "gogoproto"; diff --git a/vue/src/store/generated/market/pendulumlabs.market.market/module/types/google/api/annotations.ts b/vue/src/store/generated/market/pendulumlabs.market.market/module/types/google/api/annotations.ts new file mode 100644 index 00000000..aace4787 --- /dev/null +++ b/vue/src/store/generated/market/pendulumlabs.market.market/module/types/google/api/annotations.ts @@ -0,0 +1,2 @@ +/* eslint-disable */ +export const protobufPackage = "google.api"; diff --git 
a/vue/src/store/generated/market/pendulumlabs.market.market/module/types/google/api/http.ts b/vue/src/store/generated/market/pendulumlabs.market.market/module/types/google/api/http.ts new file mode 100644 index 00000000..ccadff68 --- /dev/null +++ b/vue/src/store/generated/market/pendulumlabs.market.market/module/types/google/api/http.ts @@ -0,0 +1,706 @@ +/* eslint-disable */ +import { Writer, Reader } from "protobufjs/minimal"; + +export const protobufPackage = "google.api"; + +/** + * Defines the HTTP configuration for an API service. It contains a list of + * [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method + * to one or more HTTP REST API methods. + */ +export interface Http { + /** + * A list of HTTP configuration rules that apply to individual API methods. + * + * **NOTE:** All service configuration rules follow "last one wins" order. + */ + rules: HttpRule[]; + /** + * When set to true, URL path parmeters will be fully URI-decoded except in + * cases of single segment matches in reserved expansion, where "%2F" will be + * left encoded. + * + * The default behavior is to not decode RFC 6570 reserved characters in multi + * segment matches. + */ + fully_decode_reserved_expansion: boolean; +} + +/** + * `HttpRule` defines the mapping of an RPC method to one or more HTTP + * REST API methods. The mapping specifies how different portions of the RPC + * request message are mapped to URL path, URL query parameters, and + * HTTP request body. The mapping is typically specified as an + * `google.api.http` annotation on the RPC method, + * see "google/api/annotations.proto" for details. + * + * The mapping consists of a field specifying the path template and + * method kind. 
The path template can refer to fields in the request + * message, as in the example below which describes a REST GET + * operation on a resource collection of messages: + * + * + * service Messaging { + * rpc GetMessage(GetMessageRequest) returns (Message) { + * option (google.api.http).get = "/v1/messages/{message_id}/{sub.subfield}"; + * } + * } + * message GetMessageRequest { + * message SubMessage { + * string subfield = 1; + * } + * string message_id = 1; // mapped to the URL + * SubMessage sub = 2; // `sub.subfield` is url-mapped + * } + * message Message { + * string text = 1; // content of the resource + * } + * + * The same http annotation can alternatively be expressed inside the + * `GRPC API Configuration` YAML file. + * + * http: + * rules: + * - selector: .Messaging.GetMessage + * get: /v1/messages/{message_id}/{sub.subfield} + * + * This definition enables an automatic, bidrectional mapping of HTTP + * JSON to RPC. Example: + * + * HTTP | RPC + * -----|----- + * `GET /v1/messages/123456/foo` | `GetMessage(message_id: "123456" sub: SubMessage(subfield: "foo"))` + * + * In general, not only fields but also field paths can be referenced + * from a path pattern. Fields mapped to the path pattern cannot be + * repeated and must have a primitive (non-message) type. + * + * Any fields in the request message which are not bound by the path + * pattern automatically become (optional) HTTP query + * parameters. 
Assume the following definition of the request message: + * + * + * service Messaging { + * rpc GetMessage(GetMessageRequest) returns (Message) { + * option (google.api.http).get = "/v1/messages/{message_id}"; + * } + * } + * message GetMessageRequest { + * message SubMessage { + * string subfield = 1; + * } + * string message_id = 1; // mapped to the URL + * int64 revision = 2; // becomes a parameter + * SubMessage sub = 3; // `sub.subfield` becomes a parameter + * } + * + * + * This enables a HTTP JSON to RPC mapping as below: + * + * HTTP | RPC + * -----|----- + * `GET /v1/messages/123456?revision=2&sub.subfield=foo` | `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: "foo"))` + * + * Note that fields which are mapped to HTTP parameters must have a + * primitive type or a repeated primitive type. Message types are not + * allowed. In the case of a repeated type, the parameter can be + * repeated in the URL, as in `...?param=A¶m=B`. + * + * For HTTP method kinds which allow a request body, the `body` field + * specifies the mapping. Consider a REST update method on the + * message resource collection: + * + * + * service Messaging { + * rpc UpdateMessage(UpdateMessageRequest) returns (Message) { + * option (google.api.http) = { + * put: "/v1/messages/{message_id}" + * body: "message" + * }; + * } + * } + * message UpdateMessageRequest { + * string message_id = 1; // mapped to the URL + * Message message = 2; // mapped to the body + * } + * + * + * The following HTTP JSON to RPC mapping is enabled, where the + * representation of the JSON in the request body is determined by + * protos JSON encoding: + * + * HTTP | RPC + * -----|----- + * `PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" message { text: "Hi!" })` + * + * The special name `*` can be used in the body mapping to define that + * every field not bound by the path template should be mapped to the + * request body. 
This enables the following alternative definition of + * the update method: + * + * service Messaging { + * rpc UpdateMessage(Message) returns (Message) { + * option (google.api.http) = { + * put: "/v1/messages/{message_id}" + * body: "*" + * }; + * } + * } + * message Message { + * string message_id = 1; + * string text = 2; + * } + * + * + * The following HTTP JSON to RPC mapping is enabled: + * + * HTTP | RPC + * -----|----- + * `PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" text: "Hi!")` + * + * Note that when using `*` in the body mapping, it is not possible to + * have HTTP parameters, as all fields not bound by the path end in + * the body. This makes this option more rarely used in practice of + * defining REST APIs. The common usage of `*` is in custom methods + * which don't use the URL at all for transferring data. + * + * It is possible to define multiple HTTP methods for one RPC by using + * the `additional_bindings` option. Example: + * + * service Messaging { + * rpc GetMessage(GetMessageRequest) returns (Message) { + * option (google.api.http) = { + * get: "/v1/messages/{message_id}" + * additional_bindings { + * get: "/v1/users/{user_id}/messages/{message_id}" + * } + * }; + * } + * } + * message GetMessageRequest { + * string message_id = 1; + * string user_id = 2; + * } + * + * + * This enables the following two alternative HTTP JSON to RPC + * mappings: + * + * HTTP | RPC + * -----|----- + * `GET /v1/messages/123456` | `GetMessage(message_id: "123456")` + * `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: "123456")` + * + * # Rules for HTTP mapping + * + * The rules for mapping HTTP path, query parameters, and body fields + * to the request message are as follows: + * + * 1. The `body` field specifies either `*` or a field path, or is + * omitted. If omitted, it indicates there is no HTTP request body. + * 2. 
Leaf fields (recursive expansion of nested messages in the + * request) can be classified into three types: + * (a) Matched in the URL template. + * (b) Covered by body (if body is `*`, everything except (a) fields; + * else everything under the body field) + * (c) All other fields. + * 3. URL query parameters found in the HTTP request are mapped to (c) fields. + * 4. Any body sent with an HTTP request can contain only (b) fields. + * + * The syntax of the path template is as follows: + * + * Template = "/" Segments [ Verb ] ; + * Segments = Segment { "/" Segment } ; + * Segment = "*" | "**" | LITERAL | Variable ; + * Variable = "{" FieldPath [ "=" Segments ] "}" ; + * FieldPath = IDENT { "." IDENT } ; + * Verb = ":" LITERAL ; + * + * The syntax `*` matches a single path segment. The syntax `**` matches zero + * or more path segments, which must be the last part of the path except the + * `Verb`. The syntax `LITERAL` matches literal text in the path. + * + * The syntax `Variable` matches part of the URL path as specified by its + * template. A variable template must not contain other variables. If a variable + * matches a single path segment, its template may be omitted, e.g. `{var}` + * is equivalent to `{var=*}`. + * + * If a variable contains exactly one path segment, such as `"{var}"` or + * `"{var=*}"`, when such a variable is expanded into a URL path, all characters + * except `[-_.~0-9a-zA-Z]` are percent-encoded. Such variables show up in the + * Discovery Document as `{var}`. + * + * If a variable contains one or more path segments, such as `"{var=foo/*}"` + * or `"{var=**}"`, when such a variable is expanded into a URL path, all + * characters except `[-_.~/0-9a-zA-Z]` are percent-encoded. Such variables + * show up in the Discovery Document as `{+var}`. 
+ * + * NOTE: While the single segment variable matches the semantics of + * [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 + * Simple String Expansion, the multi segment variable **does not** match + * RFC 6570 Reserved Expansion. The reason is that the Reserved Expansion + * does not expand special characters like `?` and `#`, which would lead + * to invalid URLs. + * + * NOTE: the field paths in variables and in the `body` must not refer to + * repeated fields or map fields. + */ +export interface HttpRule { + /** + * Selects methods to which this rule applies. + * + * Refer to [selector][google.api.DocumentationRule.selector] for syntax details. + */ + selector: string; + /** Used for listing and getting information about resources. */ + get: string | undefined; + /** Used for updating a resource. */ + put: string | undefined; + /** Used for creating a resource. */ + post: string | undefined; + /** Used for deleting a resource. */ + delete: string | undefined; + /** Used for updating a resource. */ + patch: string | undefined; + /** + * The custom pattern is used for specifying an HTTP method that is not + * included in the `pattern` field, such as HEAD, or "*" to leave the + * HTTP method unspecified for this rule. The wild-card rule is useful + * for services that provide content to Web (HTML) clients. + */ + custom: CustomHttpPattern | undefined; + /** + * The name of the request field whose value is mapped to the HTTP body, or + * `*` for mapping all fields not captured by the path pattern to the HTTP + * body. NOTE: the referred field must not be a repeated field and must be + * present at the top-level of request message type. + */ + body: string; + /** + * Optional. The name of the response field whose value is mapped to the HTTP + * body of response. Other response fields are ignored. When + * not set, the response message will be used as HTTP body of response. 
+ */ + response_body: string; + /** + * Additional HTTP bindings for the selector. Nested bindings must + * not contain an `additional_bindings` field themselves (that is, + * the nesting may only be one level deep). + */ + additional_bindings: HttpRule[]; +} + +/** A custom pattern is used for defining custom HTTP verb. */ +export interface CustomHttpPattern { + /** The name of this custom HTTP verb. */ + kind: string; + /** The path matched by this custom verb. */ + path: string; +} + +const baseHttp: object = { fully_decode_reserved_expansion: false }; + +export const Http = { + encode(message: Http, writer: Writer = Writer.create()): Writer { + for (const v of message.rules) { + HttpRule.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.fully_decode_reserved_expansion === true) { + writer.uint32(16).bool(message.fully_decode_reserved_expansion); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): Http { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseHttp } as Http; + message.rules = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.rules.push(HttpRule.decode(reader, reader.uint32())); + break; + case 2: + message.fully_decode_reserved_expansion = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Http { + const message = { ...baseHttp } as Http; + message.rules = []; + if (object.rules !== undefined && object.rules !== null) { + for (const e of object.rules) { + message.rules.push(HttpRule.fromJSON(e)); + } + } + if ( + object.fully_decode_reserved_expansion !== undefined && + object.fully_decode_reserved_expansion !== null + ) { + message.fully_decode_reserved_expansion = Boolean( + object.fully_decode_reserved_expansion + ); + } else { + message.fully_decode_reserved_expansion = false; + } + return message; + }, + + toJSON(message: Http): unknown { + const obj: any = {}; + if (message.rules) { + obj.rules = message.rules.map((e) => + e ? 
HttpRule.toJSON(e) : undefined + ); + } else { + obj.rules = []; + } + message.fully_decode_reserved_expansion !== undefined && + (obj.fully_decode_reserved_expansion = + message.fully_decode_reserved_expansion); + return obj; + }, + + fromPartial(object: DeepPartial): Http { + const message = { ...baseHttp } as Http; + message.rules = []; + if (object.rules !== undefined && object.rules !== null) { + for (const e of object.rules) { + message.rules.push(HttpRule.fromPartial(e)); + } + } + if ( + object.fully_decode_reserved_expansion !== undefined && + object.fully_decode_reserved_expansion !== null + ) { + message.fully_decode_reserved_expansion = + object.fully_decode_reserved_expansion; + } else { + message.fully_decode_reserved_expansion = false; + } + return message; + }, +}; + +const baseHttpRule: object = { selector: "", body: "", response_body: "" }; + +export const HttpRule = { + encode(message: HttpRule, writer: Writer = Writer.create()): Writer { + if (message.selector !== "") { + writer.uint32(10).string(message.selector); + } + if (message.get !== undefined) { + writer.uint32(18).string(message.get); + } + if (message.put !== undefined) { + writer.uint32(26).string(message.put); + } + if (message.post !== undefined) { + writer.uint32(34).string(message.post); + } + if (message.delete !== undefined) { + writer.uint32(42).string(message.delete); + } + if (message.patch !== undefined) { + writer.uint32(50).string(message.patch); + } + if (message.custom !== undefined) { + CustomHttpPattern.encode( + message.custom, + writer.uint32(66).fork() + ).ldelim(); + } + if (message.body !== "") { + writer.uint32(58).string(message.body); + } + if (message.response_body !== "") { + writer.uint32(98).string(message.response_body); + } + for (const v of message.additional_bindings) { + HttpRule.encode(v!, writer.uint32(90).fork()).ldelim(); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): HttpRule { + const reader = input instanceof 
Uint8Array ? new Reader(input) : input; + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseHttpRule } as HttpRule; + message.additional_bindings = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.selector = reader.string(); + break; + case 2: + message.get = reader.string(); + break; + case 3: + message.put = reader.string(); + break; + case 4: + message.post = reader.string(); + break; + case 5: + message.delete = reader.string(); + break; + case 6: + message.patch = reader.string(); + break; + case 8: + message.custom = CustomHttpPattern.decode(reader, reader.uint32()); + break; + case 7: + message.body = reader.string(); + break; + case 12: + message.response_body = reader.string(); + break; + case 11: + message.additional_bindings.push( + HttpRule.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): HttpRule { + const message = { ...baseHttpRule } as HttpRule; + message.additional_bindings = []; + if (object.selector !== undefined && object.selector !== null) { + message.selector = String(object.selector); + } else { + message.selector = ""; + } + if (object.get !== undefined && object.get !== null) { + message.get = String(object.get); + } else { + message.get = undefined; + } + if (object.put !== undefined && object.put !== null) { + message.put = String(object.put); + } else { + message.put = undefined; + } + if (object.post !== undefined && object.post !== null) { + message.post = String(object.post); + } else { + message.post = undefined; + } + if (object.delete !== undefined && object.delete !== null) { + message.delete = String(object.delete); + } else { + message.delete = undefined; + } + if (object.patch !== undefined && object.patch !== null) { + message.patch = String(object.patch); + } else { + message.patch = undefined; + } + if (object.custom !== 
undefined && object.custom !== null) { + message.custom = CustomHttpPattern.fromJSON(object.custom); + } else { + message.custom = undefined; + } + if (object.body !== undefined && object.body !== null) { + message.body = String(object.body); + } else { + message.body = ""; + } + if (object.response_body !== undefined && object.response_body !== null) { + message.response_body = String(object.response_body); + } else { + message.response_body = ""; + } + if ( + object.additional_bindings !== undefined && + object.additional_bindings !== null + ) { + for (const e of object.additional_bindings) { + message.additional_bindings.push(HttpRule.fromJSON(e)); + } + } + return message; + }, + + toJSON(message: HttpRule): unknown { + const obj: any = {}; + message.selector !== undefined && (obj.selector = message.selector); + message.get !== undefined && (obj.get = message.get); + message.put !== undefined && (obj.put = message.put); + message.post !== undefined && (obj.post = message.post); + message.delete !== undefined && (obj.delete = message.delete); + message.patch !== undefined && (obj.patch = message.patch); + message.custom !== undefined && + (obj.custom = message.custom + ? CustomHttpPattern.toJSON(message.custom) + : undefined); + message.body !== undefined && (obj.body = message.body); + message.response_body !== undefined && + (obj.response_body = message.response_body); + if (message.additional_bindings) { + obj.additional_bindings = message.additional_bindings.map((e) => + e ? 
HttpRule.toJSON(e) : undefined + ); + } else { + obj.additional_bindings = []; + } + return obj; + }, + + fromPartial(object: DeepPartial): HttpRule { + const message = { ...baseHttpRule } as HttpRule; + message.additional_bindings = []; + if (object.selector !== undefined && object.selector !== null) { + message.selector = object.selector; + } else { + message.selector = ""; + } + if (object.get !== undefined && object.get !== null) { + message.get = object.get; + } else { + message.get = undefined; + } + if (object.put !== undefined && object.put !== null) { + message.put = object.put; + } else { + message.put = undefined; + } + if (object.post !== undefined && object.post !== null) { + message.post = object.post; + } else { + message.post = undefined; + } + if (object.delete !== undefined && object.delete !== null) { + message.delete = object.delete; + } else { + message.delete = undefined; + } + if (object.patch !== undefined && object.patch !== null) { + message.patch = object.patch; + } else { + message.patch = undefined; + } + if (object.custom !== undefined && object.custom !== null) { + message.custom = CustomHttpPattern.fromPartial(object.custom); + } else { + message.custom = undefined; + } + if (object.body !== undefined && object.body !== null) { + message.body = object.body; + } else { + message.body = ""; + } + if (object.response_body !== undefined && object.response_body !== null) { + message.response_body = object.response_body; + } else { + message.response_body = ""; + } + if ( + object.additional_bindings !== undefined && + object.additional_bindings !== null + ) { + for (const e of object.additional_bindings) { + message.additional_bindings.push(HttpRule.fromPartial(e)); + } + } + return message; + }, +}; + +const baseCustomHttpPattern: object = { kind: "", path: "" }; + +export const CustomHttpPattern = { + encode(message: CustomHttpPattern, writer: Writer = Writer.create()): Writer { + if (message.kind !== "") { + 
writer.uint32(10).string(message.kind); + } + if (message.path !== "") { + writer.uint32(18).string(message.path); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): CustomHttpPattern { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseCustomHttpPattern } as CustomHttpPattern; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.kind = reader.string(); + break; + case 2: + message.path = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CustomHttpPattern { + const message = { ...baseCustomHttpPattern } as CustomHttpPattern; + if (object.kind !== undefined && object.kind !== null) { + message.kind = String(object.kind); + } else { + message.kind = ""; + } + if (object.path !== undefined && object.path !== null) { + message.path = String(object.path); + } else { + message.path = ""; + } + return message; + }, + + toJSON(message: CustomHttpPattern): unknown { + const obj: any = {}; + message.kind !== undefined && (obj.kind = message.kind); + message.path !== undefined && (obj.path = message.path); + return obj; + }, + + fromPartial(object: DeepPartial): CustomHttpPattern { + const message = { ...baseCustomHttpPattern } as CustomHttpPattern; + if (object.kind !== undefined && object.kind !== null) { + message.kind = object.kind; + } else { + message.kind = ""; + } + if (object.path !== undefined && object.path !== null) { + message.path = object.path; + } else { + message.path = ""; + } + return message; + }, +}; + +type Builtin = Date | Function | Uint8Array | string | number | undefined; +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? 
{ [K in keyof T]?: DeepPartial } + : Partial; diff --git a/vue/src/store/generated/market/pendulumlabs.market.market/module/types/google/protobuf/descriptor.ts b/vue/src/store/generated/market/pendulumlabs.market.market/module/types/google/protobuf/descriptor.ts new file mode 100644 index 00000000..a0167cb2 --- /dev/null +++ b/vue/src/store/generated/market/pendulumlabs.market.market/module/types/google/protobuf/descriptor.ts @@ -0,0 +1,5314 @@ +/* eslint-disable */ +import * as Long from "long"; +import { util, configure, Writer, Reader } from "protobufjs/minimal"; + +export const protobufPackage = "google.protobuf"; + +/** + * The protocol compiler can output a FileDescriptorSet containing the .proto + * files it parses. + */ +export interface FileDescriptorSet { + file: FileDescriptorProto[]; +} + +/** Describes a complete .proto file. */ +export interface FileDescriptorProto { + /** file name, relative to root of source tree */ + name: string; + /** e.g. "foo", "foo.bar", etc. */ + package: string; + /** Names of files imported by this file. */ + dependency: string[]; + /** Indexes of the public imported files in the dependency list above. */ + public_dependency: number[]; + /** + * Indexes of the weak imported files in the dependency list. + * For Google-internal migration only. Do not use. + */ + weak_dependency: number[]; + /** All top-level definitions in this file. */ + message_type: DescriptorProto[]; + enum_type: EnumDescriptorProto[]; + service: ServiceDescriptorProto[]; + extension: FieldDescriptorProto[]; + options: FileOptions | undefined; + /** + * This field contains optional information about the original source code. + * You may safely remove this entire field without harming runtime + * functionality of the descriptors -- the information is needed only by + * development tools. + */ + source_code_info: SourceCodeInfo | undefined; + /** + * The syntax of the proto file. + * The supported values are "proto2" and "proto3". 
+ */ + syntax: string; +} + +/** Describes a message type. */ +export interface DescriptorProto { + name: string; + field: FieldDescriptorProto[]; + extension: FieldDescriptorProto[]; + nested_type: DescriptorProto[]; + enum_type: EnumDescriptorProto[]; + extension_range: DescriptorProto_ExtensionRange[]; + oneof_decl: OneofDescriptorProto[]; + options: MessageOptions | undefined; + reserved_range: DescriptorProto_ReservedRange[]; + /** + * Reserved field names, which may not be used by fields in the same message. + * A given name may only be reserved once. + */ + reserved_name: string[]; +} + +export interface DescriptorProto_ExtensionRange { + /** Inclusive. */ + start: number; + /** Exclusive. */ + end: number; + options: ExtensionRangeOptions | undefined; +} + +/** + * Range of reserved tag numbers. Reserved tag numbers may not be used by + * fields or extension ranges in the same message. Reserved ranges may + * not overlap. + */ +export interface DescriptorProto_ReservedRange { + /** Inclusive. */ + start: number; + /** Exclusive. */ + end: number; +} + +export interface ExtensionRangeOptions { + /** The parser stores options it doesn't recognize here. See above. */ + uninterpreted_option: UninterpretedOption[]; +} + +/** Describes a field within a message. */ +export interface FieldDescriptorProto { + name: string; + number: number; + label: FieldDescriptorProto_Label; + /** + * If type_name is set, this need not be set. If both this and type_name + * are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + */ + type: FieldDescriptorProto_Type; + /** + * For message and enum types, this is the name of the type. If the name + * starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + * rules are used to find the type (i.e. first the nested types within this + * message are searched, then within the parent, on up to the root + * namespace). 
+ */ + type_name: string; + /** + * For extensions, this is the name of the type being extended. It is + * resolved in the same manner as type_name. + */ + extendee: string; + /** + * For numeric types, contains the original text representation of the value. + * For booleans, "true" or "false". + * For strings, contains the default text contents (not escaped in any way). + * For bytes, contains the C escaped value. All bytes >= 128 are escaped. + * TODO(kenton): Base-64 encode? + */ + default_value: string; + /** + * If set, gives the index of a oneof in the containing type's oneof_decl + * list. This field is a member of that oneof. + */ + oneof_index: number; + /** + * JSON name of this field. The value is set by protocol compiler. If the + * user has set a "json_name" option on this field, that option's value + * will be used. Otherwise, it's deduced from the field's name by converting + * it to camelCase. + */ + json_name: string; + options: FieldOptions | undefined; + /** + * If true, this is a proto3 "optional". When a proto3 field is optional, it + * tracks presence regardless of field type. + * + * When proto3_optional is true, this field must be belong to a oneof to + * signal to old proto3 clients that presence is tracked for this field. This + * oneof is known as a "synthetic" oneof, and this field must be its sole + * member (each proto3 optional field gets its own synthetic oneof). Synthetic + * oneofs exist in the descriptor only, and do not generate any API. Synthetic + * oneofs must be ordered after all "real" oneofs. + * + * For message fields, proto3_optional doesn't create any semantic change, + * since non-repeated message fields always track presence. However it still + * indicates the semantic detail of whether the user wrote "optional" or not. + * This can be useful for round-tripping the .proto file. For consistency we + * give message fields a synthetic oneof also, even though it is not required + * to track presence. 
This is especially important because the parser can't + * tell if a field is a message or an enum, so it must always create a + * synthetic oneof. + * + * Proto2 optional fields do not set this flag, because they already indicate + * optional with `LABEL_OPTIONAL`. + */ + proto3_optional: boolean; +} + +export enum FieldDescriptorProto_Type { + /** + * TYPE_DOUBLE - 0 is reserved for errors. + * Order is weird for historical reasons. + */ + TYPE_DOUBLE = 1, + TYPE_FLOAT = 2, + /** + * TYPE_INT64 - Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + * negative values are likely. + */ + TYPE_INT64 = 3, + TYPE_UINT64 = 4, + /** + * TYPE_INT32 - Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + * negative values are likely. + */ + TYPE_INT32 = 5, + TYPE_FIXED64 = 6, + TYPE_FIXED32 = 7, + TYPE_BOOL = 8, + TYPE_STRING = 9, + /** + * TYPE_GROUP - Tag-delimited aggregate. + * Group type is deprecated and not supported in proto3. However, Proto3 + * implementations should still be able to parse the group wire format and + * treat group fields as unknown fields. + */ + TYPE_GROUP = 10, + /** TYPE_MESSAGE - Length-delimited aggregate. */ + TYPE_MESSAGE = 11, + /** TYPE_BYTES - New in version 2. */ + TYPE_BYTES = 12, + TYPE_UINT32 = 13, + TYPE_ENUM = 14, + TYPE_SFIXED32 = 15, + TYPE_SFIXED64 = 16, + /** TYPE_SINT32 - Uses ZigZag encoding. */ + TYPE_SINT32 = 17, + /** TYPE_SINT64 - Uses ZigZag encoding. 
*/ + TYPE_SINT64 = 18, + UNRECOGNIZED = -1, +} + +export function fieldDescriptorProto_TypeFromJSON( + object: any +): FieldDescriptorProto_Type { + switch (object) { + case 1: + case "TYPE_DOUBLE": + return FieldDescriptorProto_Type.TYPE_DOUBLE; + case 2: + case "TYPE_FLOAT": + return FieldDescriptorProto_Type.TYPE_FLOAT; + case 3: + case "TYPE_INT64": + return FieldDescriptorProto_Type.TYPE_INT64; + case 4: + case "TYPE_UINT64": + return FieldDescriptorProto_Type.TYPE_UINT64; + case 5: + case "TYPE_INT32": + return FieldDescriptorProto_Type.TYPE_INT32; + case 6: + case "TYPE_FIXED64": + return FieldDescriptorProto_Type.TYPE_FIXED64; + case 7: + case "TYPE_FIXED32": + return FieldDescriptorProto_Type.TYPE_FIXED32; + case 8: + case "TYPE_BOOL": + return FieldDescriptorProto_Type.TYPE_BOOL; + case 9: + case "TYPE_STRING": + return FieldDescriptorProto_Type.TYPE_STRING; + case 10: + case "TYPE_GROUP": + return FieldDescriptorProto_Type.TYPE_GROUP; + case 11: + case "TYPE_MESSAGE": + return FieldDescriptorProto_Type.TYPE_MESSAGE; + case 12: + case "TYPE_BYTES": + return FieldDescriptorProto_Type.TYPE_BYTES; + case 13: + case "TYPE_UINT32": + return FieldDescriptorProto_Type.TYPE_UINT32; + case 14: + case "TYPE_ENUM": + return FieldDescriptorProto_Type.TYPE_ENUM; + case 15: + case "TYPE_SFIXED32": + return FieldDescriptorProto_Type.TYPE_SFIXED32; + case 16: + case "TYPE_SFIXED64": + return FieldDescriptorProto_Type.TYPE_SFIXED64; + case 17: + case "TYPE_SINT32": + return FieldDescriptorProto_Type.TYPE_SINT32; + case 18: + case "TYPE_SINT64": + return FieldDescriptorProto_Type.TYPE_SINT64; + case -1: + case "UNRECOGNIZED": + default: + return FieldDescriptorProto_Type.UNRECOGNIZED; + } +} + +export function fieldDescriptorProto_TypeToJSON( + object: FieldDescriptorProto_Type +): string { + switch (object) { + case FieldDescriptorProto_Type.TYPE_DOUBLE: + return "TYPE_DOUBLE"; + case FieldDescriptorProto_Type.TYPE_FLOAT: + return "TYPE_FLOAT"; + case 
FieldDescriptorProto_Type.TYPE_INT64: + return "TYPE_INT64"; + case FieldDescriptorProto_Type.TYPE_UINT64: + return "TYPE_UINT64"; + case FieldDescriptorProto_Type.TYPE_INT32: + return "TYPE_INT32"; + case FieldDescriptorProto_Type.TYPE_FIXED64: + return "TYPE_FIXED64"; + case FieldDescriptorProto_Type.TYPE_FIXED32: + return "TYPE_FIXED32"; + case FieldDescriptorProto_Type.TYPE_BOOL: + return "TYPE_BOOL"; + case FieldDescriptorProto_Type.TYPE_STRING: + return "TYPE_STRING"; + case FieldDescriptorProto_Type.TYPE_GROUP: + return "TYPE_GROUP"; + case FieldDescriptorProto_Type.TYPE_MESSAGE: + return "TYPE_MESSAGE"; + case FieldDescriptorProto_Type.TYPE_BYTES: + return "TYPE_BYTES"; + case FieldDescriptorProto_Type.TYPE_UINT32: + return "TYPE_UINT32"; + case FieldDescriptorProto_Type.TYPE_ENUM: + return "TYPE_ENUM"; + case FieldDescriptorProto_Type.TYPE_SFIXED32: + return "TYPE_SFIXED32"; + case FieldDescriptorProto_Type.TYPE_SFIXED64: + return "TYPE_SFIXED64"; + case FieldDescriptorProto_Type.TYPE_SINT32: + return "TYPE_SINT32"; + case FieldDescriptorProto_Type.TYPE_SINT64: + return "TYPE_SINT64"; + default: + return "UNKNOWN"; + } +} + +export enum FieldDescriptorProto_Label { + /** LABEL_OPTIONAL - 0 is reserved for errors */ + LABEL_OPTIONAL = 1, + LABEL_REQUIRED = 2, + LABEL_REPEATED = 3, + UNRECOGNIZED = -1, +} + +export function fieldDescriptorProto_LabelFromJSON( + object: any +): FieldDescriptorProto_Label { + switch (object) { + case 1: + case "LABEL_OPTIONAL": + return FieldDescriptorProto_Label.LABEL_OPTIONAL; + case 2: + case "LABEL_REQUIRED": + return FieldDescriptorProto_Label.LABEL_REQUIRED; + case 3: + case "LABEL_REPEATED": + return FieldDescriptorProto_Label.LABEL_REPEATED; + case -1: + case "UNRECOGNIZED": + default: + return FieldDescriptorProto_Label.UNRECOGNIZED; + } +} + +export function fieldDescriptorProto_LabelToJSON( + object: FieldDescriptorProto_Label +): string { + switch (object) { + case FieldDescriptorProto_Label.LABEL_OPTIONAL: + 
return "LABEL_OPTIONAL"; + case FieldDescriptorProto_Label.LABEL_REQUIRED: + return "LABEL_REQUIRED"; + case FieldDescriptorProto_Label.LABEL_REPEATED: + return "LABEL_REPEATED"; + default: + return "UNKNOWN"; + } +} + +/** Describes a oneof. */ +export interface OneofDescriptorProto { + name: string; + options: OneofOptions | undefined; +} + +/** Describes an enum type. */ +export interface EnumDescriptorProto { + name: string; + value: EnumValueDescriptorProto[]; + options: EnumOptions | undefined; + /** + * Range of reserved numeric values. Reserved numeric values may not be used + * by enum values in the same enum declaration. Reserved ranges may not + * overlap. + */ + reserved_range: EnumDescriptorProto_EnumReservedRange[]; + /** + * Reserved enum value names, which may not be reused. A given name may only + * be reserved once. + */ + reserved_name: string[]; +} + +/** + * Range of reserved numeric values. Reserved values may not be used by + * entries in the same enum. Reserved ranges may not overlap. + * + * Note that this is distinct from DescriptorProto.ReservedRange in that it + * is inclusive such that it can appropriately represent the entire int32 + * domain. + */ +export interface EnumDescriptorProto_EnumReservedRange { + /** Inclusive. */ + start: number; + /** Inclusive. */ + end: number; +} + +/** Describes a value within an enum. */ +export interface EnumValueDescriptorProto { + name: string; + number: number; + options: EnumValueOptions | undefined; +} + +/** Describes a service. */ +export interface ServiceDescriptorProto { + name: string; + method: MethodDescriptorProto[]; + options: ServiceOptions | undefined; +} + +/** Describes a method of a service. */ +export interface MethodDescriptorProto { + name: string; + /** + * Input and output type names. These are resolved in the same way as + * FieldDescriptorProto.type_name, but must refer to a message type. 
+ */ + input_type: string; + output_type: string; + options: MethodOptions | undefined; + /** Identifies if client streams multiple client messages */ + client_streaming: boolean; + /** Identifies if server streams multiple server messages */ + server_streaming: boolean; +} + +export interface FileOptions { + /** + * Sets the Java package where classes generated from this .proto will be + * placed. By default, the proto package is used, but this is often + * inappropriate because proto packages do not normally start with backwards + * domain names. + */ + java_package: string; + /** + * Controls the name of the wrapper Java class generated for the .proto file. + * That class will always contain the .proto file's getDescriptor() method as + * well as any top-level extensions defined in the .proto file. + * If java_multiple_files is disabled, then all the other classes from the + * .proto file will be nested inside the single wrapper outer class. + */ + java_outer_classname: string; + /** + * If enabled, then the Java code generator will generate a separate .java + * file for each top-level message, enum, and service defined in the .proto + * file. Thus, these types will *not* be nested inside the wrapper class + * named by java_outer_classname. However, the wrapper class will still be + * generated to contain the file's getDescriptor() method as well as any + * top-level extensions defined in the file. + */ + java_multiple_files: boolean; + /** + * This option does nothing. + * + * @deprecated + */ + java_generate_equals_and_hash: boolean; + /** + * If set true, then the Java2 code generator will generate code that + * throws an exception whenever an attempt is made to assign a non-UTF-8 + * byte sequence to a string field. + * Message reflection will do the same. + * However, an extension field still accepts non-UTF-8 byte sequences. + * This option has no effect on when used with the lite runtime. 
+ */ + java_string_check_utf8: boolean; + optimize_for: FileOptions_OptimizeMode; + /** + * Sets the Go package where structs generated from this .proto will be + * placed. If omitted, the Go package will be derived from the following: + * - The basename of the package import path, if provided. + * - Otherwise, the package statement in the .proto file, if present. + * - Otherwise, the basename of the .proto file, without extension. + */ + go_package: string; + /** + * Should generic services be generated in each language? "Generic" services + * are not specific to any particular RPC system. They are generated by the + * main code generators in each language (without additional plugins). + * Generic services were the only kind of service generation supported by + * early versions of google.protobuf. + * + * Generic services are now considered deprecated in favor of using plugins + * that generate code specific to your particular RPC system. Therefore, + * these default to false. Old code which depends on generic services should + * explicitly set them to true. + */ + cc_generic_services: boolean; + java_generic_services: boolean; + py_generic_services: boolean; + php_generic_services: boolean; + /** + * Is this file deprecated? + * Depending on the target platform, this can emit Deprecated annotations + * for everything in the file, or it will be completely ignored; in the very + * least, this is a formalization for deprecating files. + */ + deprecated: boolean; + /** + * Enables the use of arenas for the proto messages in this file. This applies + * only to generated classes for C++. + */ + cc_enable_arenas: boolean; + /** + * Sets the objective c class prefix which is prepended to all objective c + * generated classes from this .proto. There is no default. + */ + objc_class_prefix: string; + /** Namespace for generated classes; defaults to the package. 
*/ + csharp_namespace: string; + /** + * By default Swift generators will take the proto package and CamelCase it + * replacing '.' with underscore and use that to prefix the types/symbols + * defined. When this options is provided, they will use this value instead + * to prefix the types/symbols defined. + */ + swift_prefix: string; + /** + * Sets the php class prefix which is prepended to all php generated classes + * from this .proto. Default is empty. + */ + php_class_prefix: string; + /** + * Use this option to change the namespace of php generated classes. Default + * is empty. When this option is empty, the package name will be used for + * determining the namespace. + */ + php_namespace: string; + /** + * Use this option to change the namespace of php generated metadata classes. + * Default is empty. When this option is empty, the proto file name will be + * used for determining the namespace. + */ + php_metadata_namespace: string; + /** + * Use this option to change the package of ruby generated classes. Default + * is empty. When this option is not set, the package name will be used for + * determining the ruby package. + */ + ruby_package: string; + /** + * The parser stores options it doesn't recognize here. + * See the documentation for the "Options" section above. + */ + uninterpreted_option: UninterpretedOption[]; +} + +/** Generated classes can be optimized for speed or code size. */ +export enum FileOptions_OptimizeMode { + /** SPEED - Generate complete code for parsing, serialization, */ + SPEED = 1, + /** CODE_SIZE - etc. */ + CODE_SIZE = 2, + /** LITE_RUNTIME - Generate code using MessageLite and the lite runtime. 
*/ + LITE_RUNTIME = 3, + UNRECOGNIZED = -1, +} + +export function fileOptions_OptimizeModeFromJSON( + object: any +): FileOptions_OptimizeMode { + switch (object) { + case 1: + case "SPEED": + return FileOptions_OptimizeMode.SPEED; + case 2: + case "CODE_SIZE": + return FileOptions_OptimizeMode.CODE_SIZE; + case 3: + case "LITE_RUNTIME": + return FileOptions_OptimizeMode.LITE_RUNTIME; + case -1: + case "UNRECOGNIZED": + default: + return FileOptions_OptimizeMode.UNRECOGNIZED; + } +} + +export function fileOptions_OptimizeModeToJSON( + object: FileOptions_OptimizeMode +): string { + switch (object) { + case FileOptions_OptimizeMode.SPEED: + return "SPEED"; + case FileOptions_OptimizeMode.CODE_SIZE: + return "CODE_SIZE"; + case FileOptions_OptimizeMode.LITE_RUNTIME: + return "LITE_RUNTIME"; + default: + return "UNKNOWN"; + } +} + +export interface MessageOptions { + /** + * Set true to use the old proto1 MessageSet wire format for extensions. + * This is provided for backwards-compatibility with the MessageSet wire + * format. You should not use this for any other reason: It's less + * efficient, has fewer features, and is more complicated. + * + * The message must be defined exactly as follows: + * message Foo { + * option message_set_wire_format = true; + * extensions 4 to max; + * } + * Note that the message cannot have any defined fields; MessageSets only + * have extensions. + * + * All extensions of your type must be singular messages; e.g. they cannot + * be int32s, enums, or repeated messages. + * + * Because this is an option, the above two restrictions are not enforced by + * the protocol compiler. + */ + message_set_wire_format: boolean; + /** + * Disables the generation of the standard "descriptor()" accessor, which can + * conflict with a field of the same name. This is meant to make migration + * from proto1 easier; new code should avoid fields named "descriptor". + */ + no_standard_descriptor_accessor: boolean; + /** + * Is this message deprecated? 
+ * Depending on the target platform, this can emit Deprecated annotations + * for the message, or it will be completely ignored; in the very least, + * this is a formalization for deprecating messages. + */ + deprecated: boolean; + /** + * Whether the message is an automatically generated map entry type for the + * maps field. + * + * For maps fields: + * map map_field = 1; + * The parsed descriptor looks like: + * message MapFieldEntry { + * option map_entry = true; + * optional KeyType key = 1; + * optional ValueType value = 2; + * } + * repeated MapFieldEntry map_field = 1; + * + * Implementations may choose not to generate the map_entry=true message, but + * use a native map in the target language to hold the keys and values. + * The reflection APIs in such implementations still need to work as + * if the field is a repeated message field. + * + * NOTE: Do not set the option in .proto files. Always use the maps syntax + * instead. The option should only be implicitly set by the proto compiler + * parser. + */ + map_entry: boolean; + /** The parser stores options it doesn't recognize here. See above. */ + uninterpreted_option: UninterpretedOption[]; +} + +export interface FieldOptions { + /** + * The ctype option instructs the C++ code generator to use a different + * representation of the field than it normally would. See the specific + * options below. This option is not yet implemented in the open source + * release -- sorry, we'll try to include it in a future version! + */ + ctype: FieldOptions_CType; + /** + * The packed option can be enabled for repeated primitive fields to enable + * a more efficient representation on the wire. Rather than repeatedly + * writing the tag and type for each element, the entire array is encoded as + * a single length-delimited blob. In proto3, only explicit setting it to + * false will avoid using packed encoding. 
+ */ + packed: boolean; + /** + * The jstype option determines the JavaScript type used for values of the + * field. The option is permitted only for 64 bit integral and fixed types + * (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING + * is represented as JavaScript string, which avoids loss of precision that + * can happen when a large value is converted to a floating point JavaScript. + * Specifying JS_NUMBER for the jstype causes the generated JavaScript code to + * use the JavaScript "number" type. The behavior of the default option + * JS_NORMAL is implementation dependent. + * + * This option is an enum to permit additional types to be added, e.g. + * goog.math.Integer. + */ + jstype: FieldOptions_JSType; + /** + * Should this field be parsed lazily? Lazy applies only to message-type + * fields. It means that when the outer message is initially parsed, the + * inner message's contents will not be parsed but instead stored in encoded + * form. The inner message will actually be parsed when it is first accessed. + * + * This is only a hint. Implementations are free to choose whether to use + * eager or lazy parsing regardless of the value of this option. However, + * setting this option true suggests that the protocol author believes that + * using lazy parsing on this field is worth the additional bookkeeping + * overhead typically needed to implement it. + * + * This option does not affect the public interface of any generated code; + * all method signatures remain the same. Furthermore, thread-safety of the + * interface is not affected by this option; const methods remain safe to + * call from multiple threads concurrently, while non-const methods continue + * to require exclusive access. + * + * + * Note that implementations may choose not to check required fields within + * a lazy sub-message. That is, calling IsInitialized() on the outer message + * may return true even if the inner message has missing required fields. 
+ * This is necessary because otherwise the inner message would have to be + * parsed in order to perform the check, defeating the purpose of lazy + * parsing. An implementation which chooses not to check required fields + * must be consistent about it. That is, for any particular sub-message, the + * implementation must either *always* check its required fields, or *never* + * check its required fields, regardless of whether or not the message has + * been parsed. + */ + lazy: boolean; + /** + * Is this field deprecated? + * Depending on the target platform, this can emit Deprecated annotations + * for accessors, or it will be completely ignored; in the very least, this + * is a formalization for deprecating fields. + */ + deprecated: boolean; + /** For Google-internal migration only. Do not use. */ + weak: boolean; + /** The parser stores options it doesn't recognize here. See above. */ + uninterpreted_option: UninterpretedOption[]; +} + +export enum FieldOptions_CType { + /** STRING - Default mode. */ + STRING = 0, + CORD = 1, + STRING_PIECE = 2, + UNRECOGNIZED = -1, +} + +export function fieldOptions_CTypeFromJSON(object: any): FieldOptions_CType { + switch (object) { + case 0: + case "STRING": + return FieldOptions_CType.STRING; + case 1: + case "CORD": + return FieldOptions_CType.CORD; + case 2: + case "STRING_PIECE": + return FieldOptions_CType.STRING_PIECE; + case -1: + case "UNRECOGNIZED": + default: + return FieldOptions_CType.UNRECOGNIZED; + } +} + +export function fieldOptions_CTypeToJSON(object: FieldOptions_CType): string { + switch (object) { + case FieldOptions_CType.STRING: + return "STRING"; + case FieldOptions_CType.CORD: + return "CORD"; + case FieldOptions_CType.STRING_PIECE: + return "STRING_PIECE"; + default: + return "UNKNOWN"; + } +} + +export enum FieldOptions_JSType { + /** JS_NORMAL - Use the default type. */ + JS_NORMAL = 0, + /** JS_STRING - Use JavaScript strings. */ + JS_STRING = 1, + /** JS_NUMBER - Use JavaScript numbers. 
*/ + JS_NUMBER = 2, + UNRECOGNIZED = -1, +} + +export function fieldOptions_JSTypeFromJSON(object: any): FieldOptions_JSType { + switch (object) { + case 0: + case "JS_NORMAL": + return FieldOptions_JSType.JS_NORMAL; + case 1: + case "JS_STRING": + return FieldOptions_JSType.JS_STRING; + case 2: + case "JS_NUMBER": + return FieldOptions_JSType.JS_NUMBER; + case -1: + case "UNRECOGNIZED": + default: + return FieldOptions_JSType.UNRECOGNIZED; + } +} + +export function fieldOptions_JSTypeToJSON(object: FieldOptions_JSType): string { + switch (object) { + case FieldOptions_JSType.JS_NORMAL: + return "JS_NORMAL"; + case FieldOptions_JSType.JS_STRING: + return "JS_STRING"; + case FieldOptions_JSType.JS_NUMBER: + return "JS_NUMBER"; + default: + return "UNKNOWN"; + } +} + +export interface OneofOptions { + /** The parser stores options it doesn't recognize here. See above. */ + uninterpreted_option: UninterpretedOption[]; +} + +export interface EnumOptions { + /** + * Set this option to true to allow mapping different tag names to the same + * value. + */ + allow_alias: boolean; + /** + * Is this enum deprecated? + * Depending on the target platform, this can emit Deprecated annotations + * for the enum, or it will be completely ignored; in the very least, this + * is a formalization for deprecating enums. + */ + deprecated: boolean; + /** The parser stores options it doesn't recognize here. See above. */ + uninterpreted_option: UninterpretedOption[]; +} + +export interface EnumValueOptions { + /** + * Is this enum value deprecated? + * Depending on the target platform, this can emit Deprecated annotations + * for the enum value, or it will be completely ignored; in the very least, + * this is a formalization for deprecating enum values. + */ + deprecated: boolean; + /** The parser stores options it doesn't recognize here. See above. */ + uninterpreted_option: UninterpretedOption[]; +} + +export interface ServiceOptions { + /** + * Is this service deprecated? 
+ * Depending on the target platform, this can emit Deprecated annotations + * for the service, or it will be completely ignored; in the very least, + * this is a formalization for deprecating services. + */ + deprecated: boolean; + /** The parser stores options it doesn't recognize here. See above. */ + uninterpreted_option: UninterpretedOption[]; +} + +export interface MethodOptions { + /** + * Is this method deprecated? + * Depending on the target platform, this can emit Deprecated annotations + * for the method, or it will be completely ignored; in the very least, + * this is a formalization for deprecating methods. + */ + deprecated: boolean; + idempotency_level: MethodOptions_IdempotencyLevel; + /** The parser stores options it doesn't recognize here. See above. */ + uninterpreted_option: UninterpretedOption[]; +} + +/** + * Is this method side-effect-free (or safe in HTTP parlance), or idempotent, + * or neither? HTTP based RPC implementation may choose GET verb for safe + * methods, and PUT verb for idempotent methods instead of the default POST. 
+ */ +export enum MethodOptions_IdempotencyLevel { + IDEMPOTENCY_UNKNOWN = 0, + /** NO_SIDE_EFFECTS - implies idempotent */ + NO_SIDE_EFFECTS = 1, + /** IDEMPOTENT - idempotent, but may have side effects */ + IDEMPOTENT = 2, + UNRECOGNIZED = -1, +} + +export function methodOptions_IdempotencyLevelFromJSON( + object: any +): MethodOptions_IdempotencyLevel { + switch (object) { + case 0: + case "IDEMPOTENCY_UNKNOWN": + return MethodOptions_IdempotencyLevel.IDEMPOTENCY_UNKNOWN; + case 1: + case "NO_SIDE_EFFECTS": + return MethodOptions_IdempotencyLevel.NO_SIDE_EFFECTS; + case 2: + case "IDEMPOTENT": + return MethodOptions_IdempotencyLevel.IDEMPOTENT; + case -1: + case "UNRECOGNIZED": + default: + return MethodOptions_IdempotencyLevel.UNRECOGNIZED; + } +} + +export function methodOptions_IdempotencyLevelToJSON( + object: MethodOptions_IdempotencyLevel +): string { + switch (object) { + case MethodOptions_IdempotencyLevel.IDEMPOTENCY_UNKNOWN: + return "IDEMPOTENCY_UNKNOWN"; + case MethodOptions_IdempotencyLevel.NO_SIDE_EFFECTS: + return "NO_SIDE_EFFECTS"; + case MethodOptions_IdempotencyLevel.IDEMPOTENT: + return "IDEMPOTENT"; + default: + return "UNKNOWN"; + } +} + +/** + * A message representing a option the parser does not recognize. This only + * appears in options protos created by the compiler::Parser class. + * DescriptorPool resolves these when building Descriptor objects. Therefore, + * options protos in descriptor objects (e.g. returned by Descriptor::options(), + * or produced by Descriptor::CopyTo()) will never have UninterpretedOptions + * in them. + */ +export interface UninterpretedOption { + name: UninterpretedOption_NamePart[]; + /** + * The value of the uninterpreted option, in whatever type the tokenizer + * identified it as during parsing. Exactly one of these should be set. 
+ */ + identifier_value: string; + positive_int_value: number; + negative_int_value: number; + double_value: number; + string_value: Uint8Array; + aggregate_value: string; +} + +/** + * The name of the uninterpreted option. Each string represents a segment in + * a dot-separated name. is_extension is true iff a segment represents an + * extension (denoted with parentheses in options specs in .proto files). + * E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents + * "foo.(bar.baz).qux". + */ +export interface UninterpretedOption_NamePart { + name_part: string; + is_extension: boolean; +} + +/** + * Encapsulates information about the original source file from which a + * FileDescriptorProto was generated. + */ +export interface SourceCodeInfo { + /** + * A Location identifies a piece of source code in a .proto file which + * corresponds to a particular definition. This information is intended + * to be useful to IDEs, code indexers, documentation generators, and similar + * tools. + * + * For example, say we have a file like: + * message Foo { + * optional string foo = 1; + * } + * Let's look at just the field definition: + * optional string foo = 1; + * ^ ^^ ^^ ^ ^^^ + * a bc de f ghi + * We have the following locations: + * span path represents + * [a,i) [ 4, 0, 2, 0 ] The whole field definition. + * [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + * [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + * [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + * [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + * + * Notes: + * - A location may refer to a repeated field itself (i.e. not to any + * particular index within it). This is used whenever a set of elements are + * logically enclosed in a single code segment. For example, an entire + * extend block (possibly containing multiple extension definitions) will + * have an outer location whose path refers to the "extensions" repeated + * field without an index. + * - Multiple locations may have the same path. 
This happens when a single + * logical declaration is spread out across multiple places. The most + * obvious example is the "extend" block again -- there may be multiple + * extend blocks in the same scope, each of which will have the same path. + * - A location's span is not always a subset of its parent's span. For + * example, the "extendee" of an extension declaration appears at the + * beginning of the "extend" block and is shared by all extensions within + * the block. + * - Just because a location's span is a subset of some other location's span + * does not mean that it is a descendant. For example, a "group" defines + * both a type and a field in a single declaration. Thus, the locations + * corresponding to the type and field and their components will overlap. + * - Code which tries to interpret locations should probably be designed to + * ignore those that it doesn't understand, as more types of locations could + * be recorded in the future. + */ + location: SourceCodeInfo_Location[]; +} + +export interface SourceCodeInfo_Location { + /** + * Identifies which part of the FileDescriptorProto was defined at this + * location. + * + * Each element is a field number or an index. They form a path from + * the root FileDescriptorProto to the place where the definition. For + * example, this path: + * [ 4, 3, 2, 7, 1 ] + * refers to: + * file.message_type(3) // 4, 3 + * .field(7) // 2, 7 + * .name() // 1 + * This is because FileDescriptorProto.message_type has field number 4: + * repeated DescriptorProto message_type = 4; + * and DescriptorProto.field has field number 2: + * repeated FieldDescriptorProto field = 2; + * and FieldDescriptorProto.name has field number 1: + * optional string name = 1; + * + * Thus, the above path gives the location of a field name. If we removed + * the last element: + * [ 4, 3, 2, 7 ] + * this path refers to the whole field declaration (from the beginning + * of the label to the terminating semicolon). 
+ */ + path: number[]; + /** + * Always has exactly three or four elements: start line, start column, + * end line (optional, otherwise assumed same as start line), end column. + * These are packed into a single field for efficiency. Note that line + * and column numbers are zero-based -- typically you will want to add + * 1 to each before displaying to a user. + */ + span: number[]; + /** + * If this SourceCodeInfo represents a complete declaration, these are any + * comments appearing before and after the declaration which appear to be + * attached to the declaration. + * + * A series of line comments appearing on consecutive lines, with no other + * tokens appearing on those lines, will be treated as a single comment. + * + * leading_detached_comments will keep paragraphs of comments that appear + * before (but not connected to) the current element. Each paragraph, + * separated by empty lines, will be one comment element in the repeated + * field. + * + * Only the comment content is provided; comment markers (e.g. //) are + * stripped out. For block comments, leading whitespace and an asterisk + * will be stripped from the beginning of each line other than the first. + * Newlines are included in the output. + * + * Examples: + * + * optional int32 foo = 1; // Comment attached to foo. + * // Comment attached to bar. + * optional int32 bar = 2; + * + * optional string baz = 3; + * // Comment attached to baz. + * // Another line attached to baz. + * + * // Comment attached to qux. + * // + * // Another line attached to qux. + * optional double qux = 4; + * + * // Detached comment for corge. This is not leading or trailing comments + * // to qux or corge because there are blank lines separating it from + * // both. + * + * // Detached comment for corge paragraph 2. + * + * optional string corge = 5; + * /* Block comment attached + * * to corge. Leading asterisks + * * will be removed. * / + * /* Block comment attached to + * * grault. 
* / + * optional int32 grault = 6; + * + * // ignored detached comments. + */ + leading_comments: string; + trailing_comments: string; + leading_detached_comments: string[]; +} + +/** + * Describes the relationship between generated code and its original source + * file. A GeneratedCodeInfo message is associated with only one generated + * source file, but may contain references to different source .proto files. + */ +export interface GeneratedCodeInfo { + /** + * An Annotation connects some span of text in generated code to an element + * of its generating .proto file. + */ + annotation: GeneratedCodeInfo_Annotation[]; +} + +export interface GeneratedCodeInfo_Annotation { + /** + * Identifies the element in the original source .proto file. This field + * is formatted the same as SourceCodeInfo.Location.path. + */ + path: number[]; + /** Identifies the filesystem path to the original source .proto. */ + source_file: string; + /** + * Identifies the starting offset in bytes in the generated code + * that relates to the identified object. + */ + begin: number; + /** + * Identifies the ending offset in bytes in the generated code that + * relates to the identified offset. The end offset should be one past + * the last relevant byte (so the length of the text = end - begin). + */ + end: number; +} + +const baseFileDescriptorSet: object = {}; + +export const FileDescriptorSet = { + encode(message: FileDescriptorSet, writer: Writer = Writer.create()): Writer { + for (const v of message.file) { + FileDescriptorProto.encode(v!, writer.uint32(10).fork()).ldelim(); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): FileDescriptorSet { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseFileDescriptorSet } as FileDescriptorSet; + message.file = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.file.push( + FileDescriptorProto.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): FileDescriptorSet { + const message = { ...baseFileDescriptorSet } as FileDescriptorSet; + message.file = []; + if (object.file !== undefined && object.file !== null) { + for (const e of object.file) { + message.file.push(FileDescriptorProto.fromJSON(e)); + } + } + return message; + }, + + toJSON(message: FileDescriptorSet): unknown { + const obj: any = {}; + if (message.file) { + obj.file = message.file.map((e) => + e ? FileDescriptorProto.toJSON(e) : undefined + ); + } else { + obj.file = []; + } + return obj; + }, + + fromPartial(object: DeepPartial): FileDescriptorSet { + const message = { ...baseFileDescriptorSet } as FileDescriptorSet; + message.file = []; + if (object.file !== undefined && object.file !== null) { + for (const e of object.file) { + message.file.push(FileDescriptorProto.fromPartial(e)); + } + } + return message; + }, +}; + +const baseFileDescriptorProto: object = { + name: "", + package: "", + dependency: "", + public_dependency: 0, + weak_dependency: 0, + syntax: "", +}; + +export const FileDescriptorProto = { + encode( + message: FileDescriptorProto, + writer: Writer = Writer.create() + ): Writer { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + if (message.package !== "") { + writer.uint32(18).string(message.package); + } + for (const v of message.dependency) { + writer.uint32(26).string(v!); + } + writer.uint32(82).fork(); + for (const v of message.public_dependency) { + writer.int32(v); + } + writer.ldelim(); + writer.uint32(90).fork(); + for (const v of message.weak_dependency) { + writer.int32(v); + } + 
writer.ldelim(); + for (const v of message.message_type) { + DescriptorProto.encode(v!, writer.uint32(34).fork()).ldelim(); + } + for (const v of message.enum_type) { + EnumDescriptorProto.encode(v!, writer.uint32(42).fork()).ldelim(); + } + for (const v of message.service) { + ServiceDescriptorProto.encode(v!, writer.uint32(50).fork()).ldelim(); + } + for (const v of message.extension) { + FieldDescriptorProto.encode(v!, writer.uint32(58).fork()).ldelim(); + } + if (message.options !== undefined) { + FileOptions.encode(message.options, writer.uint32(66).fork()).ldelim(); + } + if (message.source_code_info !== undefined) { + SourceCodeInfo.encode( + message.source_code_info, + writer.uint32(74).fork() + ).ldelim(); + } + if (message.syntax !== "") { + writer.uint32(98).string(message.syntax); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): FileDescriptorProto { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseFileDescriptorProto } as FileDescriptorProto; + message.dependency = []; + message.public_dependency = []; + message.weak_dependency = []; + message.message_type = []; + message.enum_type = []; + message.service = []; + message.extension = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.package = reader.string(); + break; + case 3: + message.dependency.push(reader.string()); + break; + case 10: + if ((tag & 7) === 2) { + const end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) { + message.public_dependency.push(reader.int32()); + } + } else { + message.public_dependency.push(reader.int32()); + } + break; + case 11: + if ((tag & 7) === 2) { + const end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) { + message.weak_dependency.push(reader.int32()); + } + } else { + message.weak_dependency.push(reader.int32()); + } + break; + case 4: + message.message_type.push( + DescriptorProto.decode(reader, reader.uint32()) + ); + break; + case 5: + message.enum_type.push( + EnumDescriptorProto.decode(reader, reader.uint32()) + ); + break; + case 6: + message.service.push( + ServiceDescriptorProto.decode(reader, reader.uint32()) + ); + break; + case 7: + message.extension.push( + FieldDescriptorProto.decode(reader, reader.uint32()) + ); + break; + case 8: + message.options = FileOptions.decode(reader, reader.uint32()); + break; + case 9: + message.source_code_info = SourceCodeInfo.decode( + reader, + reader.uint32() + ); + break; + case 12: + message.syntax = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): FileDescriptorProto { + const message = { ...baseFileDescriptorProto } as FileDescriptorProto; + message.dependency = []; + message.public_dependency = []; + message.weak_dependency = []; + 
message.message_type = []; + message.enum_type = []; + message.service = []; + message.extension = []; + if (object.name !== undefined && object.name !== null) { + message.name = String(object.name); + } else { + message.name = ""; + } + if (object.package !== undefined && object.package !== null) { + message.package = String(object.package); + } else { + message.package = ""; + } + if (object.dependency !== undefined && object.dependency !== null) { + for (const e of object.dependency) { + message.dependency.push(String(e)); + } + } + if ( + object.public_dependency !== undefined && + object.public_dependency !== null + ) { + for (const e of object.public_dependency) { + message.public_dependency.push(Number(e)); + } + } + if ( + object.weak_dependency !== undefined && + object.weak_dependency !== null + ) { + for (const e of object.weak_dependency) { + message.weak_dependency.push(Number(e)); + } + } + if (object.message_type !== undefined && object.message_type !== null) { + for (const e of object.message_type) { + message.message_type.push(DescriptorProto.fromJSON(e)); + } + } + if (object.enum_type !== undefined && object.enum_type !== null) { + for (const e of object.enum_type) { + message.enum_type.push(EnumDescriptorProto.fromJSON(e)); + } + } + if (object.service !== undefined && object.service !== null) { + for (const e of object.service) { + message.service.push(ServiceDescriptorProto.fromJSON(e)); + } + } + if (object.extension !== undefined && object.extension !== null) { + for (const e of object.extension) { + message.extension.push(FieldDescriptorProto.fromJSON(e)); + } + } + if (object.options !== undefined && object.options !== null) { + message.options = FileOptions.fromJSON(object.options); + } else { + message.options = undefined; + } + if ( + object.source_code_info !== undefined && + object.source_code_info !== null + ) { + message.source_code_info = SourceCodeInfo.fromJSON( + object.source_code_info + ); + } else { + message.source_code_info 
= undefined; + } + if (object.syntax !== undefined && object.syntax !== null) { + message.syntax = String(object.syntax); + } else { + message.syntax = ""; + } + return message; + }, + + toJSON(message: FileDescriptorProto): unknown { + const obj: any = {}; + message.name !== undefined && (obj.name = message.name); + message.package !== undefined && (obj.package = message.package); + if (message.dependency) { + obj.dependency = message.dependency.map((e) => e); + } else { + obj.dependency = []; + } + if (message.public_dependency) { + obj.public_dependency = message.public_dependency.map((e) => e); + } else { + obj.public_dependency = []; + } + if (message.weak_dependency) { + obj.weak_dependency = message.weak_dependency.map((e) => e); + } else { + obj.weak_dependency = []; + } + if (message.message_type) { + obj.message_type = message.message_type.map((e) => + e ? DescriptorProto.toJSON(e) : undefined + ); + } else { + obj.message_type = []; + } + if (message.enum_type) { + obj.enum_type = message.enum_type.map((e) => + e ? EnumDescriptorProto.toJSON(e) : undefined + ); + } else { + obj.enum_type = []; + } + if (message.service) { + obj.service = message.service.map((e) => + e ? ServiceDescriptorProto.toJSON(e) : undefined + ); + } else { + obj.service = []; + } + if (message.extension) { + obj.extension = message.extension.map((e) => + e ? FieldDescriptorProto.toJSON(e) : undefined + ); + } else { + obj.extension = []; + } + message.options !== undefined && + (obj.options = message.options + ? FileOptions.toJSON(message.options) + : undefined); + message.source_code_info !== undefined && + (obj.source_code_info = message.source_code_info + ? 
SourceCodeInfo.toJSON(message.source_code_info) + : undefined); + message.syntax !== undefined && (obj.syntax = message.syntax); + return obj; + }, + + fromPartial(object: DeepPartial): FileDescriptorProto { + const message = { ...baseFileDescriptorProto } as FileDescriptorProto; + message.dependency = []; + message.public_dependency = []; + message.weak_dependency = []; + message.message_type = []; + message.enum_type = []; + message.service = []; + message.extension = []; + if (object.name !== undefined && object.name !== null) { + message.name = object.name; + } else { + message.name = ""; + } + if (object.package !== undefined && object.package !== null) { + message.package = object.package; + } else { + message.package = ""; + } + if (object.dependency !== undefined && object.dependency !== null) { + for (const e of object.dependency) { + message.dependency.push(e); + } + } + if ( + object.public_dependency !== undefined && + object.public_dependency !== null + ) { + for (const e of object.public_dependency) { + message.public_dependency.push(e); + } + } + if ( + object.weak_dependency !== undefined && + object.weak_dependency !== null + ) { + for (const e of object.weak_dependency) { + message.weak_dependency.push(e); + } + } + if (object.message_type !== undefined && object.message_type !== null) { + for (const e of object.message_type) { + message.message_type.push(DescriptorProto.fromPartial(e)); + } + } + if (object.enum_type !== undefined && object.enum_type !== null) { + for (const e of object.enum_type) { + message.enum_type.push(EnumDescriptorProto.fromPartial(e)); + } + } + if (object.service !== undefined && object.service !== null) { + for (const e of object.service) { + message.service.push(ServiceDescriptorProto.fromPartial(e)); + } + } + if (object.extension !== undefined && object.extension !== null) { + for (const e of object.extension) { + message.extension.push(FieldDescriptorProto.fromPartial(e)); + } + } + if (object.options !== undefined 
&& object.options !== null) { + message.options = FileOptions.fromPartial(object.options); + } else { + message.options = undefined; + } + if ( + object.source_code_info !== undefined && + object.source_code_info !== null + ) { + message.source_code_info = SourceCodeInfo.fromPartial( + object.source_code_info + ); + } else { + message.source_code_info = undefined; + } + if (object.syntax !== undefined && object.syntax !== null) { + message.syntax = object.syntax; + } else { + message.syntax = ""; + } + return message; + }, +}; + +const baseDescriptorProto: object = { name: "", reserved_name: "" }; + +export const DescriptorProto = { + encode(message: DescriptorProto, writer: Writer = Writer.create()): Writer { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + for (const v of message.field) { + FieldDescriptorProto.encode(v!, writer.uint32(18).fork()).ldelim(); + } + for (const v of message.extension) { + FieldDescriptorProto.encode(v!, writer.uint32(50).fork()).ldelim(); + } + for (const v of message.nested_type) { + DescriptorProto.encode(v!, writer.uint32(26).fork()).ldelim(); + } + for (const v of message.enum_type) { + EnumDescriptorProto.encode(v!, writer.uint32(34).fork()).ldelim(); + } + for (const v of message.extension_range) { + DescriptorProto_ExtensionRange.encode( + v!, + writer.uint32(42).fork() + ).ldelim(); + } + for (const v of message.oneof_decl) { + OneofDescriptorProto.encode(v!, writer.uint32(66).fork()).ldelim(); + } + if (message.options !== undefined) { + MessageOptions.encode(message.options, writer.uint32(58).fork()).ldelim(); + } + for (const v of message.reserved_range) { + DescriptorProto_ReservedRange.encode( + v!, + writer.uint32(74).fork() + ).ldelim(); + } + for (const v of message.reserved_name) { + writer.uint32(82).string(v!); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): DescriptorProto { + const reader = input instanceof Uint8Array ? 
new Reader(input) : input; + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseDescriptorProto } as DescriptorProto; + message.field = []; + message.extension = []; + message.nested_type = []; + message.enum_type = []; + message.extension_range = []; + message.oneof_decl = []; + message.reserved_range = []; + message.reserved_name = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.field.push( + FieldDescriptorProto.decode(reader, reader.uint32()) + ); + break; + case 6: + message.extension.push( + FieldDescriptorProto.decode(reader, reader.uint32()) + ); + break; + case 3: + message.nested_type.push( + DescriptorProto.decode(reader, reader.uint32()) + ); + break; + case 4: + message.enum_type.push( + EnumDescriptorProto.decode(reader, reader.uint32()) + ); + break; + case 5: + message.extension_range.push( + DescriptorProto_ExtensionRange.decode(reader, reader.uint32()) + ); + break; + case 8: + message.oneof_decl.push( + OneofDescriptorProto.decode(reader, reader.uint32()) + ); + break; + case 7: + message.options = MessageOptions.decode(reader, reader.uint32()); + break; + case 9: + message.reserved_range.push( + DescriptorProto_ReservedRange.decode(reader, reader.uint32()) + ); + break; + case 10: + message.reserved_name.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DescriptorProto { + const message = { ...baseDescriptorProto } as DescriptorProto; + message.field = []; + message.extension = []; + message.nested_type = []; + message.enum_type = []; + message.extension_range = []; + message.oneof_decl = []; + message.reserved_range = []; + message.reserved_name = []; + if (object.name !== undefined && object.name !== null) { + message.name = String(object.name); + } else { + message.name = ""; + } + if (object.field !== 
undefined && object.field !== null) { + for (const e of object.field) { + message.field.push(FieldDescriptorProto.fromJSON(e)); + } + } + if (object.extension !== undefined && object.extension !== null) { + for (const e of object.extension) { + message.extension.push(FieldDescriptorProto.fromJSON(e)); + } + } + if (object.nested_type !== undefined && object.nested_type !== null) { + for (const e of object.nested_type) { + message.nested_type.push(DescriptorProto.fromJSON(e)); + } + } + if (object.enum_type !== undefined && object.enum_type !== null) { + for (const e of object.enum_type) { + message.enum_type.push(EnumDescriptorProto.fromJSON(e)); + } + } + if ( + object.extension_range !== undefined && + object.extension_range !== null + ) { + for (const e of object.extension_range) { + message.extension_range.push( + DescriptorProto_ExtensionRange.fromJSON(e) + ); + } + } + if (object.oneof_decl !== undefined && object.oneof_decl !== null) { + for (const e of object.oneof_decl) { + message.oneof_decl.push(OneofDescriptorProto.fromJSON(e)); + } + } + if (object.options !== undefined && object.options !== null) { + message.options = MessageOptions.fromJSON(object.options); + } else { + message.options = undefined; + } + if (object.reserved_range !== undefined && object.reserved_range !== null) { + for (const e of object.reserved_range) { + message.reserved_range.push(DescriptorProto_ReservedRange.fromJSON(e)); + } + } + if (object.reserved_name !== undefined && object.reserved_name !== null) { + for (const e of object.reserved_name) { + message.reserved_name.push(String(e)); + } + } + return message; + }, + + toJSON(message: DescriptorProto): unknown { + const obj: any = {}; + message.name !== undefined && (obj.name = message.name); + if (message.field) { + obj.field = message.field.map((e) => + e ? FieldDescriptorProto.toJSON(e) : undefined + ); + } else { + obj.field = []; + } + if (message.extension) { + obj.extension = message.extension.map((e) => + e ? 
FieldDescriptorProto.toJSON(e) : undefined + ); + } else { + obj.extension = []; + } + if (message.nested_type) { + obj.nested_type = message.nested_type.map((e) => + e ? DescriptorProto.toJSON(e) : undefined + ); + } else { + obj.nested_type = []; + } + if (message.enum_type) { + obj.enum_type = message.enum_type.map((e) => + e ? EnumDescriptorProto.toJSON(e) : undefined + ); + } else { + obj.enum_type = []; + } + if (message.extension_range) { + obj.extension_range = message.extension_range.map((e) => + e ? DescriptorProto_ExtensionRange.toJSON(e) : undefined + ); + } else { + obj.extension_range = []; + } + if (message.oneof_decl) { + obj.oneof_decl = message.oneof_decl.map((e) => + e ? OneofDescriptorProto.toJSON(e) : undefined + ); + } else { + obj.oneof_decl = []; + } + message.options !== undefined && + (obj.options = message.options + ? MessageOptions.toJSON(message.options) + : undefined); + if (message.reserved_range) { + obj.reserved_range = message.reserved_range.map((e) => + e ? 
DescriptorProto_ReservedRange.toJSON(e) : undefined + ); + } else { + obj.reserved_range = []; + } + if (message.reserved_name) { + obj.reserved_name = message.reserved_name.map((e) => e); + } else { + obj.reserved_name = []; + } + return obj; + }, + + fromPartial(object: DeepPartial): DescriptorProto { + const message = { ...baseDescriptorProto } as DescriptorProto; + message.field = []; + message.extension = []; + message.nested_type = []; + message.enum_type = []; + message.extension_range = []; + message.oneof_decl = []; + message.reserved_range = []; + message.reserved_name = []; + if (object.name !== undefined && object.name !== null) { + message.name = object.name; + } else { + message.name = ""; + } + if (object.field !== undefined && object.field !== null) { + for (const e of object.field) { + message.field.push(FieldDescriptorProto.fromPartial(e)); + } + } + if (object.extension !== undefined && object.extension !== null) { + for (const e of object.extension) { + message.extension.push(FieldDescriptorProto.fromPartial(e)); + } + } + if (object.nested_type !== undefined && object.nested_type !== null) { + for (const e of object.nested_type) { + message.nested_type.push(DescriptorProto.fromPartial(e)); + } + } + if (object.enum_type !== undefined && object.enum_type !== null) { + for (const e of object.enum_type) { + message.enum_type.push(EnumDescriptorProto.fromPartial(e)); + } + } + if ( + object.extension_range !== undefined && + object.extension_range !== null + ) { + for (const e of object.extension_range) { + message.extension_range.push( + DescriptorProto_ExtensionRange.fromPartial(e) + ); + } + } + if (object.oneof_decl !== undefined && object.oneof_decl !== null) { + for (const e of object.oneof_decl) { + message.oneof_decl.push(OneofDescriptorProto.fromPartial(e)); + } + } + if (object.options !== undefined && object.options !== null) { + message.options = MessageOptions.fromPartial(object.options); + } else { + message.options = undefined; + } + 
if (object.reserved_range !== undefined && object.reserved_range !== null) { + for (const e of object.reserved_range) { + message.reserved_range.push( + DescriptorProto_ReservedRange.fromPartial(e) + ); + } + } + if (object.reserved_name !== undefined && object.reserved_name !== null) { + for (const e of object.reserved_name) { + message.reserved_name.push(e); + } + } + return message; + }, +}; + +const baseDescriptorProto_ExtensionRange: object = { start: 0, end: 0 }; + +export const DescriptorProto_ExtensionRange = { + encode( + message: DescriptorProto_ExtensionRange, + writer: Writer = Writer.create() + ): Writer { + if (message.start !== 0) { + writer.uint32(8).int32(message.start); + } + if (message.end !== 0) { + writer.uint32(16).int32(message.end); + } + if (message.options !== undefined) { + ExtensionRangeOptions.encode( + message.options, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: Reader | Uint8Array, + length?: number + ): DescriptorProto_ExtensionRange { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseDescriptorProto_ExtensionRange, + } as DescriptorProto_ExtensionRange; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.start = reader.int32(); + break; + case 2: + message.end = reader.int32(); + break; + case 3: + message.options = ExtensionRangeOptions.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DescriptorProto_ExtensionRange { + const message = { + ...baseDescriptorProto_ExtensionRange, + } as DescriptorProto_ExtensionRange; + if (object.start !== undefined && object.start !== null) { + message.start = Number(object.start); + } else { + message.start = 0; + } + if (object.end !== undefined && object.end !== null) { + message.end = Number(object.end); + } else { + message.end = 0; + } + if (object.options !== undefined && object.options !== null) { + message.options = ExtensionRangeOptions.fromJSON(object.options); + } else { + message.options = undefined; + } + return message; + }, + + toJSON(message: DescriptorProto_ExtensionRange): unknown { + const obj: any = {}; + message.start !== undefined && (obj.start = message.start); + message.end !== undefined && (obj.end = message.end); + message.options !== undefined && + (obj.options = message.options + ? 
ExtensionRangeOptions.toJSON(message.options) + : undefined); + return obj; + }, + + fromPartial( + object: DeepPartial + ): DescriptorProto_ExtensionRange { + const message = { + ...baseDescriptorProto_ExtensionRange, + } as DescriptorProto_ExtensionRange; + if (object.start !== undefined && object.start !== null) { + message.start = object.start; + } else { + message.start = 0; + } + if (object.end !== undefined && object.end !== null) { + message.end = object.end; + } else { + message.end = 0; + } + if (object.options !== undefined && object.options !== null) { + message.options = ExtensionRangeOptions.fromPartial(object.options); + } else { + message.options = undefined; + } + return message; + }, +}; + +const baseDescriptorProto_ReservedRange: object = { start: 0, end: 0 }; + +export const DescriptorProto_ReservedRange = { + encode( + message: DescriptorProto_ReservedRange, + writer: Writer = Writer.create() + ): Writer { + if (message.start !== 0) { + writer.uint32(8).int32(message.start); + } + if (message.end !== 0) { + writer.uint32(16).int32(message.end); + } + return writer; + }, + + decode( + input: Reader | Uint8Array, + length?: number + ): DescriptorProto_ReservedRange { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseDescriptorProto_ReservedRange, + } as DescriptorProto_ReservedRange; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.start = reader.int32(); + break; + case 2: + message.end = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DescriptorProto_ReservedRange { + const message = { + ...baseDescriptorProto_ReservedRange, + } as DescriptorProto_ReservedRange; + if (object.start !== undefined && object.start !== null) { + message.start = Number(object.start); + } else { + message.start = 0; + } + if (object.end !== undefined && object.end !== null) { + message.end = Number(object.end); + } else { + message.end = 0; + } + return message; + }, + + toJSON(message: DescriptorProto_ReservedRange): unknown { + const obj: any = {}; + message.start !== undefined && (obj.start = message.start); + message.end !== undefined && (obj.end = message.end); + return obj; + }, + + fromPartial( + object: DeepPartial + ): DescriptorProto_ReservedRange { + const message = { + ...baseDescriptorProto_ReservedRange, + } as DescriptorProto_ReservedRange; + if (object.start !== undefined && object.start !== null) { + message.start = object.start; + } else { + message.start = 0; + } + if (object.end !== undefined && object.end !== null) { + message.end = object.end; + } else { + message.end = 0; + } + return message; + }, +}; + +const baseExtensionRangeOptions: object = {}; + +export const ExtensionRangeOptions = { + encode( + message: ExtensionRangeOptions, + writer: Writer = Writer.create() + ): Writer { + for (const v of message.uninterpreted_option) { + UninterpretedOption.encode(v!, writer.uint32(7994).fork()).ldelim(); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): ExtensionRangeOptions { + const reader = input instanceof Uint8Array ? 
new Reader(input) : input; + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseExtensionRangeOptions } as ExtensionRangeOptions; + message.uninterpreted_option = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 999: + message.uninterpreted_option.push( + UninterpretedOption.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ExtensionRangeOptions { + const message = { ...baseExtensionRangeOptions } as ExtensionRangeOptions; + message.uninterpreted_option = []; + if ( + object.uninterpreted_option !== undefined && + object.uninterpreted_option !== null + ) { + for (const e of object.uninterpreted_option) { + message.uninterpreted_option.push(UninterpretedOption.fromJSON(e)); + } + } + return message; + }, + + toJSON(message: ExtensionRangeOptions): unknown { + const obj: any = {}; + if (message.uninterpreted_option) { + obj.uninterpreted_option = message.uninterpreted_option.map((e) => + e ? 
UninterpretedOption.toJSON(e) : undefined + ); + } else { + obj.uninterpreted_option = []; + } + return obj; + }, + + fromPartial( + object: DeepPartial + ): ExtensionRangeOptions { + const message = { ...baseExtensionRangeOptions } as ExtensionRangeOptions; + message.uninterpreted_option = []; + if ( + object.uninterpreted_option !== undefined && + object.uninterpreted_option !== null + ) { + for (const e of object.uninterpreted_option) { + message.uninterpreted_option.push(UninterpretedOption.fromPartial(e)); + } + } + return message; + }, +}; + +const baseFieldDescriptorProto: object = { + name: "", + number: 0, + label: 1, + type: 1, + type_name: "", + extendee: "", + default_value: "", + oneof_index: 0, + json_name: "", + proto3_optional: false, +}; + +export const FieldDescriptorProto = { + encode( + message: FieldDescriptorProto, + writer: Writer = Writer.create() + ): Writer { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + if (message.number !== 0) { + writer.uint32(24).int32(message.number); + } + if (message.label !== 1) { + writer.uint32(32).int32(message.label); + } + if (message.type !== 1) { + writer.uint32(40).int32(message.type); + } + if (message.type_name !== "") { + writer.uint32(50).string(message.type_name); + } + if (message.extendee !== "") { + writer.uint32(18).string(message.extendee); + } + if (message.default_value !== "") { + writer.uint32(58).string(message.default_value); + } + if (message.oneof_index !== 0) { + writer.uint32(72).int32(message.oneof_index); + } + if (message.json_name !== "") { + writer.uint32(82).string(message.json_name); + } + if (message.options !== undefined) { + FieldOptions.encode(message.options, writer.uint32(66).fork()).ldelim(); + } + if (message.proto3_optional === true) { + writer.uint32(136).bool(message.proto3_optional); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): FieldDescriptorProto { + const reader = input instanceof Uint8Array ? 
new Reader(input) : input; + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseFieldDescriptorProto } as FieldDescriptorProto; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 3: + message.number = reader.int32(); + break; + case 4: + message.label = reader.int32() as any; + break; + case 5: + message.type = reader.int32() as any; + break; + case 6: + message.type_name = reader.string(); + break; + case 2: + message.extendee = reader.string(); + break; + case 7: + message.default_value = reader.string(); + break; + case 9: + message.oneof_index = reader.int32(); + break; + case 10: + message.json_name = reader.string(); + break; + case 8: + message.options = FieldOptions.decode(reader, reader.uint32()); + break; + case 17: + message.proto3_optional = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): FieldDescriptorProto { + const message = { ...baseFieldDescriptorProto } as FieldDescriptorProto; + if (object.name !== undefined && object.name !== null) { + message.name = String(object.name); + } else { + message.name = ""; + } + if (object.number !== undefined && object.number !== null) { + message.number = Number(object.number); + } else { + message.number = 0; + } + if (object.label !== undefined && object.label !== null) { + message.label = fieldDescriptorProto_LabelFromJSON(object.label); + } else { + message.label = 1; + } + if (object.type !== undefined && object.type !== null) { + message.type = fieldDescriptorProto_TypeFromJSON(object.type); + } else { + message.type = 1; + } + if (object.type_name !== undefined && object.type_name !== null) { + message.type_name = String(object.type_name); + } else { + message.type_name = ""; + } + if (object.extendee !== undefined && object.extendee !== null) { + message.extendee = String(object.extendee); + 
} else { + message.extendee = ""; + } + if (object.default_value !== undefined && object.default_value !== null) { + message.default_value = String(object.default_value); + } else { + message.default_value = ""; + } + if (object.oneof_index !== undefined && object.oneof_index !== null) { + message.oneof_index = Number(object.oneof_index); + } else { + message.oneof_index = 0; + } + if (object.json_name !== undefined && object.json_name !== null) { + message.json_name = String(object.json_name); + } else { + message.json_name = ""; + } + if (object.options !== undefined && object.options !== null) { + message.options = FieldOptions.fromJSON(object.options); + } else { + message.options = undefined; + } + if ( + object.proto3_optional !== undefined && + object.proto3_optional !== null + ) { + message.proto3_optional = Boolean(object.proto3_optional); + } else { + message.proto3_optional = false; + } + return message; + }, + + toJSON(message: FieldDescriptorProto): unknown { + const obj: any = {}; + message.name !== undefined && (obj.name = message.name); + message.number !== undefined && (obj.number = message.number); + message.label !== undefined && + (obj.label = fieldDescriptorProto_LabelToJSON(message.label)); + message.type !== undefined && + (obj.type = fieldDescriptorProto_TypeToJSON(message.type)); + message.type_name !== undefined && (obj.type_name = message.type_name); + message.extendee !== undefined && (obj.extendee = message.extendee); + message.default_value !== undefined && + (obj.default_value = message.default_value); + message.oneof_index !== undefined && + (obj.oneof_index = message.oneof_index); + message.json_name !== undefined && (obj.json_name = message.json_name); + message.options !== undefined && + (obj.options = message.options + ? 
FieldOptions.toJSON(message.options) + : undefined); + message.proto3_optional !== undefined && + (obj.proto3_optional = message.proto3_optional); + return obj; + }, + + fromPartial(object: DeepPartial): FieldDescriptorProto { + const message = { ...baseFieldDescriptorProto } as FieldDescriptorProto; + if (object.name !== undefined && object.name !== null) { + message.name = object.name; + } else { + message.name = ""; + } + if (object.number !== undefined && object.number !== null) { + message.number = object.number; + } else { + message.number = 0; + } + if (object.label !== undefined && object.label !== null) { + message.label = object.label; + } else { + message.label = 1; + } + if (object.type !== undefined && object.type !== null) { + message.type = object.type; + } else { + message.type = 1; + } + if (object.type_name !== undefined && object.type_name !== null) { + message.type_name = object.type_name; + } else { + message.type_name = ""; + } + if (object.extendee !== undefined && object.extendee !== null) { + message.extendee = object.extendee; + } else { + message.extendee = ""; + } + if (object.default_value !== undefined && object.default_value !== null) { + message.default_value = object.default_value; + } else { + message.default_value = ""; + } + if (object.oneof_index !== undefined && object.oneof_index !== null) { + message.oneof_index = object.oneof_index; + } else { + message.oneof_index = 0; + } + if (object.json_name !== undefined && object.json_name !== null) { + message.json_name = object.json_name; + } else { + message.json_name = ""; + } + if (object.options !== undefined && object.options !== null) { + message.options = FieldOptions.fromPartial(object.options); + } else { + message.options = undefined; + } + if ( + object.proto3_optional !== undefined && + object.proto3_optional !== null + ) { + message.proto3_optional = object.proto3_optional; + } else { + message.proto3_optional = false; + } + return message; + }, +}; + +const 
baseOneofDescriptorProto: object = { name: "" }; + +export const OneofDescriptorProto = { + encode( + message: OneofDescriptorProto, + writer: Writer = Writer.create() + ): Writer { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + if (message.options !== undefined) { + OneofOptions.encode(message.options, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): OneofDescriptorProto { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseOneofDescriptorProto } as OneofDescriptorProto; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.options = OneofOptions.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): OneofDescriptorProto { + const message = { ...baseOneofDescriptorProto } as OneofDescriptorProto; + if (object.name !== undefined && object.name !== null) { + message.name = String(object.name); + } else { + message.name = ""; + } + if (object.options !== undefined && object.options !== null) { + message.options = OneofOptions.fromJSON(object.options); + } else { + message.options = undefined; + } + return message; + }, + + toJSON(message: OneofDescriptorProto): unknown { + const obj: any = {}; + message.name !== undefined && (obj.name = message.name); + message.options !== undefined && + (obj.options = message.options + ? 
OneofOptions.toJSON(message.options) + : undefined); + return obj; + }, + + fromPartial(object: DeepPartial): OneofDescriptorProto { + const message = { ...baseOneofDescriptorProto } as OneofDescriptorProto; + if (object.name !== undefined && object.name !== null) { + message.name = object.name; + } else { + message.name = ""; + } + if (object.options !== undefined && object.options !== null) { + message.options = OneofOptions.fromPartial(object.options); + } else { + message.options = undefined; + } + return message; + }, +}; + +const baseEnumDescriptorProto: object = { name: "", reserved_name: "" }; + +export const EnumDescriptorProto = { + encode( + message: EnumDescriptorProto, + writer: Writer = Writer.create() + ): Writer { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + for (const v of message.value) { + EnumValueDescriptorProto.encode(v!, writer.uint32(18).fork()).ldelim(); + } + if (message.options !== undefined) { + EnumOptions.encode(message.options, writer.uint32(26).fork()).ldelim(); + } + for (const v of message.reserved_range) { + EnumDescriptorProto_EnumReservedRange.encode( + v!, + writer.uint32(34).fork() + ).ldelim(); + } + for (const v of message.reserved_name) { + writer.uint32(42).string(v!); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): EnumDescriptorProto { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseEnumDescriptorProto } as EnumDescriptorProto; + message.value = []; + message.reserved_range = []; + message.reserved_name = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.value.push( + EnumValueDescriptorProto.decode(reader, reader.uint32()) + ); + break; + case 3: + message.options = EnumOptions.decode(reader, reader.uint32()); + break; + case 4: + message.reserved_range.push( + EnumDescriptorProto_EnumReservedRange.decode( + reader, + reader.uint32() + ) + ); + break; + case 5: + message.reserved_name.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): EnumDescriptorProto { + const message = { ...baseEnumDescriptorProto } as EnumDescriptorProto; + message.value = []; + message.reserved_range = []; + message.reserved_name = []; + if (object.name !== undefined && object.name !== null) { + message.name = String(object.name); + } else { + message.name = ""; + } + if (object.value !== undefined && object.value !== null) { + for (const e of object.value) { + message.value.push(EnumValueDescriptorProto.fromJSON(e)); + } + } + if (object.options !== undefined && object.options !== null) { + message.options = EnumOptions.fromJSON(object.options); + } else { + message.options = undefined; + } + if (object.reserved_range !== undefined && object.reserved_range !== null) { + for (const e of object.reserved_range) { + message.reserved_range.push( + EnumDescriptorProto_EnumReservedRange.fromJSON(e) + ); + } + } + if (object.reserved_name !== undefined && object.reserved_name !== null) { + for (const e of object.reserved_name) { + message.reserved_name.push(String(e)); + } + } + return message; + }, + + toJSON(message: EnumDescriptorProto): unknown { + const obj: any = {}; + message.name !== undefined && (obj.name = 
message.name); + if (message.value) { + obj.value = message.value.map((e) => + e ? EnumValueDescriptorProto.toJSON(e) : undefined + ); + } else { + obj.value = []; + } + message.options !== undefined && + (obj.options = message.options + ? EnumOptions.toJSON(message.options) + : undefined); + if (message.reserved_range) { + obj.reserved_range = message.reserved_range.map((e) => + e ? EnumDescriptorProto_EnumReservedRange.toJSON(e) : undefined + ); + } else { + obj.reserved_range = []; + } + if (message.reserved_name) { + obj.reserved_name = message.reserved_name.map((e) => e); + } else { + obj.reserved_name = []; + } + return obj; + }, + + fromPartial(object: DeepPartial): EnumDescriptorProto { + const message = { ...baseEnumDescriptorProto } as EnumDescriptorProto; + message.value = []; + message.reserved_range = []; + message.reserved_name = []; + if (object.name !== undefined && object.name !== null) { + message.name = object.name; + } else { + message.name = ""; + } + if (object.value !== undefined && object.value !== null) { + for (const e of object.value) { + message.value.push(EnumValueDescriptorProto.fromPartial(e)); + } + } + if (object.options !== undefined && object.options !== null) { + message.options = EnumOptions.fromPartial(object.options); + } else { + message.options = undefined; + } + if (object.reserved_range !== undefined && object.reserved_range !== null) { + for (const e of object.reserved_range) { + message.reserved_range.push( + EnumDescriptorProto_EnumReservedRange.fromPartial(e) + ); + } + } + if (object.reserved_name !== undefined && object.reserved_name !== null) { + for (const e of object.reserved_name) { + message.reserved_name.push(e); + } + } + return message; + }, +}; + +const baseEnumDescriptorProto_EnumReservedRange: object = { start: 0, end: 0 }; + +export const EnumDescriptorProto_EnumReservedRange = { + encode( + message: EnumDescriptorProto_EnumReservedRange, + writer: Writer = Writer.create() + ): Writer { + if 
(message.start !== 0) { + writer.uint32(8).int32(message.start); + } + if (message.end !== 0) { + writer.uint32(16).int32(message.end); + } + return writer; + }, + + decode( + input: Reader | Uint8Array, + length?: number + ): EnumDescriptorProto_EnumReservedRange { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseEnumDescriptorProto_EnumReservedRange, + } as EnumDescriptorProto_EnumReservedRange; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.start = reader.int32(); + break; + case 2: + message.end = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): EnumDescriptorProto_EnumReservedRange { + const message = { + ...baseEnumDescriptorProto_EnumReservedRange, + } as EnumDescriptorProto_EnumReservedRange; + if (object.start !== undefined && object.start !== null) { + message.start = Number(object.start); + } else { + message.start = 0; + } + if (object.end !== undefined && object.end !== null) { + message.end = Number(object.end); + } else { + message.end = 0; + } + return message; + }, + + toJSON(message: EnumDescriptorProto_EnumReservedRange): unknown { + const obj: any = {}; + message.start !== undefined && (obj.start = message.start); + message.end !== undefined && (obj.end = message.end); + return obj; + }, + + fromPartial( + object: DeepPartial + ): EnumDescriptorProto_EnumReservedRange { + const message = { + ...baseEnumDescriptorProto_EnumReservedRange, + } as EnumDescriptorProto_EnumReservedRange; + if (object.start !== undefined && object.start !== null) { + message.start = object.start; + } else { + message.start = 0; + } + if (object.end !== undefined && object.end !== null) { + message.end = object.end; + } else { + message.end = 0; + } + return message; + }, +}; + +const baseEnumValueDescriptorProto: 
object = { name: "", number: 0 }; + +export const EnumValueDescriptorProto = { + encode( + message: EnumValueDescriptorProto, + writer: Writer = Writer.create() + ): Writer { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + if (message.number !== 0) { + writer.uint32(16).int32(message.number); + } + if (message.options !== undefined) { + EnumValueOptions.encode( + message.options, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: Reader | Uint8Array, + length?: number + ): EnumValueDescriptorProto { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseEnumValueDescriptorProto, + } as EnumValueDescriptorProto; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.number = reader.int32(); + break; + case 3: + message.options = EnumValueOptions.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): EnumValueDescriptorProto { + const message = { + ...baseEnumValueDescriptorProto, + } as EnumValueDescriptorProto; + if (object.name !== undefined && object.name !== null) { + message.name = String(object.name); + } else { + message.name = ""; + } + if (object.number !== undefined && object.number !== null) { + message.number = Number(object.number); + } else { + message.number = 0; + } + if (object.options !== undefined && object.options !== null) { + message.options = EnumValueOptions.fromJSON(object.options); + } else { + message.options = undefined; + } + return message; + }, + + toJSON(message: EnumValueDescriptorProto): unknown { + const obj: any = {}; + message.name !== undefined && (obj.name = message.name); + message.number !== undefined && (obj.number = message.number); + message.options !== 
undefined && + (obj.options = message.options + ? EnumValueOptions.toJSON(message.options) + : undefined); + return obj; + }, + + fromPartial( + object: DeepPartial + ): EnumValueDescriptorProto { + const message = { + ...baseEnumValueDescriptorProto, + } as EnumValueDescriptorProto; + if (object.name !== undefined && object.name !== null) { + message.name = object.name; + } else { + message.name = ""; + } + if (object.number !== undefined && object.number !== null) { + message.number = object.number; + } else { + message.number = 0; + } + if (object.options !== undefined && object.options !== null) { + message.options = EnumValueOptions.fromPartial(object.options); + } else { + message.options = undefined; + } + return message; + }, +}; + +const baseServiceDescriptorProto: object = { name: "" }; + +export const ServiceDescriptorProto = { + encode( + message: ServiceDescriptorProto, + writer: Writer = Writer.create() + ): Writer { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + for (const v of message.method) { + MethodDescriptorProto.encode(v!, writer.uint32(18).fork()).ldelim(); + } + if (message.options !== undefined) { + ServiceOptions.encode(message.options, writer.uint32(26).fork()).ldelim(); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): ServiceDescriptorProto { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseServiceDescriptorProto } as ServiceDescriptorProto; + message.method = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.method.push( + MethodDescriptorProto.decode(reader, reader.uint32()) + ); + break; + case 3: + message.options = ServiceOptions.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ServiceDescriptorProto { + const message = { ...baseServiceDescriptorProto } as ServiceDescriptorProto; + message.method = []; + if (object.name !== undefined && object.name !== null) { + message.name = String(object.name); + } else { + message.name = ""; + } + if (object.method !== undefined && object.method !== null) { + for (const e of object.method) { + message.method.push(MethodDescriptorProto.fromJSON(e)); + } + } + if (object.options !== undefined && object.options !== null) { + message.options = ServiceOptions.fromJSON(object.options); + } else { + message.options = undefined; + } + return message; + }, + + toJSON(message: ServiceDescriptorProto): unknown { + const obj: any = {}; + message.name !== undefined && (obj.name = message.name); + if (message.method) { + obj.method = message.method.map((e) => + e ? MethodDescriptorProto.toJSON(e) : undefined + ); + } else { + obj.method = []; + } + message.options !== undefined && + (obj.options = message.options + ? 
ServiceOptions.toJSON(message.options) + : undefined); + return obj; + }, + + fromPartial( + object: DeepPartial + ): ServiceDescriptorProto { + const message = { ...baseServiceDescriptorProto } as ServiceDescriptorProto; + message.method = []; + if (object.name !== undefined && object.name !== null) { + message.name = object.name; + } else { + message.name = ""; + } + if (object.method !== undefined && object.method !== null) { + for (const e of object.method) { + message.method.push(MethodDescriptorProto.fromPartial(e)); + } + } + if (object.options !== undefined && object.options !== null) { + message.options = ServiceOptions.fromPartial(object.options); + } else { + message.options = undefined; + } + return message; + }, +}; + +const baseMethodDescriptorProto: object = { + name: "", + input_type: "", + output_type: "", + client_streaming: false, + server_streaming: false, +}; + +export const MethodDescriptorProto = { + encode( + message: MethodDescriptorProto, + writer: Writer = Writer.create() + ): Writer { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + if (message.input_type !== "") { + writer.uint32(18).string(message.input_type); + } + if (message.output_type !== "") { + writer.uint32(26).string(message.output_type); + } + if (message.options !== undefined) { + MethodOptions.encode(message.options, writer.uint32(34).fork()).ldelim(); + } + if (message.client_streaming === true) { + writer.uint32(40).bool(message.client_streaming); + } + if (message.server_streaming === true) { + writer.uint32(48).bool(message.server_streaming); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): MethodDescriptorProto { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseMethodDescriptorProto } as MethodDescriptorProto; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.input_type = reader.string(); + break; + case 3: + message.output_type = reader.string(); + break; + case 4: + message.options = MethodOptions.decode(reader, reader.uint32()); + break; + case 5: + message.client_streaming = reader.bool(); + break; + case 6: + message.server_streaming = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): MethodDescriptorProto { + const message = { ...baseMethodDescriptorProto } as MethodDescriptorProto; + if (object.name !== undefined && object.name !== null) { + message.name = String(object.name); + } else { + message.name = ""; + } + if (object.input_type !== undefined && object.input_type !== null) { + message.input_type = String(object.input_type); + } else { + message.input_type = ""; + } + if (object.output_type !== undefined && object.output_type !== null) { + message.output_type = String(object.output_type); + } else { + message.output_type = ""; + } + if (object.options !== undefined && object.options !== null) { + message.options = MethodOptions.fromJSON(object.options); + } else { + message.options = undefined; + } + if ( + object.client_streaming !== undefined && + object.client_streaming !== null + ) { + message.client_streaming = Boolean(object.client_streaming); + } else { + message.client_streaming = false; + } + if ( + object.server_streaming !== undefined && + object.server_streaming !== null + ) { + message.server_streaming = Boolean(object.server_streaming); + } else { + message.server_streaming = false; + } + return message; + }, + + toJSON(message: MethodDescriptorProto): unknown { + const obj: any = {}; + message.name !== undefined && (obj.name = message.name); + 
message.input_type !== undefined && (obj.input_type = message.input_type); + message.output_type !== undefined && + (obj.output_type = message.output_type); + message.options !== undefined && + (obj.options = message.options + ? MethodOptions.toJSON(message.options) + : undefined); + message.client_streaming !== undefined && + (obj.client_streaming = message.client_streaming); + message.server_streaming !== undefined && + (obj.server_streaming = message.server_streaming); + return obj; + }, + + fromPartial( + object: DeepPartial + ): MethodDescriptorProto { + const message = { ...baseMethodDescriptorProto } as MethodDescriptorProto; + if (object.name !== undefined && object.name !== null) { + message.name = object.name; + } else { + message.name = ""; + } + if (object.input_type !== undefined && object.input_type !== null) { + message.input_type = object.input_type; + } else { + message.input_type = ""; + } + if (object.output_type !== undefined && object.output_type !== null) { + message.output_type = object.output_type; + } else { + message.output_type = ""; + } + if (object.options !== undefined && object.options !== null) { + message.options = MethodOptions.fromPartial(object.options); + } else { + message.options = undefined; + } + if ( + object.client_streaming !== undefined && + object.client_streaming !== null + ) { + message.client_streaming = object.client_streaming; + } else { + message.client_streaming = false; + } + if ( + object.server_streaming !== undefined && + object.server_streaming !== null + ) { + message.server_streaming = object.server_streaming; + } else { + message.server_streaming = false; + } + return message; + }, +}; + +const baseFileOptions: object = { + java_package: "", + java_outer_classname: "", + java_multiple_files: false, + java_generate_equals_and_hash: false, + java_string_check_utf8: false, + optimize_for: 1, + go_package: "", + cc_generic_services: false, + java_generic_services: false, + py_generic_services: false, + 
php_generic_services: false, + deprecated: false, + cc_enable_arenas: false, + objc_class_prefix: "", + csharp_namespace: "", + swift_prefix: "", + php_class_prefix: "", + php_namespace: "", + php_metadata_namespace: "", + ruby_package: "", +}; + +export const FileOptions = { + encode(message: FileOptions, writer: Writer = Writer.create()): Writer { + if (message.java_package !== "") { + writer.uint32(10).string(message.java_package); + } + if (message.java_outer_classname !== "") { + writer.uint32(66).string(message.java_outer_classname); + } + if (message.java_multiple_files === true) { + writer.uint32(80).bool(message.java_multiple_files); + } + if (message.java_generate_equals_and_hash === true) { + writer.uint32(160).bool(message.java_generate_equals_and_hash); + } + if (message.java_string_check_utf8 === true) { + writer.uint32(216).bool(message.java_string_check_utf8); + } + if (message.optimize_for !== 1) { + writer.uint32(72).int32(message.optimize_for); + } + if (message.go_package !== "") { + writer.uint32(90).string(message.go_package); + } + if (message.cc_generic_services === true) { + writer.uint32(128).bool(message.cc_generic_services); + } + if (message.java_generic_services === true) { + writer.uint32(136).bool(message.java_generic_services); + } + if (message.py_generic_services === true) { + writer.uint32(144).bool(message.py_generic_services); + } + if (message.php_generic_services === true) { + writer.uint32(336).bool(message.php_generic_services); + } + if (message.deprecated === true) { + writer.uint32(184).bool(message.deprecated); + } + if (message.cc_enable_arenas === true) { + writer.uint32(248).bool(message.cc_enable_arenas); + } + if (message.objc_class_prefix !== "") { + writer.uint32(290).string(message.objc_class_prefix); + } + if (message.csharp_namespace !== "") { + writer.uint32(298).string(message.csharp_namespace); + } + if (message.swift_prefix !== "") { + writer.uint32(314).string(message.swift_prefix); + } + if 
(message.php_class_prefix !== "") { + writer.uint32(322).string(message.php_class_prefix); + } + if (message.php_namespace !== "") { + writer.uint32(330).string(message.php_namespace); + } + if (message.php_metadata_namespace !== "") { + writer.uint32(354).string(message.php_metadata_namespace); + } + if (message.ruby_package !== "") { + writer.uint32(362).string(message.ruby_package); + } + for (const v of message.uninterpreted_option) { + UninterpretedOption.encode(v!, writer.uint32(7994).fork()).ldelim(); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): FileOptions { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseFileOptions } as FileOptions; + message.uninterpreted_option = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.java_package = reader.string(); + break; + case 8: + message.java_outer_classname = reader.string(); + break; + case 10: + message.java_multiple_files = reader.bool(); + break; + case 20: + message.java_generate_equals_and_hash = reader.bool(); + break; + case 27: + message.java_string_check_utf8 = reader.bool(); + break; + case 9: + message.optimize_for = reader.int32() as any; + break; + case 11: + message.go_package = reader.string(); + break; + case 16: + message.cc_generic_services = reader.bool(); + break; + case 17: + message.java_generic_services = reader.bool(); + break; + case 18: + message.py_generic_services = reader.bool(); + break; + case 42: + message.php_generic_services = reader.bool(); + break; + case 23: + message.deprecated = reader.bool(); + break; + case 31: + message.cc_enable_arenas = reader.bool(); + break; + case 36: + message.objc_class_prefix = reader.string(); + break; + case 37: + message.csharp_namespace = reader.string(); + break; + case 39: + message.swift_prefix = reader.string(); + break; + case 
40: + message.php_class_prefix = reader.string(); + break; + case 41: + message.php_namespace = reader.string(); + break; + case 44: + message.php_metadata_namespace = reader.string(); + break; + case 45: + message.ruby_package = reader.string(); + break; + case 999: + message.uninterpreted_option.push( + UninterpretedOption.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): FileOptions { + const message = { ...baseFileOptions } as FileOptions; + message.uninterpreted_option = []; + if (object.java_package !== undefined && object.java_package !== null) { + message.java_package = String(object.java_package); + } else { + message.java_package = ""; + } + if ( + object.java_outer_classname !== undefined && + object.java_outer_classname !== null + ) { + message.java_outer_classname = String(object.java_outer_classname); + } else { + message.java_outer_classname = ""; + } + if ( + object.java_multiple_files !== undefined && + object.java_multiple_files !== null + ) { + message.java_multiple_files = Boolean(object.java_multiple_files); + } else { + message.java_multiple_files = false; + } + if ( + object.java_generate_equals_and_hash !== undefined && + object.java_generate_equals_and_hash !== null + ) { + message.java_generate_equals_and_hash = Boolean( + object.java_generate_equals_and_hash + ); + } else { + message.java_generate_equals_and_hash = false; + } + if ( + object.java_string_check_utf8 !== undefined && + object.java_string_check_utf8 !== null + ) { + message.java_string_check_utf8 = Boolean(object.java_string_check_utf8); + } else { + message.java_string_check_utf8 = false; + } + if (object.optimize_for !== undefined && object.optimize_for !== null) { + message.optimize_for = fileOptions_OptimizeModeFromJSON( + object.optimize_for + ); + } else { + message.optimize_for = 1; + } + if (object.go_package !== undefined && object.go_package !== null) { + 
message.go_package = String(object.go_package); + } else { + message.go_package = ""; + } + if ( + object.cc_generic_services !== undefined && + object.cc_generic_services !== null + ) { + message.cc_generic_services = Boolean(object.cc_generic_services); + } else { + message.cc_generic_services = false; + } + if ( + object.java_generic_services !== undefined && + object.java_generic_services !== null + ) { + message.java_generic_services = Boolean(object.java_generic_services); + } else { + message.java_generic_services = false; + } + if ( + object.py_generic_services !== undefined && + object.py_generic_services !== null + ) { + message.py_generic_services = Boolean(object.py_generic_services); + } else { + message.py_generic_services = false; + } + if ( + object.php_generic_services !== undefined && + object.php_generic_services !== null + ) { + message.php_generic_services = Boolean(object.php_generic_services); + } else { + message.php_generic_services = false; + } + if (object.deprecated !== undefined && object.deprecated !== null) { + message.deprecated = Boolean(object.deprecated); + } else { + message.deprecated = false; + } + if ( + object.cc_enable_arenas !== undefined && + object.cc_enable_arenas !== null + ) { + message.cc_enable_arenas = Boolean(object.cc_enable_arenas); + } else { + message.cc_enable_arenas = false; + } + if ( + object.objc_class_prefix !== undefined && + object.objc_class_prefix !== null + ) { + message.objc_class_prefix = String(object.objc_class_prefix); + } else { + message.objc_class_prefix = ""; + } + if ( + object.csharp_namespace !== undefined && + object.csharp_namespace !== null + ) { + message.csharp_namespace = String(object.csharp_namespace); + } else { + message.csharp_namespace = ""; + } + if (object.swift_prefix !== undefined && object.swift_prefix !== null) { + message.swift_prefix = String(object.swift_prefix); + } else { + message.swift_prefix = ""; + } + if ( + object.php_class_prefix !== undefined && + 
object.php_class_prefix !== null + ) { + message.php_class_prefix = String(object.php_class_prefix); + } else { + message.php_class_prefix = ""; + } + if (object.php_namespace !== undefined && object.php_namespace !== null) { + message.php_namespace = String(object.php_namespace); + } else { + message.php_namespace = ""; + } + if ( + object.php_metadata_namespace !== undefined && + object.php_metadata_namespace !== null + ) { + message.php_metadata_namespace = String(object.php_metadata_namespace); + } else { + message.php_metadata_namespace = ""; + } + if (object.ruby_package !== undefined && object.ruby_package !== null) { + message.ruby_package = String(object.ruby_package); + } else { + message.ruby_package = ""; + } + if ( + object.uninterpreted_option !== undefined && + object.uninterpreted_option !== null + ) { + for (const e of object.uninterpreted_option) { + message.uninterpreted_option.push(UninterpretedOption.fromJSON(e)); + } + } + return message; + }, + + toJSON(message: FileOptions): unknown { + const obj: any = {}; + message.java_package !== undefined && + (obj.java_package = message.java_package); + message.java_outer_classname !== undefined && + (obj.java_outer_classname = message.java_outer_classname); + message.java_multiple_files !== undefined && + (obj.java_multiple_files = message.java_multiple_files); + message.java_generate_equals_and_hash !== undefined && + (obj.java_generate_equals_and_hash = + message.java_generate_equals_and_hash); + message.java_string_check_utf8 !== undefined && + (obj.java_string_check_utf8 = message.java_string_check_utf8); + message.optimize_for !== undefined && + (obj.optimize_for = fileOptions_OptimizeModeToJSON(message.optimize_for)); + message.go_package !== undefined && (obj.go_package = message.go_package); + message.cc_generic_services !== undefined && + (obj.cc_generic_services = message.cc_generic_services); + message.java_generic_services !== undefined && + (obj.java_generic_services = 
message.java_generic_services); + message.py_generic_services !== undefined && + (obj.py_generic_services = message.py_generic_services); + message.php_generic_services !== undefined && + (obj.php_generic_services = message.php_generic_services); + message.deprecated !== undefined && (obj.deprecated = message.deprecated); + message.cc_enable_arenas !== undefined && + (obj.cc_enable_arenas = message.cc_enable_arenas); + message.objc_class_prefix !== undefined && + (obj.objc_class_prefix = message.objc_class_prefix); + message.csharp_namespace !== undefined && + (obj.csharp_namespace = message.csharp_namespace); + message.swift_prefix !== undefined && + (obj.swift_prefix = message.swift_prefix); + message.php_class_prefix !== undefined && + (obj.php_class_prefix = message.php_class_prefix); + message.php_namespace !== undefined && + (obj.php_namespace = message.php_namespace); + message.php_metadata_namespace !== undefined && + (obj.php_metadata_namespace = message.php_metadata_namespace); + message.ruby_package !== undefined && + (obj.ruby_package = message.ruby_package); + if (message.uninterpreted_option) { + obj.uninterpreted_option = message.uninterpreted_option.map((e) => + e ? 
UninterpretedOption.toJSON(e) : undefined + ); + } else { + obj.uninterpreted_option = []; + } + return obj; + }, + + fromPartial(object: DeepPartial): FileOptions { + const message = { ...baseFileOptions } as FileOptions; + message.uninterpreted_option = []; + if (object.java_package !== undefined && object.java_package !== null) { + message.java_package = object.java_package; + } else { + message.java_package = ""; + } + if ( + object.java_outer_classname !== undefined && + object.java_outer_classname !== null + ) { + message.java_outer_classname = object.java_outer_classname; + } else { + message.java_outer_classname = ""; + } + if ( + object.java_multiple_files !== undefined && + object.java_multiple_files !== null + ) { + message.java_multiple_files = object.java_multiple_files; + } else { + message.java_multiple_files = false; + } + if ( + object.java_generate_equals_and_hash !== undefined && + object.java_generate_equals_and_hash !== null + ) { + message.java_generate_equals_and_hash = + object.java_generate_equals_and_hash; + } else { + message.java_generate_equals_and_hash = false; + } + if ( + object.java_string_check_utf8 !== undefined && + object.java_string_check_utf8 !== null + ) { + message.java_string_check_utf8 = object.java_string_check_utf8; + } else { + message.java_string_check_utf8 = false; + } + if (object.optimize_for !== undefined && object.optimize_for !== null) { + message.optimize_for = object.optimize_for; + } else { + message.optimize_for = 1; + } + if (object.go_package !== undefined && object.go_package !== null) { + message.go_package = object.go_package; + } else { + message.go_package = ""; + } + if ( + object.cc_generic_services !== undefined && + object.cc_generic_services !== null + ) { + message.cc_generic_services = object.cc_generic_services; + } else { + message.cc_generic_services = false; + } + if ( + object.java_generic_services !== undefined && + object.java_generic_services !== null + ) { + 
message.java_generic_services = object.java_generic_services; + } else { + message.java_generic_services = false; + } + if ( + object.py_generic_services !== undefined && + object.py_generic_services !== null + ) { + message.py_generic_services = object.py_generic_services; + } else { + message.py_generic_services = false; + } + if ( + object.php_generic_services !== undefined && + object.php_generic_services !== null + ) { + message.php_generic_services = object.php_generic_services; + } else { + message.php_generic_services = false; + } + if (object.deprecated !== undefined && object.deprecated !== null) { + message.deprecated = object.deprecated; + } else { + message.deprecated = false; + } + if ( + object.cc_enable_arenas !== undefined && + object.cc_enable_arenas !== null + ) { + message.cc_enable_arenas = object.cc_enable_arenas; + } else { + message.cc_enable_arenas = false; + } + if ( + object.objc_class_prefix !== undefined && + object.objc_class_prefix !== null + ) { + message.objc_class_prefix = object.objc_class_prefix; + } else { + message.objc_class_prefix = ""; + } + if ( + object.csharp_namespace !== undefined && + object.csharp_namespace !== null + ) { + message.csharp_namespace = object.csharp_namespace; + } else { + message.csharp_namespace = ""; + } + if (object.swift_prefix !== undefined && object.swift_prefix !== null) { + message.swift_prefix = object.swift_prefix; + } else { + message.swift_prefix = ""; + } + if ( + object.php_class_prefix !== undefined && + object.php_class_prefix !== null + ) { + message.php_class_prefix = object.php_class_prefix; + } else { + message.php_class_prefix = ""; + } + if (object.php_namespace !== undefined && object.php_namespace !== null) { + message.php_namespace = object.php_namespace; + } else { + message.php_namespace = ""; + } + if ( + object.php_metadata_namespace !== undefined && + object.php_metadata_namespace !== null + ) { + message.php_metadata_namespace = object.php_metadata_namespace; + } else { + 
message.php_metadata_namespace = ""; + } + if (object.ruby_package !== undefined && object.ruby_package !== null) { + message.ruby_package = object.ruby_package; + } else { + message.ruby_package = ""; + } + if ( + object.uninterpreted_option !== undefined && + object.uninterpreted_option !== null + ) { + for (const e of object.uninterpreted_option) { + message.uninterpreted_option.push(UninterpretedOption.fromPartial(e)); + } + } + return message; + }, +}; + +const baseMessageOptions: object = { + message_set_wire_format: false, + no_standard_descriptor_accessor: false, + deprecated: false, + map_entry: false, +}; + +export const MessageOptions = { + encode(message: MessageOptions, writer: Writer = Writer.create()): Writer { + if (message.message_set_wire_format === true) { + writer.uint32(8).bool(message.message_set_wire_format); + } + if (message.no_standard_descriptor_accessor === true) { + writer.uint32(16).bool(message.no_standard_descriptor_accessor); + } + if (message.deprecated === true) { + writer.uint32(24).bool(message.deprecated); + } + if (message.map_entry === true) { + writer.uint32(56).bool(message.map_entry); + } + for (const v of message.uninterpreted_option) { + UninterpretedOption.encode(v!, writer.uint32(7994).fork()).ldelim(); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): MessageOptions { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseMessageOptions } as MessageOptions; + message.uninterpreted_option = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.message_set_wire_format = reader.bool(); + break; + case 2: + message.no_standard_descriptor_accessor = reader.bool(); + break; + case 3: + message.deprecated = reader.bool(); + break; + case 7: + message.map_entry = reader.bool(); + break; + case 999: + message.uninterpreted_option.push( + UninterpretedOption.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): MessageOptions { + const message = { ...baseMessageOptions } as MessageOptions; + message.uninterpreted_option = []; + if ( + object.message_set_wire_format !== undefined && + object.message_set_wire_format !== null + ) { + message.message_set_wire_format = Boolean(object.message_set_wire_format); + } else { + message.message_set_wire_format = false; + } + if ( + object.no_standard_descriptor_accessor !== undefined && + object.no_standard_descriptor_accessor !== null + ) { + message.no_standard_descriptor_accessor = Boolean( + object.no_standard_descriptor_accessor + ); + } else { + message.no_standard_descriptor_accessor = false; + } + if (object.deprecated !== undefined && object.deprecated !== null) { + message.deprecated = Boolean(object.deprecated); + } else { + message.deprecated = false; + } + if (object.map_entry !== undefined && object.map_entry !== null) { + message.map_entry = Boolean(object.map_entry); + } else { + message.map_entry = false; + } + if ( + object.uninterpreted_option !== undefined && + object.uninterpreted_option !== null + ) { + for (const e of object.uninterpreted_option) { + message.uninterpreted_option.push(UninterpretedOption.fromJSON(e)); + } + } + return message; + }, + + toJSON(message: MessageOptions): unknown { + const obj: any = {}; + 
message.message_set_wire_format !== undefined && + (obj.message_set_wire_format = message.message_set_wire_format); + message.no_standard_descriptor_accessor !== undefined && + (obj.no_standard_descriptor_accessor = + message.no_standard_descriptor_accessor); + message.deprecated !== undefined && (obj.deprecated = message.deprecated); + message.map_entry !== undefined && (obj.map_entry = message.map_entry); + if (message.uninterpreted_option) { + obj.uninterpreted_option = message.uninterpreted_option.map((e) => + e ? UninterpretedOption.toJSON(e) : undefined + ); + } else { + obj.uninterpreted_option = []; + } + return obj; + }, + + fromPartial(object: DeepPartial): MessageOptions { + const message = { ...baseMessageOptions } as MessageOptions; + message.uninterpreted_option = []; + if ( + object.message_set_wire_format !== undefined && + object.message_set_wire_format !== null + ) { + message.message_set_wire_format = object.message_set_wire_format; + } else { + message.message_set_wire_format = false; + } + if ( + object.no_standard_descriptor_accessor !== undefined && + object.no_standard_descriptor_accessor !== null + ) { + message.no_standard_descriptor_accessor = + object.no_standard_descriptor_accessor; + } else { + message.no_standard_descriptor_accessor = false; + } + if (object.deprecated !== undefined && object.deprecated !== null) { + message.deprecated = object.deprecated; + } else { + message.deprecated = false; + } + if (object.map_entry !== undefined && object.map_entry !== null) { + message.map_entry = object.map_entry; + } else { + message.map_entry = false; + } + if ( + object.uninterpreted_option !== undefined && + object.uninterpreted_option !== null + ) { + for (const e of object.uninterpreted_option) { + message.uninterpreted_option.push(UninterpretedOption.fromPartial(e)); + } + } + return message; + }, +}; + +const baseFieldOptions: object = { + ctype: 0, + packed: false, + jstype: 0, + lazy: false, + deprecated: false, + weak: false, +}; 
+ +export const FieldOptions = { + encode(message: FieldOptions, writer: Writer = Writer.create()): Writer { + if (message.ctype !== 0) { + writer.uint32(8).int32(message.ctype); + } + if (message.packed === true) { + writer.uint32(16).bool(message.packed); + } + if (message.jstype !== 0) { + writer.uint32(48).int32(message.jstype); + } + if (message.lazy === true) { + writer.uint32(40).bool(message.lazy); + } + if (message.deprecated === true) { + writer.uint32(24).bool(message.deprecated); + } + if (message.weak === true) { + writer.uint32(80).bool(message.weak); + } + for (const v of message.uninterpreted_option) { + UninterpretedOption.encode(v!, writer.uint32(7994).fork()).ldelim(); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): FieldOptions { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseFieldOptions } as FieldOptions; + message.uninterpreted_option = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.ctype = reader.int32() as any; + break; + case 2: + message.packed = reader.bool(); + break; + case 6: + message.jstype = reader.int32() as any; + break; + case 5: + message.lazy = reader.bool(); + break; + case 3: + message.deprecated = reader.bool(); + break; + case 10: + message.weak = reader.bool(); + break; + case 999: + message.uninterpreted_option.push( + UninterpretedOption.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): FieldOptions { + const message = { ...baseFieldOptions } as FieldOptions; + message.uninterpreted_option = []; + if (object.ctype !== undefined && object.ctype !== null) { + message.ctype = fieldOptions_CTypeFromJSON(object.ctype); + } else { + message.ctype = 0; + } + if (object.packed !== undefined && object.packed !== 
null) { + message.packed = Boolean(object.packed); + } else { + message.packed = false; + } + if (object.jstype !== undefined && object.jstype !== null) { + message.jstype = fieldOptions_JSTypeFromJSON(object.jstype); + } else { + message.jstype = 0; + } + if (object.lazy !== undefined && object.lazy !== null) { + message.lazy = Boolean(object.lazy); + } else { + message.lazy = false; + } + if (object.deprecated !== undefined && object.deprecated !== null) { + message.deprecated = Boolean(object.deprecated); + } else { + message.deprecated = false; + } + if (object.weak !== undefined && object.weak !== null) { + message.weak = Boolean(object.weak); + } else { + message.weak = false; + } + if ( + object.uninterpreted_option !== undefined && + object.uninterpreted_option !== null + ) { + for (const e of object.uninterpreted_option) { + message.uninterpreted_option.push(UninterpretedOption.fromJSON(e)); + } + } + return message; + }, + + toJSON(message: FieldOptions): unknown { + const obj: any = {}; + message.ctype !== undefined && + (obj.ctype = fieldOptions_CTypeToJSON(message.ctype)); + message.packed !== undefined && (obj.packed = message.packed); + message.jstype !== undefined && + (obj.jstype = fieldOptions_JSTypeToJSON(message.jstype)); + message.lazy !== undefined && (obj.lazy = message.lazy); + message.deprecated !== undefined && (obj.deprecated = message.deprecated); + message.weak !== undefined && (obj.weak = message.weak); + if (message.uninterpreted_option) { + obj.uninterpreted_option = message.uninterpreted_option.map((e) => + e ? 
UninterpretedOption.toJSON(e) : undefined + ); + } else { + obj.uninterpreted_option = []; + } + return obj; + }, + + fromPartial(object: DeepPartial): FieldOptions { + const message = { ...baseFieldOptions } as FieldOptions; + message.uninterpreted_option = []; + if (object.ctype !== undefined && object.ctype !== null) { + message.ctype = object.ctype; + } else { + message.ctype = 0; + } + if (object.packed !== undefined && object.packed !== null) { + message.packed = object.packed; + } else { + message.packed = false; + } + if (object.jstype !== undefined && object.jstype !== null) { + message.jstype = object.jstype; + } else { + message.jstype = 0; + } + if (object.lazy !== undefined && object.lazy !== null) { + message.lazy = object.lazy; + } else { + message.lazy = false; + } + if (object.deprecated !== undefined && object.deprecated !== null) { + message.deprecated = object.deprecated; + } else { + message.deprecated = false; + } + if (object.weak !== undefined && object.weak !== null) { + message.weak = object.weak; + } else { + message.weak = false; + } + if ( + object.uninterpreted_option !== undefined && + object.uninterpreted_option !== null + ) { + for (const e of object.uninterpreted_option) { + message.uninterpreted_option.push(UninterpretedOption.fromPartial(e)); + } + } + return message; + }, +}; + +const baseOneofOptions: object = {}; + +export const OneofOptions = { + encode(message: OneofOptions, writer: Writer = Writer.create()): Writer { + for (const v of message.uninterpreted_option) { + UninterpretedOption.encode(v!, writer.uint32(7994).fork()).ldelim(); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): OneofOptions { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseOneofOptions } as OneofOptions; + message.uninterpreted_option = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 999: + message.uninterpreted_option.push( + UninterpretedOption.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): OneofOptions { + const message = { ...baseOneofOptions } as OneofOptions; + message.uninterpreted_option = []; + if ( + object.uninterpreted_option !== undefined && + object.uninterpreted_option !== null + ) { + for (const e of object.uninterpreted_option) { + message.uninterpreted_option.push(UninterpretedOption.fromJSON(e)); + } + } + return message; + }, + + toJSON(message: OneofOptions): unknown { + const obj: any = {}; + if (message.uninterpreted_option) { + obj.uninterpreted_option = message.uninterpreted_option.map((e) => + e ? UninterpretedOption.toJSON(e) : undefined + ); + } else { + obj.uninterpreted_option = []; + } + return obj; + }, + + fromPartial(object: DeepPartial): OneofOptions { + const message = { ...baseOneofOptions } as OneofOptions; + message.uninterpreted_option = []; + if ( + object.uninterpreted_option !== undefined && + object.uninterpreted_option !== null + ) { + for (const e of object.uninterpreted_option) { + message.uninterpreted_option.push(UninterpretedOption.fromPartial(e)); + } + } + return message; + }, +}; + +const baseEnumOptions: object = { allow_alias: false, deprecated: false }; + +export const EnumOptions = { + encode(message: EnumOptions, writer: Writer = Writer.create()): Writer { + if (message.allow_alias === true) { + writer.uint32(16).bool(message.allow_alias); + } + if (message.deprecated === true) { + writer.uint32(24).bool(message.deprecated); + } + for (const v of message.uninterpreted_option) { + UninterpretedOption.encode(v!, writer.uint32(7994).fork()).ldelim(); + } + return writer; 
+ }, + + decode(input: Reader | Uint8Array, length?: number): EnumOptions { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseEnumOptions } as EnumOptions; + message.uninterpreted_option = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 2: + message.allow_alias = reader.bool(); + break; + case 3: + message.deprecated = reader.bool(); + break; + case 999: + message.uninterpreted_option.push( + UninterpretedOption.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): EnumOptions { + const message = { ...baseEnumOptions } as EnumOptions; + message.uninterpreted_option = []; + if (object.allow_alias !== undefined && object.allow_alias !== null) { + message.allow_alias = Boolean(object.allow_alias); + } else { + message.allow_alias = false; + } + if (object.deprecated !== undefined && object.deprecated !== null) { + message.deprecated = Boolean(object.deprecated); + } else { + message.deprecated = false; + } + if ( + object.uninterpreted_option !== undefined && + object.uninterpreted_option !== null + ) { + for (const e of object.uninterpreted_option) { + message.uninterpreted_option.push(UninterpretedOption.fromJSON(e)); + } + } + return message; + }, + + toJSON(message: EnumOptions): unknown { + const obj: any = {}; + message.allow_alias !== undefined && + (obj.allow_alias = message.allow_alias); + message.deprecated !== undefined && (obj.deprecated = message.deprecated); + if (message.uninterpreted_option) { + obj.uninterpreted_option = message.uninterpreted_option.map((e) => + e ? 
UninterpretedOption.toJSON(e) : undefined + ); + } else { + obj.uninterpreted_option = []; + } + return obj; + }, + + fromPartial(object: DeepPartial): EnumOptions { + const message = { ...baseEnumOptions } as EnumOptions; + message.uninterpreted_option = []; + if (object.allow_alias !== undefined && object.allow_alias !== null) { + message.allow_alias = object.allow_alias; + } else { + message.allow_alias = false; + } + if (object.deprecated !== undefined && object.deprecated !== null) { + message.deprecated = object.deprecated; + } else { + message.deprecated = false; + } + if ( + object.uninterpreted_option !== undefined && + object.uninterpreted_option !== null + ) { + for (const e of object.uninterpreted_option) { + message.uninterpreted_option.push(UninterpretedOption.fromPartial(e)); + } + } + return message; + }, +}; + +const baseEnumValueOptions: object = { deprecated: false }; + +export const EnumValueOptions = { + encode(message: EnumValueOptions, writer: Writer = Writer.create()): Writer { + if (message.deprecated === true) { + writer.uint32(8).bool(message.deprecated); + } + for (const v of message.uninterpreted_option) { + UninterpretedOption.encode(v!, writer.uint32(7994).fork()).ldelim(); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): EnumValueOptions { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseEnumValueOptions } as EnumValueOptions; + message.uninterpreted_option = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.deprecated = reader.bool(); + break; + case 999: + message.uninterpreted_option.push( + UninterpretedOption.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): EnumValueOptions { + const message = { ...baseEnumValueOptions } as EnumValueOptions; + message.uninterpreted_option = []; + if (object.deprecated !== undefined && object.deprecated !== null) { + message.deprecated = Boolean(object.deprecated); + } else { + message.deprecated = false; + } + if ( + object.uninterpreted_option !== undefined && + object.uninterpreted_option !== null + ) { + for (const e of object.uninterpreted_option) { + message.uninterpreted_option.push(UninterpretedOption.fromJSON(e)); + } + } + return message; + }, + + toJSON(message: EnumValueOptions): unknown { + const obj: any = {}; + message.deprecated !== undefined && (obj.deprecated = message.deprecated); + if (message.uninterpreted_option) { + obj.uninterpreted_option = message.uninterpreted_option.map((e) => + e ? 
UninterpretedOption.toJSON(e) : undefined + ); + } else { + obj.uninterpreted_option = []; + } + return obj; + }, + + fromPartial(object: DeepPartial): EnumValueOptions { + const message = { ...baseEnumValueOptions } as EnumValueOptions; + message.uninterpreted_option = []; + if (object.deprecated !== undefined && object.deprecated !== null) { + message.deprecated = object.deprecated; + } else { + message.deprecated = false; + } + if ( + object.uninterpreted_option !== undefined && + object.uninterpreted_option !== null + ) { + for (const e of object.uninterpreted_option) { + message.uninterpreted_option.push(UninterpretedOption.fromPartial(e)); + } + } + return message; + }, +}; + +const baseServiceOptions: object = { deprecated: false }; + +export const ServiceOptions = { + encode(message: ServiceOptions, writer: Writer = Writer.create()): Writer { + if (message.deprecated === true) { + writer.uint32(264).bool(message.deprecated); + } + for (const v of message.uninterpreted_option) { + UninterpretedOption.encode(v!, writer.uint32(7994).fork()).ldelim(); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): ServiceOptions { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseServiceOptions } as ServiceOptions; + message.uninterpreted_option = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 33: + message.deprecated = reader.bool(); + break; + case 999: + message.uninterpreted_option.push( + UninterpretedOption.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ServiceOptions { + const message = { ...baseServiceOptions } as ServiceOptions; + message.uninterpreted_option = []; + if (object.deprecated !== undefined && object.deprecated !== null) { + message.deprecated = Boolean(object.deprecated); + } else { + message.deprecated = false; + } + if ( + object.uninterpreted_option !== undefined && + object.uninterpreted_option !== null + ) { + for (const e of object.uninterpreted_option) { + message.uninterpreted_option.push(UninterpretedOption.fromJSON(e)); + } + } + return message; + }, + + toJSON(message: ServiceOptions): unknown { + const obj: any = {}; + message.deprecated !== undefined && (obj.deprecated = message.deprecated); + if (message.uninterpreted_option) { + obj.uninterpreted_option = message.uninterpreted_option.map((e) => + e ? 
UninterpretedOption.toJSON(e) : undefined + ); + } else { + obj.uninterpreted_option = []; + } + return obj; + }, + + fromPartial(object: DeepPartial): ServiceOptions { + const message = { ...baseServiceOptions } as ServiceOptions; + message.uninterpreted_option = []; + if (object.deprecated !== undefined && object.deprecated !== null) { + message.deprecated = object.deprecated; + } else { + message.deprecated = false; + } + if ( + object.uninterpreted_option !== undefined && + object.uninterpreted_option !== null + ) { + for (const e of object.uninterpreted_option) { + message.uninterpreted_option.push(UninterpretedOption.fromPartial(e)); + } + } + return message; + }, +}; + +const baseMethodOptions: object = { deprecated: false, idempotency_level: 0 }; + +export const MethodOptions = { + encode(message: MethodOptions, writer: Writer = Writer.create()): Writer { + if (message.deprecated === true) { + writer.uint32(264).bool(message.deprecated); + } + if (message.idempotency_level !== 0) { + writer.uint32(272).int32(message.idempotency_level); + } + for (const v of message.uninterpreted_option) { + UninterpretedOption.encode(v!, writer.uint32(7994).fork()).ldelim(); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): MethodOptions { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseMethodOptions } as MethodOptions; + message.uninterpreted_option = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 33: + message.deprecated = reader.bool(); + break; + case 34: + message.idempotency_level = reader.int32() as any; + break; + case 999: + message.uninterpreted_option.push( + UninterpretedOption.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): MethodOptions { + const message = { ...baseMethodOptions } as MethodOptions; + message.uninterpreted_option = []; + if (object.deprecated !== undefined && object.deprecated !== null) { + message.deprecated = Boolean(object.deprecated); + } else { + message.deprecated = false; + } + if ( + object.idempotency_level !== undefined && + object.idempotency_level !== null + ) { + message.idempotency_level = methodOptions_IdempotencyLevelFromJSON( + object.idempotency_level + ); + } else { + message.idempotency_level = 0; + } + if ( + object.uninterpreted_option !== undefined && + object.uninterpreted_option !== null + ) { + for (const e of object.uninterpreted_option) { + message.uninterpreted_option.push(UninterpretedOption.fromJSON(e)); + } + } + return message; + }, + + toJSON(message: MethodOptions): unknown { + const obj: any = {}; + message.deprecated !== undefined && (obj.deprecated = message.deprecated); + message.idempotency_level !== undefined && + (obj.idempotency_level = methodOptions_IdempotencyLevelToJSON( + message.idempotency_level + )); + if (message.uninterpreted_option) { + obj.uninterpreted_option = message.uninterpreted_option.map((e) => + e ? 
UninterpretedOption.toJSON(e) : undefined + ); + } else { + obj.uninterpreted_option = []; + } + return obj; + }, + + fromPartial(object: DeepPartial): MethodOptions { + const message = { ...baseMethodOptions } as MethodOptions; + message.uninterpreted_option = []; + if (object.deprecated !== undefined && object.deprecated !== null) { + message.deprecated = object.deprecated; + } else { + message.deprecated = false; + } + if ( + object.idempotency_level !== undefined && + object.idempotency_level !== null + ) { + message.idempotency_level = object.idempotency_level; + } else { + message.idempotency_level = 0; + } + if ( + object.uninterpreted_option !== undefined && + object.uninterpreted_option !== null + ) { + for (const e of object.uninterpreted_option) { + message.uninterpreted_option.push(UninterpretedOption.fromPartial(e)); + } + } + return message; + }, +}; + +const baseUninterpretedOption: object = { + identifier_value: "", + positive_int_value: 0, + negative_int_value: 0, + double_value: 0, + aggregate_value: "", +}; + +export const UninterpretedOption = { + encode( + message: UninterpretedOption, + writer: Writer = Writer.create() + ): Writer { + for (const v of message.name) { + UninterpretedOption_NamePart.encode( + v!, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.identifier_value !== "") { + writer.uint32(26).string(message.identifier_value); + } + if (message.positive_int_value !== 0) { + writer.uint32(32).uint64(message.positive_int_value); + } + if (message.negative_int_value !== 0) { + writer.uint32(40).int64(message.negative_int_value); + } + if (message.double_value !== 0) { + writer.uint32(49).double(message.double_value); + } + if (message.string_value.length !== 0) { + writer.uint32(58).bytes(message.string_value); + } + if (message.aggregate_value !== "") { + writer.uint32(66).string(message.aggregate_value); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): UninterpretedOption { + const reader = 
input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseUninterpretedOption } as UninterpretedOption; + message.name = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 2: + message.name.push( + UninterpretedOption_NamePart.decode(reader, reader.uint32()) + ); + break; + case 3: + message.identifier_value = reader.string(); + break; + case 4: + message.positive_int_value = longToNumber(reader.uint64() as Long); + break; + case 5: + message.negative_int_value = longToNumber(reader.int64() as Long); + break; + case 6: + message.double_value = reader.double(); + break; + case 7: + message.string_value = reader.bytes(); + break; + case 8: + message.aggregate_value = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UninterpretedOption { + const message = { ...baseUninterpretedOption } as UninterpretedOption; + message.name = []; + if (object.name !== undefined && object.name !== null) { + for (const e of object.name) { + message.name.push(UninterpretedOption_NamePart.fromJSON(e)); + } + } + if ( + object.identifier_value !== undefined && + object.identifier_value !== null + ) { + message.identifier_value = String(object.identifier_value); + } else { + message.identifier_value = ""; + } + if ( + object.positive_int_value !== undefined && + object.positive_int_value !== null + ) { + message.positive_int_value = Number(object.positive_int_value); + } else { + message.positive_int_value = 0; + } + if ( + object.negative_int_value !== undefined && + object.negative_int_value !== null + ) { + message.negative_int_value = Number(object.negative_int_value); + } else { + message.negative_int_value = 0; + } + if (object.double_value !== undefined && object.double_value !== null) { + message.double_value = Number(object.double_value); + } else { + 
message.double_value = 0; + } + if (object.string_value !== undefined && object.string_value !== null) { + message.string_value = bytesFromBase64(object.string_value); + } + if ( + object.aggregate_value !== undefined && + object.aggregate_value !== null + ) { + message.aggregate_value = String(object.aggregate_value); + } else { + message.aggregate_value = ""; + } + return message; + }, + + toJSON(message: UninterpretedOption): unknown { + const obj: any = {}; + if (message.name) { + obj.name = message.name.map((e) => + e ? UninterpretedOption_NamePart.toJSON(e) : undefined + ); + } else { + obj.name = []; + } + message.identifier_value !== undefined && + (obj.identifier_value = message.identifier_value); + message.positive_int_value !== undefined && + (obj.positive_int_value = message.positive_int_value); + message.negative_int_value !== undefined && + (obj.negative_int_value = message.negative_int_value); + message.double_value !== undefined && + (obj.double_value = message.double_value); + message.string_value !== undefined && + (obj.string_value = base64FromBytes( + message.string_value !== undefined + ? 
message.string_value + : new Uint8Array() + )); + message.aggregate_value !== undefined && + (obj.aggregate_value = message.aggregate_value); + return obj; + }, + + fromPartial(object: DeepPartial): UninterpretedOption { + const message = { ...baseUninterpretedOption } as UninterpretedOption; + message.name = []; + if (object.name !== undefined && object.name !== null) { + for (const e of object.name) { + message.name.push(UninterpretedOption_NamePart.fromPartial(e)); + } + } + if ( + object.identifier_value !== undefined && + object.identifier_value !== null + ) { + message.identifier_value = object.identifier_value; + } else { + message.identifier_value = ""; + } + if ( + object.positive_int_value !== undefined && + object.positive_int_value !== null + ) { + message.positive_int_value = object.positive_int_value; + } else { + message.positive_int_value = 0; + } + if ( + object.negative_int_value !== undefined && + object.negative_int_value !== null + ) { + message.negative_int_value = object.negative_int_value; + } else { + message.negative_int_value = 0; + } + if (object.double_value !== undefined && object.double_value !== null) { + message.double_value = object.double_value; + } else { + message.double_value = 0; + } + if (object.string_value !== undefined && object.string_value !== null) { + message.string_value = object.string_value; + } else { + message.string_value = new Uint8Array(); + } + if ( + object.aggregate_value !== undefined && + object.aggregate_value !== null + ) { + message.aggregate_value = object.aggregate_value; + } else { + message.aggregate_value = ""; + } + return message; + }, +}; + +const baseUninterpretedOption_NamePart: object = { + name_part: "", + is_extension: false, +}; + +export const UninterpretedOption_NamePart = { + encode( + message: UninterpretedOption_NamePart, + writer: Writer = Writer.create() + ): Writer { + if (message.name_part !== "") { + writer.uint32(10).string(message.name_part); + } + if (message.is_extension === 
true) { + writer.uint32(16).bool(message.is_extension); + } + return writer; + }, + + decode( + input: Reader | Uint8Array, + length?: number + ): UninterpretedOption_NamePart { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseUninterpretedOption_NamePart, + } as UninterpretedOption_NamePart; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name_part = reader.string(); + break; + case 2: + message.is_extension = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UninterpretedOption_NamePart { + const message = { + ...baseUninterpretedOption_NamePart, + } as UninterpretedOption_NamePart; + if (object.name_part !== undefined && object.name_part !== null) { + message.name_part = String(object.name_part); + } else { + message.name_part = ""; + } + if (object.is_extension !== undefined && object.is_extension !== null) { + message.is_extension = Boolean(object.is_extension); + } else { + message.is_extension = false; + } + return message; + }, + + toJSON(message: UninterpretedOption_NamePart): unknown { + const obj: any = {}; + message.name_part !== undefined && (obj.name_part = message.name_part); + message.is_extension !== undefined && + (obj.is_extension = message.is_extension); + return obj; + }, + + fromPartial( + object: DeepPartial + ): UninterpretedOption_NamePart { + const message = { + ...baseUninterpretedOption_NamePart, + } as UninterpretedOption_NamePart; + if (object.name_part !== undefined && object.name_part !== null) { + message.name_part = object.name_part; + } else { + message.name_part = ""; + } + if (object.is_extension !== undefined && object.is_extension !== null) { + message.is_extension = object.is_extension; + } else { + message.is_extension = false; + } + return message; + }, +}; + +const 
baseSourceCodeInfo: object = {}; + +export const SourceCodeInfo = { + encode(message: SourceCodeInfo, writer: Writer = Writer.create()): Writer { + for (const v of message.location) { + SourceCodeInfo_Location.encode(v!, writer.uint32(10).fork()).ldelim(); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): SourceCodeInfo { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseSourceCodeInfo } as SourceCodeInfo; + message.location = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.location.push( + SourceCodeInfo_Location.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): SourceCodeInfo { + const message = { ...baseSourceCodeInfo } as SourceCodeInfo; + message.location = []; + if (object.location !== undefined && object.location !== null) { + for (const e of object.location) { + message.location.push(SourceCodeInfo_Location.fromJSON(e)); + } + } + return message; + }, + + toJSON(message: SourceCodeInfo): unknown { + const obj: any = {}; + if (message.location) { + obj.location = message.location.map((e) => + e ? 
SourceCodeInfo_Location.toJSON(e) : undefined + ); + } else { + obj.location = []; + } + return obj; + }, + + fromPartial(object: DeepPartial): SourceCodeInfo { + const message = { ...baseSourceCodeInfo } as SourceCodeInfo; + message.location = []; + if (object.location !== undefined && object.location !== null) { + for (const e of object.location) { + message.location.push(SourceCodeInfo_Location.fromPartial(e)); + } + } + return message; + }, +}; + +const baseSourceCodeInfo_Location: object = { + path: 0, + span: 0, + leading_comments: "", + trailing_comments: "", + leading_detached_comments: "", +}; + +export const SourceCodeInfo_Location = { + encode( + message: SourceCodeInfo_Location, + writer: Writer = Writer.create() + ): Writer { + writer.uint32(10).fork(); + for (const v of message.path) { + writer.int32(v); + } + writer.ldelim(); + writer.uint32(18).fork(); + for (const v of message.span) { + writer.int32(v); + } + writer.ldelim(); + if (message.leading_comments !== "") { + writer.uint32(26).string(message.leading_comments); + } + if (message.trailing_comments !== "") { + writer.uint32(34).string(message.trailing_comments); + } + for (const v of message.leading_detached_comments) { + writer.uint32(50).string(v!); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): SourceCodeInfo_Location { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseSourceCodeInfo_Location, + } as SourceCodeInfo_Location; + message.path = []; + message.span = []; + message.leading_detached_comments = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if ((tag & 7) === 2) { + const end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) { + message.path.push(reader.int32()); + } + } else { + message.path.push(reader.int32()); + } + break; + case 2: + if ((tag & 7) === 2) { + const end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) { + message.span.push(reader.int32()); + } + } else { + message.span.push(reader.int32()); + } + break; + case 3: + message.leading_comments = reader.string(); + break; + case 4: + message.trailing_comments = reader.string(); + break; + case 6: + message.leading_detached_comments.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): SourceCodeInfo_Location { + const message = { + ...baseSourceCodeInfo_Location, + } as SourceCodeInfo_Location; + message.path = []; + message.span = []; + message.leading_detached_comments = []; + if (object.path !== undefined && object.path !== null) { + for (const e of object.path) { + message.path.push(Number(e)); + } + } + if (object.span !== undefined && object.span !== null) { + for (const e of object.span) { + message.span.push(Number(e)); + } + } + if ( + object.leading_comments !== undefined && + object.leading_comments !== null + ) { + message.leading_comments = String(object.leading_comments); + } else { + message.leading_comments = ""; + } + if ( + object.trailing_comments !== undefined && + object.trailing_comments !== null + ) { + message.trailing_comments = String(object.trailing_comments); + } else { + message.trailing_comments = ""; + } + if ( + object.leading_detached_comments !== undefined && + object.leading_detached_comments !== null + ) 
{ + for (const e of object.leading_detached_comments) { + message.leading_detached_comments.push(String(e)); + } + } + return message; + }, + + toJSON(message: SourceCodeInfo_Location): unknown { + const obj: any = {}; + if (message.path) { + obj.path = message.path.map((e) => e); + } else { + obj.path = []; + } + if (message.span) { + obj.span = message.span.map((e) => e); + } else { + obj.span = []; + } + message.leading_comments !== undefined && + (obj.leading_comments = message.leading_comments); + message.trailing_comments !== undefined && + (obj.trailing_comments = message.trailing_comments); + if (message.leading_detached_comments) { + obj.leading_detached_comments = message.leading_detached_comments.map( + (e) => e + ); + } else { + obj.leading_detached_comments = []; + } + return obj; + }, + + fromPartial( + object: DeepPartial + ): SourceCodeInfo_Location { + const message = { + ...baseSourceCodeInfo_Location, + } as SourceCodeInfo_Location; + message.path = []; + message.span = []; + message.leading_detached_comments = []; + if (object.path !== undefined && object.path !== null) { + for (const e of object.path) { + message.path.push(e); + } + } + if (object.span !== undefined && object.span !== null) { + for (const e of object.span) { + message.span.push(e); + } + } + if ( + object.leading_comments !== undefined && + object.leading_comments !== null + ) { + message.leading_comments = object.leading_comments; + } else { + message.leading_comments = ""; + } + if ( + object.trailing_comments !== undefined && + object.trailing_comments !== null + ) { + message.trailing_comments = object.trailing_comments; + } else { + message.trailing_comments = ""; + } + if ( + object.leading_detached_comments !== undefined && + object.leading_detached_comments !== null + ) { + for (const e of object.leading_detached_comments) { + message.leading_detached_comments.push(e); + } + } + return message; + }, +}; + +const baseGeneratedCodeInfo: object = {}; + +export const 
GeneratedCodeInfo = { + encode(message: GeneratedCodeInfo, writer: Writer = Writer.create()): Writer { + for (const v of message.annotation) { + GeneratedCodeInfo_Annotation.encode( + v!, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): GeneratedCodeInfo { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseGeneratedCodeInfo } as GeneratedCodeInfo; + message.annotation = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.annotation.push( + GeneratedCodeInfo_Annotation.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GeneratedCodeInfo { + const message = { ...baseGeneratedCodeInfo } as GeneratedCodeInfo; + message.annotation = []; + if (object.annotation !== undefined && object.annotation !== null) { + for (const e of object.annotation) { + message.annotation.push(GeneratedCodeInfo_Annotation.fromJSON(e)); + } + } + return message; + }, + + toJSON(message: GeneratedCodeInfo): unknown { + const obj: any = {}; + if (message.annotation) { + obj.annotation = message.annotation.map((e) => + e ? 
GeneratedCodeInfo_Annotation.toJSON(e) : undefined + ); + } else { + obj.annotation = []; + } + return obj; + }, + + fromPartial(object: DeepPartial): GeneratedCodeInfo { + const message = { ...baseGeneratedCodeInfo } as GeneratedCodeInfo; + message.annotation = []; + if (object.annotation !== undefined && object.annotation !== null) { + for (const e of object.annotation) { + message.annotation.push(GeneratedCodeInfo_Annotation.fromPartial(e)); + } + } + return message; + }, +}; + +const baseGeneratedCodeInfo_Annotation: object = { + path: 0, + source_file: "", + begin: 0, + end: 0, +}; + +export const GeneratedCodeInfo_Annotation = { + encode( + message: GeneratedCodeInfo_Annotation, + writer: Writer = Writer.create() + ): Writer { + writer.uint32(10).fork(); + for (const v of message.path) { + writer.int32(v); + } + writer.ldelim(); + if (message.source_file !== "") { + writer.uint32(18).string(message.source_file); + } + if (message.begin !== 0) { + writer.uint32(24).int32(message.begin); + } + if (message.end !== 0) { + writer.uint32(32).int32(message.end); + } + return writer; + }, + + decode( + input: Reader | Uint8Array, + length?: number + ): GeneratedCodeInfo_Annotation { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseGeneratedCodeInfo_Annotation, + } as GeneratedCodeInfo_Annotation; + message.path = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if ((tag & 7) === 2) { + const end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) { + message.path.push(reader.int32()); + } + } else { + message.path.push(reader.int32()); + } + break; + case 2: + message.source_file = reader.string(); + break; + case 3: + message.begin = reader.int32(); + break; + case 4: + message.end = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GeneratedCodeInfo_Annotation { + const message = { + ...baseGeneratedCodeInfo_Annotation, + } as GeneratedCodeInfo_Annotation; + message.path = []; + if (object.path !== undefined && object.path !== null) { + for (const e of object.path) { + message.path.push(Number(e)); + } + } + if (object.source_file !== undefined && object.source_file !== null) { + message.source_file = String(object.source_file); + } else { + message.source_file = ""; + } + if (object.begin !== undefined && object.begin !== null) { + message.begin = Number(object.begin); + } else { + message.begin = 0; + } + if (object.end !== undefined && object.end !== null) { + message.end = Number(object.end); + } else { + message.end = 0; + } + return message; + }, + + toJSON(message: GeneratedCodeInfo_Annotation): unknown { + const obj: any = {}; + if (message.path) { + obj.path = message.path.map((e) => e); + } else { + obj.path = []; + } + message.source_file !== undefined && + (obj.source_file = message.source_file); + message.begin !== undefined && (obj.begin = message.begin); + message.end !== undefined && (obj.end = message.end); + return obj; + }, + + fromPartial( + object: DeepPartial + ): GeneratedCodeInfo_Annotation { + const message = { + ...baseGeneratedCodeInfo_Annotation, + } as 
GeneratedCodeInfo_Annotation; + message.path = []; + if (object.path !== undefined && object.path !== null) { + for (const e of object.path) { + message.path.push(e); + } + } + if (object.source_file !== undefined && object.source_file !== null) { + message.source_file = object.source_file; + } else { + message.source_file = ""; + } + if (object.begin !== undefined && object.begin !== null) { + message.begin = object.begin; + } else { + message.begin = 0; + } + if (object.end !== undefined && object.end !== null) { + message.end = object.end; + } else { + message.end = 0; + } + return message; + }, +}; + +declare var self: any | undefined; +declare var window: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +const atob: (b64: string) => string = + globalThis.atob || + ((b64) => globalThis.Buffer.from(b64, "base64").toString("binary")); +function bytesFromBase64(b64: string): Uint8Array { + const bin = atob(b64); + const arr = new Uint8Array(bin.length); + for (let i = 0; i < bin.length; ++i) { + arr[i] = bin.charCodeAt(i); + } + return arr; +} + +const btoa: (bin: string) => string = + globalThis.btoa || + ((bin) => globalThis.Buffer.from(bin, "binary").toString("base64")); +function base64FromBytes(arr: Uint8Array): string { + const bin: string[] = []; + for (let i = 0; i < arr.byteLength; ++i) { + bin.push(String.fromCharCode(arr[i])); + } + return btoa(bin.join("")); +} + +type Builtin = Date | Function | Uint8Array | string | number | undefined; +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? 
{ [K in keyof T]?: DeepPartial } + : Partial; + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (util.Long !== Long) { + util.Long = Long as any; + configure(); +} diff --git a/vue/src/store/generated/market/pendulumlabs.market.market/module/types/market/burnings.ts b/vue/src/store/generated/market/pendulumlabs.market.market/module/types/market/burnings.ts new file mode 100644 index 00000000..763ddca5 --- /dev/null +++ b/vue/src/store/generated/market/pendulumlabs.market.market/module/types/market/burnings.ts @@ -0,0 +1,151 @@ +/* eslint-disable */ +import { Writer, Reader } from "protobufjs/minimal"; + +export const protobufPackage = "pendulumlabs.market.market"; + +export interface Burnings { + denom: string; + amount: string; +} + +export interface Burned { + amount: string; +} + +const baseBurnings: object = { denom: "", amount: "" }; + +export const Burnings = { + encode(message: Burnings, writer: Writer = Writer.create()): Writer { + if (message.denom !== "") { + writer.uint32(10).string(message.denom); + } + if (message.amount !== "") { + writer.uint32(18).string(message.amount); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): Burnings { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseBurnings } as Burnings; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.denom = reader.string(); + break; + case 2: + message.amount = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Burnings { + const message = { ...baseBurnings } as Burnings; + if (object.denom !== undefined && object.denom !== null) { + message.denom = String(object.denom); + } else { + message.denom = ""; + } + if (object.amount !== undefined && object.amount !== null) { + message.amount = String(object.amount); + } else { + message.amount = ""; + } + return message; + }, + + toJSON(message: Burnings): unknown { + const obj: any = {}; + message.denom !== undefined && (obj.denom = message.denom); + message.amount !== undefined && (obj.amount = message.amount); + return obj; + }, + + fromPartial(object: DeepPartial): Burnings { + const message = { ...baseBurnings } as Burnings; + if (object.denom !== undefined && object.denom !== null) { + message.denom = object.denom; + } else { + message.denom = ""; + } + if (object.amount !== undefined && object.amount !== null) { + message.amount = object.amount; + } else { + message.amount = ""; + } + return message; + }, +}; + +const baseBurned: object = { amount: "" }; + +export const Burned = { + encode(message: Burned, writer: Writer = Writer.create()): Writer { + if (message.amount !== "") { + writer.uint32(10).string(message.amount); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): Burned { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseBurned } as Burned; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.amount = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Burned { + const message = { ...baseBurned } as Burned; + if (object.amount !== undefined && object.amount !== null) { + message.amount = String(object.amount); + } else { + message.amount = ""; + } + return message; + }, + + toJSON(message: Burned): unknown { + const obj: any = {}; + message.amount !== undefined && (obj.amount = message.amount); + return obj; + }, + + fromPartial(object: DeepPartial): Burned { + const message = { ...baseBurned } as Burned; + if (object.amount !== undefined && object.amount !== null) { + message.amount = object.amount; + } else { + message.amount = ""; + } + return message; + }, +}; + +type Builtin = Date | Function | Uint8Array | string | number | undefined; +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? 
{ [K in keyof T]?: DeepPartial } + : Partial; diff --git a/vue/src/store/generated/market/pendulumlabs.market.market/module/types/market/drop.ts b/vue/src/store/generated/market/pendulumlabs.market.market/module/types/market/drop.ts new file mode 100644 index 00000000..92dcfafc --- /dev/null +++ b/vue/src/store/generated/market/pendulumlabs.market.market/module/types/market/drop.ts @@ -0,0 +1,353 @@ +/* eslint-disable */ +import * as Long from "long"; +import { util, configure, Writer, Reader } from "protobufjs/minimal"; + +export const protobufPackage = "pendulumlabs.market.market"; + +export interface Drop { + uid: number; + owner: string; + pair: string; + drops: string; + product: string; + active: boolean; +} + +export interface Drops { + uids: number[]; + sum: string; +} + +export interface DropPairs { + pairs: string[]; +} + +const baseDrop: object = { + uid: 0, + owner: "", + pair: "", + drops: "", + product: "", + active: false, +}; + +export const Drop = { + encode(message: Drop, writer: Writer = Writer.create()): Writer { + if (message.uid !== 0) { + writer.uint32(8).uint64(message.uid); + } + if (message.owner !== "") { + writer.uint32(18).string(message.owner); + } + if (message.pair !== "") { + writer.uint32(26).string(message.pair); + } + if (message.drops !== "") { + writer.uint32(34).string(message.drops); + } + if (message.product !== "") { + writer.uint32(42).string(message.product); + } + if (message.active === true) { + writer.uint32(48).bool(message.active); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): Drop { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseDrop } as Drop; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.uid = longToNumber(reader.uint64() as Long); + break; + case 2: + message.owner = reader.string(); + break; + case 3: + message.pair = reader.string(); + break; + case 4: + message.drops = reader.string(); + break; + case 5: + message.product = reader.string(); + break; + case 6: + message.active = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Drop { + const message = { ...baseDrop } as Drop; + if (object.uid !== undefined && object.uid !== null) { + message.uid = Number(object.uid); + } else { + message.uid = 0; + } + if (object.owner !== undefined && object.owner !== null) { + message.owner = String(object.owner); + } else { + message.owner = ""; + } + if (object.pair !== undefined && object.pair !== null) { + message.pair = String(object.pair); + } else { + message.pair = ""; + } + if (object.drops !== undefined && object.drops !== null) { + message.drops = String(object.drops); + } else { + message.drops = ""; + } + if (object.product !== undefined && object.product !== null) { + message.product = String(object.product); + } else { + message.product = ""; + } + if (object.active !== undefined && object.active !== null) { + message.active = Boolean(object.active); + } else { + message.active = false; + } + return message; + }, + + toJSON(message: Drop): unknown { + const obj: any = {}; + message.uid !== undefined && (obj.uid = message.uid); + message.owner !== undefined && (obj.owner = message.owner); + message.pair !== undefined && (obj.pair = message.pair); + message.drops !== undefined && (obj.drops = message.drops); + message.product !== undefined && (obj.product = message.product); + message.active !== undefined && (obj.active = message.active); + return obj; + }, + + fromPartial(object: 
DeepPartial): Drop { + const message = { ...baseDrop } as Drop; + if (object.uid !== undefined && object.uid !== null) { + message.uid = object.uid; + } else { + message.uid = 0; + } + if (object.owner !== undefined && object.owner !== null) { + message.owner = object.owner; + } else { + message.owner = ""; + } + if (object.pair !== undefined && object.pair !== null) { + message.pair = object.pair; + } else { + message.pair = ""; + } + if (object.drops !== undefined && object.drops !== null) { + message.drops = object.drops; + } else { + message.drops = ""; + } + if (object.product !== undefined && object.product !== null) { + message.product = object.product; + } else { + message.product = ""; + } + if (object.active !== undefined && object.active !== null) { + message.active = object.active; + } else { + message.active = false; + } + return message; + }, +}; + +const baseDrops: object = { uids: 0, sum: "" }; + +export const Drops = { + encode(message: Drops, writer: Writer = Writer.create()): Writer { + writer.uint32(10).fork(); + for (const v of message.uids) { + writer.uint64(v); + } + writer.ldelim(); + if (message.sum !== "") { + writer.uint32(18).string(message.sum); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): Drops { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseDrops } as Drops; + message.uids = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if ((tag & 7) === 2) { + const end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) { + message.uids.push(longToNumber(reader.uint64() as Long)); + } + } else { + message.uids.push(longToNumber(reader.uint64() as Long)); + } + break; + case 2: + message.sum = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Drops { + const message = { ...baseDrops } as Drops; + message.uids = []; + if (object.uids !== undefined && object.uids !== null) { + for (const e of object.uids) { + message.uids.push(Number(e)); + } + } + if (object.sum !== undefined && object.sum !== null) { + message.sum = String(object.sum); + } else { + message.sum = ""; + } + return message; + }, + + toJSON(message: Drops): unknown { + const obj: any = {}; + if (message.uids) { + obj.uids = message.uids.map((e) => e); + } else { + obj.uids = []; + } + message.sum !== undefined && (obj.sum = message.sum); + return obj; + }, + + fromPartial(object: DeepPartial): Drops { + const message = { ...baseDrops } as Drops; + message.uids = []; + if (object.uids !== undefined && object.uids !== null) { + for (const e of object.uids) { + message.uids.push(e); + } + } + if (object.sum !== undefined && object.sum !== null) { + message.sum = object.sum; + } else { + message.sum = ""; + } + return message; + }, +}; + +const baseDropPairs: object = { pairs: "" }; + +export const DropPairs = { + encode(message: DropPairs, writer: Writer = Writer.create()): Writer { + for (const v of message.pairs) { + writer.uint32(10).string(v!); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): DropPairs { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseDropPairs } as DropPairs; + message.pairs = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.pairs.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DropPairs { + const message = { ...baseDropPairs } as DropPairs; + message.pairs = []; + if (object.pairs !== undefined && object.pairs !== null) { + for (const e of object.pairs) { + message.pairs.push(String(e)); + } + } + return message; + }, + + toJSON(message: DropPairs): unknown { + const obj: any = {}; + if (message.pairs) { + obj.pairs = message.pairs.map((e) => e); + } else { + obj.pairs = []; + } + return obj; + }, + + fromPartial(object: DeepPartial): DropPairs { + const message = { ...baseDropPairs } as DropPairs; + message.pairs = []; + if (object.pairs !== undefined && object.pairs !== null) { + for (const e of object.pairs) { + message.pairs.push(e); + } + } + return message; + }, +}; + +declare var self: any | undefined; +declare var window: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = Date | Function | Uint8Array | string | number | undefined; +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? 
{ [K in keyof T]?: DeepPartial } + : Partial; + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (util.Long !== Long) { + util.Long = Long as any; + configure(); +} diff --git a/vue/src/store/generated/market/pendulumlabs.market.market/module/types/market/genesis.ts b/vue/src/store/generated/market/pendulumlabs.market.market/module/types/market/genesis.ts new file mode 100644 index 00000000..17c7c7da --- /dev/null +++ b/vue/src/store/generated/market/pendulumlabs.market.market/module/types/market/genesis.ts @@ -0,0 +1,218 @@ +/* eslint-disable */ +import { Params } from "../market/params"; +import { Pool } from "../market/pool"; +import { Drop } from "../market/drop"; +import { Member } from "../market/member"; +import { Burnings } from "../market/burnings"; +import { Order } from "../market/order"; +import { Writer, Reader } from "protobufjs/minimal"; + +export const protobufPackage = "pendulumlabs.market.market"; + +/** GenesisState defines the market module's genesis state. 
*/ +export interface GenesisState { + params: Params | undefined; + poolList: Pool[]; + dropList: Drop[]; + memberList: Member[]; + burningsList: Burnings[]; + /** this line is used by starport scaffolding # genesis/proto/state */ + orderList: Order[]; +} + +const baseGenesisState: object = {}; + +export const GenesisState = { + encode(message: GenesisState, writer: Writer = Writer.create()): Writer { + if (message.params !== undefined) { + Params.encode(message.params, writer.uint32(10).fork()).ldelim(); + } + for (const v of message.poolList) { + Pool.encode(v!, writer.uint32(18).fork()).ldelim(); + } + for (const v of message.dropList) { + Drop.encode(v!, writer.uint32(26).fork()).ldelim(); + } + for (const v of message.memberList) { + Member.encode(v!, writer.uint32(34).fork()).ldelim(); + } + for (const v of message.burningsList) { + Burnings.encode(v!, writer.uint32(42).fork()).ldelim(); + } + for (const v of message.orderList) { + Order.encode(v!, writer.uint32(50).fork()).ldelim(); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): GenesisState { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseGenesisState } as GenesisState; + message.poolList = []; + message.dropList = []; + message.memberList = []; + message.burningsList = []; + message.orderList = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.params = Params.decode(reader, reader.uint32()); + break; + case 2: + message.poolList.push(Pool.decode(reader, reader.uint32())); + break; + case 3: + message.dropList.push(Drop.decode(reader, reader.uint32())); + break; + case 4: + message.memberList.push(Member.decode(reader, reader.uint32())); + break; + case 5: + message.burningsList.push(Burnings.decode(reader, reader.uint32())); + break; + case 6: + message.orderList.push(Order.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GenesisState { + const message = { ...baseGenesisState } as GenesisState; + message.poolList = []; + message.dropList = []; + message.memberList = []; + message.burningsList = []; + message.orderList = []; + if (object.params !== undefined && object.params !== null) { + message.params = Params.fromJSON(object.params); + } else { + message.params = undefined; + } + if (object.poolList !== undefined && object.poolList !== null) { + for (const e of object.poolList) { + message.poolList.push(Pool.fromJSON(e)); + } + } + if (object.dropList !== undefined && object.dropList !== null) { + for (const e of object.dropList) { + message.dropList.push(Drop.fromJSON(e)); + } + } + if (object.memberList !== undefined && object.memberList !== null) { + for (const e of object.memberList) { + message.memberList.push(Member.fromJSON(e)); + } + } + if (object.burningsList !== undefined && object.burningsList !== null) { + for (const e of object.burningsList) { + message.burningsList.push(Burnings.fromJSON(e)); + } + } + if (object.orderList !== undefined && object.orderList !== null) { 
+ for (const e of object.orderList) { + message.orderList.push(Order.fromJSON(e)); + } + } + return message; + }, + + toJSON(message: GenesisState): unknown { + const obj: any = {}; + message.params !== undefined && + (obj.params = message.params ? Params.toJSON(message.params) : undefined); + if (message.poolList) { + obj.poolList = message.poolList.map((e) => + e ? Pool.toJSON(e) : undefined + ); + } else { + obj.poolList = []; + } + if (message.dropList) { + obj.dropList = message.dropList.map((e) => + e ? Drop.toJSON(e) : undefined + ); + } else { + obj.dropList = []; + } + if (message.memberList) { + obj.memberList = message.memberList.map((e) => + e ? Member.toJSON(e) : undefined + ); + } else { + obj.memberList = []; + } + if (message.burningsList) { + obj.burningsList = message.burningsList.map((e) => + e ? Burnings.toJSON(e) : undefined + ); + } else { + obj.burningsList = []; + } + if (message.orderList) { + obj.orderList = message.orderList.map((e) => + e ? Order.toJSON(e) : undefined + ); + } else { + obj.orderList = []; + } + return obj; + }, + + fromPartial(object: DeepPartial): GenesisState { + const message = { ...baseGenesisState } as GenesisState; + message.poolList = []; + message.dropList = []; + message.memberList = []; + message.burningsList = []; + message.orderList = []; + if (object.params !== undefined && object.params !== null) { + message.params = Params.fromPartial(object.params); + } else { + message.params = undefined; + } + if (object.poolList !== undefined && object.poolList !== null) { + for (const e of object.poolList) { + message.poolList.push(Pool.fromPartial(e)); + } + } + if (object.dropList !== undefined && object.dropList !== null) { + for (const e of object.dropList) { + message.dropList.push(Drop.fromPartial(e)); + } + } + if (object.memberList !== undefined && object.memberList !== null) { + for (const e of object.memberList) { + message.memberList.push(Member.fromPartial(e)); + } + } + if (object.burningsList !== 
undefined && object.burningsList !== null) { + for (const e of object.burningsList) { + message.burningsList.push(Burnings.fromPartial(e)); + } + } + if (object.orderList !== undefined && object.orderList !== null) { + for (const e of object.orderList) { + message.orderList.push(Order.fromPartial(e)); + } + } + return message; + }, +}; + +type Builtin = Date | Function | Uint8Array | string | number | undefined; +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in keyof T]?: DeepPartial } + : Partial; diff --git a/vue/src/store/generated/market/pendulumlabs.market.market/module/types/market/member.ts b/vue/src/store/generated/market/pendulumlabs.market.market/module/types/market/member.ts new file mode 100644 index 00000000..235b9636 --- /dev/null +++ b/vue/src/store/generated/market/pendulumlabs.market.market/module/types/market/member.ts @@ -0,0 +1,213 @@ +/* eslint-disable */ +import * as Long from "long"; +import { util, configure, Writer, Reader } from "protobufjs/minimal"; + +export const protobufPackage = "pendulumlabs.market.market"; + +export interface Member { + pair: string; + denomA: string; + denomB: string; + balance: string; + previous: string; + limit: number; + stop: number; +} + +const baseMember: object = { + pair: "", + denomA: "", + denomB: "", + balance: "", + previous: "", + limit: 0, + stop: 0, +}; + +export const Member = { + encode(message: Member, writer: Writer = Writer.create()): Writer { + if (message.pair !== "") { + writer.uint32(10).string(message.pair); + } + if (message.denomA !== "") { + writer.uint32(18).string(message.denomA); + } + if (message.denomB !== "") { + writer.uint32(26).string(message.denomB); + } + if (message.balance !== "") { + writer.uint32(34).string(message.balance); + } + if (message.previous !== "") { + writer.uint32(42).string(message.previous); + } + if (message.limit !== 0) { + 
writer.uint32(48).uint64(message.limit); + } + if (message.stop !== 0) { + writer.uint32(56).uint64(message.stop); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): Member { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseMember } as Member; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.pair = reader.string(); + break; + case 2: + message.denomA = reader.string(); + break; + case 3: + message.denomB = reader.string(); + break; + case 4: + message.balance = reader.string(); + break; + case 5: + message.previous = reader.string(); + break; + case 6: + message.limit = longToNumber(reader.uint64() as Long); + break; + case 7: + message.stop = longToNumber(reader.uint64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Member { + const message = { ...baseMember } as Member; + if (object.pair !== undefined && object.pair !== null) { + message.pair = String(object.pair); + } else { + message.pair = ""; + } + if (object.denomA !== undefined && object.denomA !== null) { + message.denomA = String(object.denomA); + } else { + message.denomA = ""; + } + if (object.denomB !== undefined && object.denomB !== null) { + message.denomB = String(object.denomB); + } else { + message.denomB = ""; + } + if (object.balance !== undefined && object.balance !== null) { + message.balance = String(object.balance); + } else { + message.balance = ""; + } + if (object.previous !== undefined && object.previous !== null) { + message.previous = String(object.previous); + } else { + message.previous = ""; + } + if (object.limit !== undefined && object.limit !== null) { + message.limit = Number(object.limit); + } else { + message.limit = 0; + } + if (object.stop !== undefined && object.stop !== null) { + message.stop 
= Number(object.stop); + } else { + message.stop = 0; + } + return message; + }, + + toJSON(message: Member): unknown { + const obj: any = {}; + message.pair !== undefined && (obj.pair = message.pair); + message.denomA !== undefined && (obj.denomA = message.denomA); + message.denomB !== undefined && (obj.denomB = message.denomB); + message.balance !== undefined && (obj.balance = message.balance); + message.previous !== undefined && (obj.previous = message.previous); + message.limit !== undefined && (obj.limit = message.limit); + message.stop !== undefined && (obj.stop = message.stop); + return obj; + }, + + fromPartial(object: DeepPartial): Member { + const message = { ...baseMember } as Member; + if (object.pair !== undefined && object.pair !== null) { + message.pair = object.pair; + } else { + message.pair = ""; + } + if (object.denomA !== undefined && object.denomA !== null) { + message.denomA = object.denomA; + } else { + message.denomA = ""; + } + if (object.denomB !== undefined && object.denomB !== null) { + message.denomB = object.denomB; + } else { + message.denomB = ""; + } + if (object.balance !== undefined && object.balance !== null) { + message.balance = object.balance; + } else { + message.balance = ""; + } + if (object.previous !== undefined && object.previous !== null) { + message.previous = object.previous; + } else { + message.previous = ""; + } + if (object.limit !== undefined && object.limit !== null) { + message.limit = object.limit; + } else { + message.limit = 0; + } + if (object.stop !== undefined && object.stop !== null) { + message.stop = object.stop; + } else { + message.stop = 0; + } + return message; + }, +}; + +declare var self: any | undefined; +declare var window: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw 
"Unable to locate global object"; +})(); + +type Builtin = Date | Function | Uint8Array | string | number | undefined; +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in keyof T]?: DeepPartial } + : Partial; + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (util.Long !== Long) { + util.Long = Long as any; + configure(); +} diff --git a/vue/src/store/generated/market/pendulumlabs.market.market/module/types/market/order.ts b/vue/src/store/generated/market/pendulumlabs.market.market/module/types/market/order.ts new file mode 100644 index 00000000..83b2f76d --- /dev/null +++ b/vue/src/store/generated/market/pendulumlabs.market.market/module/types/market/order.ts @@ -0,0 +1,667 @@ +/* eslint-disable */ +import * as Long from "long"; +import { util, configure, Writer, Reader } from "protobufjs/minimal"; + +export const protobufPackage = "pendulumlabs.market.market"; + +export interface Order { + uid: number; + owner: string; + status: string; + orderType: string; + denomAsk: string; + denomBid: string; + amount: string; + rate: string[]; + prev: number; + next: number; + beg_time: number; + upd_time: number; +} + +export interface Orders { + uids: number[]; +} + +export interface OrderResponse { + uid: number; + owner: string; + status: string; + orderType: string; + denomAsk: string; + denomBid: string; + amount: string; + rate: string[]; + prev: number; + next: number; + beg_time: number; + upd_time: number; +} + +const baseOrder: object = { + uid: 0, + owner: "", + status: "", + orderType: "", + denomAsk: "", + denomBid: "", + amount: "", + rate: "", + prev: 0, + next: 0, + beg_time: 0, + upd_time: 0, +}; + +export const Order = { + encode(message: Order, writer: Writer = Writer.create()): Writer { + if 
(message.uid !== 0) { + writer.uint32(8).uint64(message.uid); + } + if (message.owner !== "") { + writer.uint32(18).string(message.owner); + } + if (message.status !== "") { + writer.uint32(26).string(message.status); + } + if (message.orderType !== "") { + writer.uint32(34).string(message.orderType); + } + if (message.denomAsk !== "") { + writer.uint32(42).string(message.denomAsk); + } + if (message.denomBid !== "") { + writer.uint32(50).string(message.denomBid); + } + if (message.amount !== "") { + writer.uint32(58).string(message.amount); + } + for (const v of message.rate) { + writer.uint32(66).string(v!); + } + if (message.prev !== 0) { + writer.uint32(72).uint64(message.prev); + } + if (message.next !== 0) { + writer.uint32(80).uint64(message.next); + } + if (message.beg_time !== 0) { + writer.uint32(88).int64(message.beg_time); + } + if (message.upd_time !== 0) { + writer.uint32(96).int64(message.upd_time); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): Order { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseOrder } as Order; + message.rate = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.uid = longToNumber(reader.uint64() as Long); + break; + case 2: + message.owner = reader.string(); + break; + case 3: + message.status = reader.string(); + break; + case 4: + message.orderType = reader.string(); + break; + case 5: + message.denomAsk = reader.string(); + break; + case 6: + message.denomBid = reader.string(); + break; + case 7: + message.amount = reader.string(); + break; + case 8: + message.rate.push(reader.string()); + break; + case 9: + message.prev = longToNumber(reader.uint64() as Long); + break; + case 10: + message.next = longToNumber(reader.uint64() as Long); + break; + case 11: + message.beg_time = longToNumber(reader.int64() as Long); + break; + case 12: + message.upd_time = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Order { + const message = { ...baseOrder } as Order; + message.rate = []; + if (object.uid !== undefined && object.uid !== null) { + message.uid = Number(object.uid); + } else { + message.uid = 0; + } + if (object.owner !== undefined && object.owner !== null) { + message.owner = String(object.owner); + } else { + message.owner = ""; + } + if (object.status !== undefined && object.status !== null) { + message.status = String(object.status); + } else { + message.status = ""; + } + if (object.orderType !== undefined && object.orderType !== null) { + message.orderType = String(object.orderType); + } else { + message.orderType = ""; + } + if (object.denomAsk !== undefined && object.denomAsk !== null) { + message.denomAsk = String(object.denomAsk); + } else { + message.denomAsk = ""; + } + if (object.denomBid !== undefined && object.denomBid !== null) { + message.denomBid = String(object.denomBid); + } else { + 
message.denomBid = ""; + } + if (object.amount !== undefined && object.amount !== null) { + message.amount = String(object.amount); + } else { + message.amount = ""; + } + if (object.rate !== undefined && object.rate !== null) { + for (const e of object.rate) { + message.rate.push(String(e)); + } + } + if (object.prev !== undefined && object.prev !== null) { + message.prev = Number(object.prev); + } else { + message.prev = 0; + } + if (object.next !== undefined && object.next !== null) { + message.next = Number(object.next); + } else { + message.next = 0; + } + if (object.beg_time !== undefined && object.beg_time !== null) { + message.beg_time = Number(object.beg_time); + } else { + message.beg_time = 0; + } + if (object.upd_time !== undefined && object.upd_time !== null) { + message.upd_time = Number(object.upd_time); + } else { + message.upd_time = 0; + } + return message; + }, + + toJSON(message: Order): unknown { + const obj: any = {}; + message.uid !== undefined && (obj.uid = message.uid); + message.owner !== undefined && (obj.owner = message.owner); + message.status !== undefined && (obj.status = message.status); + message.orderType !== undefined && (obj.orderType = message.orderType); + message.denomAsk !== undefined && (obj.denomAsk = message.denomAsk); + message.denomBid !== undefined && (obj.denomBid = message.denomBid); + message.amount !== undefined && (obj.amount = message.amount); + if (message.rate) { + obj.rate = message.rate.map((e) => e); + } else { + obj.rate = []; + } + message.prev !== undefined && (obj.prev = message.prev); + message.next !== undefined && (obj.next = message.next); + message.beg_time !== undefined && (obj.beg_time = message.beg_time); + message.upd_time !== undefined && (obj.upd_time = message.upd_time); + return obj; + }, + + fromPartial(object: DeepPartial): Order { + const message = { ...baseOrder } as Order; + message.rate = []; + if (object.uid !== undefined && object.uid !== null) { + message.uid = object.uid; + } else { 
+ message.uid = 0; + } + if (object.owner !== undefined && object.owner !== null) { + message.owner = object.owner; + } else { + message.owner = ""; + } + if (object.status !== undefined && object.status !== null) { + message.status = object.status; + } else { + message.status = ""; + } + if (object.orderType !== undefined && object.orderType !== null) { + message.orderType = object.orderType; + } else { + message.orderType = ""; + } + if (object.denomAsk !== undefined && object.denomAsk !== null) { + message.denomAsk = object.denomAsk; + } else { + message.denomAsk = ""; + } + if (object.denomBid !== undefined && object.denomBid !== null) { + message.denomBid = object.denomBid; + } else { + message.denomBid = ""; + } + if (object.amount !== undefined && object.amount !== null) { + message.amount = object.amount; + } else { + message.amount = ""; + } + if (object.rate !== undefined && object.rate !== null) { + for (const e of object.rate) { + message.rate.push(e); + } + } + if (object.prev !== undefined && object.prev !== null) { + message.prev = object.prev; + } else { + message.prev = 0; + } + if (object.next !== undefined && object.next !== null) { + message.next = object.next; + } else { + message.next = 0; + } + if (object.beg_time !== undefined && object.beg_time !== null) { + message.beg_time = object.beg_time; + } else { + message.beg_time = 0; + } + if (object.upd_time !== undefined && object.upd_time !== null) { + message.upd_time = object.upd_time; + } else { + message.upd_time = 0; + } + return message; + }, +}; + +const baseOrders: object = { uids: 0 }; + +export const Orders = { + encode(message: Orders, writer: Writer = Writer.create()): Writer { + writer.uint32(10).fork(); + for (const v of message.uids) { + writer.uint64(v); + } + writer.ldelim(); + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): Orders { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseOrders } as Orders; + message.uids = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if ((tag & 7) === 2) { + const end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) { + message.uids.push(longToNumber(reader.uint64() as Long)); + } + } else { + message.uids.push(longToNumber(reader.uint64() as Long)); + } + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Orders { + const message = { ...baseOrders } as Orders; + message.uids = []; + if (object.uids !== undefined && object.uids !== null) { + for (const e of object.uids) { + message.uids.push(Number(e)); + } + } + return message; + }, + + toJSON(message: Orders): unknown { + const obj: any = {}; + if (message.uids) { + obj.uids = message.uids.map((e) => e); + } else { + obj.uids = []; + } + return obj; + }, + + fromPartial(object: DeepPartial): Orders { + const message = { ...baseOrders } as Orders; + message.uids = []; + if (object.uids !== undefined && object.uids !== null) { + for (const e of object.uids) { + message.uids.push(e); + } + } + return message; + }, +}; + +const baseOrderResponse: object = { + uid: 0, + owner: "", + status: "", + orderType: "", + denomAsk: "", + denomBid: "", + amount: "", + rate: "", + prev: 0, + next: 0, + beg_time: 0, + upd_time: 0, +}; + +export const OrderResponse = { + encode(message: OrderResponse, writer: Writer = Writer.create()): Writer { + if (message.uid !== 0) { + writer.uint32(8).uint64(message.uid); + } + if (message.owner !== "") { + writer.uint32(18).string(message.owner); + } + if (message.status !== "") { + writer.uint32(26).string(message.status); + } + if (message.orderType !== "") { + writer.uint32(34).string(message.orderType); + } + if (message.denomAsk !== "") { + writer.uint32(42).string(message.denomAsk); + } + if (message.denomBid !== "") { + 
writer.uint32(50).string(message.denomBid); + } + if (message.amount !== "") { + writer.uint32(58).string(message.amount); + } + for (const v of message.rate) { + writer.uint32(66).string(v!); + } + if (message.prev !== 0) { + writer.uint32(72).uint64(message.prev); + } + if (message.next !== 0) { + writer.uint32(80).uint64(message.next); + } + if (message.beg_time !== 0) { + writer.uint32(88).int64(message.beg_time); + } + if (message.upd_time !== 0) { + writer.uint32(96).int64(message.upd_time); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): OrderResponse { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseOrderResponse } as OrderResponse; + message.rate = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.uid = longToNumber(reader.uint64() as Long); + break; + case 2: + message.owner = reader.string(); + break; + case 3: + message.status = reader.string(); + break; + case 4: + message.orderType = reader.string(); + break; + case 5: + message.denomAsk = reader.string(); + break; + case 6: + message.denomBid = reader.string(); + break; + case 7: + message.amount = reader.string(); + break; + case 8: + message.rate.push(reader.string()); + break; + case 9: + message.prev = longToNumber(reader.uint64() as Long); + break; + case 10: + message.next = longToNumber(reader.uint64() as Long); + break; + case 11: + message.beg_time = longToNumber(reader.int64() as Long); + break; + case 12: + message.upd_time = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): OrderResponse { + const message = { ...baseOrderResponse } as OrderResponse; + message.rate = []; + if (object.uid !== undefined && object.uid !== null) { + message.uid = Number(object.uid); + } else { + 
message.uid = 0; + } + if (object.owner !== undefined && object.owner !== null) { + message.owner = String(object.owner); + } else { + message.owner = ""; + } + if (object.status !== undefined && object.status !== null) { + message.status = String(object.status); + } else { + message.status = ""; + } + if (object.orderType !== undefined && object.orderType !== null) { + message.orderType = String(object.orderType); + } else { + message.orderType = ""; + } + if (object.denomAsk !== undefined && object.denomAsk !== null) { + message.denomAsk = String(object.denomAsk); + } else { + message.denomAsk = ""; + } + if (object.denomBid !== undefined && object.denomBid !== null) { + message.denomBid = String(object.denomBid); + } else { + message.denomBid = ""; + } + if (object.amount !== undefined && object.amount !== null) { + message.amount = String(object.amount); + } else { + message.amount = ""; + } + if (object.rate !== undefined && object.rate !== null) { + for (const e of object.rate) { + message.rate.push(String(e)); + } + } + if (object.prev !== undefined && object.prev !== null) { + message.prev = Number(object.prev); + } else { + message.prev = 0; + } + if (object.next !== undefined && object.next !== null) { + message.next = Number(object.next); + } else { + message.next = 0; + } + if (object.beg_time !== undefined && object.beg_time !== null) { + message.beg_time = Number(object.beg_time); + } else { + message.beg_time = 0; + } + if (object.upd_time !== undefined && object.upd_time !== null) { + message.upd_time = Number(object.upd_time); + } else { + message.upd_time = 0; + } + return message; + }, + + toJSON(message: OrderResponse): unknown { + const obj: any = {}; + message.uid !== undefined && (obj.uid = message.uid); + message.owner !== undefined && (obj.owner = message.owner); + message.status !== undefined && (obj.status = message.status); + message.orderType !== undefined && (obj.orderType = message.orderType); + message.denomAsk !== undefined && 
(obj.denomAsk = message.denomAsk); + message.denomBid !== undefined && (obj.denomBid = message.denomBid); + message.amount !== undefined && (obj.amount = message.amount); + if (message.rate) { + obj.rate = message.rate.map((e) => e); + } else { + obj.rate = []; + } + message.prev !== undefined && (obj.prev = message.prev); + message.next !== undefined && (obj.next = message.next); + message.beg_time !== undefined && (obj.beg_time = message.beg_time); + message.upd_time !== undefined && (obj.upd_time = message.upd_time); + return obj; + }, + + fromPartial(object: DeepPartial): OrderResponse { + const message = { ...baseOrderResponse } as OrderResponse; + message.rate = []; + if (object.uid !== undefined && object.uid !== null) { + message.uid = object.uid; + } else { + message.uid = 0; + } + if (object.owner !== undefined && object.owner !== null) { + message.owner = object.owner; + } else { + message.owner = ""; + } + if (object.status !== undefined && object.status !== null) { + message.status = object.status; + } else { + message.status = ""; + } + if (object.orderType !== undefined && object.orderType !== null) { + message.orderType = object.orderType; + } else { + message.orderType = ""; + } + if (object.denomAsk !== undefined && object.denomAsk !== null) { + message.denomAsk = object.denomAsk; + } else { + message.denomAsk = ""; + } + if (object.denomBid !== undefined && object.denomBid !== null) { + message.denomBid = object.denomBid; + } else { + message.denomBid = ""; + } + if (object.amount !== undefined && object.amount !== null) { + message.amount = object.amount; + } else { + message.amount = ""; + } + if (object.rate !== undefined && object.rate !== null) { + for (const e of object.rate) { + message.rate.push(e); + } + } + if (object.prev !== undefined && object.prev !== null) { + message.prev = object.prev; + } else { + message.prev = 0; + } + if (object.next !== undefined && object.next !== null) { + message.next = object.next; + } else { + 
message.next = 0; + } + if (object.beg_time !== undefined && object.beg_time !== null) { + message.beg_time = object.beg_time; + } else { + message.beg_time = 0; + } + if (object.upd_time !== undefined && object.upd_time !== null) { + message.upd_time = object.upd_time; + } else { + message.upd_time = 0; + } + return message; + }, +}; + +declare var self: any | undefined; +declare var window: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = Date | Function | Uint8Array | string | number | undefined; +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in keyof T]?: DeepPartial } + : Partial; + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (util.Long !== Long) { + util.Long = Long as any; + configure(); +} diff --git a/vue/src/store/generated/market/pendulumlabs.market.market/module/types/market/params.ts b/vue/src/store/generated/market/pendulumlabs.market.market/module/types/market/params.ts new file mode 100644 index 00000000..ef9339ff --- /dev/null +++ b/vue/src/store/generated/market/pendulumlabs.market.market/module/types/market/params.ts @@ -0,0 +1,141 @@ +/* eslint-disable */ +import { Writer, Reader } from "protobufjs/minimal"; + +export const protobufPackage = "pendulumlabs.market.market"; + +/** Params defines the parameters for the module. 
*/ +export interface Params { + /** + * leader earnings rates + * 1,2,3 Comma separated, no space + */ + earn_rates: string; + /** pool burning rate */ + burn_rate: string; + /** burn coin */ + burn_coin: string; + /** market_fee (parameter / 10000), 9999 representing as 99.99% */ + market_fee: string; +} + +const baseParams: object = { + earn_rates: "", + burn_rate: "", + burn_coin: "", + market_fee: "", +}; + +export const Params = { + encode(message: Params, writer: Writer = Writer.create()): Writer { + if (message.earn_rates !== "") { + writer.uint32(10).string(message.earn_rates); + } + if (message.burn_rate !== "") { + writer.uint32(18).string(message.burn_rate); + } + if (message.burn_coin !== "") { + writer.uint32(26).string(message.burn_coin); + } + if (message.market_fee !== "") { + writer.uint32(34).string(message.market_fee); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): Params { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseParams } as Params; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.earn_rates = reader.string(); + break; + case 2: + message.burn_rate = reader.string(); + break; + case 3: + message.burn_coin = reader.string(); + break; + case 4: + message.market_fee = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Params { + const message = { ...baseParams } as Params; + if (object.earn_rates !== undefined && object.earn_rates !== null) { + message.earn_rates = String(object.earn_rates); + } else { + message.earn_rates = ""; + } + if (object.burn_rate !== undefined && object.burn_rate !== null) { + message.burn_rate = String(object.burn_rate); + } else { + message.burn_rate = ""; + } + if (object.burn_coin !== undefined && object.burn_coin !== null) { + message.burn_coin = String(object.burn_coin); + } else { + message.burn_coin = ""; + } + if (object.market_fee !== undefined && object.market_fee !== null) { + message.market_fee = String(object.market_fee); + } else { + message.market_fee = ""; + } + return message; + }, + + toJSON(message: Params): unknown { + const obj: any = {}; + message.earn_rates !== undefined && (obj.earn_rates = message.earn_rates); + message.burn_rate !== undefined && (obj.burn_rate = message.burn_rate); + message.burn_coin !== undefined && (obj.burn_coin = message.burn_coin); + message.market_fee !== undefined && (obj.market_fee = message.market_fee); + return obj; + }, + + fromPartial(object: DeepPartial): Params { + const message = { ...baseParams } as Params; + if (object.earn_rates !== undefined && object.earn_rates !== null) { + message.earn_rates = object.earn_rates; + } else { + message.earn_rates = ""; + } + if (object.burn_rate !== undefined && object.burn_rate !== null) { + message.burn_rate = object.burn_rate; + } else { + message.burn_rate = 
""; + } + if (object.burn_coin !== undefined && object.burn_coin !== null) { + message.burn_coin = object.burn_coin; + } else { + message.burn_coin = ""; + } + if (object.market_fee !== undefined && object.market_fee !== null) { + message.market_fee = object.market_fee; + } else { + message.market_fee = ""; + } + return message; + }, +}; + +type Builtin = Date | Function | Uint8Array | string | number | undefined; +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in keyof T]?: DeepPartial } + : Partial; diff --git a/vue/src/store/generated/market/pendulumlabs.market.market/module/types/market/pool.ts b/vue/src/store/generated/market/pendulumlabs.market.market/module/types/market/pool.ts new file mode 100644 index 00000000..72676705 --- /dev/null +++ b/vue/src/store/generated/market/pendulumlabs.market.market/module/types/market/pool.ts @@ -0,0 +1,398 @@ +/* eslint-disable */ +import * as Long from "long"; +import { util, configure, Writer, Reader } from "protobufjs/minimal"; + +export const protobufPackage = "pendulumlabs.market.market"; + +export interface Pool { + pair: string; + denom1: string; + denom2: string; + volume1: Volume | undefined; + volume2: Volume | undefined; + leaders: Leader[]; + drops: string; + history: number; +} + +export interface Leader { + address: string; + drops: string; +} + +export interface Volume { + denom: string; + amount: string; +} + +const basePool: object = { + pair: "", + denom1: "", + denom2: "", + drops: "", + history: 0, +}; + +export const Pool = { + encode(message: Pool, writer: Writer = Writer.create()): Writer { + if (message.pair !== "") { + writer.uint32(10).string(message.pair); + } + if (message.denom1 !== "") { + writer.uint32(18).string(message.denom1); + } + if (message.denom2 !== "") { + writer.uint32(26).string(message.denom2); + } + if (message.volume1 !== undefined) { + Volume.encode(message.volume1, 
writer.uint32(34).fork()).ldelim(); + } + if (message.volume2 !== undefined) { + Volume.encode(message.volume2, writer.uint32(42).fork()).ldelim(); + } + for (const v of message.leaders) { + Leader.encode(v!, writer.uint32(50).fork()).ldelim(); + } + if (message.drops !== "") { + writer.uint32(58).string(message.drops); + } + if (message.history !== 0) { + writer.uint32(64).uint64(message.history); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): Pool { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...basePool } as Pool; + message.leaders = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.pair = reader.string(); + break; + case 2: + message.denom1 = reader.string(); + break; + case 3: + message.denom2 = reader.string(); + break; + case 4: + message.volume1 = Volume.decode(reader, reader.uint32()); + break; + case 5: + message.volume2 = Volume.decode(reader, reader.uint32()); + break; + case 6: + message.leaders.push(Leader.decode(reader, reader.uint32())); + break; + case 7: + message.drops = reader.string(); + break; + case 8: + message.history = longToNumber(reader.uint64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Pool { + const message = { ...basePool } as Pool; + message.leaders = []; + if (object.pair !== undefined && object.pair !== null) { + message.pair = String(object.pair); + } else { + message.pair = ""; + } + if (object.denom1 !== undefined && object.denom1 !== null) { + message.denom1 = String(object.denom1); + } else { + message.denom1 = ""; + } + if (object.denom2 !== undefined && object.denom2 !== null) { + message.denom2 = String(object.denom2); + } else { + message.denom2 = ""; + } + if (object.volume1 !== undefined && object.volume1 !== null) { + 
message.volume1 = Volume.fromJSON(object.volume1); + } else { + message.volume1 = undefined; + } + if (object.volume2 !== undefined && object.volume2 !== null) { + message.volume2 = Volume.fromJSON(object.volume2); + } else { + message.volume2 = undefined; + } + if (object.leaders !== undefined && object.leaders !== null) { + for (const e of object.leaders) { + message.leaders.push(Leader.fromJSON(e)); + } + } + if (object.drops !== undefined && object.drops !== null) { + message.drops = String(object.drops); + } else { + message.drops = ""; + } + if (object.history !== undefined && object.history !== null) { + message.history = Number(object.history); + } else { + message.history = 0; + } + return message; + }, + + toJSON(message: Pool): unknown { + const obj: any = {}; + message.pair !== undefined && (obj.pair = message.pair); + message.denom1 !== undefined && (obj.denom1 = message.denom1); + message.denom2 !== undefined && (obj.denom2 = message.denom2); + message.volume1 !== undefined && + (obj.volume1 = message.volume1 + ? Volume.toJSON(message.volume1) + : undefined); + message.volume2 !== undefined && + (obj.volume2 = message.volume2 + ? Volume.toJSON(message.volume2) + : undefined); + if (message.leaders) { + obj.leaders = message.leaders.map((e) => + e ? 
Leader.toJSON(e) : undefined + ); + } else { + obj.leaders = []; + } + message.drops !== undefined && (obj.drops = message.drops); + message.history !== undefined && (obj.history = message.history); + return obj; + }, + + fromPartial(object: DeepPartial): Pool { + const message = { ...basePool } as Pool; + message.leaders = []; + if (object.pair !== undefined && object.pair !== null) { + message.pair = object.pair; + } else { + message.pair = ""; + } + if (object.denom1 !== undefined && object.denom1 !== null) { + message.denom1 = object.denom1; + } else { + message.denom1 = ""; + } + if (object.denom2 !== undefined && object.denom2 !== null) { + message.denom2 = object.denom2; + } else { + message.denom2 = ""; + } + if (object.volume1 !== undefined && object.volume1 !== null) { + message.volume1 = Volume.fromPartial(object.volume1); + } else { + message.volume1 = undefined; + } + if (object.volume2 !== undefined && object.volume2 !== null) { + message.volume2 = Volume.fromPartial(object.volume2); + } else { + message.volume2 = undefined; + } + if (object.leaders !== undefined && object.leaders !== null) { + for (const e of object.leaders) { + message.leaders.push(Leader.fromPartial(e)); + } + } + if (object.drops !== undefined && object.drops !== null) { + message.drops = object.drops; + } else { + message.drops = ""; + } + if (object.history !== undefined && object.history !== null) { + message.history = object.history; + } else { + message.history = 0; + } + return message; + }, +}; + +const baseLeader: object = { address: "", drops: "" }; + +export const Leader = { + encode(message: Leader, writer: Writer = Writer.create()): Writer { + if (message.address !== "") { + writer.uint32(10).string(message.address); + } + if (message.drops !== "") { + writer.uint32(18).string(message.drops); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): Leader { + const reader = input instanceof Uint8Array ? 
new Reader(input) : input; + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseLeader } as Leader; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.address = reader.string(); + break; + case 2: + message.drops = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Leader { + const message = { ...baseLeader } as Leader; + if (object.address !== undefined && object.address !== null) { + message.address = String(object.address); + } else { + message.address = ""; + } + if (object.drops !== undefined && object.drops !== null) { + message.drops = String(object.drops); + } else { + message.drops = ""; + } + return message; + }, + + toJSON(message: Leader): unknown { + const obj: any = {}; + message.address !== undefined && (obj.address = message.address); + message.drops !== undefined && (obj.drops = message.drops); + return obj; + }, + + fromPartial(object: DeepPartial): Leader { + const message = { ...baseLeader } as Leader; + if (object.address !== undefined && object.address !== null) { + message.address = object.address; + } else { + message.address = ""; + } + if (object.drops !== undefined && object.drops !== null) { + message.drops = object.drops; + } else { + message.drops = ""; + } + return message; + }, +}; + +const baseVolume: object = { denom: "", amount: "" }; + +export const Volume = { + encode(message: Volume, writer: Writer = Writer.create()): Writer { + if (message.denom !== "") { + writer.uint32(10).string(message.denom); + } + if (message.amount !== "") { + writer.uint32(18).string(message.amount); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): Volume { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseVolume } as Volume; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.denom = reader.string(); + break; + case 2: + message.amount = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Volume { + const message = { ...baseVolume } as Volume; + if (object.denom !== undefined && object.denom !== null) { + message.denom = String(object.denom); + } else { + message.denom = ""; + } + if (object.amount !== undefined && object.amount !== null) { + message.amount = String(object.amount); + } else { + message.amount = ""; + } + return message; + }, + + toJSON(message: Volume): unknown { + const obj: any = {}; + message.denom !== undefined && (obj.denom = message.denom); + message.amount !== undefined && (obj.amount = message.amount); + return obj; + }, + + fromPartial(object: DeepPartial): Volume { + const message = { ...baseVolume } as Volume; + if (object.denom !== undefined && object.denom !== null) { + message.denom = object.denom; + } else { + message.denom = ""; + } + if (object.amount !== undefined && object.amount !== null) { + message.amount = object.amount; + } else { + message.amount = ""; + } + return message; + }, +}; + +declare var self: any | undefined; +declare var window: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = Date | Function | Uint8Array | string | number | undefined; +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? 
{ [K in keyof T]?: DeepPartial } + : Partial; + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (util.Long !== Long) { + util.Long = Long as any; + configure(); +} diff --git a/vue/src/store/generated/market/pendulumlabs.market.market/module/types/market/query.ts b/vue/src/store/generated/market/pendulumlabs.market.market/module/types/market/query.ts new file mode 100644 index 00000000..aae55ab8 --- /dev/null +++ b/vue/src/store/generated/market/pendulumlabs.market.market/module/types/market/query.ts @@ -0,0 +1,4916 @@ +/* eslint-disable */ +import { Reader, util, configure, Writer } from "protobufjs/minimal"; +import * as Long from "long"; +import { Params } from "../market/params"; +import { Pool, Volume } from "../market/pool"; +import { + PageRequest, + PageResponse, +} from "../cosmos/base/query/v1beta1/pagination"; +import { Drop } from "../market/drop"; +import { Member } from "../market/member"; +import { Burnings } from "../market/burnings"; +import { Order, Orders, OrderResponse } from "../market/order"; + +export const protobufPackage = "pendulumlabs.market.market"; + +/** QueryParamsRequest is request type for the Query/Params RPC method. */ +export interface QueryParamsRequest {} + +/** QueryParamsResponse is response type for the Query/Params RPC method. */ +export interface QueryParamsResponse { + /** params holds all the parameters of this module. 
*/ + params: Params | undefined; +} + +export interface QueryGetPoolRequest { + pair: string; +} + +export interface QueryGetPoolResponse { + pool: Pool | undefined; +} + +export interface QueryAllPoolRequest { + pagination: PageRequest | undefined; +} + +export interface QueryAllPoolResponse { + pool: Pool[]; + pagination: PageResponse | undefined; +} + +export interface QueryVolumeRequest { + denom: string; +} + +export interface QueryVolumeResponse { + amount: string; +} + +export interface QueryAllVolumeRequest { + pagination: PageRequest | undefined; +} + +export interface QueryAllVolumeResponse { + volumes: Volume[]; + pagination: PageResponse | undefined; +} + +export interface QueryBurnedRequest {} + +export interface QueryBurnedResponse { + denom: string; + amount: string; +} + +export interface QueryDropRequest { + uid: number; +} + +export interface QueryDropCoinRequest { + denomA: string; + denomB: string; + amountA: string; +} + +export interface QueryDropCoinResponse { + drops: string; + amountB: string; +} + +export interface QueryDropResponse { + drop: Drop | undefined; +} + +export interface QueryDropAmountsRequest { + uid: number; +} + +export interface QueryDropAmountsResponse { + denom1: string; + denom2: string; + amount1: string; + amount2: string; +} + +export interface QueryDropsToCoinsRequest { + pair: string; + drops: string; +} + +export interface QueryDropPairsRequest { + address: string; +} + +export interface QueryDropPairsResponse { + pairs: string[]; +} + +export interface QueryDropOwnerPairRequest { + address: string; + pair: string; + pagination: PageRequest | undefined; +} + +export interface QueryDropOwnerPairSumRequest { + address: string; + pair: string; +} + +export interface QueryDropOwnerPairSumResponse { + sum: string; +} + +export interface QueryDropOwnerPairUidsRequest { + address: string; + pair: string; + pagination: PageRequest | undefined; +} + +export interface QueryUidsResponse { + uids: number[]; + pagination: 
PageResponse | undefined; +} + +export interface QueryDropOwnerPairDetailRequest { + address: string; + pair: string; + pagination: PageRequest | undefined; +} + +export interface QueryAllDropRequest { + pagination: PageRequest | undefined; +} + +export interface QueryDropsResponse { + drops: Drop[]; + pagination: PageResponse | undefined; +} + +export interface QueryGetMemberRequest { + denomA: string; + denomB: string; +} + +export interface QueryGetMemberResponse { + member: Member | undefined; +} + +export interface QueryAllMemberRequest { + pagination: PageRequest | undefined; +} + +export interface QueryAllMemberResponse { + member: Member[]; + pagination: PageResponse | undefined; +} + +export interface QueryGetBurningsRequest { + denom: string; +} + +export interface QueryGetBurningsResponse { + burnings: Burnings | undefined; +} + +export interface QueryAllBurningsRequest { + pagination: PageRequest | undefined; +} + +export interface QueryAllBurningsResponse { + burnings: Burnings[]; + pagination: PageResponse | undefined; +} + +export interface QueryOrderRequest { + uid: number; +} + +export interface QueryOrderResponse { + order: Order | undefined; +} + +export interface QueryOrdersResponse { + orders: Order[]; + pagination: PageResponse | undefined; +} + +export interface QueryAllOrderRequest { + pagination: PageRequest | undefined; +} + +export interface QueryOrderOwnerRequest { + address: string; + pagination: PageRequest | undefined; +} + +export interface QueryOrderOwnerUidsResponse { + orders: Orders | undefined; + pagination: PageResponse | undefined; +} + +export interface QueryOrderOwnerPairRequest { + address: string; + pair: string; + pagination: PageRequest | undefined; +} + +export interface QueryOrderOwnerPairResponse { + order: Order[]; + pagination: PageResponse | undefined; +} + +export interface QueryBookRequest { + denomA: string; + denomB: string; + orderType: string; + pagination: PageRequest | undefined; +} + +export interface 
QueryBookResponse { + book: OrderResponse[]; + pagination: PageResponse | undefined; +} + +export interface QueryBookendsRequest { + coinA: string; + coinB: string; + orderType: string; + rate: string[]; +} + +export interface QueryBookendsResponse { + coinA: string; + coinB: string; + orderType: string; + rate: string[]; + prev: number; + next: number; +} + +export interface QueryHistoryRequest { + pair: string; + length: string; + pagination: PageRequest | undefined; +} + +export interface QueryHistoryResponse { + history: OrderResponse[]; + pagination: PageResponse | undefined; +} + +/** Market Quote: denom is the denom that is input for quote */ +export interface QueryQuoteRequest { + denomAsk: string; + denomBid: string; + denomAmount: string; + amount: string; +} + +export interface QueryQuoteResponse { + denom: string; + amount: string; +} + +const baseQueryParamsRequest: object = {}; + +export const QueryParamsRequest = { + encode(_: QueryParamsRequest, writer: Writer = Writer.create()): Writer { + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): QueryParamsRequest { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseQueryParamsRequest } as QueryParamsRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(_: any): QueryParamsRequest { + const message = { ...baseQueryParamsRequest } as QueryParamsRequest; + return message; + }, + + toJSON(_: QueryParamsRequest): unknown { + const obj: any = {}; + return obj; + }, + + fromPartial(_: DeepPartial): QueryParamsRequest { + const message = { ...baseQueryParamsRequest } as QueryParamsRequest; + return message; + }, +}; + +const baseQueryParamsResponse: object = {}; + +export const QueryParamsResponse = { + encode( + message: QueryParamsResponse, + writer: Writer = Writer.create() + ): Writer { + if (message.params !== undefined) { + Params.encode(message.params, writer.uint32(10).fork()).ldelim(); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): QueryParamsResponse { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseQueryParamsResponse } as QueryParamsResponse; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.params = Params.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): QueryParamsResponse { + const message = { ...baseQueryParamsResponse } as QueryParamsResponse; + if (object.params !== undefined && object.params !== null) { + message.params = Params.fromJSON(object.params); + } else { + message.params = undefined; + } + return message; + }, + + toJSON(message: QueryParamsResponse): unknown { + const obj: any = {}; + message.params !== undefined && + (obj.params = message.params ? 
Params.toJSON(message.params) : undefined); + return obj; + }, + + fromPartial(object: DeepPartial): QueryParamsResponse { + const message = { ...baseQueryParamsResponse } as QueryParamsResponse; + if (object.params !== undefined && object.params !== null) { + message.params = Params.fromPartial(object.params); + } else { + message.params = undefined; + } + return message; + }, +}; + +const baseQueryGetPoolRequest: object = { pair: "" }; + +export const QueryGetPoolRequest = { + encode( + message: QueryGetPoolRequest, + writer: Writer = Writer.create() + ): Writer { + if (message.pair !== "") { + writer.uint32(10).string(message.pair); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): QueryGetPoolRequest { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseQueryGetPoolRequest } as QueryGetPoolRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.pair = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): QueryGetPoolRequest { + const message = { ...baseQueryGetPoolRequest } as QueryGetPoolRequest; + if (object.pair !== undefined && object.pair !== null) { + message.pair = String(object.pair); + } else { + message.pair = ""; + } + return message; + }, + + toJSON(message: QueryGetPoolRequest): unknown { + const obj: any = {}; + message.pair !== undefined && (obj.pair = message.pair); + return obj; + }, + + fromPartial(object: DeepPartial): QueryGetPoolRequest { + const message = { ...baseQueryGetPoolRequest } as QueryGetPoolRequest; + if (object.pair !== undefined && object.pair !== null) { + message.pair = object.pair; + } else { + message.pair = ""; + } + return message; + }, +}; + +const baseQueryGetPoolResponse: object = {}; + +export const QueryGetPoolResponse = { + encode( + 
message: QueryGetPoolResponse, + writer: Writer = Writer.create() + ): Writer { + if (message.pool !== undefined) { + Pool.encode(message.pool, writer.uint32(10).fork()).ldelim(); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): QueryGetPoolResponse { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseQueryGetPoolResponse } as QueryGetPoolResponse; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.pool = Pool.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): QueryGetPoolResponse { + const message = { ...baseQueryGetPoolResponse } as QueryGetPoolResponse; + if (object.pool !== undefined && object.pool !== null) { + message.pool = Pool.fromJSON(object.pool); + } else { + message.pool = undefined; + } + return message; + }, + + toJSON(message: QueryGetPoolResponse): unknown { + const obj: any = {}; + message.pool !== undefined && + (obj.pool = message.pool ? Pool.toJSON(message.pool) : undefined); + return obj; + }, + + fromPartial(object: DeepPartial): QueryGetPoolResponse { + const message = { ...baseQueryGetPoolResponse } as QueryGetPoolResponse; + if (object.pool !== undefined && object.pool !== null) { + message.pool = Pool.fromPartial(object.pool); + } else { + message.pool = undefined; + } + return message; + }, +}; + +const baseQueryAllPoolRequest: object = {}; + +export const QueryAllPoolRequest = { + encode( + message: QueryAllPoolRequest, + writer: Writer = Writer.create() + ): Writer { + if (message.pagination !== undefined) { + PageRequest.encode(message.pagination, writer.uint32(10).fork()).ldelim(); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): QueryAllPoolRequest { + const reader = input instanceof Uint8Array ? 
new Reader(input) : input; + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseQueryAllPoolRequest } as QueryAllPoolRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.pagination = PageRequest.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): QueryAllPoolRequest { + const message = { ...baseQueryAllPoolRequest } as QueryAllPoolRequest; + if (object.pagination !== undefined && object.pagination !== null) { + message.pagination = PageRequest.fromJSON(object.pagination); + } else { + message.pagination = undefined; + } + return message; + }, + + toJSON(message: QueryAllPoolRequest): unknown { + const obj: any = {}; + message.pagination !== undefined && + (obj.pagination = message.pagination + ? PageRequest.toJSON(message.pagination) + : undefined); + return obj; + }, + + fromPartial(object: DeepPartial): QueryAllPoolRequest { + const message = { ...baseQueryAllPoolRequest } as QueryAllPoolRequest; + if (object.pagination !== undefined && object.pagination !== null) { + message.pagination = PageRequest.fromPartial(object.pagination); + } else { + message.pagination = undefined; + } + return message; + }, +}; + +const baseQueryAllPoolResponse: object = {}; + +export const QueryAllPoolResponse = { + encode( + message: QueryAllPoolResponse, + writer: Writer = Writer.create() + ): Writer { + for (const v of message.pool) { + Pool.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.pagination !== undefined) { + PageResponse.encode( + message.pagination, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): QueryAllPoolResponse { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseQueryAllPoolResponse } as QueryAllPoolResponse; + message.pool = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.pool.push(Pool.decode(reader, reader.uint32())); + break; + case 2: + message.pagination = PageResponse.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): QueryAllPoolResponse { + const message = { ...baseQueryAllPoolResponse } as QueryAllPoolResponse; + message.pool = []; + if (object.pool !== undefined && object.pool !== null) { + for (const e of object.pool) { + message.pool.push(Pool.fromJSON(e)); + } + } + if (object.pagination !== undefined && object.pagination !== null) { + message.pagination = PageResponse.fromJSON(object.pagination); + } else { + message.pagination = undefined; + } + return message; + }, + + toJSON(message: QueryAllPoolResponse): unknown { + const obj: any = {}; + if (message.pool) { + obj.pool = message.pool.map((e) => (e ? Pool.toJSON(e) : undefined)); + } else { + obj.pool = []; + } + message.pagination !== undefined && + (obj.pagination = message.pagination + ? 
PageResponse.toJSON(message.pagination) + : undefined); + return obj; + }, + + fromPartial(object: DeepPartial): QueryAllPoolResponse { + const message = { ...baseQueryAllPoolResponse } as QueryAllPoolResponse; + message.pool = []; + if (object.pool !== undefined && object.pool !== null) { + for (const e of object.pool) { + message.pool.push(Pool.fromPartial(e)); + } + } + if (object.pagination !== undefined && object.pagination !== null) { + message.pagination = PageResponse.fromPartial(object.pagination); + } else { + message.pagination = undefined; + } + return message; + }, +}; + +const baseQueryVolumeRequest: object = { denom: "" }; + +export const QueryVolumeRequest = { + encode( + message: QueryVolumeRequest, + writer: Writer = Writer.create() + ): Writer { + if (message.denom !== "") { + writer.uint32(10).string(message.denom); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): QueryVolumeRequest { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseQueryVolumeRequest } as QueryVolumeRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.denom = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): QueryVolumeRequest { + const message = { ...baseQueryVolumeRequest } as QueryVolumeRequest; + if (object.denom !== undefined && object.denom !== null) { + message.denom = String(object.denom); + } else { + message.denom = ""; + } + return message; + }, + + toJSON(message: QueryVolumeRequest): unknown { + const obj: any = {}; + message.denom !== undefined && (obj.denom = message.denom); + return obj; + }, + + fromPartial(object: DeepPartial): QueryVolumeRequest { + const message = { ...baseQueryVolumeRequest } as QueryVolumeRequest; + if (object.denom !== undefined && object.denom !== null) { + message.denom = object.denom; + } else { + message.denom = ""; + } + return message; + }, +}; + +const baseQueryVolumeResponse: object = { amount: "" }; + +export const QueryVolumeResponse = { + encode( + message: QueryVolumeResponse, + writer: Writer = Writer.create() + ): Writer { + if (message.amount !== "") { + writer.uint32(10).string(message.amount); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): QueryVolumeResponse { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseQueryVolumeResponse } as QueryVolumeResponse; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.amount = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): QueryVolumeResponse { + const message = { ...baseQueryVolumeResponse } as QueryVolumeResponse; + if (object.amount !== undefined && object.amount !== null) { + message.amount = String(object.amount); + } else { + message.amount = ""; + } + return message; + }, + + toJSON(message: QueryVolumeResponse): unknown { + const obj: any = {}; + message.amount !== undefined && (obj.amount = message.amount); + return obj; + }, + + fromPartial(object: DeepPartial): QueryVolumeResponse { + const message = { ...baseQueryVolumeResponse } as QueryVolumeResponse; + if (object.amount !== undefined && object.amount !== null) { + message.amount = object.amount; + } else { + message.amount = ""; + } + return message; + }, +}; + +const baseQueryAllVolumeRequest: object = {}; + +export const QueryAllVolumeRequest = { + encode( + message: QueryAllVolumeRequest, + writer: Writer = Writer.create() + ): Writer { + if (message.pagination !== undefined) { + PageRequest.encode(message.pagination, writer.uint32(10).fork()).ldelim(); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): QueryAllVolumeRequest { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseQueryAllVolumeRequest } as QueryAllVolumeRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.pagination = PageRequest.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): QueryAllVolumeRequest { + const message = { ...baseQueryAllVolumeRequest } as QueryAllVolumeRequest; + if (object.pagination !== undefined && object.pagination !== null) { + message.pagination = PageRequest.fromJSON(object.pagination); + } else { + message.pagination = undefined; + } + return message; + }, + + toJSON(message: QueryAllVolumeRequest): unknown { + const obj: any = {}; + message.pagination !== undefined && + (obj.pagination = message.pagination + ? PageRequest.toJSON(message.pagination) + : undefined); + return obj; + }, + + fromPartial( + object: DeepPartial + ): QueryAllVolumeRequest { + const message = { ...baseQueryAllVolumeRequest } as QueryAllVolumeRequest; + if (object.pagination !== undefined && object.pagination !== null) { + message.pagination = PageRequest.fromPartial(object.pagination); + } else { + message.pagination = undefined; + } + return message; + }, +}; + +const baseQueryAllVolumeResponse: object = {}; + +export const QueryAllVolumeResponse = { + encode( + message: QueryAllVolumeResponse, + writer: Writer = Writer.create() + ): Writer { + for (const v of message.volumes) { + Volume.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.pagination !== undefined) { + PageResponse.encode( + message.pagination, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): QueryAllVolumeResponse { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseQueryAllVolumeResponse } as QueryAllVolumeResponse; + message.volumes = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.volumes.push(Volume.decode(reader, reader.uint32())); + break; + case 2: + message.pagination = PageResponse.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): QueryAllVolumeResponse { + const message = { ...baseQueryAllVolumeResponse } as QueryAllVolumeResponse; + message.volumes = []; + if (object.volumes !== undefined && object.volumes !== null) { + for (const e of object.volumes) { + message.volumes.push(Volume.fromJSON(e)); + } + } + if (object.pagination !== undefined && object.pagination !== null) { + message.pagination = PageResponse.fromJSON(object.pagination); + } else { + message.pagination = undefined; + } + return message; + }, + + toJSON(message: QueryAllVolumeResponse): unknown { + const obj: any = {}; + if (message.volumes) { + obj.volumes = message.volumes.map((e) => + e ? Volume.toJSON(e) : undefined + ); + } else { + obj.volumes = []; + } + message.pagination !== undefined && + (obj.pagination = message.pagination + ? 
PageResponse.toJSON(message.pagination) + : undefined); + return obj; + }, + + fromPartial( + object: DeepPartial + ): QueryAllVolumeResponse { + const message = { ...baseQueryAllVolumeResponse } as QueryAllVolumeResponse; + message.volumes = []; + if (object.volumes !== undefined && object.volumes !== null) { + for (const e of object.volumes) { + message.volumes.push(Volume.fromPartial(e)); + } + } + if (object.pagination !== undefined && object.pagination !== null) { + message.pagination = PageResponse.fromPartial(object.pagination); + } else { + message.pagination = undefined; + } + return message; + }, +}; + +const baseQueryBurnedRequest: object = {}; + +export const QueryBurnedRequest = { + encode(_: QueryBurnedRequest, writer: Writer = Writer.create()): Writer { + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): QueryBurnedRequest { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseQueryBurnedRequest } as QueryBurnedRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(_: any): QueryBurnedRequest { + const message = { ...baseQueryBurnedRequest } as QueryBurnedRequest; + return message; + }, + + toJSON(_: QueryBurnedRequest): unknown { + const obj: any = {}; + return obj; + }, + + fromPartial(_: DeepPartial): QueryBurnedRequest { + const message = { ...baseQueryBurnedRequest } as QueryBurnedRequest; + return message; + }, +}; + +const baseQueryBurnedResponse: object = { denom: "", amount: "" }; + +export const QueryBurnedResponse = { + encode( + message: QueryBurnedResponse, + writer: Writer = Writer.create() + ): Writer { + if (message.denom !== "") { + writer.uint32(10).string(message.denom); + } + if (message.amount !== "") { + writer.uint32(18).string(message.amount); + } + return 
writer; + }, + + decode(input: Reader | Uint8Array, length?: number): QueryBurnedResponse { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseQueryBurnedResponse } as QueryBurnedResponse; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.denom = reader.string(); + break; + case 2: + message.amount = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): QueryBurnedResponse { + const message = { ...baseQueryBurnedResponse } as QueryBurnedResponse; + if (object.denom !== undefined && object.denom !== null) { + message.denom = String(object.denom); + } else { + message.denom = ""; + } + if (object.amount !== undefined && object.amount !== null) { + message.amount = String(object.amount); + } else { + message.amount = ""; + } + return message; + }, + + toJSON(message: QueryBurnedResponse): unknown { + const obj: any = {}; + message.denom !== undefined && (obj.denom = message.denom); + message.amount !== undefined && (obj.amount = message.amount); + return obj; + }, + + fromPartial(object: DeepPartial): QueryBurnedResponse { + const message = { ...baseQueryBurnedResponse } as QueryBurnedResponse; + if (object.denom !== undefined && object.denom !== null) { + message.denom = object.denom; + } else { + message.denom = ""; + } + if (object.amount !== undefined && object.amount !== null) { + message.amount = object.amount; + } else { + message.amount = ""; + } + return message; + }, +}; + +const baseQueryDropRequest: object = { uid: 0 }; + +export const QueryDropRequest = { + encode(message: QueryDropRequest, writer: Writer = Writer.create()): Writer { + if (message.uid !== 0) { + writer.uint32(8).uint64(message.uid); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): QueryDropRequest { + const 
reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseQueryDropRequest } as QueryDropRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.uid = longToNumber(reader.uint64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): QueryDropRequest { + const message = { ...baseQueryDropRequest } as QueryDropRequest; + if (object.uid !== undefined && object.uid !== null) { + message.uid = Number(object.uid); + } else { + message.uid = 0; + } + return message; + }, + + toJSON(message: QueryDropRequest): unknown { + const obj: any = {}; + message.uid !== undefined && (obj.uid = message.uid); + return obj; + }, + + fromPartial(object: DeepPartial): QueryDropRequest { + const message = { ...baseQueryDropRequest } as QueryDropRequest; + if (object.uid !== undefined && object.uid !== null) { + message.uid = object.uid; + } else { + message.uid = 0; + } + return message; + }, +}; + +const baseQueryDropCoinRequest: object = { + denomA: "", + denomB: "", + amountA: "", +}; + +export const QueryDropCoinRequest = { + encode( + message: QueryDropCoinRequest, + writer: Writer = Writer.create() + ): Writer { + if (message.denomA !== "") { + writer.uint32(10).string(message.denomA); + } + if (message.denomB !== "") { + writer.uint32(18).string(message.denomB); + } + if (message.amountA !== "") { + writer.uint32(26).string(message.amountA); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): QueryDropCoinRequest { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseQueryDropCoinRequest } as QueryDropCoinRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.denomA = reader.string(); + break; + case 2: + message.denomB = reader.string(); + break; + case 3: + message.amountA = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): QueryDropCoinRequest { + const message = { ...baseQueryDropCoinRequest } as QueryDropCoinRequest; + if (object.denomA !== undefined && object.denomA !== null) { + message.denomA = String(object.denomA); + } else { + message.denomA = ""; + } + if (object.denomB !== undefined && object.denomB !== null) { + message.denomB = String(object.denomB); + } else { + message.denomB = ""; + } + if (object.amountA !== undefined && object.amountA !== null) { + message.amountA = String(object.amountA); + } else { + message.amountA = ""; + } + return message; + }, + + toJSON(message: QueryDropCoinRequest): unknown { + const obj: any = {}; + message.denomA !== undefined && (obj.denomA = message.denomA); + message.denomB !== undefined && (obj.denomB = message.denomB); + message.amountA !== undefined && (obj.amountA = message.amountA); + return obj; + }, + + fromPartial(object: DeepPartial): QueryDropCoinRequest { + const message = { ...baseQueryDropCoinRequest } as QueryDropCoinRequest; + if (object.denomA !== undefined && object.denomA !== null) { + message.denomA = object.denomA; + } else { + message.denomA = ""; + } + if (object.denomB !== undefined && object.denomB !== null) { + message.denomB = object.denomB; + } else { + message.denomB = ""; + } + if (object.amountA !== undefined && object.amountA !== null) { + message.amountA = object.amountA; + } else { + message.amountA = ""; + } + return message; + }, +}; + +const baseQueryDropCoinResponse: object = { drops: "", amountB: "" }; + +export const QueryDropCoinResponse = { 
+ encode( + message: QueryDropCoinResponse, + writer: Writer = Writer.create() + ): Writer { + if (message.drops !== "") { + writer.uint32(10).string(message.drops); + } + if (message.amountB !== "") { + writer.uint32(18).string(message.amountB); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): QueryDropCoinResponse { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseQueryDropCoinResponse } as QueryDropCoinResponse; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.drops = reader.string(); + break; + case 2: + message.amountB = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): QueryDropCoinResponse { + const message = { ...baseQueryDropCoinResponse } as QueryDropCoinResponse; + if (object.drops !== undefined && object.drops !== null) { + message.drops = String(object.drops); + } else { + message.drops = ""; + } + if (object.amountB !== undefined && object.amountB !== null) { + message.amountB = String(object.amountB); + } else { + message.amountB = ""; + } + return message; + }, + + toJSON(message: QueryDropCoinResponse): unknown { + const obj: any = {}; + message.drops !== undefined && (obj.drops = message.drops); + message.amountB !== undefined && (obj.amountB = message.amountB); + return obj; + }, + + fromPartial( + object: DeepPartial + ): QueryDropCoinResponse { + const message = { ...baseQueryDropCoinResponse } as QueryDropCoinResponse; + if (object.drops !== undefined && object.drops !== null) { + message.drops = object.drops; + } else { + message.drops = ""; + } + if (object.amountB !== undefined && object.amountB !== null) { + message.amountB = object.amountB; + } else { + message.amountB = ""; + } + return message; + }, +}; + +const baseQueryDropResponse: object = {}; + 
+export const QueryDropResponse = { + encode(message: QueryDropResponse, writer: Writer = Writer.create()): Writer { + if (message.drop !== undefined) { + Drop.encode(message.drop, writer.uint32(10).fork()).ldelim(); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): QueryDropResponse { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseQueryDropResponse } as QueryDropResponse; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.drop = Drop.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): QueryDropResponse { + const message = { ...baseQueryDropResponse } as QueryDropResponse; + if (object.drop !== undefined && object.drop !== null) { + message.drop = Drop.fromJSON(object.drop); + } else { + message.drop = undefined; + } + return message; + }, + + toJSON(message: QueryDropResponse): unknown { + const obj: any = {}; + message.drop !== undefined && + (obj.drop = message.drop ? Drop.toJSON(message.drop) : undefined); + return obj; + }, + + fromPartial(object: DeepPartial): QueryDropResponse { + const message = { ...baseQueryDropResponse } as QueryDropResponse; + if (object.drop !== undefined && object.drop !== null) { + message.drop = Drop.fromPartial(object.drop); + } else { + message.drop = undefined; + } + return message; + }, +}; + +const baseQueryDropAmountsRequest: object = { uid: 0 }; + +export const QueryDropAmountsRequest = { + encode( + message: QueryDropAmountsRequest, + writer: Writer = Writer.create() + ): Writer { + if (message.uid !== 0) { + writer.uint32(8).uint64(message.uid); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): QueryDropAmountsRequest { + const reader = input instanceof Uint8Array ? 
new Reader(input) : input; + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseQueryDropAmountsRequest, + } as QueryDropAmountsRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.uid = longToNumber(reader.uint64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): QueryDropAmountsRequest { + const message = { + ...baseQueryDropAmountsRequest, + } as QueryDropAmountsRequest; + if (object.uid !== undefined && object.uid !== null) { + message.uid = Number(object.uid); + } else { + message.uid = 0; + } + return message; + }, + + toJSON(message: QueryDropAmountsRequest): unknown { + const obj: any = {}; + message.uid !== undefined && (obj.uid = message.uid); + return obj; + }, + + fromPartial( + object: DeepPartial + ): QueryDropAmountsRequest { + const message = { + ...baseQueryDropAmountsRequest, + } as QueryDropAmountsRequest; + if (object.uid !== undefined && object.uid !== null) { + message.uid = object.uid; + } else { + message.uid = 0; + } + return message; + }, +}; + +const baseQueryDropAmountsResponse: object = { + denom1: "", + denom2: "", + amount1: "", + amount2: "", +}; + +export const QueryDropAmountsResponse = { + encode( + message: QueryDropAmountsResponse, + writer: Writer = Writer.create() + ): Writer { + if (message.denom1 !== "") { + writer.uint32(10).string(message.denom1); + } + if (message.denom2 !== "") { + writer.uint32(18).string(message.denom2); + } + if (message.amount1 !== "") { + writer.uint32(26).string(message.amount1); + } + if (message.amount2 !== "") { + writer.uint32(34).string(message.amount2); + } + return writer; + }, + + decode( + input: Reader | Uint8Array, + length?: number + ): QueryDropAmountsResponse { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseQueryDropAmountsResponse, + } as QueryDropAmountsResponse; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.denom1 = reader.string(); + break; + case 2: + message.denom2 = reader.string(); + break; + case 3: + message.amount1 = reader.string(); + break; + case 4: + message.amount2 = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): QueryDropAmountsResponse { + const message = { + ...baseQueryDropAmountsResponse, + } as QueryDropAmountsResponse; + if (object.denom1 !== undefined && object.denom1 !== null) { + message.denom1 = String(object.denom1); + } else { + message.denom1 = ""; + } + if (object.denom2 !== undefined && object.denom2 !== null) { + message.denom2 = String(object.denom2); + } else { + message.denom2 = ""; + } + if (object.amount1 !== undefined && object.amount1 !== null) { + message.amount1 = String(object.amount1); + } else { + message.amount1 = ""; + } + if (object.amount2 !== undefined && object.amount2 !== null) { + message.amount2 = String(object.amount2); + } else { + message.amount2 = ""; + } + return message; + }, + + toJSON(message: QueryDropAmountsResponse): unknown { + const obj: any = {}; + message.denom1 !== undefined && (obj.denom1 = message.denom1); + message.denom2 !== undefined && (obj.denom2 = message.denom2); + message.amount1 !== undefined && (obj.amount1 = message.amount1); + message.amount2 !== undefined && (obj.amount2 = message.amount2); + return obj; + }, + + fromPartial( + object: DeepPartial + ): QueryDropAmountsResponse { + const message = { + ...baseQueryDropAmountsResponse, + } as QueryDropAmountsResponse; + if (object.denom1 !== undefined && object.denom1 !== null) { + message.denom1 = object.denom1; + } else { + message.denom1 = ""; + } + if (object.denom2 !== undefined && object.denom2 !== null) { + message.denom2 = 
object.denom2; + } else { + message.denom2 = ""; + } + if (object.amount1 !== undefined && object.amount1 !== null) { + message.amount1 = object.amount1; + } else { + message.amount1 = ""; + } + if (object.amount2 !== undefined && object.amount2 !== null) { + message.amount2 = object.amount2; + } else { + message.amount2 = ""; + } + return message; + }, +}; + +const baseQueryDropsToCoinsRequest: object = { pair: "", drops: "" }; + +export const QueryDropsToCoinsRequest = { + encode( + message: QueryDropsToCoinsRequest, + writer: Writer = Writer.create() + ): Writer { + if (message.pair !== "") { + writer.uint32(10).string(message.pair); + } + if (message.drops !== "") { + writer.uint32(18).string(message.drops); + } + return writer; + }, + + decode( + input: Reader | Uint8Array, + length?: number + ): QueryDropsToCoinsRequest { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseQueryDropsToCoinsRequest, + } as QueryDropsToCoinsRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.pair = reader.string(); + break; + case 2: + message.drops = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): QueryDropsToCoinsRequest { + const message = { + ...baseQueryDropsToCoinsRequest, + } as QueryDropsToCoinsRequest; + if (object.pair !== undefined && object.pair !== null) { + message.pair = String(object.pair); + } else { + message.pair = ""; + } + if (object.drops !== undefined && object.drops !== null) { + message.drops = String(object.drops); + } else { + message.drops = ""; + } + return message; + }, + + toJSON(message: QueryDropsToCoinsRequest): unknown { + const obj: any = {}; + message.pair !== undefined && (obj.pair = message.pair); + message.drops !== undefined && (obj.drops = message.drops); + return obj; + }, + + 
fromPartial( + object: DeepPartial + ): QueryDropsToCoinsRequest { + const message = { + ...baseQueryDropsToCoinsRequest, + } as QueryDropsToCoinsRequest; + if (object.pair !== undefined && object.pair !== null) { + message.pair = object.pair; + } else { + message.pair = ""; + } + if (object.drops !== undefined && object.drops !== null) { + message.drops = object.drops; + } else { + message.drops = ""; + } + return message; + }, +}; + +const baseQueryDropPairsRequest: object = { address: "" }; + +export const QueryDropPairsRequest = { + encode( + message: QueryDropPairsRequest, + writer: Writer = Writer.create() + ): Writer { + if (message.address !== "") { + writer.uint32(10).string(message.address); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): QueryDropPairsRequest { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseQueryDropPairsRequest } as QueryDropPairsRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.address = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): QueryDropPairsRequest { + const message = { ...baseQueryDropPairsRequest } as QueryDropPairsRequest; + if (object.address !== undefined && object.address !== null) { + message.address = String(object.address); + } else { + message.address = ""; + } + return message; + }, + + toJSON(message: QueryDropPairsRequest): unknown { + const obj: any = {}; + message.address !== undefined && (obj.address = message.address); + return obj; + }, + + fromPartial( + object: DeepPartial + ): QueryDropPairsRequest { + const message = { ...baseQueryDropPairsRequest } as QueryDropPairsRequest; + if (object.address !== undefined && object.address !== null) { + message.address = object.address; + } else { + message.address = ""; 
+ } + return message; + }, +}; + +const baseQueryDropPairsResponse: object = { pairs: "" }; + +export const QueryDropPairsResponse = { + encode( + message: QueryDropPairsResponse, + writer: Writer = Writer.create() + ): Writer { + for (const v of message.pairs) { + writer.uint32(10).string(v!); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): QueryDropPairsResponse { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseQueryDropPairsResponse } as QueryDropPairsResponse; + message.pairs = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.pairs.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): QueryDropPairsResponse { + const message = { ...baseQueryDropPairsResponse } as QueryDropPairsResponse; + message.pairs = []; + if (object.pairs !== undefined && object.pairs !== null) { + for (const e of object.pairs) { + message.pairs.push(String(e)); + } + } + return message; + }, + + toJSON(message: QueryDropPairsResponse): unknown { + const obj: any = {}; + if (message.pairs) { + obj.pairs = message.pairs.map((e) => e); + } else { + obj.pairs = []; + } + return obj; + }, + + fromPartial( + object: DeepPartial + ): QueryDropPairsResponse { + const message = { ...baseQueryDropPairsResponse } as QueryDropPairsResponse; + message.pairs = []; + if (object.pairs !== undefined && object.pairs !== null) { + for (const e of object.pairs) { + message.pairs.push(e); + } + } + return message; + }, +}; + +const baseQueryDropOwnerPairRequest: object = { address: "", pair: "" }; + +export const QueryDropOwnerPairRequest = { + encode( + message: QueryDropOwnerPairRequest, + writer: Writer = Writer.create() + ): Writer { + if (message.address !== "") { + writer.uint32(10).string(message.address); + 
} + if (message.pair !== "") { + writer.uint32(18).string(message.pair); + } + if (message.pagination !== undefined) { + PageRequest.encode(message.pagination, writer.uint32(26).fork()).ldelim(); + } + return writer; + }, + + decode( + input: Reader | Uint8Array, + length?: number + ): QueryDropOwnerPairRequest { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseQueryDropOwnerPairRequest, + } as QueryDropOwnerPairRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.address = reader.string(); + break; + case 2: + message.pair = reader.string(); + break; + case 3: + message.pagination = PageRequest.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): QueryDropOwnerPairRequest { + const message = { + ...baseQueryDropOwnerPairRequest, + } as QueryDropOwnerPairRequest; + if (object.address !== undefined && object.address !== null) { + message.address = String(object.address); + } else { + message.address = ""; + } + if (object.pair !== undefined && object.pair !== null) { + message.pair = String(object.pair); + } else { + message.pair = ""; + } + if (object.pagination !== undefined && object.pagination !== null) { + message.pagination = PageRequest.fromJSON(object.pagination); + } else { + message.pagination = undefined; + } + return message; + }, + + toJSON(message: QueryDropOwnerPairRequest): unknown { + const obj: any = {}; + message.address !== undefined && (obj.address = message.address); + message.pair !== undefined && (obj.pair = message.pair); + message.pagination !== undefined && + (obj.pagination = message.pagination + ? 
PageRequest.toJSON(message.pagination) + : undefined); + return obj; + }, + + fromPartial( + object: DeepPartial + ): QueryDropOwnerPairRequest { + const message = { + ...baseQueryDropOwnerPairRequest, + } as QueryDropOwnerPairRequest; + if (object.address !== undefined && object.address !== null) { + message.address = object.address; + } else { + message.address = ""; + } + if (object.pair !== undefined && object.pair !== null) { + message.pair = object.pair; + } else { + message.pair = ""; + } + if (object.pagination !== undefined && object.pagination !== null) { + message.pagination = PageRequest.fromPartial(object.pagination); + } else { + message.pagination = undefined; + } + return message; + }, +}; + +const baseQueryDropOwnerPairSumRequest: object = { address: "", pair: "" }; + +export const QueryDropOwnerPairSumRequest = { + encode( + message: QueryDropOwnerPairSumRequest, + writer: Writer = Writer.create() + ): Writer { + if (message.address !== "") { + writer.uint32(10).string(message.address); + } + if (message.pair !== "") { + writer.uint32(18).string(message.pair); + } + return writer; + }, + + decode( + input: Reader | Uint8Array, + length?: number + ): QueryDropOwnerPairSumRequest { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseQueryDropOwnerPairSumRequest, + } as QueryDropOwnerPairSumRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.address = reader.string(); + break; + case 2: + message.pair = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): QueryDropOwnerPairSumRequest { + const message = { + ...baseQueryDropOwnerPairSumRequest, + } as QueryDropOwnerPairSumRequest; + if (object.address !== undefined && object.address !== null) { + message.address = String(object.address); + } else { + message.address = ""; + } + if (object.pair !== undefined && object.pair !== null) { + message.pair = String(object.pair); + } else { + message.pair = ""; + } + return message; + }, + + toJSON(message: QueryDropOwnerPairSumRequest): unknown { + const obj: any = {}; + message.address !== undefined && (obj.address = message.address); + message.pair !== undefined && (obj.pair = message.pair); + return obj; + }, + + fromPartial( + object: DeepPartial + ): QueryDropOwnerPairSumRequest { + const message = { + ...baseQueryDropOwnerPairSumRequest, + } as QueryDropOwnerPairSumRequest; + if (object.address !== undefined && object.address !== null) { + message.address = object.address; + } else { + message.address = ""; + } + if (object.pair !== undefined && object.pair !== null) { + message.pair = object.pair; + } else { + message.pair = ""; + } + return message; + }, +}; + +const baseQueryDropOwnerPairSumResponse: object = { sum: "" }; + +export const QueryDropOwnerPairSumResponse = { + encode( + message: QueryDropOwnerPairSumResponse, + writer: Writer = Writer.create() + ): Writer { + if (message.sum !== "") { + writer.uint32(10).string(message.sum); + } + return writer; + }, + + decode( + input: Reader | Uint8Array, + length?: number + ): QueryDropOwnerPairSumResponse { + const reader = input instanceof 
Uint8Array ? new Reader(input) : input; + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseQueryDropOwnerPairSumResponse, + } as QueryDropOwnerPairSumResponse; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.sum = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): QueryDropOwnerPairSumResponse { + const message = { + ...baseQueryDropOwnerPairSumResponse, + } as QueryDropOwnerPairSumResponse; + if (object.sum !== undefined && object.sum !== null) { + message.sum = String(object.sum); + } else { + message.sum = ""; + } + return message; + }, + + toJSON(message: QueryDropOwnerPairSumResponse): unknown { + const obj: any = {}; + message.sum !== undefined && (obj.sum = message.sum); + return obj; + }, + + fromPartial( + object: DeepPartial + ): QueryDropOwnerPairSumResponse { + const message = { + ...baseQueryDropOwnerPairSumResponse, + } as QueryDropOwnerPairSumResponse; + if (object.sum !== undefined && object.sum !== null) { + message.sum = object.sum; + } else { + message.sum = ""; + } + return message; + }, +}; + +const baseQueryDropOwnerPairUidsRequest: object = { address: "", pair: "" }; + +export const QueryDropOwnerPairUidsRequest = { + encode( + message: QueryDropOwnerPairUidsRequest, + writer: Writer = Writer.create() + ): Writer { + if (message.address !== "") { + writer.uint32(10).string(message.address); + } + if (message.pair !== "") { + writer.uint32(18).string(message.pair); + } + if (message.pagination !== undefined) { + PageRequest.encode(message.pagination, writer.uint32(26).fork()).ldelim(); + } + return writer; + }, + + decode( + input: Reader | Uint8Array, + length?: number + ): QueryDropOwnerPairUidsRequest { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseQueryDropOwnerPairUidsRequest, + } as QueryDropOwnerPairUidsRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.address = reader.string(); + break; + case 2: + message.pair = reader.string(); + break; + case 3: + message.pagination = PageRequest.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): QueryDropOwnerPairUidsRequest { + const message = { + ...baseQueryDropOwnerPairUidsRequest, + } as QueryDropOwnerPairUidsRequest; + if (object.address !== undefined && object.address !== null) { + message.address = String(object.address); + } else { + message.address = ""; + } + if (object.pair !== undefined && object.pair !== null) { + message.pair = String(object.pair); + } else { + message.pair = ""; + } + if (object.pagination !== undefined && object.pagination !== null) { + message.pagination = PageRequest.fromJSON(object.pagination); + } else { + message.pagination = undefined; + } + return message; + }, + + toJSON(message: QueryDropOwnerPairUidsRequest): unknown { + const obj: any = {}; + message.address !== undefined && (obj.address = message.address); + message.pair !== undefined && (obj.pair = message.pair); + message.pagination !== undefined && + (obj.pagination = message.pagination + ? 
PageRequest.toJSON(message.pagination) + : undefined); + return obj; + }, + + fromPartial( + object: DeepPartial + ): QueryDropOwnerPairUidsRequest { + const message = { + ...baseQueryDropOwnerPairUidsRequest, + } as QueryDropOwnerPairUidsRequest; + if (object.address !== undefined && object.address !== null) { + message.address = object.address; + } else { + message.address = ""; + } + if (object.pair !== undefined && object.pair !== null) { + message.pair = object.pair; + } else { + message.pair = ""; + } + if (object.pagination !== undefined && object.pagination !== null) { + message.pagination = PageRequest.fromPartial(object.pagination); + } else { + message.pagination = undefined; + } + return message; + }, +}; + +const baseQueryUidsResponse: object = { uids: 0 }; + +export const QueryUidsResponse = { + encode(message: QueryUidsResponse, writer: Writer = Writer.create()): Writer { + writer.uint32(10).fork(); + for (const v of message.uids) { + writer.uint64(v); + } + writer.ldelim(); + if (message.pagination !== undefined) { + PageResponse.encode( + message.pagination, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): QueryUidsResponse { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseQueryUidsResponse } as QueryUidsResponse; + message.uids = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if ((tag & 7) === 2) { + const end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) { + message.uids.push(longToNumber(reader.uint64() as Long)); + } + } else { + message.uids.push(longToNumber(reader.uint64() as Long)); + } + break; + case 2: + message.pagination = PageResponse.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): QueryUidsResponse { + const message = { ...baseQueryUidsResponse } as QueryUidsResponse; + message.uids = []; + if (object.uids !== undefined && object.uids !== null) { + for (const e of object.uids) { + message.uids.push(Number(e)); + } + } + if (object.pagination !== undefined && object.pagination !== null) { + message.pagination = PageResponse.fromJSON(object.pagination); + } else { + message.pagination = undefined; + } + return message; + }, + + toJSON(message: QueryUidsResponse): unknown { + const obj: any = {}; + if (message.uids) { + obj.uids = message.uids.map((e) => e); + } else { + obj.uids = []; + } + message.pagination !== undefined && + (obj.pagination = message.pagination + ? 
PageResponse.toJSON(message.pagination) + : undefined); + return obj; + }, + + fromPartial(object: DeepPartial): QueryUidsResponse { + const message = { ...baseQueryUidsResponse } as QueryUidsResponse; + message.uids = []; + if (object.uids !== undefined && object.uids !== null) { + for (const e of object.uids) { + message.uids.push(e); + } + } + if (object.pagination !== undefined && object.pagination !== null) { + message.pagination = PageResponse.fromPartial(object.pagination); + } else { + message.pagination = undefined; + } + return message; + }, +}; + +const baseQueryDropOwnerPairDetailRequest: object = { address: "", pair: "" }; + +export const QueryDropOwnerPairDetailRequest = { + encode( + message: QueryDropOwnerPairDetailRequest, + writer: Writer = Writer.create() + ): Writer { + if (message.address !== "") { + writer.uint32(10).string(message.address); + } + if (message.pair !== "") { + writer.uint32(18).string(message.pair); + } + if (message.pagination !== undefined) { + PageRequest.encode(message.pagination, writer.uint32(26).fork()).ldelim(); + } + return writer; + }, + + decode( + input: Reader | Uint8Array, + length?: number + ): QueryDropOwnerPairDetailRequest { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseQueryDropOwnerPairDetailRequest, + } as QueryDropOwnerPairDetailRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.address = reader.string(); + break; + case 2: + message.pair = reader.string(); + break; + case 3: + message.pagination = PageRequest.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): QueryDropOwnerPairDetailRequest { + const message = { + ...baseQueryDropOwnerPairDetailRequest, + } as QueryDropOwnerPairDetailRequest; + if (object.address !== undefined && object.address !== null) { + message.address = String(object.address); + } else { + message.address = ""; + } + if (object.pair !== undefined && object.pair !== null) { + message.pair = String(object.pair); + } else { + message.pair = ""; + } + if (object.pagination !== undefined && object.pagination !== null) { + message.pagination = PageRequest.fromJSON(object.pagination); + } else { + message.pagination = undefined; + } + return message; + }, + + toJSON(message: QueryDropOwnerPairDetailRequest): unknown { + const obj: any = {}; + message.address !== undefined && (obj.address = message.address); + message.pair !== undefined && (obj.pair = message.pair); + message.pagination !== undefined && + (obj.pagination = message.pagination + ? 
PageRequest.toJSON(message.pagination) + : undefined); + return obj; + }, + + fromPartial( + object: DeepPartial + ): QueryDropOwnerPairDetailRequest { + const message = { + ...baseQueryDropOwnerPairDetailRequest, + } as QueryDropOwnerPairDetailRequest; + if (object.address !== undefined && object.address !== null) { + message.address = object.address; + } else { + message.address = ""; + } + if (object.pair !== undefined && object.pair !== null) { + message.pair = object.pair; + } else { + message.pair = ""; + } + if (object.pagination !== undefined && object.pagination !== null) { + message.pagination = PageRequest.fromPartial(object.pagination); + } else { + message.pagination = undefined; + } + return message; + }, +}; + +const baseQueryAllDropRequest: object = {}; + +export const QueryAllDropRequest = { + encode( + message: QueryAllDropRequest, + writer: Writer = Writer.create() + ): Writer { + if (message.pagination !== undefined) { + PageRequest.encode(message.pagination, writer.uint32(10).fork()).ldelim(); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): QueryAllDropRequest { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseQueryAllDropRequest } as QueryAllDropRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.pagination = PageRequest.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): QueryAllDropRequest { + const message = { ...baseQueryAllDropRequest } as QueryAllDropRequest; + if (object.pagination !== undefined && object.pagination !== null) { + message.pagination = PageRequest.fromJSON(object.pagination); + } else { + message.pagination = undefined; + } + return message; + }, + + toJSON(message: QueryAllDropRequest): unknown { + const obj: any = {}; + message.pagination !== undefined && + (obj.pagination = message.pagination + ? PageRequest.toJSON(message.pagination) + : undefined); + return obj; + }, + + fromPartial(object: DeepPartial): QueryAllDropRequest { + const message = { ...baseQueryAllDropRequest } as QueryAllDropRequest; + if (object.pagination !== undefined && object.pagination !== null) { + message.pagination = PageRequest.fromPartial(object.pagination); + } else { + message.pagination = undefined; + } + return message; + }, +}; + +const baseQueryDropsResponse: object = {}; + +export const QueryDropsResponse = { + encode( + message: QueryDropsResponse, + writer: Writer = Writer.create() + ): Writer { + for (const v of message.drops) { + Drop.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.pagination !== undefined) { + PageResponse.encode( + message.pagination, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): QueryDropsResponse { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseQueryDropsResponse } as QueryDropsResponse; + message.drops = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.drops.push(Drop.decode(reader, reader.uint32())); + break; + case 2: + message.pagination = PageResponse.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): QueryDropsResponse { + const message = { ...baseQueryDropsResponse } as QueryDropsResponse; + message.drops = []; + if (object.drops !== undefined && object.drops !== null) { + for (const e of object.drops) { + message.drops.push(Drop.fromJSON(e)); + } + } + if (object.pagination !== undefined && object.pagination !== null) { + message.pagination = PageResponse.fromJSON(object.pagination); + } else { + message.pagination = undefined; + } + return message; + }, + + toJSON(message: QueryDropsResponse): unknown { + const obj: any = {}; + if (message.drops) { + obj.drops = message.drops.map((e) => (e ? Drop.toJSON(e) : undefined)); + } else { + obj.drops = []; + } + message.pagination !== undefined && + (obj.pagination = message.pagination + ? 
PageResponse.toJSON(message.pagination) + : undefined); + return obj; + }, + + fromPartial(object: DeepPartial): QueryDropsResponse { + const message = { ...baseQueryDropsResponse } as QueryDropsResponse; + message.drops = []; + if (object.drops !== undefined && object.drops !== null) { + for (const e of object.drops) { + message.drops.push(Drop.fromPartial(e)); + } + } + if (object.pagination !== undefined && object.pagination !== null) { + message.pagination = PageResponse.fromPartial(object.pagination); + } else { + message.pagination = undefined; + } + return message; + }, +}; + +const baseQueryGetMemberRequest: object = { denomA: "", denomB: "" }; + +export const QueryGetMemberRequest = { + encode( + message: QueryGetMemberRequest, + writer: Writer = Writer.create() + ): Writer { + if (message.denomA !== "") { + writer.uint32(18).string(message.denomA); + } + if (message.denomB !== "") { + writer.uint32(26).string(message.denomB); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): QueryGetMemberRequest { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseQueryGetMemberRequest } as QueryGetMemberRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 2: + message.denomA = reader.string(); + break; + case 3: + message.denomB = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): QueryGetMemberRequest { + const message = { ...baseQueryGetMemberRequest } as QueryGetMemberRequest; + if (object.denomA !== undefined && object.denomA !== null) { + message.denomA = String(object.denomA); + } else { + message.denomA = ""; + } + if (object.denomB !== undefined && object.denomB !== null) { + message.denomB = String(object.denomB); + } else { + message.denomB = ""; + } + return message; + }, + + toJSON(message: QueryGetMemberRequest): unknown { + const obj: any = {}; + message.denomA !== undefined && (obj.denomA = message.denomA); + message.denomB !== undefined && (obj.denomB = message.denomB); + return obj; + }, + + fromPartial( + object: DeepPartial + ): QueryGetMemberRequest { + const message = { ...baseQueryGetMemberRequest } as QueryGetMemberRequest; + if (object.denomA !== undefined && object.denomA !== null) { + message.denomA = object.denomA; + } else { + message.denomA = ""; + } + if (object.denomB !== undefined && object.denomB !== null) { + message.denomB = object.denomB; + } else { + message.denomB = ""; + } + return message; + }, +}; + +const baseQueryGetMemberResponse: object = {}; + +export const QueryGetMemberResponse = { + encode( + message: QueryGetMemberResponse, + writer: Writer = Writer.create() + ): Writer { + if (message.member !== undefined) { + Member.encode(message.member, writer.uint32(10).fork()).ldelim(); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): QueryGetMemberResponse { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseQueryGetMemberResponse } as QueryGetMemberResponse; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.member = Member.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): QueryGetMemberResponse { + const message = { ...baseQueryGetMemberResponse } as QueryGetMemberResponse; + if (object.member !== undefined && object.member !== null) { + message.member = Member.fromJSON(object.member); + } else { + message.member = undefined; + } + return message; + }, + + toJSON(message: QueryGetMemberResponse): unknown { + const obj: any = {}; + message.member !== undefined && + (obj.member = message.member ? Member.toJSON(message.member) : undefined); + return obj; + }, + + fromPartial( + object: DeepPartial + ): QueryGetMemberResponse { + const message = { ...baseQueryGetMemberResponse } as QueryGetMemberResponse; + if (object.member !== undefined && object.member !== null) { + message.member = Member.fromPartial(object.member); + } else { + message.member = undefined; + } + return message; + }, +}; + +const baseQueryAllMemberRequest: object = {}; + +export const QueryAllMemberRequest = { + encode( + message: QueryAllMemberRequest, + writer: Writer = Writer.create() + ): Writer { + if (message.pagination !== undefined) { + PageRequest.encode(message.pagination, writer.uint32(10).fork()).ldelim(); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): QueryAllMemberRequest { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseQueryAllMemberRequest } as QueryAllMemberRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.pagination = PageRequest.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): QueryAllMemberRequest { + const message = { ...baseQueryAllMemberRequest } as QueryAllMemberRequest; + if (object.pagination !== undefined && object.pagination !== null) { + message.pagination = PageRequest.fromJSON(object.pagination); + } else { + message.pagination = undefined; + } + return message; + }, + + toJSON(message: QueryAllMemberRequest): unknown { + const obj: any = {}; + message.pagination !== undefined && + (obj.pagination = message.pagination + ? PageRequest.toJSON(message.pagination) + : undefined); + return obj; + }, + + fromPartial( + object: DeepPartial + ): QueryAllMemberRequest { + const message = { ...baseQueryAllMemberRequest } as QueryAllMemberRequest; + if (object.pagination !== undefined && object.pagination !== null) { + message.pagination = PageRequest.fromPartial(object.pagination); + } else { + message.pagination = undefined; + } + return message; + }, +}; + +const baseQueryAllMemberResponse: object = {}; + +export const QueryAllMemberResponse = { + encode( + message: QueryAllMemberResponse, + writer: Writer = Writer.create() + ): Writer { + for (const v of message.member) { + Member.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.pagination !== undefined) { + PageResponse.encode( + message.pagination, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): QueryAllMemberResponse { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseQueryAllMemberResponse } as QueryAllMemberResponse; + message.member = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.member.push(Member.decode(reader, reader.uint32())); + break; + case 2: + message.pagination = PageResponse.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): QueryAllMemberResponse { + const message = { ...baseQueryAllMemberResponse } as QueryAllMemberResponse; + message.member = []; + if (object.member !== undefined && object.member !== null) { + for (const e of object.member) { + message.member.push(Member.fromJSON(e)); + } + } + if (object.pagination !== undefined && object.pagination !== null) { + message.pagination = PageResponse.fromJSON(object.pagination); + } else { + message.pagination = undefined; + } + return message; + }, + + toJSON(message: QueryAllMemberResponse): unknown { + const obj: any = {}; + if (message.member) { + obj.member = message.member.map((e) => + e ? Member.toJSON(e) : undefined + ); + } else { + obj.member = []; + } + message.pagination !== undefined && + (obj.pagination = message.pagination + ? 
PageResponse.toJSON(message.pagination) + : undefined); + return obj; + }, + + fromPartial( + object: DeepPartial + ): QueryAllMemberResponse { + const message = { ...baseQueryAllMemberResponse } as QueryAllMemberResponse; + message.member = []; + if (object.member !== undefined && object.member !== null) { + for (const e of object.member) { + message.member.push(Member.fromPartial(e)); + } + } + if (object.pagination !== undefined && object.pagination !== null) { + message.pagination = PageResponse.fromPartial(object.pagination); + } else { + message.pagination = undefined; + } + return message; + }, +}; + +const baseQueryGetBurningsRequest: object = { denom: "" }; + +export const QueryGetBurningsRequest = { + encode( + message: QueryGetBurningsRequest, + writer: Writer = Writer.create() + ): Writer { + if (message.denom !== "") { + writer.uint32(10).string(message.denom); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): QueryGetBurningsRequest { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseQueryGetBurningsRequest, + } as QueryGetBurningsRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.denom = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): QueryGetBurningsRequest { + const message = { + ...baseQueryGetBurningsRequest, + } as QueryGetBurningsRequest; + if (object.denom !== undefined && object.denom !== null) { + message.denom = String(object.denom); + } else { + message.denom = ""; + } + return message; + }, + + toJSON(message: QueryGetBurningsRequest): unknown { + const obj: any = {}; + message.denom !== undefined && (obj.denom = message.denom); + return obj; + }, + + fromPartial( + object: DeepPartial + ): QueryGetBurningsRequest { + const message = { + ...baseQueryGetBurningsRequest, + } as QueryGetBurningsRequest; + if (object.denom !== undefined && object.denom !== null) { + message.denom = object.denom; + } else { + message.denom = ""; + } + return message; + }, +}; + +const baseQueryGetBurningsResponse: object = {}; + +export const QueryGetBurningsResponse = { + encode( + message: QueryGetBurningsResponse, + writer: Writer = Writer.create() + ): Writer { + if (message.burnings !== undefined) { + Burnings.encode(message.burnings, writer.uint32(10).fork()).ldelim(); + } + return writer; + }, + + decode( + input: Reader | Uint8Array, + length?: number + ): QueryGetBurningsResponse { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseQueryGetBurningsResponse, + } as QueryGetBurningsResponse; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.burnings = Burnings.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): QueryGetBurningsResponse { + const message = { + ...baseQueryGetBurningsResponse, + } as QueryGetBurningsResponse; + if (object.burnings !== undefined && object.burnings !== null) { + message.burnings = Burnings.fromJSON(object.burnings); + } else { + message.burnings = undefined; + } + return message; + }, + + toJSON(message: QueryGetBurningsResponse): unknown { + const obj: any = {}; + message.burnings !== undefined && + (obj.burnings = message.burnings + ? Burnings.toJSON(message.burnings) + : undefined); + return obj; + }, + + fromPartial( + object: DeepPartial + ): QueryGetBurningsResponse { + const message = { + ...baseQueryGetBurningsResponse, + } as QueryGetBurningsResponse; + if (object.burnings !== undefined && object.burnings !== null) { + message.burnings = Burnings.fromPartial(object.burnings); + } else { + message.burnings = undefined; + } + return message; + }, +}; + +const baseQueryAllBurningsRequest: object = {}; + +export const QueryAllBurningsRequest = { + encode( + message: QueryAllBurningsRequest, + writer: Writer = Writer.create() + ): Writer { + if (message.pagination !== undefined) { + PageRequest.encode(message.pagination, writer.uint32(10).fork()).ldelim(); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): QueryAllBurningsRequest { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseQueryAllBurningsRequest, + } as QueryAllBurningsRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.pagination = PageRequest.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): QueryAllBurningsRequest { + const message = { + ...baseQueryAllBurningsRequest, + } as QueryAllBurningsRequest; + if (object.pagination !== undefined && object.pagination !== null) { + message.pagination = PageRequest.fromJSON(object.pagination); + } else { + message.pagination = undefined; + } + return message; + }, + + toJSON(message: QueryAllBurningsRequest): unknown { + const obj: any = {}; + message.pagination !== undefined && + (obj.pagination = message.pagination + ? PageRequest.toJSON(message.pagination) + : undefined); + return obj; + }, + + fromPartial( + object: DeepPartial + ): QueryAllBurningsRequest { + const message = { + ...baseQueryAllBurningsRequest, + } as QueryAllBurningsRequest; + if (object.pagination !== undefined && object.pagination !== null) { + message.pagination = PageRequest.fromPartial(object.pagination); + } else { + message.pagination = undefined; + } + return message; + }, +}; + +const baseQueryAllBurningsResponse: object = {}; + +export const QueryAllBurningsResponse = { + encode( + message: QueryAllBurningsResponse, + writer: Writer = Writer.create() + ): Writer { + for (const v of message.burnings) { + Burnings.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.pagination !== undefined) { + PageResponse.encode( + message.pagination, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: Reader | Uint8Array, + length?: number + ): QueryAllBurningsResponse { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseQueryAllBurningsResponse, + } as QueryAllBurningsResponse; + message.burnings = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.burnings.push(Burnings.decode(reader, reader.uint32())); + break; + case 2: + message.pagination = PageResponse.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): QueryAllBurningsResponse { + const message = { + ...baseQueryAllBurningsResponse, + } as QueryAllBurningsResponse; + message.burnings = []; + if (object.burnings !== undefined && object.burnings !== null) { + for (const e of object.burnings) { + message.burnings.push(Burnings.fromJSON(e)); + } + } + if (object.pagination !== undefined && object.pagination !== null) { + message.pagination = PageResponse.fromJSON(object.pagination); + } else { + message.pagination = undefined; + } + return message; + }, + + toJSON(message: QueryAllBurningsResponse): unknown { + const obj: any = {}; + if (message.burnings) { + obj.burnings = message.burnings.map((e) => + e ? Burnings.toJSON(e) : undefined + ); + } else { + obj.burnings = []; + } + message.pagination !== undefined && + (obj.pagination = message.pagination + ? 
PageResponse.toJSON(message.pagination) + : undefined); + return obj; + }, + + fromPartial( + object: DeepPartial + ): QueryAllBurningsResponse { + const message = { + ...baseQueryAllBurningsResponse, + } as QueryAllBurningsResponse; + message.burnings = []; + if (object.burnings !== undefined && object.burnings !== null) { + for (const e of object.burnings) { + message.burnings.push(Burnings.fromPartial(e)); + } + } + if (object.pagination !== undefined && object.pagination !== null) { + message.pagination = PageResponse.fromPartial(object.pagination); + } else { + message.pagination = undefined; + } + return message; + }, +}; + +const baseQueryOrderRequest: object = { uid: 0 }; + +export const QueryOrderRequest = { + encode(message: QueryOrderRequest, writer: Writer = Writer.create()): Writer { + if (message.uid !== 0) { + writer.uint32(8).uint64(message.uid); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): QueryOrderRequest { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length;
    const message = { ...baseQueryOrderRequest } as QueryOrderRequest;
    while (reader.pos < end) {
      const tag = reader.uint32();
      switch (tag >>> 3) {
        case 1:
          // uint64 on the wire; narrowed to number (throws past MAX_SAFE_INTEGER).
          message.uid = longToNumber(reader.uint64() as Long);
          break;
        default:
          reader.skipType(tag & 7);
          break;
      }
    }
    return message;
  },

  // Parses an untyped JSON object, coercing present fields and defaulting the rest.
  fromJSON(object: any): QueryOrderRequest {
    const message = { ...baseQueryOrderRequest } as QueryOrderRequest;
    if (object.uid !== undefined && object.uid !== null) {
      message.uid = Number(object.uid);
    } else {
      message.uid = 0;
    }
    return message;
  },

  // Converts to a plain JSON-serializable object.
  toJSON(message: QueryOrderRequest): unknown {
    const obj: any = {};
    message.uid !== undefined && (obj.uid = message.uid);
    return obj;
  },

  // Builds a message from a deeply-partial object, defaulting omitted fields.
  fromPartial(object: DeepPartial<QueryOrderRequest>): QueryOrderRequest {
    const message = { ...baseQueryOrderRequest } as QueryOrderRequest;
    if (object.uid !== undefined && object.uid !== null) {
      message.uid = object.uid;
    } else {
      message.uid = 0;
    }
    return message;
  },
};

const baseQueryOrderResponse: object = {};

// Protobuf codec for QueryOrderResponse (field 1: order, message).
export const QueryOrderResponse = {
  // Writes `message` in protobuf wire format; unset fields are omitted.
  encode(
    message: QueryOrderResponse,
    writer: Writer = Writer.create()
  ): Writer {
    if (message.order !== undefined) {
      Order.encode(message.order, writer.uint32(10).fork()).ldelim();
    }
    return writer;
  },

  // Reads a QueryOrderResponse from wire format; unknown fields are skipped.
  decode(input: Reader | Uint8Array, length?: number): QueryOrderResponse {
    const reader = input instanceof Uint8Array ? new Reader(input) : input;
    let end = length === undefined ? reader.len : reader.pos + length;
    const message = { ...baseQueryOrderResponse } as QueryOrderResponse;
    while (reader.pos < end) {
      const tag = reader.uint32();
      switch (tag >>> 3) {
        case 1:
          message.order = Order.decode(reader, reader.uint32());
          break;
        default:
          reader.skipType(tag & 7);
          break;
      }
    }
    return message;
  },

  // Parses an untyped JSON object; an absent order stays undefined.
  fromJSON(object: any): QueryOrderResponse {
    const message = { ...baseQueryOrderResponse } as QueryOrderResponse;
    if (object.order !== undefined && object.order !== null) {
      message.order = Order.fromJSON(object.order);
    } else {
      message.order = undefined;
    }
    return message;
  },

  // Converts to a plain JSON-serializable object.
  toJSON(message: QueryOrderResponse): unknown {
    const obj: any = {};
    message.order !== undefined &&
      (obj.order = message.order ? Order.toJSON(message.order) : undefined);
    return obj;
  },

  // Builds a message from a deeply-partial object.
  fromPartial(object: DeepPartial<QueryOrderResponse>): QueryOrderResponse {
    const message = { ...baseQueryOrderResponse } as QueryOrderResponse;
    if (object.order !== undefined && object.order !== null) {
      message.order = Order.fromPartial(object.order);
    } else {
      message.order = undefined;
    }
    return message;
  },
};

const baseQueryOrdersResponse: object = {};

// Protobuf codec for QueryOrdersResponse (field 1: repeated orders, field 2: pagination).
export const QueryOrdersResponse = {
  encode(
    message: QueryOrdersResponse,
    writer: Writer = Writer.create()
  ): Writer {
    for (const v of message.orders) {
      Order.encode(v!, writer.uint32(10).fork()).ldelim();
    }
    if (message.pagination !== undefined) {
      PageResponse.encode(
        message.pagination,
        writer.uint32(18).fork()
      ).ldelim();
    }
    return writer;
  },

  decode(input: Reader | Uint8Array, length?: number): QueryOrdersResponse {
    const reader = input instanceof Uint8Array ? new Reader(input) : input;
    let end = length === undefined ?
reader.len : reader.pos + length; + const message = { ...baseQueryOrdersResponse } as QueryOrdersResponse; + message.orders = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.orders.push(Order.decode(reader, reader.uint32())); + break; + case 2: + message.pagination = PageResponse.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): QueryOrdersResponse { + const message = { ...baseQueryOrdersResponse } as QueryOrdersResponse; + message.orders = []; + if (object.orders !== undefined && object.orders !== null) { + for (const e of object.orders) { + message.orders.push(Order.fromJSON(e)); + } + } + if (object.pagination !== undefined && object.pagination !== null) { + message.pagination = PageResponse.fromJSON(object.pagination); + } else { + message.pagination = undefined; + } + return message; + }, + + toJSON(message: QueryOrdersResponse): unknown { + const obj: any = {}; + if (message.orders) { + obj.orders = message.orders.map((e) => (e ? Order.toJSON(e) : undefined)); + } else { + obj.orders = []; + } + message.pagination !== undefined && + (obj.pagination = message.pagination + ? 
PageResponse.toJSON(message.pagination) + : undefined); + return obj; + }, + + fromPartial(object: DeepPartial): QueryOrdersResponse { + const message = { ...baseQueryOrdersResponse } as QueryOrdersResponse; + message.orders = []; + if (object.orders !== undefined && object.orders !== null) { + for (const e of object.orders) { + message.orders.push(Order.fromPartial(e)); + } + } + if (object.pagination !== undefined && object.pagination !== null) { + message.pagination = PageResponse.fromPartial(object.pagination); + } else { + message.pagination = undefined; + } + return message; + }, +}; + +const baseQueryAllOrderRequest: object = {}; + +export const QueryAllOrderRequest = { + encode( + message: QueryAllOrderRequest, + writer: Writer = Writer.create() + ): Writer { + if (message.pagination !== undefined) { + PageRequest.encode(message.pagination, writer.uint32(10).fork()).ldelim(); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): QueryAllOrderRequest { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseQueryAllOrderRequest } as QueryAllOrderRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.pagination = PageRequest.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): QueryAllOrderRequest { + const message = { ...baseQueryAllOrderRequest } as QueryAllOrderRequest; + if (object.pagination !== undefined && object.pagination !== null) { + message.pagination = PageRequest.fromJSON(object.pagination); + } else { + message.pagination = undefined; + } + return message; + }, + + toJSON(message: QueryAllOrderRequest): unknown { + const obj: any = {}; + message.pagination !== undefined && + (obj.pagination = message.pagination + ? 
PageRequest.toJSON(message.pagination) + : undefined); + return obj; + }, + + fromPartial(object: DeepPartial): QueryAllOrderRequest { + const message = { ...baseQueryAllOrderRequest } as QueryAllOrderRequest; + if (object.pagination !== undefined && object.pagination !== null) { + message.pagination = PageRequest.fromPartial(object.pagination); + } else { + message.pagination = undefined; + } + return message; + }, +}; + +const baseQueryOrderOwnerRequest: object = { address: "" }; + +export const QueryOrderOwnerRequest = { + encode( + message: QueryOrderOwnerRequest, + writer: Writer = Writer.create() + ): Writer { + if (message.address !== "") { + writer.uint32(10).string(message.address); + } + if (message.pagination !== undefined) { + PageRequest.encode(message.pagination, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): QueryOrderOwnerRequest { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseQueryOrderOwnerRequest } as QueryOrderOwnerRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.address = reader.string(); + break; + case 2: + message.pagination = PageRequest.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): QueryOrderOwnerRequest { + const message = { ...baseQueryOrderOwnerRequest } as QueryOrderOwnerRequest; + if (object.address !== undefined && object.address !== null) { + message.address = String(object.address); + } else { + message.address = ""; + } + if (object.pagination !== undefined && object.pagination !== null) { + message.pagination = PageRequest.fromJSON(object.pagination); + } else { + message.pagination = undefined; + } + return message; + }, + + toJSON(message: QueryOrderOwnerRequest): unknown { + const obj: any = {}; + message.address !== undefined && (obj.address = message.address); + message.pagination !== undefined && + (obj.pagination = message.pagination + ? 
PageRequest.toJSON(message.pagination) + : undefined); + return obj; + }, + + fromPartial( + object: DeepPartial + ): QueryOrderOwnerRequest { + const message = { ...baseQueryOrderOwnerRequest } as QueryOrderOwnerRequest; + if (object.address !== undefined && object.address !== null) { + message.address = object.address; + } else { + message.address = ""; + } + if (object.pagination !== undefined && object.pagination !== null) { + message.pagination = PageRequest.fromPartial(object.pagination); + } else { + message.pagination = undefined; + } + return message; + }, +}; + +const baseQueryOrderOwnerUidsResponse: object = {}; + +export const QueryOrderOwnerUidsResponse = { + encode( + message: QueryOrderOwnerUidsResponse, + writer: Writer = Writer.create() + ): Writer { + if (message.orders !== undefined) { + Orders.encode(message.orders, writer.uint32(10).fork()).ldelim(); + } + if (message.pagination !== undefined) { + PageResponse.encode( + message.pagination, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: Reader | Uint8Array, + length?: number + ): QueryOrderOwnerUidsResponse { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseQueryOrderOwnerUidsResponse, + } as QueryOrderOwnerUidsResponse; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.orders = Orders.decode(reader, reader.uint32()); + break; + case 2: + message.pagination = PageResponse.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): QueryOrderOwnerUidsResponse { + const message = { + ...baseQueryOrderOwnerUidsResponse, + } as QueryOrderOwnerUidsResponse; + if (object.orders !== undefined && object.orders !== null) { + message.orders = Orders.fromJSON(object.orders); + } else { + message.orders = undefined; + } + if (object.pagination !== undefined && object.pagination !== null) { + message.pagination = PageResponse.fromJSON(object.pagination); + } else { + message.pagination = undefined; + } + return message; + }, + + toJSON(message: QueryOrderOwnerUidsResponse): unknown { + const obj: any = {}; + message.orders !== undefined && + (obj.orders = message.orders ? Orders.toJSON(message.orders) : undefined); + message.pagination !== undefined && + (obj.pagination = message.pagination + ? 
PageResponse.toJSON(message.pagination) + : undefined); + return obj; + }, + + fromPartial( + object: DeepPartial + ): QueryOrderOwnerUidsResponse { + const message = { + ...baseQueryOrderOwnerUidsResponse, + } as QueryOrderOwnerUidsResponse; + if (object.orders !== undefined && object.orders !== null) { + message.orders = Orders.fromPartial(object.orders); + } else { + message.orders = undefined; + } + if (object.pagination !== undefined && object.pagination !== null) { + message.pagination = PageResponse.fromPartial(object.pagination); + } else { + message.pagination = undefined; + } + return message; + }, +}; + +const baseQueryOrderOwnerPairRequest: object = { address: "", pair: "" }; + +export const QueryOrderOwnerPairRequest = { + encode( + message: QueryOrderOwnerPairRequest, + writer: Writer = Writer.create() + ): Writer { + if (message.address !== "") { + writer.uint32(10).string(message.address); + } + if (message.pair !== "") { + writer.uint32(18).string(message.pair); + } + if (message.pagination !== undefined) { + PageRequest.encode(message.pagination, writer.uint32(26).fork()).ldelim(); + } + return writer; + }, + + decode( + input: Reader | Uint8Array, + length?: number + ): QueryOrderOwnerPairRequest { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseQueryOrderOwnerPairRequest, + } as QueryOrderOwnerPairRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.address = reader.string(); + break; + case 2: + message.pair = reader.string(); + break; + case 3: + message.pagination = PageRequest.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): QueryOrderOwnerPairRequest { + const message = { + ...baseQueryOrderOwnerPairRequest, + } as QueryOrderOwnerPairRequest; + if (object.address !== undefined && object.address !== null) { + message.address = String(object.address); + } else { + message.address = ""; + } + if (object.pair !== undefined && object.pair !== null) { + message.pair = String(object.pair); + } else { + message.pair = ""; + } + if (object.pagination !== undefined && object.pagination !== null) { + message.pagination = PageRequest.fromJSON(object.pagination); + } else { + message.pagination = undefined; + } + return message; + }, + + toJSON(message: QueryOrderOwnerPairRequest): unknown { + const obj: any = {}; + message.address !== undefined && (obj.address = message.address); + message.pair !== undefined && (obj.pair = message.pair); + message.pagination !== undefined && + (obj.pagination = message.pagination + ? 
PageRequest.toJSON(message.pagination) + : undefined); + return obj; + }, + + fromPartial( + object: DeepPartial + ): QueryOrderOwnerPairRequest { + const message = { + ...baseQueryOrderOwnerPairRequest, + } as QueryOrderOwnerPairRequest; + if (object.address !== undefined && object.address !== null) { + message.address = object.address; + } else { + message.address = ""; + } + if (object.pair !== undefined && object.pair !== null) { + message.pair = object.pair; + } else { + message.pair = ""; + } + if (object.pagination !== undefined && object.pagination !== null) { + message.pagination = PageRequest.fromPartial(object.pagination); + } else { + message.pagination = undefined; + } + return message; + }, +}; + +const baseQueryOrderOwnerPairResponse: object = {}; + +export const QueryOrderOwnerPairResponse = { + encode( + message: QueryOrderOwnerPairResponse, + writer: Writer = Writer.create() + ): Writer { + for (const v of message.order) { + Order.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.pagination !== undefined) { + PageResponse.encode( + message.pagination, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: Reader | Uint8Array, + length?: number + ): QueryOrderOwnerPairResponse { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseQueryOrderOwnerPairResponse, + } as QueryOrderOwnerPairResponse; + message.order = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.order.push(Order.decode(reader, reader.uint32())); + break; + case 2: + message.pagination = PageResponse.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): QueryOrderOwnerPairResponse { + const message = { + ...baseQueryOrderOwnerPairResponse, + } as QueryOrderOwnerPairResponse; + message.order = []; + if (object.order !== undefined && object.order !== null) { + for (const e of object.order) { + message.order.push(Order.fromJSON(e)); + } + } + if (object.pagination !== undefined && object.pagination !== null) { + message.pagination = PageResponse.fromJSON(object.pagination); + } else { + message.pagination = undefined; + } + return message; + }, + + toJSON(message: QueryOrderOwnerPairResponse): unknown { + const obj: any = {}; + if (message.order) { + obj.order = message.order.map((e) => (e ? Order.toJSON(e) : undefined)); + } else { + obj.order = []; + } + message.pagination !== undefined && + (obj.pagination = message.pagination + ? 
PageResponse.toJSON(message.pagination) + : undefined); + return obj; + }, + + fromPartial( + object: DeepPartial + ): QueryOrderOwnerPairResponse { + const message = { + ...baseQueryOrderOwnerPairResponse, + } as QueryOrderOwnerPairResponse; + message.order = []; + if (object.order !== undefined && object.order !== null) { + for (const e of object.order) { + message.order.push(Order.fromPartial(e)); + } + } + if (object.pagination !== undefined && object.pagination !== null) { + message.pagination = PageResponse.fromPartial(object.pagination); + } else { + message.pagination = undefined; + } + return message; + }, +}; + +const baseQueryBookRequest: object = { denomA: "", denomB: "", orderType: "" }; + +export const QueryBookRequest = { + encode(message: QueryBookRequest, writer: Writer = Writer.create()): Writer { + if (message.denomA !== "") { + writer.uint32(10).string(message.denomA); + } + if (message.denomB !== "") { + writer.uint32(18).string(message.denomB); + } + if (message.orderType !== "") { + writer.uint32(26).string(message.orderType); + } + if (message.pagination !== undefined) { + PageRequest.encode(message.pagination, writer.uint32(34).fork()).ldelim(); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): QueryBookRequest { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseQueryBookRequest } as QueryBookRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.denomA = reader.string(); + break; + case 2: + message.denomB = reader.string(); + break; + case 3: + message.orderType = reader.string(); + break; + case 4: + message.pagination = PageRequest.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): QueryBookRequest { + const message = { ...baseQueryBookRequest } as QueryBookRequest; + if (object.denomA !== undefined && object.denomA !== null) { + message.denomA = String(object.denomA); + } else { + message.denomA = ""; + } + if (object.denomB !== undefined && object.denomB !== null) { + message.denomB = String(object.denomB); + } else { + message.denomB = ""; + } + if (object.orderType !== undefined && object.orderType !== null) { + message.orderType = String(object.orderType); + } else { + message.orderType = ""; + } + if (object.pagination !== undefined && object.pagination !== null) { + message.pagination = PageRequest.fromJSON(object.pagination); + } else { + message.pagination = undefined; + } + return message; + }, + + toJSON(message: QueryBookRequest): unknown { + const obj: any = {}; + message.denomA !== undefined && (obj.denomA = message.denomA); + message.denomB !== undefined && (obj.denomB = message.denomB); + message.orderType !== undefined && (obj.orderType = message.orderType); + message.pagination !== undefined && + (obj.pagination = message.pagination + ? 
PageRequest.toJSON(message.pagination) + : undefined); + return obj; + }, + + fromPartial(object: DeepPartial): QueryBookRequest { + const message = { ...baseQueryBookRequest } as QueryBookRequest; + if (object.denomA !== undefined && object.denomA !== null) { + message.denomA = object.denomA; + } else { + message.denomA = ""; + } + if (object.denomB !== undefined && object.denomB !== null) { + message.denomB = object.denomB; + } else { + message.denomB = ""; + } + if (object.orderType !== undefined && object.orderType !== null) { + message.orderType = object.orderType; + } else { + message.orderType = ""; + } + if (object.pagination !== undefined && object.pagination !== null) { + message.pagination = PageRequest.fromPartial(object.pagination); + } else { + message.pagination = undefined; + } + return message; + }, +}; + +const baseQueryBookResponse: object = {}; + +export const QueryBookResponse = { + encode(message: QueryBookResponse, writer: Writer = Writer.create()): Writer { + for (const v of message.book) { + OrderResponse.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.pagination !== undefined) { + PageResponse.encode( + message.pagination, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): QueryBookResponse { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseQueryBookResponse } as QueryBookResponse; + message.book = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.book.push(OrderResponse.decode(reader, reader.uint32())); + break; + case 2: + message.pagination = PageResponse.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): QueryBookResponse { + const message = { ...baseQueryBookResponse } as QueryBookResponse; + message.book = []; + if (object.book !== undefined && object.book !== null) { + for (const e of object.book) { + message.book.push(OrderResponse.fromJSON(e)); + } + } + if (object.pagination !== undefined && object.pagination !== null) { + message.pagination = PageResponse.fromJSON(object.pagination); + } else { + message.pagination = undefined; + } + return message; + }, + + toJSON(message: QueryBookResponse): unknown { + const obj: any = {}; + if (message.book) { + obj.book = message.book.map((e) => + e ? OrderResponse.toJSON(e) : undefined + ); + } else { + obj.book = []; + } + message.pagination !== undefined && + (obj.pagination = message.pagination + ? 
PageResponse.toJSON(message.pagination)
        : undefined);
    return obj;
  },

  // Builds a QueryBookResponse from a deeply-partial plain object,
  // converting nested book entries and defaulting omitted fields.
  fromPartial(object: DeepPartial<QueryBookResponse>): QueryBookResponse {
    const message = { ...baseQueryBookResponse } as QueryBookResponse;
    message.book = [];
    if (object.book !== undefined && object.book !== null) {
      for (const e of object.book) {
        message.book.push(OrderResponse.fromPartial(e));
      }
    }
    if (object.pagination !== undefined && object.pagination !== null) {
      message.pagination = PageResponse.fromPartial(object.pagination);
    } else {
      message.pagination = undefined;
    }
    return message;
  },
};

const baseQueryBookendsRequest: object = {
  coinA: "",
  coinB: "",
  orderType: "",
  rate: "",
};

// Protobuf codec for QueryBookendsRequest
// (fields: coinA, coinB, orderType, repeated rate — all strings).
export const QueryBookendsRequest = {
  // Writes `message` in protobuf wire format; default (empty) values are omitted.
  encode(
    message: QueryBookendsRequest,
    writer: Writer = Writer.create()
  ): Writer {
    if (message.coinA !== "") {
      writer.uint32(10).string(message.coinA);
    }
    if (message.coinB !== "") {
      writer.uint32(18).string(message.coinB);
    }
    if (message.orderType !== "") {
      writer.uint32(26).string(message.orderType);
    }
    for (const v of message.rate) {
      writer.uint32(34).string(v!);
    }
    return writer;
  },

  // Reads a QueryBookendsRequest from wire format; unknown fields are skipped.
  decode(input: Reader | Uint8Array, length?: number): QueryBookendsRequest {
    const reader = input instanceof Uint8Array ? new Reader(input) : input;
    let end = length === undefined ? reader.len : reader.pos + length;
    const message = { ...baseQueryBookendsRequest } as QueryBookendsRequest;
    message.rate = [];
    while (reader.pos < end) {
      const tag = reader.uint32();
      switch (tag >>> 3) {
        case 1:
          message.coinA = reader.string();
          break;
        case 2:
          message.coinB = reader.string();
          break;
        case 3:
          message.orderType = reader.string();
          break;
        case 4:
          message.rate.push(reader.string());
          break;
        default:
          reader.skipType(tag & 7);
          break;
      }
    }
    return message;
  },

  // Parses an untyped JSON object, coercing present fields and defaulting the rest.
  fromJSON(object: any): QueryBookendsRequest {
    const message = { ...baseQueryBookendsRequest } as QueryBookendsRequest;
    message.rate = [];
    if (object.coinA !== undefined && object.coinA !== null) {
      message.coinA = String(object.coinA);
    } else {
      message.coinA = "";
    }
    if (object.coinB !== undefined && object.coinB !== null) {
      message.coinB = String(object.coinB);
    } else {
      message.coinB = "";
    }
    if (object.orderType !== undefined && object.orderType !== null) {
      message.orderType = String(object.orderType);
    } else {
      message.orderType = "";
    }
    if (object.rate !== undefined && object.rate !== null) {
      for (const e of object.rate) {
        message.rate.push(String(e));
      }
    }
    return message;
  },

  // Converts to a plain JSON-serializable object.
  toJSON(message: QueryBookendsRequest): unknown {
    const obj: any = {};
    message.coinA !== undefined && (obj.coinA = message.coinA);
    message.coinB !== undefined && (obj.coinB = message.coinB);
    message.orderType !== undefined && (obj.orderType = message.orderType);
    if (message.rate) {
      obj.rate = message.rate.map((e) => e);
    } else {
      obj.rate = [];
    }
    return obj;
  },

  // Builds a message from a deeply-partial object, defaulting omitted fields.
  fromPartial(object: DeepPartial<QueryBookendsRequest>): QueryBookendsRequest {
    const message = { ...baseQueryBookendsRequest } as QueryBookendsRequest;
    message.rate = [];
    if (object.coinA !== undefined && object.coinA !== null) {
      message.coinA = object.coinA;
    } else {
      message.coinA = "";
    }
    if (object.coinB !== undefined && object.coinB !== null) {
      message.coinB = object.coinB;
    } else {
      message.coinB = "";
    }
    if (object.orderType !== undefined && object.orderType !== null) {
      message.orderType = object.orderType;
    } else {
      message.orderType = "";
    }
    if (object.rate !== undefined && object.rate !== null) {
      for (const e of object.rate) {
        message.rate.push(e);
      }
    }
    return message;
  },
};

const baseQueryBookendsResponse: object = {
  coinA: "",
  coinB: "",
  orderType: "",
  rate: "",
  prev: 0,
  next: 0,
};

// Protobuf codec for QueryBookendsResponse
// (fields: coinA, coinB, orderType, repeated rate, prev/next uint64).
export const QueryBookendsResponse = {
  encode(
    message: QueryBookendsResponse,
    writer: Writer = Writer.create()
  ): Writer {
    if (message.coinA !== "") {
      writer.uint32(10).string(message.coinA);
    }
    if (message.coinB !== "") {
      writer.uint32(18).string(message.coinB);
    }
    if (message.orderType !== "") {
      writer.uint32(26).string(message.orderType);
    }
    for (const v of message.rate) {
      writer.uint32(34).string(v!);
    }
    if (message.prev !== 0) {
      writer.uint32(40).uint64(message.prev);
    }
    if (message.next !== 0) {
      writer.uint32(48).uint64(message.next);
    }
    return writer;
  },

  decode(input: Reader | Uint8Array, length?: number): QueryBookendsResponse {
    const reader = input instanceof Uint8Array ? new Reader(input) : input;
    let end = length === undefined ?
reader.len : reader.pos + length; + const message = { ...baseQueryBookendsResponse } as QueryBookendsResponse; + message.rate = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.coinA = reader.string(); + break; + case 2: + message.coinB = reader.string(); + break; + case 3: + message.orderType = reader.string(); + break; + case 4: + message.rate.push(reader.string()); + break; + case 5: + message.prev = longToNumber(reader.uint64() as Long); + break; + case 6: + message.next = longToNumber(reader.uint64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): QueryBookendsResponse { + const message = { ...baseQueryBookendsResponse } as QueryBookendsResponse; + message.rate = []; + if (object.coinA !== undefined && object.coinA !== null) { + message.coinA = String(object.coinA); + } else { + message.coinA = ""; + } + if (object.coinB !== undefined && object.coinB !== null) { + message.coinB = String(object.coinB); + } else { + message.coinB = ""; + } + if (object.orderType !== undefined && object.orderType !== null) { + message.orderType = String(object.orderType); + } else { + message.orderType = ""; + } + if (object.rate !== undefined && object.rate !== null) { + for (const e of object.rate) { + message.rate.push(String(e)); + } + } + if (object.prev !== undefined && object.prev !== null) { + message.prev = Number(object.prev); + } else { + message.prev = 0; + } + if (object.next !== undefined && object.next !== null) { + message.next = Number(object.next); + } else { + message.next = 0; + } + return message; + }, + + toJSON(message: QueryBookendsResponse): unknown { + const obj: any = {}; + message.coinA !== undefined && (obj.coinA = message.coinA); + message.coinB !== undefined && (obj.coinB = message.coinB); + message.orderType !== undefined && (obj.orderType = message.orderType); + if (message.rate) { + obj.rate = message.rate.map((e) 
=> e); + } else { + obj.rate = []; + } + message.prev !== undefined && (obj.prev = message.prev); + message.next !== undefined && (obj.next = message.next); + return obj; + }, + + fromPartial( + object: DeepPartial + ): QueryBookendsResponse { + const message = { ...baseQueryBookendsResponse } as QueryBookendsResponse; + message.rate = []; + if (object.coinA !== undefined && object.coinA !== null) { + message.coinA = object.coinA; + } else { + message.coinA = ""; + } + if (object.coinB !== undefined && object.coinB !== null) { + message.coinB = object.coinB; + } else { + message.coinB = ""; + } + if (object.orderType !== undefined && object.orderType !== null) { + message.orderType = object.orderType; + } else { + message.orderType = ""; + } + if (object.rate !== undefined && object.rate !== null) { + for (const e of object.rate) { + message.rate.push(e); + } + } + if (object.prev !== undefined && object.prev !== null) { + message.prev = object.prev; + } else { + message.prev = 0; + } + if (object.next !== undefined && object.next !== null) { + message.next = object.next; + } else { + message.next = 0; + } + return message; + }, +}; + +const baseQueryHistoryRequest: object = { pair: "", length: "" }; + +export const QueryHistoryRequest = { + encode( + message: QueryHistoryRequest, + writer: Writer = Writer.create() + ): Writer { + if (message.pair !== "") { + writer.uint32(10).string(message.pair); + } + if (message.length !== "") { + writer.uint32(18).string(message.length); + } + if (message.pagination !== undefined) { + PageRequest.encode(message.pagination, writer.uint32(26).fork()).ldelim(); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): QueryHistoryRequest { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseQueryHistoryRequest } as QueryHistoryRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.pair = reader.string(); + break; + case 2: + message.length = reader.string(); + break; + case 3: + message.pagination = PageRequest.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): QueryHistoryRequest { + const message = { ...baseQueryHistoryRequest } as QueryHistoryRequest; + if (object.pair !== undefined && object.pair !== null) { + message.pair = String(object.pair); + } else { + message.pair = ""; + } + if (object.length !== undefined && object.length !== null) { + message.length = String(object.length); + } else { + message.length = ""; + } + if (object.pagination !== undefined && object.pagination !== null) { + message.pagination = PageRequest.fromJSON(object.pagination); + } else { + message.pagination = undefined; + } + return message; + }, + + toJSON(message: QueryHistoryRequest): unknown { + const obj: any = {}; + message.pair !== undefined && (obj.pair = message.pair); + message.length !== undefined && (obj.length = message.length); + message.pagination !== undefined && + (obj.pagination = message.pagination + ? 
PageRequest.toJSON(message.pagination) + : undefined); + return obj; + }, + + fromPartial(object: DeepPartial): QueryHistoryRequest { + const message = { ...baseQueryHistoryRequest } as QueryHistoryRequest; + if (object.pair !== undefined && object.pair !== null) { + message.pair = object.pair; + } else { + message.pair = ""; + } + if (object.length !== undefined && object.length !== null) { + message.length = object.length; + } else { + message.length = ""; + } + if (object.pagination !== undefined && object.pagination !== null) { + message.pagination = PageRequest.fromPartial(object.pagination); + } else { + message.pagination = undefined; + } + return message; + }, +}; + +const baseQueryHistoryResponse: object = {}; + +export const QueryHistoryResponse = { + encode( + message: QueryHistoryResponse, + writer: Writer = Writer.create() + ): Writer { + for (const v of message.history) { + OrderResponse.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.pagination !== undefined) { + PageResponse.encode( + message.pagination, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): QueryHistoryResponse { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseQueryHistoryResponse } as QueryHistoryResponse; + message.history = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.history.push(OrderResponse.decode(reader, reader.uint32())); + break; + case 2: + message.pagination = PageResponse.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): QueryHistoryResponse { + const message = { ...baseQueryHistoryResponse } as QueryHistoryResponse; + message.history = []; + if (object.history !== undefined && object.history !== null) { + for (const e of object.history) { + message.history.push(OrderResponse.fromJSON(e)); + } + } + if (object.pagination !== undefined && object.pagination !== null) { + message.pagination = PageResponse.fromJSON(object.pagination); + } else { + message.pagination = undefined; + } + return message; + }, + + toJSON(message: QueryHistoryResponse): unknown { + const obj: any = {}; + if (message.history) { + obj.history = message.history.map((e) => + e ? OrderResponse.toJSON(e) : undefined + ); + } else { + obj.history = []; + } + message.pagination !== undefined && + (obj.pagination = message.pagination + ? 
PageResponse.toJSON(message.pagination) + : undefined); + return obj; + }, + + fromPartial(object: DeepPartial): QueryHistoryResponse { + const message = { ...baseQueryHistoryResponse } as QueryHistoryResponse; + message.history = []; + if (object.history !== undefined && object.history !== null) { + for (const e of object.history) { + message.history.push(OrderResponse.fromPartial(e)); + } + } + if (object.pagination !== undefined && object.pagination !== null) { + message.pagination = PageResponse.fromPartial(object.pagination); + } else { + message.pagination = undefined; + } + return message; + }, +}; + +const baseQueryQuoteRequest: object = { + denomAsk: "", + denomBid: "", + denomAmount: "", + amount: "", +}; + +export const QueryQuoteRequest = { + encode(message: QueryQuoteRequest, writer: Writer = Writer.create()): Writer { + if (message.denomAsk !== "") { + writer.uint32(10).string(message.denomAsk); + } + if (message.denomBid !== "") { + writer.uint32(18).string(message.denomBid); + } + if (message.denomAmount !== "") { + writer.uint32(26).string(message.denomAmount); + } + if (message.amount !== "") { + writer.uint32(34).string(message.amount); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): QueryQuoteRequest { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseQueryQuoteRequest } as QueryQuoteRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.denomAsk = reader.string(); + break; + case 2: + message.denomBid = reader.string(); + break; + case 3: + message.denomAmount = reader.string(); + break; + case 4: + message.amount = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): QueryQuoteRequest { + const message = { ...baseQueryQuoteRequest } as QueryQuoteRequest; + if (object.denomAsk !== undefined && object.denomAsk !== null) { + message.denomAsk = String(object.denomAsk); + } else { + message.denomAsk = ""; + } + if (object.denomBid !== undefined && object.denomBid !== null) { + message.denomBid = String(object.denomBid); + } else { + message.denomBid = ""; + } + if (object.denomAmount !== undefined && object.denomAmount !== null) { + message.denomAmount = String(object.denomAmount); + } else { + message.denomAmount = ""; + } + if (object.amount !== undefined && object.amount !== null) { + message.amount = String(object.amount); + } else { + message.amount = ""; + } + return message; + }, + + toJSON(message: QueryQuoteRequest): unknown { + const obj: any = {}; + message.denomAsk !== undefined && (obj.denomAsk = message.denomAsk); + message.denomBid !== undefined && (obj.denomBid = message.denomBid); + message.denomAmount !== undefined && + (obj.denomAmount = message.denomAmount); + message.amount !== undefined && (obj.amount = message.amount); + return obj; + }, + + fromPartial(object: DeepPartial): QueryQuoteRequest { + const message = { ...baseQueryQuoteRequest } as QueryQuoteRequest; + if (object.denomAsk !== undefined && object.denomAsk !== null) { + message.denomAsk = object.denomAsk; + } else { + message.denomAsk = ""; + } + if (object.denomBid !== undefined && object.denomBid !== null) { + message.denomBid = 
object.denomBid; + } else { + message.denomBid = ""; + } + if (object.denomAmount !== undefined && object.denomAmount !== null) { + message.denomAmount = object.denomAmount; + } else { + message.denomAmount = ""; + } + if (object.amount !== undefined && object.amount !== null) { + message.amount = object.amount; + } else { + message.amount = ""; + } + return message; + }, +}; + +const baseQueryQuoteResponse: object = { denom: "", amount: "" }; + +export const QueryQuoteResponse = { + encode( + message: QueryQuoteResponse, + writer: Writer = Writer.create() + ): Writer { + if (message.denom !== "") { + writer.uint32(10).string(message.denom); + } + if (message.amount !== "") { + writer.uint32(18).string(message.amount); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): QueryQuoteResponse { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseQueryQuoteResponse } as QueryQuoteResponse; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.denom = reader.string(); + break; + case 2: + message.amount = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): QueryQuoteResponse { + const message = { ...baseQueryQuoteResponse } as QueryQuoteResponse; + if (object.denom !== undefined && object.denom !== null) { + message.denom = String(object.denom); + } else { + message.denom = ""; + } + if (object.amount !== undefined && object.amount !== null) { + message.amount = String(object.amount); + } else { + message.amount = ""; + } + return message; + }, + + toJSON(message: QueryQuoteResponse): unknown { + const obj: any = {}; + message.denom !== undefined && (obj.denom = message.denom); + message.amount !== undefined && (obj.amount = message.amount); + return obj; + }, + + fromPartial(object: DeepPartial): 
QueryQuoteResponse { + const message = { ...baseQueryQuoteResponse } as QueryQuoteResponse; + if (object.denom !== undefined && object.denom !== null) { + message.denom = object.denom; + } else { + message.denom = ""; + } + if (object.amount !== undefined && object.amount !== null) { + message.amount = object.amount; + } else { + message.amount = ""; + } + return message; + }, +}; + +/** Query defines the gRPC querier service. */ +export interface Query { + /** Parameters queries the parameters of the module. */ + Params(request: QueryParamsRequest): Promise; + /** Queries total burned. */ + Burned(request: QueryBurnedRequest): Promise; + /** Queries a Pool by index. */ + Pool(request: QueryGetPoolRequest): Promise; + /** Queries a list of Pool items. */ + PoolAll(request: QueryAllPoolRequest): Promise; + /** Queries a Volume by index. */ + Volume(request: QueryVolumeRequest): Promise; + /** Queries all Volumes. */ + VolumeAll(request: QueryAllVolumeRequest): Promise; + /** Queries a Drop by index. */ + Drop(request: QueryDropRequest): Promise; + /** Queries a Drop by index. */ + DropAmounts( + request: QueryDropAmountsRequest + ): Promise; + /** Queries a Drop by index. */ + DropCoin(request: QueryDropCoinRequest): Promise; + /** Converts drops to coin amounts */ + DropsToCoins( + request: QueryDropsToCoinsRequest + ): Promise; + /** Queries a Drop by index. */ + DropPairs(request: QueryDropPairsRequest): Promise; + /** Queries a Drop by index. */ + DropOwnerPair( + request: QueryDropOwnerPairRequest + ): Promise; + /** Queries a list of Drop items. */ + DropAll(request: QueryAllDropRequest): Promise; + /** Queries a Member by index. */ + Member(request: QueryGetMemberRequest): Promise; + /** Queries a list of Member items. */ + MemberAll(request: QueryAllMemberRequest): Promise; + /** Queries a Burnings by index. */ + Burnings(request: QueryGetBurningsRequest): Promise; + /** Queries a list of Burnings items. 
*/ + BurningsAll( + request: QueryAllBurningsRequest + ): Promise; + /** Queries a Order by index. */ + Order(request: QueryOrderRequest): Promise; + /** Queries a list of Order items. */ + OrderAll(request: QueryAllOrderRequest): Promise; + /** Queries a list of Order items. */ + OrderOwner(request: QueryOrderOwnerRequest): Promise; + /** Queries a list of Order items. */ + OrderOwnerUids( + request: QueryOrderOwnerRequest + ): Promise; + /** Queries a list of Book items. */ + Book(request: QueryBookRequest): Promise; + /** Queries a list of Bookends items. */ + Bookends(request: QueryBookendsRequest): Promise; + /** Queries pool trade history. */ + History(request: QueryHistoryRequest): Promise; + /** Queries pool trade history. */ + Quote(request: QueryQuoteRequest): Promise; +} + +export class QueryClientImpl implements Query { + private readonly rpc: Rpc; + constructor(rpc: Rpc) { + this.rpc = rpc; + } + Params(request: QueryParamsRequest): Promise { + const data = QueryParamsRequest.encode(request).finish(); + const promise = this.rpc.request( + "pendulumlabs.market.market.Query", + "Params", + data + ); + return promise.then((data) => QueryParamsResponse.decode(new Reader(data))); + } + + Burned(request: QueryBurnedRequest): Promise { + const data = QueryBurnedRequest.encode(request).finish(); + const promise = this.rpc.request( + "pendulumlabs.market.market.Query", + "Burned", + data + ); + return promise.then((data) => QueryBurnedResponse.decode(new Reader(data))); + } + + Pool(request: QueryGetPoolRequest): Promise { + const data = QueryGetPoolRequest.encode(request).finish(); + const promise = this.rpc.request( + "pendulumlabs.market.market.Query", + "Pool", + data + ); + return promise.then((data) => + QueryGetPoolResponse.decode(new Reader(data)) + ); + } + + PoolAll(request: QueryAllPoolRequest): Promise { + const data = QueryAllPoolRequest.encode(request).finish(); + const promise = this.rpc.request( + "pendulumlabs.market.market.Query", + "PoolAll", 
+ data + ); + return promise.then((data) => + QueryAllPoolResponse.decode(new Reader(data)) + ); + } + + Volume(request: QueryVolumeRequest): Promise { + const data = QueryVolumeRequest.encode(request).finish(); + const promise = this.rpc.request( + "pendulumlabs.market.market.Query", + "Volume", + data + ); + return promise.then((data) => QueryVolumeResponse.decode(new Reader(data))); + } + + VolumeAll(request: QueryAllVolumeRequest): Promise { + const data = QueryAllVolumeRequest.encode(request).finish(); + const promise = this.rpc.request( + "pendulumlabs.market.market.Query", + "VolumeAll", + data + ); + return promise.then((data) => + QueryAllVolumeResponse.decode(new Reader(data)) + ); + } + + Drop(request: QueryDropRequest): Promise { + const data = QueryDropRequest.encode(request).finish(); + const promise = this.rpc.request( + "pendulumlabs.market.market.Query", + "Drop", + data + ); + return promise.then((data) => QueryDropResponse.decode(new Reader(data))); + } + + DropAmounts( + request: QueryDropAmountsRequest + ): Promise { + const data = QueryDropAmountsRequest.encode(request).finish(); + const promise = this.rpc.request( + "pendulumlabs.market.market.Query", + "DropAmounts", + data + ); + return promise.then((data) => + QueryDropAmountsResponse.decode(new Reader(data)) + ); + } + + DropCoin(request: QueryDropCoinRequest): Promise { + const data = QueryDropCoinRequest.encode(request).finish(); + const promise = this.rpc.request( + "pendulumlabs.market.market.Query", + "DropCoin", + data + ); + return promise.then((data) => + QueryDropCoinResponse.decode(new Reader(data)) + ); + } + + DropsToCoins( + request: QueryDropsToCoinsRequest + ): Promise { + const data = QueryDropsToCoinsRequest.encode(request).finish(); + const promise = this.rpc.request( + "pendulumlabs.market.market.Query", + "DropsToCoins", + data + ); + return promise.then((data) => + QueryDropAmountsResponse.decode(new Reader(data)) + ); + } + + DropPairs(request: 
QueryDropPairsRequest): Promise { + const data = QueryDropPairsRequest.encode(request).finish(); + const promise = this.rpc.request( + "pendulumlabs.market.market.Query", + "DropPairs", + data + ); + return promise.then((data) => + QueryDropPairsResponse.decode(new Reader(data)) + ); + } + + DropOwnerPair( + request: QueryDropOwnerPairRequest + ): Promise { + const data = QueryDropOwnerPairRequest.encode(request).finish(); + const promise = this.rpc.request( + "pendulumlabs.market.market.Query", + "DropOwnerPair", + data + ); + return promise.then((data) => QueryDropsResponse.decode(new Reader(data))); + } + + DropAll(request: QueryAllDropRequest): Promise { + const data = QueryAllDropRequest.encode(request).finish(); + const promise = this.rpc.request( + "pendulumlabs.market.market.Query", + "DropAll", + data + ); + return promise.then((data) => QueryDropsResponse.decode(new Reader(data))); + } + + Member(request: QueryGetMemberRequest): Promise { + const data = QueryGetMemberRequest.encode(request).finish(); + const promise = this.rpc.request( + "pendulumlabs.market.market.Query", + "Member", + data + ); + return promise.then((data) => + QueryGetMemberResponse.decode(new Reader(data)) + ); + } + + MemberAll(request: QueryAllMemberRequest): Promise { + const data = QueryAllMemberRequest.encode(request).finish(); + const promise = this.rpc.request( + "pendulumlabs.market.market.Query", + "MemberAll", + data + ); + return promise.then((data) => + QueryAllMemberResponse.decode(new Reader(data)) + ); + } + + Burnings( + request: QueryGetBurningsRequest + ): Promise { + const data = QueryGetBurningsRequest.encode(request).finish(); + const promise = this.rpc.request( + "pendulumlabs.market.market.Query", + "Burnings", + data + ); + return promise.then((data) => + QueryGetBurningsResponse.decode(new Reader(data)) + ); + } + + BurningsAll( + request: QueryAllBurningsRequest + ): Promise { + const data = QueryAllBurningsRequest.encode(request).finish(); + const promise = 
this.rpc.request( + "pendulumlabs.market.market.Query", + "BurningsAll", + data + ); + return promise.then((data) => + QueryAllBurningsResponse.decode(new Reader(data)) + ); + } + + Order(request: QueryOrderRequest): Promise { + const data = QueryOrderRequest.encode(request).finish(); + const promise = this.rpc.request( + "pendulumlabs.market.market.Query", + "Order", + data + ); + return promise.then((data) => QueryOrderResponse.decode(new Reader(data))); + } + + OrderAll(request: QueryAllOrderRequest): Promise { + const data = QueryAllOrderRequest.encode(request).finish(); + const promise = this.rpc.request( + "pendulumlabs.market.market.Query", + "OrderAll", + data + ); + return promise.then((data) => QueryOrdersResponse.decode(new Reader(data))); + } + + OrderOwner(request: QueryOrderOwnerRequest): Promise { + const data = QueryOrderOwnerRequest.encode(request).finish(); + const promise = this.rpc.request( + "pendulumlabs.market.market.Query", + "OrderOwner", + data + ); + return promise.then((data) => QueryOrdersResponse.decode(new Reader(data))); + } + + OrderOwnerUids( + request: QueryOrderOwnerRequest + ): Promise { + const data = QueryOrderOwnerRequest.encode(request).finish(); + const promise = this.rpc.request( + "pendulumlabs.market.market.Query", + "OrderOwnerUids", + data + ); + return promise.then((data) => + QueryOrderOwnerUidsResponse.decode(new Reader(data)) + ); + } + + Book(request: QueryBookRequest): Promise { + const data = QueryBookRequest.encode(request).finish(); + const promise = this.rpc.request( + "pendulumlabs.market.market.Query", + "Book", + data + ); + return promise.then((data) => QueryBookResponse.decode(new Reader(data))); + } + + Bookends(request: QueryBookendsRequest): Promise { + const data = QueryBookendsRequest.encode(request).finish(); + const promise = this.rpc.request( + "pendulumlabs.market.market.Query", + "Bookends", + data + ); + return promise.then((data) => + QueryBookendsResponse.decode(new Reader(data)) + ); + } + 
+ History(request: QueryHistoryRequest): Promise { + const data = QueryHistoryRequest.encode(request).finish(); + const promise = this.rpc.request( + "pendulumlabs.market.market.Query", + "History", + data + ); + return promise.then((data) => + QueryHistoryResponse.decode(new Reader(data)) + ); + } + + Quote(request: QueryQuoteRequest): Promise { + const data = QueryQuoteRequest.encode(request).finish(); + const promise = this.rpc.request( + "pendulumlabs.market.market.Query", + "Quote", + data + ); + return promise.then((data) => QueryQuoteResponse.decode(new Reader(data))); + } +} + +interface Rpc { + request( + service: string, + method: string, + data: Uint8Array + ): Promise; +} + +declare var self: any | undefined; +declare var window: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = Date | Function | Uint8Array | string | number | undefined; +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? 
{ [K in keyof T]?: DeepPartial } + : Partial; + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (util.Long !== Long) { + util.Long = Long as any; + configure(); +} diff --git a/vue/src/store/generated/market/pendulumlabs.market.market/module/types/market/tx.ts b/vue/src/store/generated/market/pendulumlabs.market.market/module/types/market/tx.ts new file mode 100644 index 00000000..7bfb2278 --- /dev/null +++ b/vue/src/store/generated/market/pendulumlabs.market.market/module/types/market/tx.ts @@ -0,0 +1,1165 @@ +/* eslint-disable */ +import { Reader, util, configure, Writer } from "protobufjs/minimal"; +import * as Long from "long"; + +export const protobufPackage = "pendulumlabs.market.market"; + +export interface MsgCreatePool { + creator: string; + coinA: string; + coinB: string; +} + +export interface MsgCreatePoolResponse {} + +export interface MsgCreateDrop { + creator: string; + pair: string; + drops: string; +} + +export interface MsgCreateDropResponse {} + +export interface MsgRedeemDrop { + creator: string; + uid: string; +} + +export interface MsgRedeemDropResponse {} + +export interface MsgCreateOrder { + creator: string; + denomAsk: string; + denomBid: string; + orderType: string; + amount: string; + rate: string[]; + prev: string; + next: string; +} + +export interface MsgCreateOrderResponse { + uid: number; +} + +export interface MsgCancelOrder { + creator: string; + uid: string; +} + +export interface MsgCancelOrderResponse {} + +export interface MsgMarketOrder { + creator: string; + denomAsk: string; + amountAsk: string; + denomBid: string; + amountBid: string; + /** Slippage is percentage based on (parameter / 10000), 9999 representing as 99.99% */ + slippage: string; +} + +export interface MsgMarketOrderResponse { + amountBid: string; + amountAsk: string; + slippage: string; +} + +const 
baseMsgCreatePool: object = { creator: "", coinA: "", coinB: "" }; + +export const MsgCreatePool = { + encode(message: MsgCreatePool, writer: Writer = Writer.create()): Writer { + if (message.creator !== "") { + writer.uint32(10).string(message.creator); + } + if (message.coinA !== "") { + writer.uint32(18).string(message.coinA); + } + if (message.coinB !== "") { + writer.uint32(26).string(message.coinB); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): MsgCreatePool { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseMsgCreatePool } as MsgCreatePool; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.creator = reader.string(); + break; + case 2: + message.coinA = reader.string(); + break; + case 3: + message.coinB = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): MsgCreatePool { + const message = { ...baseMsgCreatePool } as MsgCreatePool; + if (object.creator !== undefined && object.creator !== null) { + message.creator = String(object.creator); + } else { + message.creator = ""; + } + if (object.coinA !== undefined && object.coinA !== null) { + message.coinA = String(object.coinA); + } else { + message.coinA = ""; + } + if (object.coinB !== undefined && object.coinB !== null) { + message.coinB = String(object.coinB); + } else { + message.coinB = ""; + } + return message; + }, + + toJSON(message: MsgCreatePool): unknown { + const obj: any = {}; + message.creator !== undefined && (obj.creator = message.creator); + message.coinA !== undefined && (obj.coinA = message.coinA); + message.coinB !== undefined && (obj.coinB = message.coinB); + return obj; + }, + + fromPartial(object: DeepPartial): MsgCreatePool { + const message = { ...baseMsgCreatePool } as MsgCreatePool; + if 
(object.creator !== undefined && object.creator !== null) { + message.creator = object.creator; + } else { + message.creator = ""; + } + if (object.coinA !== undefined && object.coinA !== null) { + message.coinA = object.coinA; + } else { + message.coinA = ""; + } + if (object.coinB !== undefined && object.coinB !== null) { + message.coinB = object.coinB; + } else { + message.coinB = ""; + } + return message; + }, +}; + +const baseMsgCreatePoolResponse: object = {}; + +export const MsgCreatePoolResponse = { + encode(_: MsgCreatePoolResponse, writer: Writer = Writer.create()): Writer { + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): MsgCreatePoolResponse { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseMsgCreatePoolResponse } as MsgCreatePoolResponse; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(_: any): MsgCreatePoolResponse { + const message = { ...baseMsgCreatePoolResponse } as MsgCreatePoolResponse; + return message; + }, + + toJSON(_: MsgCreatePoolResponse): unknown { + const obj: any = {}; + return obj; + }, + + fromPartial(_: DeepPartial): MsgCreatePoolResponse { + const message = { ...baseMsgCreatePoolResponse } as MsgCreatePoolResponse; + return message; + }, +}; + +const baseMsgCreateDrop: object = { creator: "", pair: "", drops: "" }; + +export const MsgCreateDrop = { + encode(message: MsgCreateDrop, writer: Writer = Writer.create()): Writer { + if (message.creator !== "") { + writer.uint32(10).string(message.creator); + } + if (message.pair !== "") { + writer.uint32(18).string(message.pair); + } + if (message.drops !== "") { + writer.uint32(26).string(message.drops); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): MsgCreateDrop { + const 
reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseMsgCreateDrop } as MsgCreateDrop; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.creator = reader.string(); + break; + case 2: + message.pair = reader.string(); + break; + case 3: + message.drops = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): MsgCreateDrop { + const message = { ...baseMsgCreateDrop } as MsgCreateDrop; + if (object.creator !== undefined && object.creator !== null) { + message.creator = String(object.creator); + } else { + message.creator = ""; + } + if (object.pair !== undefined && object.pair !== null) { + message.pair = String(object.pair); + } else { + message.pair = ""; + } + if (object.drops !== undefined && object.drops !== null) { + message.drops = String(object.drops); + } else { + message.drops = ""; + } + return message; + }, + + toJSON(message: MsgCreateDrop): unknown { + const obj: any = {}; + message.creator !== undefined && (obj.creator = message.creator); + message.pair !== undefined && (obj.pair = message.pair); + message.drops !== undefined && (obj.drops = message.drops); + return obj; + }, + + fromPartial(object: DeepPartial): MsgCreateDrop { + const message = { ...baseMsgCreateDrop } as MsgCreateDrop; + if (object.creator !== undefined && object.creator !== null) { + message.creator = object.creator; + } else { + message.creator = ""; + } + if (object.pair !== undefined && object.pair !== null) { + message.pair = object.pair; + } else { + message.pair = ""; + } + if (object.drops !== undefined && object.drops !== null) { + message.drops = object.drops; + } else { + message.drops = ""; + } + return message; + }, +}; + +const baseMsgCreateDropResponse: object = {}; + +export const MsgCreateDropResponse = { + encode(_: 
MsgCreateDropResponse, writer: Writer = Writer.create()): Writer { + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): MsgCreateDropResponse { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseMsgCreateDropResponse } as MsgCreateDropResponse; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(_: any): MsgCreateDropResponse { + const message = { ...baseMsgCreateDropResponse } as MsgCreateDropResponse; + return message; + }, + + toJSON(_: MsgCreateDropResponse): unknown { + const obj: any = {}; + return obj; + }, + + fromPartial(_: DeepPartial): MsgCreateDropResponse { + const message = { ...baseMsgCreateDropResponse } as MsgCreateDropResponse; + return message; + }, +}; + +const baseMsgRedeemDrop: object = { creator: "", uid: "" }; + +export const MsgRedeemDrop = { + encode(message: MsgRedeemDrop, writer: Writer = Writer.create()): Writer { + if (message.creator !== "") { + writer.uint32(10).string(message.creator); + } + if (message.uid !== "") { + writer.uint32(18).string(message.uid); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): MsgRedeemDrop { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseMsgRedeemDrop } as MsgRedeemDrop; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.creator = reader.string(); + break; + case 2: + message.uid = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): MsgRedeemDrop { + const message = { ...baseMsgRedeemDrop } as MsgRedeemDrop; + if (object.creator !== undefined && object.creator !== null) { + message.creator = String(object.creator); + } else { + message.creator = ""; + } + if (object.uid !== undefined && object.uid !== null) { + message.uid = String(object.uid); + } else { + message.uid = ""; + } + return message; + }, + + toJSON(message: MsgRedeemDrop): unknown { + const obj: any = {}; + message.creator !== undefined && (obj.creator = message.creator); + message.uid !== undefined && (obj.uid = message.uid); + return obj; + }, + + fromPartial(object: DeepPartial): MsgRedeemDrop { + const message = { ...baseMsgRedeemDrop } as MsgRedeemDrop; + if (object.creator !== undefined && object.creator !== null) { + message.creator = object.creator; + } else { + message.creator = ""; + } + if (object.uid !== undefined && object.uid !== null) { + message.uid = object.uid; + } else { + message.uid = ""; + } + return message; + }, +}; + +const baseMsgRedeemDropResponse: object = {}; + +export const MsgRedeemDropResponse = { + encode(_: MsgRedeemDropResponse, writer: Writer = Writer.create()): Writer { + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): MsgRedeemDropResponse { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseMsgRedeemDropResponse } as MsgRedeemDropResponse; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(_: any): MsgRedeemDropResponse { + const message = { ...baseMsgRedeemDropResponse } as MsgRedeemDropResponse; + return message; + }, + + toJSON(_: MsgRedeemDropResponse): unknown { + const obj: any = {}; + return obj; + }, + + fromPartial(_: DeepPartial): MsgRedeemDropResponse { + const message = { ...baseMsgRedeemDropResponse } as MsgRedeemDropResponse; + return message; + }, +}; + +const baseMsgCreateOrder: object = { + creator: "", + denomAsk: "", + denomBid: "", + orderType: "", + amount: "", + rate: "", + prev: "", + next: "", +}; + +export const MsgCreateOrder = { + encode(message: MsgCreateOrder, writer: Writer = Writer.create()): Writer { + if (message.creator !== "") { + writer.uint32(10).string(message.creator); + } + if (message.denomAsk !== "") { + writer.uint32(18).string(message.denomAsk); + } + if (message.denomBid !== "") { + writer.uint32(26).string(message.denomBid); + } + if (message.orderType !== "") { + writer.uint32(34).string(message.orderType); + } + if (message.amount !== "") { + writer.uint32(42).string(message.amount); + } + for (const v of message.rate) { + writer.uint32(50).string(v!); + } + if (message.prev !== "") { + writer.uint32(58).string(message.prev); + } + if (message.next !== "") { + writer.uint32(66).string(message.next); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): MsgCreateOrder { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseMsgCreateOrder } as MsgCreateOrder; + message.rate = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.creator = reader.string(); + break; + case 2: + message.denomAsk = reader.string(); + break; + case 3: + message.denomBid = reader.string(); + break; + case 4: + message.orderType = reader.string(); + break; + case 5: + message.amount = reader.string(); + break; + case 6: + message.rate.push(reader.string()); + break; + case 7: + message.prev = reader.string(); + break; + case 8: + message.next = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): MsgCreateOrder { + const message = { ...baseMsgCreateOrder } as MsgCreateOrder; + message.rate = []; + if (object.creator !== undefined && object.creator !== null) { + message.creator = String(object.creator); + } else { + message.creator = ""; + } + if (object.denomAsk !== undefined && object.denomAsk !== null) { + message.denomAsk = String(object.denomAsk); + } else { + message.denomAsk = ""; + } + if (object.denomBid !== undefined && object.denomBid !== null) { + message.denomBid = String(object.denomBid); + } else { + message.denomBid = ""; + } + if (object.orderType !== undefined && object.orderType !== null) { + message.orderType = String(object.orderType); + } else { + message.orderType = ""; + } + if (object.amount !== undefined && object.amount !== null) { + message.amount = String(object.amount); + } else { + message.amount = ""; + } + if (object.rate !== undefined && object.rate !== null) { + for (const e of object.rate) { + message.rate.push(String(e)); + } + } + if (object.prev !== undefined && object.prev !== null) { + message.prev = String(object.prev); + } else { + message.prev = ""; + } + if (object.next !== undefined && object.next !== null) { + message.next = String(object.next); + } else { + message.next = 
""; + } + return message; + }, + + toJSON(message: MsgCreateOrder): unknown { + const obj: any = {}; + message.creator !== undefined && (obj.creator = message.creator); + message.denomAsk !== undefined && (obj.denomAsk = message.denomAsk); + message.denomBid !== undefined && (obj.denomBid = message.denomBid); + message.orderType !== undefined && (obj.orderType = message.orderType); + message.amount !== undefined && (obj.amount = message.amount); + if (message.rate) { + obj.rate = message.rate.map((e) => e); + } else { + obj.rate = []; + } + message.prev !== undefined && (obj.prev = message.prev); + message.next !== undefined && (obj.next = message.next); + return obj; + }, + + fromPartial(object: DeepPartial): MsgCreateOrder { + const message = { ...baseMsgCreateOrder } as MsgCreateOrder; + message.rate = []; + if (object.creator !== undefined && object.creator !== null) { + message.creator = object.creator; + } else { + message.creator = ""; + } + if (object.denomAsk !== undefined && object.denomAsk !== null) { + message.denomAsk = object.denomAsk; + } else { + message.denomAsk = ""; + } + if (object.denomBid !== undefined && object.denomBid !== null) { + message.denomBid = object.denomBid; + } else { + message.denomBid = ""; + } + if (object.orderType !== undefined && object.orderType !== null) { + message.orderType = object.orderType; + } else { + message.orderType = ""; + } + if (object.amount !== undefined && object.amount !== null) { + message.amount = object.amount; + } else { + message.amount = ""; + } + if (object.rate !== undefined && object.rate !== null) { + for (const e of object.rate) { + message.rate.push(e); + } + } + if (object.prev !== undefined && object.prev !== null) { + message.prev = object.prev; + } else { + message.prev = ""; + } + if (object.next !== undefined && object.next !== null) { + message.next = object.next; + } else { + message.next = ""; + } + return message; + }, +}; + +const baseMsgCreateOrderResponse: object = { uid: 0 }; + 
+export const MsgCreateOrderResponse = { + encode( + message: MsgCreateOrderResponse, + writer: Writer = Writer.create() + ): Writer { + if (message.uid !== 0) { + writer.uint32(8).uint64(message.uid); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): MsgCreateOrderResponse { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseMsgCreateOrderResponse } as MsgCreateOrderResponse; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.uid = longToNumber(reader.uint64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): MsgCreateOrderResponse { + const message = { ...baseMsgCreateOrderResponse } as MsgCreateOrderResponse; + if (object.uid !== undefined && object.uid !== null) { + message.uid = Number(object.uid); + } else { + message.uid = 0; + } + return message; + }, + + toJSON(message: MsgCreateOrderResponse): unknown { + const obj: any = {}; + message.uid !== undefined && (obj.uid = message.uid); + return obj; + }, + + fromPartial( + object: DeepPartial + ): MsgCreateOrderResponse { + const message = { ...baseMsgCreateOrderResponse } as MsgCreateOrderResponse; + if (object.uid !== undefined && object.uid !== null) { + message.uid = object.uid; + } else { + message.uid = 0; + } + return message; + }, +}; + +const baseMsgCancelOrder: object = { creator: "", uid: "" }; + +export const MsgCancelOrder = { + encode(message: MsgCancelOrder, writer: Writer = Writer.create()): Writer { + if (message.creator !== "") { + writer.uint32(10).string(message.creator); + } + if (message.uid !== "") { + writer.uint32(18).string(message.uid); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): MsgCancelOrder { + const reader = input instanceof Uint8Array ? 
new Reader(input) : input; + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseMsgCancelOrder } as MsgCancelOrder; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.creator = reader.string(); + break; + case 2: + message.uid = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): MsgCancelOrder { + const message = { ...baseMsgCancelOrder } as MsgCancelOrder; + if (object.creator !== undefined && object.creator !== null) { + message.creator = String(object.creator); + } else { + message.creator = ""; + } + if (object.uid !== undefined && object.uid !== null) { + message.uid = String(object.uid); + } else { + message.uid = ""; + } + return message; + }, + + toJSON(message: MsgCancelOrder): unknown { + const obj: any = {}; + message.creator !== undefined && (obj.creator = message.creator); + message.uid !== undefined && (obj.uid = message.uid); + return obj; + }, + + fromPartial(object: DeepPartial): MsgCancelOrder { + const message = { ...baseMsgCancelOrder } as MsgCancelOrder; + if (object.creator !== undefined && object.creator !== null) { + message.creator = object.creator; + } else { + message.creator = ""; + } + if (object.uid !== undefined && object.uid !== null) { + message.uid = object.uid; + } else { + message.uid = ""; + } + return message; + }, +}; + +const baseMsgCancelOrderResponse: object = {}; + +export const MsgCancelOrderResponse = { + encode(_: MsgCancelOrderResponse, writer: Writer = Writer.create()): Writer { + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): MsgCancelOrderResponse { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseMsgCancelOrderResponse } as MsgCancelOrderResponse; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(_: any): MsgCancelOrderResponse { + const message = { ...baseMsgCancelOrderResponse } as MsgCancelOrderResponse; + return message; + }, + + toJSON(_: MsgCancelOrderResponse): unknown { + const obj: any = {}; + return obj; + }, + + fromPartial(_: DeepPartial): MsgCancelOrderResponse { + const message = { ...baseMsgCancelOrderResponse } as MsgCancelOrderResponse; + return message; + }, +}; + +const baseMsgMarketOrder: object = { + creator: "", + denomAsk: "", + amountAsk: "", + denomBid: "", + amountBid: "", + slippage: "", +}; + +export const MsgMarketOrder = { + encode(message: MsgMarketOrder, writer: Writer = Writer.create()): Writer { + if (message.creator !== "") { + writer.uint32(10).string(message.creator); + } + if (message.denomAsk !== "") { + writer.uint32(18).string(message.denomAsk); + } + if (message.amountAsk !== "") { + writer.uint32(26).string(message.amountAsk); + } + if (message.denomBid !== "") { + writer.uint32(34).string(message.denomBid); + } + if (message.amountBid !== "") { + writer.uint32(42).string(message.amountBid); + } + if (message.slippage !== "") { + writer.uint32(50).string(message.slippage); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): MsgMarketOrder { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseMsgMarketOrder } as MsgMarketOrder; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.creator = reader.string(); + break; + case 2: + message.denomAsk = reader.string(); + break; + case 3: + message.amountAsk = reader.string(); + break; + case 4: + message.denomBid = reader.string(); + break; + case 5: + message.amountBid = reader.string(); + break; + case 6: + message.slippage = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): MsgMarketOrder { + const message = { ...baseMsgMarketOrder } as MsgMarketOrder; + if (object.creator !== undefined && object.creator !== null) { + message.creator = String(object.creator); + } else { + message.creator = ""; + } + if (object.denomAsk !== undefined && object.denomAsk !== null) { + message.denomAsk = String(object.denomAsk); + } else { + message.denomAsk = ""; + } + if (object.amountAsk !== undefined && object.amountAsk !== null) { + message.amountAsk = String(object.amountAsk); + } else { + message.amountAsk = ""; + } + if (object.denomBid !== undefined && object.denomBid !== null) { + message.denomBid = String(object.denomBid); + } else { + message.denomBid = ""; + } + if (object.amountBid !== undefined && object.amountBid !== null) { + message.amountBid = String(object.amountBid); + } else { + message.amountBid = ""; + } + if (object.slippage !== undefined && object.slippage !== null) { + message.slippage = String(object.slippage); + } else { + message.slippage = ""; + } + return message; + }, + + toJSON(message: MsgMarketOrder): unknown { + const obj: any = {}; + message.creator !== undefined && (obj.creator = message.creator); + message.denomAsk !== undefined && (obj.denomAsk = message.denomAsk); + message.amountAsk !== undefined && (obj.amountAsk = message.amountAsk); + message.denomBid !== undefined && (obj.denomBid = 
message.denomBid); + message.amountBid !== undefined && (obj.amountBid = message.amountBid); + message.slippage !== undefined && (obj.slippage = message.slippage); + return obj; + }, + + fromPartial(object: DeepPartial): MsgMarketOrder { + const message = { ...baseMsgMarketOrder } as MsgMarketOrder; + if (object.creator !== undefined && object.creator !== null) { + message.creator = object.creator; + } else { + message.creator = ""; + } + if (object.denomAsk !== undefined && object.denomAsk !== null) { + message.denomAsk = object.denomAsk; + } else { + message.denomAsk = ""; + } + if (object.amountAsk !== undefined && object.amountAsk !== null) { + message.amountAsk = object.amountAsk; + } else { + message.amountAsk = ""; + } + if (object.denomBid !== undefined && object.denomBid !== null) { + message.denomBid = object.denomBid; + } else { + message.denomBid = ""; + } + if (object.amountBid !== undefined && object.amountBid !== null) { + message.amountBid = object.amountBid; + } else { + message.amountBid = ""; + } + if (object.slippage !== undefined && object.slippage !== null) { + message.slippage = object.slippage; + } else { + message.slippage = ""; + } + return message; + }, +}; + +const baseMsgMarketOrderResponse: object = { + amountBid: "", + amountAsk: "", + slippage: "", +}; + +export const MsgMarketOrderResponse = { + encode( + message: MsgMarketOrderResponse, + writer: Writer = Writer.create() + ): Writer { + if (message.amountBid !== "") { + writer.uint32(10).string(message.amountBid); + } + if (message.amountAsk !== "") { + writer.uint32(18).string(message.amountAsk); + } + if (message.slippage !== "") { + writer.uint32(26).string(message.slippage); + } + return writer; + }, + + decode(input: Reader | Uint8Array, length?: number): MsgMarketOrderResponse { + const reader = input instanceof Uint8Array ? new Reader(input) : input; + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseMsgMarketOrderResponse } as MsgMarketOrderResponse; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.amountBid = reader.string(); + break; + case 2: + message.amountAsk = reader.string(); + break; + case 3: + message.slippage = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): MsgMarketOrderResponse { + const message = { ...baseMsgMarketOrderResponse } as MsgMarketOrderResponse; + if (object.amountBid !== undefined && object.amountBid !== null) { + message.amountBid = String(object.amountBid); + } else { + message.amountBid = ""; + } + if (object.amountAsk !== undefined && object.amountAsk !== null) { + message.amountAsk = String(object.amountAsk); + } else { + message.amountAsk = ""; + } + if (object.slippage !== undefined && object.slippage !== null) { + message.slippage = String(object.slippage); + } else { + message.slippage = ""; + } + return message; + }, + + toJSON(message: MsgMarketOrderResponse): unknown { + const obj: any = {}; + message.amountBid !== undefined && (obj.amountBid = message.amountBid); + message.amountAsk !== undefined && (obj.amountAsk = message.amountAsk); + message.slippage !== undefined && (obj.slippage = message.slippage); + return obj; + }, + + fromPartial( + object: DeepPartial + ): MsgMarketOrderResponse { + const message = { ...baseMsgMarketOrderResponse } as MsgMarketOrderResponse; + if (object.amountBid !== undefined && object.amountBid !== null) { + message.amountBid = object.amountBid; + } else { + message.amountBid = ""; + } + if (object.amountAsk !== undefined && object.amountAsk !== null) { + message.amountAsk = object.amountAsk; + } else { + message.amountAsk = ""; + } + if (object.slippage !== undefined && object.slippage !== null) { + message.slippage = object.slippage; + } else { + message.slippage = ""; + } + return message; + 
}, +}; + +/** Msg defines the Msg service. */ +export interface Msg { + CreatePool(request: MsgCreatePool): Promise; + CreateDrop(request: MsgCreateDrop): Promise; + RedeemDrop(request: MsgRedeemDrop): Promise; + CreateOrder(request: MsgCreateOrder): Promise; + CancelOrder(request: MsgCancelOrder): Promise; + /** this line is used by starport scaffolding # proto/tx/rpc */ + MarketOrder(request: MsgMarketOrder): Promise; +} + +export class MsgClientImpl implements Msg { + private readonly rpc: Rpc; + constructor(rpc: Rpc) { + this.rpc = rpc; + } + CreatePool(request: MsgCreatePool): Promise { + const data = MsgCreatePool.encode(request).finish(); + const promise = this.rpc.request( + "pendulumlabs.market.market.Msg", + "CreatePool", + data + ); + return promise.then((data) => + MsgCreatePoolResponse.decode(new Reader(data)) + ); + } + + CreateDrop(request: MsgCreateDrop): Promise { + const data = MsgCreateDrop.encode(request).finish(); + const promise = this.rpc.request( + "pendulumlabs.market.market.Msg", + "CreateDrop", + data + ); + return promise.then((data) => + MsgCreateDropResponse.decode(new Reader(data)) + ); + } + + RedeemDrop(request: MsgRedeemDrop): Promise { + const data = MsgRedeemDrop.encode(request).finish(); + const promise = this.rpc.request( + "pendulumlabs.market.market.Msg", + "RedeemDrop", + data + ); + return promise.then((data) => + MsgRedeemDropResponse.decode(new Reader(data)) + ); + } + + CreateOrder(request: MsgCreateOrder): Promise { + const data = MsgCreateOrder.encode(request).finish(); + const promise = this.rpc.request( + "pendulumlabs.market.market.Msg", + "CreateOrder", + data + ); + return promise.then((data) => + MsgCreateOrderResponse.decode(new Reader(data)) + ); + } + + CancelOrder(request: MsgCancelOrder): Promise { + const data = MsgCancelOrder.encode(request).finish(); + const promise = this.rpc.request( + "pendulumlabs.market.market.Msg", + "CancelOrder", + data + ); + return promise.then((data) => + 
MsgCancelOrderResponse.decode(new Reader(data)) + ); + } + + MarketOrder(request: MsgMarketOrder): Promise { + const data = MsgMarketOrder.encode(request).finish(); + const promise = this.rpc.request( + "pendulumlabs.market.market.Msg", + "MarketOrder", + data + ); + return promise.then((data) => + MsgMarketOrderResponse.decode(new Reader(data)) + ); + } +} + +interface Rpc { + request( + service: string, + method: string, + data: Uint8Array + ): Promise; +} + +declare var self: any | undefined; +declare var window: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = Date | Function | Uint8Array | string | number | undefined; +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? 
{ [K in keyof T]?: DeepPartial } + : Partial; + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (util.Long !== Long) { + util.Long = Long as any; + configure(); +} diff --git a/vue/src/store/generated/market/pendulumlabs.market.market/package.json b/vue/src/store/generated/market/pendulumlabs.market.market/package.json new file mode 100755 index 00000000..e9eadac2 --- /dev/null +++ b/vue/src/store/generated/market/pendulumlabs.market.market/package.json @@ -0,0 +1,18 @@ +{ + "name": "pendulumlabs-market-market-js", + "version": "0.1.0", + "description": "Autogenerated vuex store for Cosmos module pendulumlabs.market.market", + "author": "Starport Codegen ", + "homepage": "http://market/x/market/types", + "license": "Apache-2.0", + "licenses": [ + { + "type": "Apache-2.0", + "url": "http://www.apache.org/licenses/LICENSE-2.0" + } + ], + "main": "index.js", + "publishConfig": { + "access": "public" + } +} \ No newline at end of file diff --git a/vue/src/store/generated/market/pendulumlabs.market.market/vuex-root b/vue/src/store/generated/market/pendulumlabs.market.market/vuex-root new file mode 100755 index 00000000..0fcc121a --- /dev/null +++ b/vue/src/store/generated/market/pendulumlabs.market.market/vuex-root @@ -0,0 +1 @@ +THIS FILE IS GENERATED AUTOMATICALLY. DO NOT DELETE. 
diff --git a/x/market/client/cli/query.go b/x/market/client/cli/query.go index d14e9a51..f44be2fc 100644 --- a/x/market/client/cli/query.go +++ b/x/market/client/cli/query.go @@ -25,6 +25,26 @@ func GetQueryCmd(queryRoute string) *cobra.Command { } cmd.AddCommand(CmdQueryParams()) + cmd.AddCommand(CmdListPool()) + cmd.AddCommand(CmdPool()) + cmd.AddCommand(CmdListDrop()) + cmd.AddCommand(CmdShowDrop()) + cmd.AddCommand(CmdDropCoin()) + cmd.AddCommand(CmdShowDropPairs()) + cmd.AddCommand(CmdDropOwnerPair()) + cmd.AddCommand(CmdListMember()) + cmd.AddCommand(CmdShowMember()) + cmd.AddCommand(CmdListBurnings()) + cmd.AddCommand(CmdShowBurnings()) + cmd.AddCommand(CmdShowBurned()) + cmd.AddCommand(CmdListOrder()) + cmd.AddCommand(CmdShowOrder()) + cmd.AddCommand(CmdBook()) + cmd.AddCommand(CmdBookends()) + cmd.AddCommand(CmdHistory()) + cmd.AddCommand(CmdOrderOwner()) + cmd.AddCommand(CmdOrderOwnerUids()) + // this line is used by starport scaffolding # 1 return cmd diff --git a/x/market/client/cli/query_book.go b/x/market/client/cli/query_book.go new file mode 100644 index 00000000..643bf73e --- /dev/null +++ b/x/market/client/cli/query_book.go @@ -0,0 +1,50 @@ +package cli + +import ( + "strconv" + + "market/x/market/types" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/spf13/cobra" +) + +var _ = strconv.Itoa(0) + +func CmdBook() *cobra.Command { + cmd := &cobra.Command{ + Use: "book [denom-a] [denom-b] [order-type]", + Short: "Query book", + Args: cobra.ExactArgs(3), + RunE: func(cmd *cobra.Command, args []string) (err error) { + reqDenomA := args[0] + reqDenomB := args[1] + reqOrderType := args[2] + + clientCtx, err := client.GetClientTxContext(cmd) + if err != nil { + return err + } + + queryClient := types.NewQueryClient(clientCtx) + + params := &types.QueryBookRequest{ + DenomA: reqDenomA, + DenomB: reqDenomB, + OrderType: reqOrderType, + } + + res, err := queryClient.Book(cmd.Context(), params) + if err != 
nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} diff --git a/x/market/client/cli/query_bookends.go b/x/market/client/cli/query_bookends.go new file mode 100644 index 00000000..e3fbe611 --- /dev/null +++ b/x/market/client/cli/query_bookends.go @@ -0,0 +1,54 @@ +package cli + +import ( + "strconv" + "strings" + + "market/x/market/types" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/spf13/cobra" +) + +var _ = strconv.Itoa(0) + +func CmdBookends() *cobra.Command { + cmd := &cobra.Command{ + Use: "bookends [coin-a] [coin-b] [order-type] [rate]", + Short: "Query bookends", + Args: cobra.ExactArgs(4), + RunE: func(cmd *cobra.Command, args []string) (err error) { + reqCoinA := args[0] + reqCoinB := args[1] + reqOrderType := args[2] + reqRate := strings.Split(args[3], listSeparator) + + clientCtx, err := client.GetClientTxContext(cmd) + if err != nil { + return err + } + + queryClient := types.NewQueryClient(clientCtx) + + params := &types.QueryBookendsRequest{ + + CoinA: reqCoinA, + CoinB: reqCoinB, + OrderType: reqOrderType, + Rate: reqRate, + } + + res, err := queryClient.Bookends(cmd.Context(), params) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} diff --git a/x/market/client/cli/query_burnings.go b/x/market/client/cli/query_burnings.go new file mode 100644 index 00000000..fc4607a9 --- /dev/null +++ b/x/market/client/cli/query_burnings.go @@ -0,0 +1,100 @@ +package cli + +import ( + "context" + + "market/x/market/types" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/spf13/cobra" +) + +func CmdListBurnings() *cobra.Command { + cmd := &cobra.Command{ + Use: "list-burnings", + Short: "list all burnings", + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx := 
client.GetClientContextFromCmd(cmd) + + pageReq, err := client.ReadPageRequest(cmd.Flags()) + if err != nil { + return err + } + + queryClient := types.NewQueryClient(clientCtx) + + params := &types.QueryAllBurningsRequest{ + Pagination: pageReq, + } + + res, err := queryClient.BurningsAll(context.Background(), params) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + flags.AddPaginationFlagsToCmd(cmd, cmd.Use) + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +func CmdShowBurnings() *cobra.Command { + cmd := &cobra.Command{ + Use: "show-burnings [denom]", + Short: "shows a burnings", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) (err error) { + clientCtx := client.GetClientContextFromCmd(cmd) + + queryClient := types.NewQueryClient(clientCtx) + + argDenom := args[0] + + params := &types.QueryGetBurningsRequest{ + Denom: argDenom, + } + + res, err := queryClient.Burnings(context.Background(), params) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +func CmdShowBurned() *cobra.Command { + cmd := &cobra.Command{ + Use: "show-burned", + Short: "shows amount burned", + Args: cobra.ExactArgs(0), + RunE: func(cmd *cobra.Command, args []string) (err error) { + clientCtx := client.GetClientContextFromCmd(cmd) + + queryClient := types.NewQueryClient(clientCtx) + + params := &types.QueryBurnedRequest{} + + res, err := queryClient.Burned(context.Background(), params) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} diff --git a/x/market/client/cli/query_burnings_test.go b/x/market/client/cli/query_burnings_test.go new file mode 100644 index 00000000..dccab1b2 --- /dev/null +++ b/x/market/client/cli/query_burnings_test.go @@ -0,0 +1,165 @@ +package cli_test + +import ( + "fmt" + "strconv" + "testing" + + 
"github.com/cosmos/cosmos-sdk/client/flags" + clitestutil "github.com/cosmos/cosmos-sdk/testutil/cli" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/stretchr/testify/require" + tmcli "github.com/tendermint/tendermint/libs/cli" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "market/testutil/network" + "market/testutil/nullify" + "market/x/market/client/cli" + "market/x/market/types" +) + +// Prevent strconv unused error +var _ = strconv.IntSize + +func networkWithBurningsObjects(t *testing.T, n int) (*network.Network, []types.Burnings) { + t.Helper() + cfg := network.DefaultConfig() + state := types.GenesisState{} + require.NoError(t, cfg.Codec.UnmarshalJSON(cfg.GenesisState[types.ModuleName], &state)) + + for i := 0; i < n; i++ { + burnings := types.Burnings{ + Denom: strconv.Itoa(i), + Amount: sdk.NewIntFromUint64(uint64(i)), + } + nullify.Fill(&burnings) + state.BurningsList = append(state.BurningsList, burnings) + } + buf, err := cfg.Codec.MarshalJSON(&state) + require.NoError(t, err) + cfg.GenesisState[types.ModuleName] = buf + return network.New(t, cfg), state.BurningsList +} + +func TestShowBurnings(t *testing.T) { + net, objs := networkWithBurningsObjects(t, 2) + + ctx := net.Validators[0].ClientCtx + common := []string{ + fmt.Sprintf("--%s=json", tmcli.OutputFlag), + } + for _, tc := range []struct { + desc string + idDenom string + amount sdk.Int + + args []string + err error + obj types.Burnings + }{ + { + desc: "found", + idDenom: objs[0].Denom, + amount: objs[0].Amount, + args: common, + obj: objs[0], + }, + { + desc: "not found", + idDenom: strconv.Itoa(100000), + + args: common, + err: status.Error(codes.InvalidArgument, "not found"), + }, + } { + tc := tc + t.Run(tc.desc, func(t *testing.T) { + args := []string{ + tc.idDenom, + } + args = append(args, tc.args...) 
+ out, err := clitestutil.ExecTestCLICmd(ctx, cli.CmdShowBurnings(), args) + if tc.err != nil { + stat, ok := status.FromError(tc.err) + require.True(t, ok) + require.ErrorIs(t, stat.Err(), tc.err) + } else { + require.NoError(t, err) + var resp types.QueryGetBurningsResponse + require.NoError(t, net.Config.Codec.UnmarshalJSON(out.Bytes(), &resp)) + require.NotNil(t, resp.Burnings) + require.Equal(t, + nullify.Fill(&tc.obj), + nullify.Fill(&resp.Burnings), + ) + } + }) + } +} + +func TestListBurnings(t *testing.T) { + net, objs := networkWithBurningsObjects(t, 5) + + ctx := net.Validators[0].ClientCtx + request := func(next []byte, offset, limit uint64, total bool) []string { + args := []string{ + fmt.Sprintf("--%s=json", tmcli.OutputFlag), + } + if next == nil { + args = append(args, fmt.Sprintf("--%s=%d", flags.FlagOffset, offset)) + } else { + args = append(args, fmt.Sprintf("--%s=%s", flags.FlagPageKey, next)) + } + args = append(args, fmt.Sprintf("--%s=%d", flags.FlagLimit, limit)) + if total { + args = append(args, fmt.Sprintf("--%s", flags.FlagCountTotal)) + } + return args + } + t.Run("ByOffset", func(t *testing.T) { + step := 2 + for i := 0; i < len(objs); i += step { + args := request(nil, uint64(i), uint64(step), false) + out, err := clitestutil.ExecTestCLICmd(ctx, cli.CmdListBurnings(), args) + require.NoError(t, err) + var resp types.QueryAllBurningsResponse + require.NoError(t, net.Config.Codec.UnmarshalJSON(out.Bytes(), &resp)) + require.LessOrEqual(t, len(resp.Burnings), step) + require.Subset(t, + nullify.Fill(objs), + nullify.Fill(resp.Burnings), + ) + } + }) + t.Run("ByKey", func(t *testing.T) { + step := 2 + var next []byte + for i := 0; i < len(objs); i += step { + args := request(next, 0, uint64(step), false) + out, err := clitestutil.ExecTestCLICmd(ctx, cli.CmdListBurnings(), args) + require.NoError(t, err) + var resp types.QueryAllBurningsResponse + require.NoError(t, net.Config.Codec.UnmarshalJSON(out.Bytes(), &resp)) + 
require.LessOrEqual(t, len(resp.Burnings), step) + require.Subset(t, + nullify.Fill(objs), + nullify.Fill(resp.Burnings), + ) + next = resp.Pagination.NextKey + } + }) + t.Run("Total", func(t *testing.T) { + args := request(nil, 0, uint64(len(objs)), true) + out, err := clitestutil.ExecTestCLICmd(ctx, cli.CmdListBurnings(), args) + require.NoError(t, err) + var resp types.QueryAllBurningsResponse + require.NoError(t, net.Config.Codec.UnmarshalJSON(out.Bytes(), &resp)) + require.NoError(t, err) + require.Equal(t, len(objs), int(resp.Pagination.Total)) + require.ElementsMatch(t, + nullify.Fill(objs), + nullify.Fill(resp.Burnings), + ) + }) +} diff --git a/x/market/client/cli/query_drop.go b/x/market/client/cli/query_drop.go new file mode 100644 index 00000000..8b2390a0 --- /dev/null +++ b/x/market/client/cli/query_drop.go @@ -0,0 +1,200 @@ +package cli + +import ( + "context" + + "market/x/market/types" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/spf13/cast" + "github.com/spf13/cobra" +) + +func CmdListDrop() *cobra.Command { + cmd := &cobra.Command{ + Use: "list-drop", + Short: "list all drop", + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx := client.GetClientContextFromCmd(cmd) + + pageReq, err := client.ReadPageRequest(cmd.Flags()) + if err != nil { + return err + } + + queryClient := types.NewQueryClient(clientCtx) + + params := &types.QueryAllDropRequest{ + Pagination: pageReq, + } + + res, err := queryClient.DropAll(context.Background(), params) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + flags.AddPaginationFlagsToCmd(cmd, cmd.Use) + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +func CmdShowDrop() *cobra.Command { + cmd := &cobra.Command{ + Use: "show-drop [uid]", + Short: "shows a drop", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) (err error) { + clientCtx := client.GetClientContextFromCmd(cmd) + + 
queryClient := types.NewQueryClient(clientCtx) + + argUid, err := cast.ToUint64E(args[0]) + if err != nil { + return err + } + + params := &types.QueryDropRequest{ + Uid: argUid, + } + + res, err := queryClient.Drop(context.Background(), params) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +func CmdShowDropPairs() *cobra.Command { + cmd := &cobra.Command{ + Use: "show-drop-pairs [address]", + Short: "show pairs owner has drops", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) (err error) { + clientCtx := client.GetClientContextFromCmd(cmd) + + queryClient := types.NewQueryClient(clientCtx) + + argAddr := args[0] + + params := &types.QueryDropPairsRequest{ + Address: argAddr, + } + + res, err := queryClient.DropPairs(context.Background(), params) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +func CmdDropOwnerPair() *cobra.Command { + cmd := &cobra.Command{ + Use: "drop-owner-pair [address] [pair]", + Short: "shows all drops owned for pair", + Args: cobra.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx := client.GetClientContextFromCmd(cmd) + + pageReq, err := client.ReadPageRequest(cmd.Flags()) + if err != nil { + return err + } + + argAddress, err := cast.ToStringE(args[0]) + if err != nil { + return err + } + + argPair, err := cast.ToStringE(args[1]) + if err != nil { + return err + } + + queryClient := types.NewQueryClient(clientCtx) + + params := &types.QueryDropOwnerPairRequest{ + Address: argAddress, + Pair: argPair, + Pagination: pageReq, + } + + res, err := queryClient.DropOwnerPair(context.Background(), params) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + flags.AddPaginationFlagsToCmd(cmd, cmd.Use) + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +func CmdDropCoin() 
*cobra.Command { + cmd := &cobra.Command{ + Use: "drop-coin [denomA] [denomB] [amountA]", + Short: "calculate drops and amountB for given denomA and amountA", + Args: cobra.ExactArgs(3), + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx := client.GetClientContextFromCmd(cmd) + + argDenomA, err := cast.ToStringE(args[0]) + if err != nil { + return err + } + + argDenomB, err := cast.ToStringE(args[1]) + if err != nil { + return err + } + + argAmountA, err := cast.ToStringE(args[2]) + if err != nil { + return err + } + + queryClient := types.NewQueryClient(clientCtx) + + params := &types.QueryDropCoinRequest{ + DenomA: argDenomA, + DenomB: argDenomB, + AmountA: argAmountA, + } + + res, err := queryClient.DropCoin(context.Background(), params) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + flags.AddPaginationFlagsToCmd(cmd, cmd.Use) + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} diff --git a/x/market/client/cli/query_drop_test.go b/x/market/client/cli/query_drop_test.go new file mode 100644 index 00000000..e0d8b8e0 --- /dev/null +++ b/x/market/client/cli/query_drop_test.go @@ -0,0 +1,173 @@ +package cli_test + +import ( + "fmt" + "strconv" + "testing" + + "github.com/cosmos/cosmos-sdk/client/flags" + clitestutil "github.com/cosmos/cosmos-sdk/testutil/cli" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/stretchr/testify/require" + tmcli "github.com/tendermint/tendermint/libs/cli" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "market/testutil/network" + "market/testutil/nullify" + "market/x/market/client/cli" + "market/x/market/types" +) + +// Prevent strconv unused error +var _ = strconv.IntSize + +func networkWithDropObjects(t *testing.T, n int) (*network.Network, []types.Drop) { + t.Helper() + cfg := network.DefaultConfig() + state := types.GenesisState{} + require.NoError(t, cfg.Codec.UnmarshalJSON(cfg.GenesisState[types.ModuleName], &state)) + + for i := 0; i < n; i++ { + 
drop := types.Drop{ + Uid: uint64(i), + Owner: strconv.Itoa(i), + Pair: strconv.Itoa(i), + Drops: sdk.NewIntFromUint64(uint64(i)), + Product: sdk.NewIntFromUint64(uint64(i)), + } + nullify.Fill(&drop) + state.DropList = append(state.DropList, drop) + } + buf, err := cfg.Codec.MarshalJSON(&state) + require.NoError(t, err) + cfg.GenesisState[types.ModuleName] = buf + return network.New(t, cfg), state.DropList +} + +func TestShowDrop(t *testing.T) { + net, objs := networkWithDropObjects(t, 2) + + ctx := net.Validators[0].ClientCtx + common := []string{ + fmt.Sprintf("--%s=json", tmcli.OutputFlag), + } + for _, tc := range []struct { + desc string + idUid uint64 + idOwner string + idPair string + + args []string + err error + obj types.Drop + }{ + { + desc: "found", + idUid: objs[0].Uid, + idOwner: objs[0].Owner, + idPair: objs[0].Pair, + + args: common, + obj: objs[0], + }, + { + desc: "not found", + idUid: 100000, + idOwner: strconv.Itoa(100000), + idPair: strconv.Itoa(100000), + + args: common, + err: status.Error(codes.InvalidArgument, "not found"), + }, + } { + tc := tc + t.Run(tc.desc, func(t *testing.T) { + args := []string{ + strconv.Itoa(int(tc.idUid)), + } + args = append(args, tc.args...) 
+ out, err := clitestutil.ExecTestCLICmd(ctx, cli.CmdShowDrop(), args) + if tc.err != nil { + stat, ok := status.FromError(tc.err) + require.True(t, ok) + require.ErrorIs(t, stat.Err(), tc.err) + } else { + require.NoError(t, err) + var resp types.QueryDropResponse + require.NoError(t, net.Config.Codec.UnmarshalJSON(out.Bytes(), &resp)) + require.NotNil(t, resp.Drop) + require.Equal(t, + nullify.Fill(&tc.obj), + nullify.Fill(&resp.Drop), + ) + } + }) + } +} + +func TestListDrop(t *testing.T) { + net, objs := networkWithDropObjects(t, 5) + + ctx := net.Validators[0].ClientCtx + request := func(next []byte, offset, limit uint64, total bool) []string { + args := []string{ + fmt.Sprintf("--%s=json", tmcli.OutputFlag), + } + if next == nil { + args = append(args, fmt.Sprintf("--%s=%d", flags.FlagOffset, offset)) + } else { + args = append(args, fmt.Sprintf("--%s=%s", flags.FlagPageKey, next)) + } + args = append(args, fmt.Sprintf("--%s=%d", flags.FlagLimit, limit)) + if total { + args = append(args, fmt.Sprintf("--%s", flags.FlagCountTotal)) + } + return args + } + t.Run("ByOffset", func(t *testing.T) { + step := 2 + for i := 0; i < len(objs); i += step { + args := request(nil, uint64(i), uint64(step), false) + out, err := clitestutil.ExecTestCLICmd(ctx, cli.CmdListDrop(), args) + require.NoError(t, err) + var resp types.QueryDropsResponse + require.NoError(t, net.Config.Codec.UnmarshalJSON(out.Bytes(), &resp)) + require.LessOrEqual(t, len(resp.Drops), step) + require.Subset(t, + nullify.Fill(objs), + nullify.Fill(resp.Drops), + ) + } + }) + t.Run("ByKey", func(t *testing.T) { + step := 2 + var next []byte + for i := 0; i < len(objs); i += step { + args := request(next, 0, uint64(step), false) + out, err := clitestutil.ExecTestCLICmd(ctx, cli.CmdListDrop(), args) + require.NoError(t, err) + var resp types.QueryDropsResponse + require.NoError(t, net.Config.Codec.UnmarshalJSON(out.Bytes(), &resp)) + require.LessOrEqual(t, len(resp.Drops), step) + require.Subset(t, + 
nullify.Fill(objs), + nullify.Fill(resp.Drops), + ) + next = resp.Pagination.NextKey + } + }) + t.Run("Total", func(t *testing.T) { + args := request(nil, 0, uint64(len(objs)), true) + out, err := clitestutil.ExecTestCLICmd(ctx, cli.CmdListDrop(), args) + require.NoError(t, err) + var resp types.QueryDropsResponse + require.NoError(t, net.Config.Codec.UnmarshalJSON(out.Bytes(), &resp)) + require.NoError(t, err) + require.Equal(t, len(objs), int(resp.Pagination.Total)) + require.ElementsMatch(t, + nullify.Fill(objs), + nullify.Fill(resp.Drops), + ) + }) +} diff --git a/x/market/client/cli/query_member.go b/x/market/client/cli/query_member.go new file mode 100644 index 00000000..c5d5fd8c --- /dev/null +++ b/x/market/client/cli/query_member.go @@ -0,0 +1,76 @@ +package cli + +import ( + "context" + + "market/x/market/types" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/spf13/cobra" +) + +func CmdListMember() *cobra.Command { + cmd := &cobra.Command{ + Use: "list-member", + Short: "list all member", + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx := client.GetClientContextFromCmd(cmd) + + pageReq, err := client.ReadPageRequest(cmd.Flags()) + if err != nil { + return err + } + + queryClient := types.NewQueryClient(clientCtx) + + params := &types.QueryAllMemberRequest{ + Pagination: pageReq, + } + + res, err := queryClient.MemberAll(context.Background(), params) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + flags.AddPaginationFlagsToCmd(cmd, cmd.Use) + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +func CmdShowMember() *cobra.Command { + cmd := &cobra.Command{ + Use: "show-member [denom-a] [denom-b]", + Short: "shows a member", + Args: cobra.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) (err error) { + clientCtx := client.GetClientContextFromCmd(cmd) + + queryClient := types.NewQueryClient(clientCtx) + + argDenomA := args[0] + argDenomB 
:= args[1] + + params := &types.QueryGetMemberRequest{ + DenomA: argDenomA, + DenomB: argDenomB, + } + + res, err := queryClient.Member(context.Background(), params) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} diff --git a/x/market/client/cli/query_member_test.go b/x/market/client/cli/query_member_test.go new file mode 100644 index 00000000..1e32b787 --- /dev/null +++ b/x/market/client/cli/query_member_test.go @@ -0,0 +1,171 @@ +package cli_test + +import ( + "fmt" + "strconv" + "testing" + + "github.com/cosmos/cosmos-sdk/client/flags" + clitestutil "github.com/cosmos/cosmos-sdk/testutil/cli" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/stretchr/testify/require" + tmcli "github.com/tendermint/tendermint/libs/cli" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "market/testutil/network" + "market/testutil/nullify" + "market/x/market/client/cli" + "market/x/market/types" +) + +// Prevent strconv unused error +var _ = strconv.IntSize + +func networkWithMemberObjects(t *testing.T, n int) (*network.Network, []types.Member) { + t.Helper() + cfg := network.DefaultConfig() + state := types.GenesisState{} + require.NoError(t, cfg.Codec.UnmarshalJSON(cfg.GenesisState[types.ModuleName], &state)) + + for i := 0; i < n; i++ { + member := types.Member{ + Pair: strconv.Itoa(i), + DenomA: strconv.Itoa(i), + DenomB: strconv.Itoa(i), + Balance: sdk.NewIntFromUint64(uint64(i)), + Previous: sdk.NewIntFromUint64(uint64(i)), + } + nullify.Fill(&member) + state.MemberList = append(state.MemberList, member) + } + buf, err := cfg.Codec.MarshalJSON(&state) + require.NoError(t, err) + cfg.GenesisState[types.ModuleName] = buf + return network.New(t, cfg), state.MemberList +} + +func TestShowMember(t *testing.T) { + net, objs := networkWithMemberObjects(t, 2) + + ctx := net.Validators[0].ClientCtx + common := []string{ + fmt.Sprintf("--%s=json", tmcli.OutputFlag), + } + 
for _, tc := range []struct { + desc string + idDenomA string + idDenomB string + + args []string + err error + obj types.Member + }{ + { + desc: "found", + idDenomA: objs[0].DenomA, + idDenomB: objs[0].DenomB, + + args: common, + obj: objs[0], + }, + { + desc: "not found", + idDenomA: strconv.Itoa(100000), + idDenomB: strconv.Itoa(100000), + + args: common, + err: status.Error(codes.InvalidArgument, "not found"), + }, + } { + tc := tc + t.Run(tc.desc, func(t *testing.T) { + args := []string{ + tc.idDenomA, + tc.idDenomB, + } + args = append(args, tc.args...) + out, err := clitestutil.ExecTestCLICmd(ctx, cli.CmdShowMember(), args) + if tc.err != nil { + stat, ok := status.FromError(tc.err) + require.True(t, ok) + require.ErrorIs(t, stat.Err(), tc.err) + } else { + require.NoError(t, err) + var resp types.QueryGetMemberResponse + require.NoError(t, net.Config.Codec.UnmarshalJSON(out.Bytes(), &resp)) + require.NotNil(t, resp.Member) + require.Equal(t, + nullify.Fill(&tc.obj), + nullify.Fill(&resp.Member), + ) + } + }) + } +} + +func TestListMember(t *testing.T) { + net, objs := networkWithMemberObjects(t, 5) + + ctx := net.Validators[0].ClientCtx + request := func(next []byte, offset, limit uint64, total bool) []string { + args := []string{ + fmt.Sprintf("--%s=json", tmcli.OutputFlag), + } + if next == nil { + args = append(args, fmt.Sprintf("--%s=%d", flags.FlagOffset, offset)) + } else { + args = append(args, fmt.Sprintf("--%s=%s", flags.FlagPageKey, next)) + } + args = append(args, fmt.Sprintf("--%s=%d", flags.FlagLimit, limit)) + if total { + args = append(args, fmt.Sprintf("--%s", flags.FlagCountTotal)) + } + return args + } + t.Run("ByOffset", func(t *testing.T) { + step := 2 + for i := 0; i < len(objs); i += step { + args := request(nil, uint64(i), uint64(step), false) + out, err := clitestutil.ExecTestCLICmd(ctx, cli.CmdListMember(), args) + require.NoError(t, err) + var resp types.QueryAllMemberResponse + require.NoError(t, 
net.Config.Codec.UnmarshalJSON(out.Bytes(), &resp)) + require.LessOrEqual(t, len(resp.Member), step) + require.Subset(t, + nullify.Fill(objs), + nullify.Fill(resp.Member), + ) + } + }) + t.Run("ByKey", func(t *testing.T) { + step := 2 + var next []byte + for i := 0; i < len(objs); i += step { + args := request(next, 0, uint64(step), false) + out, err := clitestutil.ExecTestCLICmd(ctx, cli.CmdListMember(), args) + require.NoError(t, err) + var resp types.QueryAllMemberResponse + require.NoError(t, net.Config.Codec.UnmarshalJSON(out.Bytes(), &resp)) + require.LessOrEqual(t, len(resp.Member), step) + require.Subset(t, + nullify.Fill(objs), + nullify.Fill(resp.Member), + ) + next = resp.Pagination.NextKey + } + }) + t.Run("Total", func(t *testing.T) { + args := request(nil, 0, uint64(len(objs)), true) + out, err := clitestutil.ExecTestCLICmd(ctx, cli.CmdListMember(), args) + require.NoError(t, err) + var resp types.QueryAllMemberResponse + require.NoError(t, net.Config.Codec.UnmarshalJSON(out.Bytes(), &resp)) + require.NoError(t, err) + require.Equal(t, len(objs), int(resp.Pagination.Total)) + require.ElementsMatch(t, + nullify.Fill(objs), + nullify.Fill(resp.Member), + ) + }) +} diff --git a/x/market/client/cli/query_order.go b/x/market/client/cli/query_order.go new file mode 100644 index 00000000..93ed0ee1 --- /dev/null +++ b/x/market/client/cli/query_order.go @@ -0,0 +1,158 @@ +package cli + +import ( + "context" + + "market/x/market/types" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/spf13/cast" + "github.com/spf13/cobra" +) + +func CmdListOrder() *cobra.Command { + cmd := &cobra.Command{ + Use: "list-order", + Short: "list all order", + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx := client.GetClientContextFromCmd(cmd) + + pageReq, err := client.ReadPageRequest(cmd.Flags()) + if err != nil { + return err + } + + queryClient := types.NewQueryClient(clientCtx) + + params := 
&types.QueryAllOrderRequest{ + Pagination: pageReq, + } + + res, err := queryClient.OrderAll(context.Background(), params) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + flags.AddPaginationFlagsToCmd(cmd, cmd.Use) + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +func CmdShowOrder() *cobra.Command { + cmd := &cobra.Command{ + Use: "show-order [uid]", + Short: "shows a order", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) (err error) { + clientCtx := client.GetClientContextFromCmd(cmd) + + queryClient := types.NewQueryClient(clientCtx) + + argUid, err := cast.ToUint64E(args[0]) + if err != nil { + return err + } + + params := &types.QueryOrderRequest{ + Uid: argUid, + } + + res, err := queryClient.Order(context.Background(), params) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +func CmdOrderOwner() *cobra.Command { + cmd := &cobra.Command{ + Use: "order-owner [address]", + Short: "shows all orders from owner", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx := client.GetClientContextFromCmd(cmd) + + pageReq, err := client.ReadPageRequest(cmd.Flags()) + if err != nil { + return err + } + + argOwner, err := cast.ToStringE(args[0]) + if err != nil { + return err + } + + queryClient := types.NewQueryClient(clientCtx) + + params := &types.QueryOrderOwnerRequest{ + Address: argOwner, + Pagination: pageReq, + } + + res, err := queryClient.OrderOwner(context.Background(), params) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + flags.AddPaginationFlagsToCmd(cmd, cmd.Use) + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +func CmdOrderOwnerUids() *cobra.Command { + cmd := &cobra.Command{ + Use: "order-owner-uids [address]", + Short: "shows all order uids from owner", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args 
[]string) error { + clientCtx := client.GetClientContextFromCmd(cmd) + + pageReq, err := client.ReadPageRequest(cmd.Flags()) + if err != nil { + return err + } + + argOwner, err := cast.ToStringE(args[0]) + if err != nil { + return err + } + + queryClient := types.NewQueryClient(clientCtx) + + params := &types.QueryOrderOwnerRequest{ + Address: argOwner, + Pagination: pageReq, + } + + res, err := queryClient.OrderOwnerUids(context.Background(), params) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + flags.AddPaginationFlagsToCmd(cmd, cmd.Use) + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} diff --git a/x/market/client/cli/query_order_test.go b/x/market/client/cli/query_order_test.go new file mode 100644 index 00000000..2c3359dd --- /dev/null +++ b/x/market/client/cli/query_order_test.go @@ -0,0 +1,195 @@ +package cli_test + +import ( + "strconv" + "testing" + + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/stretchr/testify/require" + + "market/testutil/network" + "market/testutil/nullify" + "market/x/market/types" +) + +// Prevent strconv unused error +var _ = strconv.IntSize + +func networkWithOrderObjects(t *testing.T, n int) (*network.Network, []types.Order) { + t.Helper() + cfg := network.DefaultConfig() + state := types.GenesisState{} + require.NoError(t, cfg.Codec.UnmarshalJSON(cfg.GenesisState[types.ModuleName], &state)) + + for i := 0; i < n; i++ { + order := types.Order{ + Uid: uint64(i), + Owner: strconv.Itoa(i), + Status: "active", + OrderType: strconv.Itoa(i), + DenomAsk: strconv.Itoa(i), + DenomBid: strconv.Itoa(i), + Amount: sdk.NewInt(int64(i)), + Rate: []sdk.Int{sdk.NewInt(1), sdk.NewInt(2)}, + } + nullify.Fill(&order) + state.OrderList = append(state.OrderList, order) + } + buf, err := cfg.Codec.MarshalJSON(&state) + require.NoError(t, err) + cfg.GenesisState[types.ModuleName] = buf + return network.New(t, cfg), state.OrderList +} + +/* +func TestShowOrder(t *testing.T) { + net, objs := 
networkWithOrderObjects(t, 2) + + ctx := net.Validators[0].ClientCtx + common := []string{ + fmt.Sprintf("--%s=json", tmcli.OutputFlag), + } + for _, tc := range []struct { + desc string + idUid uint64 + idOwner string + idStatus string + idOrderType string + idDenomAsk string + idDenomBid string + idAmount sdk.Int + idRate []sdk.Int + idPrev uint64 + idNext uint64 + idBegTime int64 + idUpdTime int64 + + args []string + err error + obj types.Order + }{ + { + desc: "found", + idUid: objs[0].Uid, + idOwner: objs[0].Owner, + idStatus: objs[0].Status, + idOrderType: objs[0].OrderType, + idDenomAsk: objs[0].DenomAsk, + idDenomBid: objs[0].DenomBid, + idAmount: objs[0].Amount, + idRate: objs[0].Rate, + idPrev: objs[0].Prev, + idNext: objs[0].Next, + idBegTime: objs[0].BegTime, + idUpdTime: objs[0].UpdTime, + + args: common, + obj: objs[0], + }, + { + desc: "not found", + idUid: 100000, + idOwner: strconv.Itoa(100000), + idStatus: "active", + idOrderType: strconv.Itoa(100000), + idDenomAsk: strconv.Itoa(100000), + idDenomBid: strconv.Itoa(100000), + idAmount: sdk.NewInt(int64(100000)), + idRate: []sdk.Int{sdk.NewInt(100000), sdk.NewInt(100000)}, + + args: common, + err: status.Error(codes.InvalidArgument, "not found"), + }, + } { + tc := tc + t.Run(tc.desc, func(t *testing.T) { + args := []string{ + strconv.Itoa(int(tc.idUid)), + } + args = append(args, tc.args...) 
+ out, err := clitestutil.ExecTestCLICmd(ctx, cli.CmdShowOrder(), args) + if tc.err != nil { + stat, ok := status.FromError(tc.err) + require.True(t, ok) + require.ErrorIs(t, stat.Err(), tc.err) + } else { + require.NoError(t, err) + var resp types.QueryOrderResponse + require.NoError(t, net.Config.Codec.UnmarshalJSON(out.Bytes(), &resp)) + require.NotNil(t, resp.Order) + require.Equal(t, + nullify.Fill(&tc.obj), + nullify.Fill(&resp.Order), + ) + } + }) + } +} + +func TestListOrder(t *testing.T) { + net, objs := networkWithOrderObjects(t, 5) + + ctx := net.Validators[0].ClientCtx + request := func(next []byte, offset, limit uint64, total bool) []string { + args := []string{ + fmt.Sprintf("--%s=json", tmcli.OutputFlag), + } + if next == nil { + args = append(args, fmt.Sprintf("--%s=%d", flags.FlagOffset, offset)) + } else { + args = append(args, fmt.Sprintf("--%s=%s", flags.FlagPageKey, next)) + } + args = append(args, fmt.Sprintf("--%s=%d", flags.FlagLimit, limit)) + if total { + args = append(args, fmt.Sprintf("--%s", flags.FlagCountTotal)) + } + return args + } + t.Run("ByOffset", func(t *testing.T) { + step := 2 + for i := 0; i < len(objs); i += step { + args := request(nil, uint64(i), uint64(step), false) + out, err := clitestutil.ExecTestCLICmd(ctx, cli.CmdListOrder(), args) + require.NoError(t, err) + var resp types.QueryOrdersResponse + require.NoError(t, net.Config.Codec.UnmarshalJSON(out.Bytes(), &resp)) + require.LessOrEqual(t, len(resp.Orders), step) + require.Subset(t, + nullify.Fill(objs), + nullify.Fill(resp.Orders), + ) + } + }) + t.Run("ByKey", func(t *testing.T) { + step := 2 + var next []byte + for i := 0; i < len(objs); i += step { + args := request(next, 0, uint64(step), false) + out, err := clitestutil.ExecTestCLICmd(ctx, cli.CmdListOrder(), args) + require.NoError(t, err) + var resp types.QueryOrdersResponse + require.NoError(t, net.Config.Codec.UnmarshalJSON(out.Bytes(), &resp)) + require.LessOrEqual(t, len(resp.Orders), step) + 
require.Subset(t, + nullify.Fill(objs), + nullify.Fill(resp.Orders), + ) + next = resp.Pagination.NextKey + } + }) + t.Run("Total", func(t *testing.T) { + args := request(nil, 0, uint64(len(objs)), true) + out, err := clitestutil.ExecTestCLICmd(ctx, cli.CmdListOrder(), args) + require.NoError(t, err) + var resp types.QueryOrdersResponse + require.NoError(t, net.Config.Codec.UnmarshalJSON(out.Bytes(), &resp)) + require.NoError(t, err) + require.Equal(t, len(objs), int(resp.Pagination.Total)) + require.ElementsMatch(t, + nullify.Fill(objs), + nullify.Fill(resp.Orders), + ) + }) +} +*/ diff --git a/x/market/client/cli/query_params.go b/x/market/client/cli/query_params.go index 1711d7eb..382a2516 100644 --- a/x/market/client/cli/query_params.go +++ b/x/market/client/cli/query_params.go @@ -3,10 +3,11 @@ package cli import ( "context" + "market/x/market/types" + "github.com/cosmos/cosmos-sdk/client" "github.com/cosmos/cosmos-sdk/client/flags" "github.com/spf13/cobra" - "market/x/market/types" ) func CmdQueryParams() *cobra.Command { diff --git a/x/market/client/cli/query_pool.go b/x/market/client/cli/query_pool.go new file mode 100644 index 00000000..e3daafe0 --- /dev/null +++ b/x/market/client/cli/query_pool.go @@ -0,0 +1,113 @@ +package cli + +import ( + "context" + + "market/x/market/types" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/spf13/cobra" +) + +func CmdListPool() *cobra.Command { + cmd := &cobra.Command{ + Use: "list-pool", + Short: "list all pool", + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx := client.GetClientContextFromCmd(cmd) + + pageReq, err := client.ReadPageRequest(cmd.Flags()) + if err != nil { + return err + } + + queryClient := types.NewQueryClient(clientCtx) + + params := &types.QueryAllPoolRequest{ + Pagination: pageReq, + } + + res, err := queryClient.PoolAll(context.Background(), params) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, 
+ } + + flags.AddPaginationFlagsToCmd(cmd, cmd.Use) + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +func CmdPool() *cobra.Command { + cmd := &cobra.Command{ + Use: "pool [pair]", + Short: "shows a pool", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) (err error) { + clientCtx := client.GetClientContextFromCmd(cmd) + + queryClient := types.NewQueryClient(clientCtx) + + argPair := args[0] + + params := &types.QueryGetPoolRequest{ + Pair: argPair, + } + + res, err := queryClient.Pool(context.Background(), params) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +func CmdHistory() *cobra.Command { + cmd := &cobra.Command{ + Use: "history [pair] [length]", + Short: "pool trade history", + Args: cobra.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) (err error) { + clientCtx := client.GetClientContextFromCmd(cmd) + + pageReq, err := client.ReadPageRequest(cmd.Flags()) + if err != nil { + return err + } + + queryClient := types.NewQueryClient(clientCtx) + + argPair := args[0] + argLength := args[1] + + params := &types.QueryHistoryRequest{ + Pair: argPair, + Length: argLength, + Pagination: pageReq, + } + + res, err := queryClient.History(context.Background(), params) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + flags.AddPaginationFlagsToCmd(cmd, cmd.Use) + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} diff --git a/x/market/client/cli/query_pool_test.go b/x/market/client/cli/query_pool_test.go new file mode 100644 index 00000000..275c4cc8 --- /dev/null +++ b/x/market/client/cli/query_pool_test.go @@ -0,0 +1,186 @@ +package cli_test + +import ( + "fmt" + "strconv" + "testing" + + "github.com/cosmos/cosmos-sdk/client/flags" + clitestutil "github.com/cosmos/cosmos-sdk/testutil/cli" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/stretchr/testify/require" + tmcli 
"github.com/tendermint/tendermint/libs/cli" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "market/testutil/network" + "market/testutil/nullify" + "market/x/market/client/cli" + "market/x/market/types" +) + +// Prevent strconv unused error +var _ = strconv.IntSize + +func networkWithPoolObjects(t *testing.T, n int) (*network.Network, []types.Pool) { + t.Helper() + cfg := network.DefaultConfig() + state := types.GenesisState{} + require.NoError(t, cfg.Codec.UnmarshalJSON(cfg.GenesisState[types.ModuleName], &state)) + + for i := 0; i < n; i++ { + pool := types.Pool{ + Pair: strconv.Itoa(i), + Denom1: strconv.Itoa(i), + Denom2: strconv.Itoa(i), + Leaders: []*types.Leader{ + { + Address: strconv.Itoa(i), + Drops: sdk.NewIntFromUint64(uint64(i)), + }, + }, + Drops: sdk.NewIntFromUint64(uint64(i)), + } + nullify.Fill(&pool) + state.PoolList = append(state.PoolList, pool) + } + buf, err := cfg.Codec.MarshalJSON(&state) + require.NoError(t, err) + cfg.GenesisState[types.ModuleName] = buf + return network.New(t, cfg), state.PoolList +} + +func TestShowPool(t *testing.T) { + net, objs := networkWithPoolObjects(t, 2) + + ctx := net.Validators[0].ClientCtx + common := []string{ + fmt.Sprintf("--%s=json", tmcli.OutputFlag), + } + for _, tc := range []struct { + desc string + idPair string + idDenom1 string + idDenom2 string + idLeaders []*types.Leader + idDrops sdk.Int + args []string + err error + obj types.Pool + }{ + { + desc: "found", + idPair: objs[0].Pair, + idDenom1: objs[0].Denom1, + idDenom2: objs[0].Denom2, + idLeaders: objs[0].Leaders, + idDrops: objs[0].Drops, + args: common, + obj: objs[0], + }, + { + desc: "not found", + idPair: strconv.Itoa(100000), + idDenom1: strconv.Itoa(100000), + idDenom2: strconv.Itoa(100000), + idLeaders: []*types.Leader{ + { + Address: strconv.Itoa(100000), + Drops: sdk.NewInt(100000), + }, + }, + idDrops: sdk.NewInt(100000), + args: common, + err: status.Error(codes.InvalidArgument, "not found"), + }, + } { + tc 
:= tc + t.Run(tc.desc, func(t *testing.T) { + args := []string{ + tc.idPair, + } + args = append(args, tc.args...) + out, err := clitestutil.ExecTestCLICmd(ctx, cli.CmdPool(), args) + if tc.err != nil { + stat, ok := status.FromError(tc.err) + require.True(t, ok) + require.ErrorIs(t, stat.Err(), tc.err) + } else { + require.NoError(t, err) + var resp types.QueryGetPoolResponse + require.NoError(t, net.Config.Codec.UnmarshalJSON(out.Bytes(), &resp)) + require.NotNil(t, resp.Pool) + require.Equal(t, + nullify.Fill(&tc.obj), + nullify.Fill(&resp.Pool), + ) + } + }) + } +} + +func TestListPool(t *testing.T) { + net, objs := networkWithPoolObjects(t, 5) + + ctx := net.Validators[0].ClientCtx + request := func(next []byte, offset, limit uint64, total bool) []string { + args := []string{ + fmt.Sprintf("--%s=json", tmcli.OutputFlag), + } + if next == nil { + args = append(args, fmt.Sprintf("--%s=%d", flags.FlagOffset, offset)) + } else { + args = append(args, fmt.Sprintf("--%s=%s", flags.FlagPageKey, next)) + } + args = append(args, fmt.Sprintf("--%s=%d", flags.FlagLimit, limit)) + if total { + args = append(args, fmt.Sprintf("--%s", flags.FlagCountTotal)) + } + return args + } + t.Run("ByOffset", func(t *testing.T) { + step := 2 + for i := 0; i < len(objs); i += step { + args := request(nil, uint64(i), uint64(step), false) + out, err := clitestutil.ExecTestCLICmd(ctx, cli.CmdListPool(), args) + require.NoError(t, err) + var resp types.QueryAllPoolResponse + require.NoError(t, net.Config.Codec.UnmarshalJSON(out.Bytes(), &resp)) + require.LessOrEqual(t, len(resp.Pool), step) + require.Subset(t, + nullify.Fill(objs), + nullify.Fill(resp.Pool), + ) + } + }) + t.Run("ByKey", func(t *testing.T) { + step := 2 + var next []byte + for i := 0; i < len(objs); i += step { + args := request(next, 0, uint64(step), false) + out, err := clitestutil.ExecTestCLICmd(ctx, cli.CmdListPool(), args) + require.NoError(t, err) + var resp types.QueryAllPoolResponse + require.NoError(t, 
net.Config.Codec.UnmarshalJSON(out.Bytes(), &resp)) + require.LessOrEqual(t, len(resp.Pool), step) + require.Subset(t, + nullify.Fill(objs), + nullify.Fill(resp.Pool), + ) + next = resp.Pagination.NextKey + } + }) + t.Run("Total", func(t *testing.T) { + args := request(nil, 0, uint64(len(objs)), true) + out, err := clitestutil.ExecTestCLICmd(ctx, cli.CmdListPool(), args) + require.NoError(t, err) + var resp types.QueryAllPoolResponse + require.NoError(t, net.Config.Codec.UnmarshalJSON(out.Bytes(), &resp)) + require.NoError(t, err) + require.Equal(t, len(objs), int(resp.Pagination.Total)) + require.ElementsMatch(t, + nullify.Fill(objs), + nullify.Fill(resp.Pool), + ) + }) +} diff --git a/x/market/client/cli/tx.go b/x/market/client/cli/tx.go index 16876f9d..2ae9505a 100644 --- a/x/market/client/cli/tx.go +++ b/x/market/client/cli/tx.go @@ -30,6 +30,12 @@ func GetTxCmd() *cobra.Command { RunE: client.ValidateCmd, } + cmd.AddCommand(CmdCreatePool()) + cmd.AddCommand(CmdCreateDrop()) + cmd.AddCommand(CmdRedeemDrop()) + cmd.AddCommand(CmdCreateOrder()) + cmd.AddCommand(CmdCancelOrder()) + cmd.AddCommand(CmdMarketOrder()) // this line is used by starport scaffolding # 1 return cmd diff --git a/x/market/client/cli/tx_cancel_order.go b/x/market/client/cli/tx_cancel_order.go new file mode 100644 index 00000000..c62384ba --- /dev/null +++ b/x/market/client/cli/tx_cancel_order.go @@ -0,0 +1,43 @@ +package cli + +import ( + "strconv" + + "market/x/market/types" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/client/tx" + "github.com/spf13/cobra" +) + +var _ = strconv.Itoa(0) + +func CmdCancelOrder() *cobra.Command { + cmd := &cobra.Command{ + Use: "cancel-order [uid]", + Short: "Broadcast message cancel-order", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) (err error) { + argUid := args[0] + + clientCtx, err := client.GetClientTxContext(cmd) + if err != nil { + return err + } + 
+ msg := types.NewMsgCancelOrder( + clientCtx.GetFromAddress().String(), + argUid, + ) + if err := msg.ValidateBasic(); err != nil { + return err + } + return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), msg) + }, + } + + flags.AddTxFlagsToCmd(cmd) + + return cmd +} diff --git a/x/market/client/cli/tx_create_drop.go b/x/market/client/cli/tx_create_drop.go new file mode 100644 index 00000000..591971c9 --- /dev/null +++ b/x/market/client/cli/tx_create_drop.go @@ -0,0 +1,45 @@ +package cli + +import ( + "strconv" + + "market/x/market/types" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/client/tx" + "github.com/spf13/cobra" +) + +var _ = strconv.Itoa(0) + +func CmdCreateDrop() *cobra.Command { + cmd := &cobra.Command{ + Use: "create-drop [pair] [drops]", + Short: "Broadcast message create-drop", + Args: cobra.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) (err error) { + argPair := args[0] + argDrops := args[1] + + clientCtx, err := client.GetClientTxContext(cmd) + if err != nil { + return err + } + + msg := types.NewMsgCreateDrop( + clientCtx.GetFromAddress().String(), + argPair, + argDrops, + ) + if err := msg.ValidateBasic(); err != nil { + return err + } + return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), msg) + }, + } + + flags.AddTxFlagsToCmd(cmd) + + return cmd +} diff --git a/x/market/client/cli/tx_create_order.go b/x/market/client/cli/tx_create_order.go new file mode 100644 index 00000000..aa5c1906 --- /dev/null +++ b/x/market/client/cli/tx_create_order.go @@ -0,0 +1,57 @@ +package cli + +import ( + "strconv" + + "strings" + + "market/x/market/types" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/client/tx" + "github.com/spf13/cobra" +) + +var _ = strconv.Itoa(0) + +func CmdCreateOrder() *cobra.Command { + cmd := &cobra.Command{ + Use: "create-order [denom-ask] [denom-bid] [order-type] 
[amount] [rate] [prev] [next]", + Short: "Broadcast message create-order", + Args: cobra.ExactArgs(7), + RunE: func(cmd *cobra.Command, args []string) (err error) { + argDenomAsk := args[0] + argDenomBid := args[1] + argOrderType := args[2] + argAmount := args[3] + argRate := strings.Split(args[4], listSeparator) + argPrev := args[5] + argNext := args[6] + + clientCtx, err := client.GetClientTxContext(cmd) + if err != nil { + return err + } + + msg := types.NewMsgCreateOrder( + clientCtx.GetFromAddress().String(), + argDenomAsk, + argDenomBid, + argOrderType, + argAmount, + argRate, + argPrev, + argNext, + ) + if err := msg.ValidateBasic(); err != nil { + return err + } + return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), msg) + }, + } + + flags.AddTxFlagsToCmd(cmd) + + return cmd +} diff --git a/x/market/client/cli/tx_create_pool.go b/x/market/client/cli/tx_create_pool.go new file mode 100644 index 00000000..8a4b36c9 --- /dev/null +++ b/x/market/client/cli/tx_create_pool.go @@ -0,0 +1,45 @@ +package cli + +import ( + "strconv" + + "market/x/market/types" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/client/tx" + "github.com/spf13/cobra" +) + +var _ = strconv.Itoa(0) + +func CmdCreatePool() *cobra.Command { + cmd := &cobra.Command{ + Use: "create-pool [coin-a] [coin-b]", + Short: "Broadcast message create-pool", + Args: cobra.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) (err error) { + argCoinA := args[0] + argCoinB := args[1] + + clientCtx, err := client.GetClientTxContext(cmd) + if err != nil { + return err + } + + msg := types.NewMsgCreatePool( + clientCtx.GetFromAddress().String(), + argCoinA, + argCoinB, + ) + if err := msg.ValidateBasic(); err != nil { + return err + } + return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), msg) + }, + } + + flags.AddTxFlagsToCmd(cmd) + + return cmd +} diff --git a/x/market/client/cli/tx_market_order.go 
b/x/market/client/cli/tx_market_order.go new file mode 100644 index 00000000..4c5af02a --- /dev/null +++ b/x/market/client/cli/tx_market_order.go @@ -0,0 +1,51 @@ +package cli + +import ( + "strconv" + + "market/x/market/types" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/client/tx" + "github.com/spf13/cobra" +) + +var _ = strconv.Itoa(0) + +func CmdMarketOrder() *cobra.Command { + cmd := &cobra.Command{ + Use: "market-order [denom-ask] [amount-ask] [denom-bid] [amount-bid] [slippage]", + Short: "Broadcast message market-order", + Args: cobra.ExactArgs(5), + RunE: func(cmd *cobra.Command, args []string) (err error) { + argDenomAsk := args[0] + argAmountAsk := args[1] + argDenomBid := args[2] + argAmountBid := args[3] + argSlippage := args[4] + + clientCtx, err := client.GetClientTxContext(cmd) + if err != nil { + return err + } + + msg := types.NewMsgMarketOrder( + clientCtx.GetFromAddress().String(), + argDenomAsk, + argAmountAsk, + argDenomBid, + argAmountBid, + argSlippage, + ) + if err := msg.ValidateBasic(); err != nil { + return err + } + return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), msg) + }, + } + + flags.AddTxFlagsToCmd(cmd) + + return cmd +} diff --git a/x/market/client/cli/tx_redeem_drop.go b/x/market/client/cli/tx_redeem_drop.go new file mode 100644 index 00000000..9d460176 --- /dev/null +++ b/x/market/client/cli/tx_redeem_drop.go @@ -0,0 +1,43 @@ +package cli + +import ( + "strconv" + + "market/x/market/types" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/client/tx" + "github.com/spf13/cobra" +) + +var _ = strconv.Itoa(0) + +func CmdRedeemDrop() *cobra.Command { + cmd := &cobra.Command{ + Use: "redeem-drop [uid]", + Short: "Broadcast message redeem-drop", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) (err error) { + argUid := args[0] + + clientCtx, err := 
client.GetClientTxContext(cmd) + if err != nil { + return err + } + + msg := types.NewMsgRedeemDrop( + clientCtx.GetFromAddress().String(), + argUid, + ) + if err := msg.ValidateBasic(); err != nil { + return err + } + return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), msg) + }, + } + + flags.AddTxFlagsToCmd(cmd) + + return cmd +} diff --git a/x/market/genesis.go b/x/market/genesis.go index 0928f8b2..a0eb2ede 100644 --- a/x/market/genesis.go +++ b/x/market/genesis.go @@ -1,14 +1,35 @@ package market import ( - sdk "github.com/cosmos/cosmos-sdk/types" "market/x/market/keeper" "market/x/market/types" + + sdk "github.com/cosmos/cosmos-sdk/types" ) // InitGenesis initializes the capability module's state from a provided genesis // state. func InitGenesis(ctx sdk.Context, k keeper.Keeper, genState types.GenesisState) { + // Set all the pool + for _, elem := range genState.PoolList { + k.SetPool(ctx, elem) + } + // Set all the drop + for _, elem := range genState.DropList { + k.SetDrop(ctx, elem) + } + // Set all the member + for _, elem := range genState.MemberList { + k.SetMember(ctx, elem) + } + // Set all the burnings + for _, elem := range genState.BurningsList { + k.SetBurnings(ctx, elem) + } + // Set all the order + for _, elem := range genState.OrderList { + k.SetOrder(ctx, elem) + } // this line is used by starport scaffolding # genesis/module/init k.SetParams(ctx, genState.Params) } @@ -18,6 +39,11 @@ func ExportGenesis(ctx sdk.Context, k keeper.Keeper) *types.GenesisState { genesis := types.DefaultGenesis() genesis.Params = k.GetParams(ctx) + genesis.PoolList = k.GetAllPool(ctx) + genesis.DropList = k.GetAllDrop(ctx) + genesis.MemberList = k.GetAllMember(ctx) + genesis.BurningsList = k.GetAllBurnings(ctx) + genesis.OrderList = k.GetAllOrder(ctx) // this line is used by starport scaffolding # genesis/module/export return genesis diff --git a/x/market/genesis_test.go b/x/market/genesis_test.go index f0765728..3676df37 100644 --- 
a/x/market/genesis_test.go +++ b/x/market/genesis_test.go @@ -3,27 +3,133 @@ package market_test import ( "testing" - "github.com/stretchr/testify/require" keepertest "market/testutil/keeper" "market/testutil/nullify" "market/x/market" "market/x/market/types" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/stretchr/testify/require" ) func TestGenesis(t *testing.T) { genesisState := types.GenesisState{ Params: types.DefaultParams(), - + PoolList: []types.Pool{ + { + Pair: "0", + Denom1: "0", + Denom2: "0", + Leaders: []*types.Leader{ + { + Address: "0", + Drops: sdk.NewIntFromUint64(uint64(0)), + }, + }, + Drops: sdk.NewIntFromUint64(uint64(0)), + }, + { + Pair: "1", + Denom1: "1", + Denom2: "1", + Leaders: []*types.Leader{ + { + Address: "1", + Drops: sdk.NewIntFromUint64(uint64(1)), + }, + }, + Drops: sdk.NewIntFromUint64(uint64(1)), + }, + }, + DropList: []types.Drop{ + { + Uid: 0, + Owner: "0", + Pair: "0", + Drops: sdk.NewIntFromUint64(uint64(0)), + Product: sdk.NewIntFromUint64(uint64(0)), + Active: true, + }, + { + Uid: 1, + Owner: "1", + Pair: "1", + Drops: sdk.NewIntFromUint64(uint64(1)), + Product: sdk.NewIntFromUint64(uint64(0)), + Active: true, + }, + }, + MemberList: []types.Member{ + { + Pair: "0", + DenomA: "0", + DenomB: "0", + Balance: sdk.NewIntFromUint64(uint64(0)), + Previous: sdk.NewIntFromUint64(uint64(0)), + Limit: uint64(0), + Stop: uint64(0), + }, + { + Pair: "1", + DenomA: "1", + DenomB: "1", + Balance: sdk.NewIntFromUint64(uint64(1)), + Previous: sdk.NewIntFromUint64(uint64(1)), + Limit: uint64(1), + Stop: uint64(1), + }, + }, + BurningsList: []types.Burnings{ + { + Denom: "0", + Amount: sdk.NewIntFromUint64(uint64(0)), + }, + { + Denom: "1", + Amount: sdk.NewIntFromUint64(uint64(1)), + }, + }, + OrderList: []types.Order{ + { + Uid: 0, + Owner: "0", + Status: "active", + OrderType: "0", + DenomAsk: "0", + DenomBid: "0", + Amount: sdk.NewIntFromUint64(uint64(0)), + Rate: []sdk.Int{sdk.NewInt(int64(0)), sdk.NewInt(int64(0))}, + 
Prev: uint64(0), + Next: uint64(0), + }, + { + Uid: 1, + Owner: "1", + Status: "active", + OrderType: "1", + DenomAsk: "1", + DenomBid: "1", + Amount: sdk.NewIntFromUint64(uint64(1)), + Rate: []sdk.Int{sdk.NewInt(int64(1)), sdk.NewInt(int64(1))}, + Prev: uint64(1), + Next: uint64(1), + }, + }, // this line is used by starport scaffolding # genesis/test/state } - k, ctx := keepertest.MarketKeeper(t) - market.InitGenesis(ctx, *k, genesisState) - got := market.ExportGenesis(ctx, *k) + k := keepertest.CreateTestEnvironment(t) + market.InitGenesis(k.Context, *k.MarketKeeper, genesisState) + got := market.ExportGenesis(k.Context, *k.MarketKeeper) require.NotNil(t, got) nullify.Fill(&genesisState) nullify.Fill(got) + require.ElementsMatch(t, genesisState.PoolList, got.PoolList) + require.ElementsMatch(t, genesisState.DropList, got.DropList) + require.ElementsMatch(t, genesisState.MemberList, got.MemberList) + require.ElementsMatch(t, genesisState.BurningsList, got.BurningsList) + require.ElementsMatch(t, genesisState.OrderList, got.OrderList) // this line is used by starport scaffolding # genesis/test/assert } diff --git a/x/market/handler.go b/x/market/handler.go index b32fbcc8..d22d60a4 100644 --- a/x/market/handler.go +++ b/x/market/handler.go @@ -3,21 +3,40 @@ package market import ( "fmt" - sdk "github.com/cosmos/cosmos-sdk/types" - sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" "market/x/market/keeper" "market/x/market/types" + + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" ) // NewHandler ... 
func NewHandler(k keeper.Keeper) sdk.Handler { - // this line is used by starport scaffolding # handler/msgServer + msgServer := keeper.NewMsgServerImpl(k) return func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { ctx = ctx.WithEventManager(sdk.NewEventManager()) switch msg := msg.(type) { - // this line is used by starport scaffolding # 1 + case *types.MsgCreatePool: + res, err := msgServer.CreatePool(sdk.WrapSDKContext(ctx), msg) + return sdk.WrapServiceResult(ctx, res, err) + case *types.MsgCreateDrop: + res, err := msgServer.CreateDrop(sdk.WrapSDKContext(ctx), msg) + return sdk.WrapServiceResult(ctx, res, err) + case *types.MsgRedeemDrop: + res, err := msgServer.RedeemDrop(sdk.WrapSDKContext(ctx), msg) + return sdk.WrapServiceResult(ctx, res, err) + case *types.MsgCreateOrder: + res, err := msgServer.CreateOrder(sdk.WrapSDKContext(ctx), msg) + return sdk.WrapServiceResult(ctx, res, err) + case *types.MsgCancelOrder: + res, err := msgServer.CancelOrder(sdk.WrapSDKContext(ctx), msg) + return sdk.WrapServiceResult(ctx, res, err) + case *types.MsgMarketOrder: + res, err := msgServer.MarketOrder(sdk.WrapSDKContext(ctx), msg) + return sdk.WrapServiceResult(ctx, res, err) + // this line is used by starport scaffolding # 1 default: errMsg := fmt.Sprintf("unrecognized %s message type: %T", types.ModuleName, msg) return nil, sdkerrors.Wrap(sdkerrors.ErrUnknownRequest, errMsg) diff --git a/x/market/keeper/burnings.go b/x/market/keeper/burnings.go new file mode 100644 index 00000000..493c12f0 --- /dev/null +++ b/x/market/keeper/burnings.go @@ -0,0 +1,105 @@ +package keeper + +import ( + "market/x/market/types" + + "github.com/cosmos/cosmos-sdk/store/prefix" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// SetBurnings set a specific burnings in the store from its index +func (k Keeper) SetBurnings(ctx sdk.Context, burnings types.Burnings) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.BurningsKeyPrefix)) + b := 
k.cdc.MustMarshal(&burnings) + store.Set(types.BurningsKey( + burnings.Denom, + ), b) +} + +// GetBurnings returns a burnings from its index +func (k Keeper) GetBurnings( + ctx sdk.Context, + denom string, + +) (val types.Burnings, found bool) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.BurningsKeyPrefix)) + + b := store.Get(types.BurningsKey( + denom, + )) + if b == nil { + return val, false + } + + k.cdc.MustUnmarshal(b, &val) + return val, true +} + +// RemoveBurnings removes a burnings from the store +func (k Keeper) RemoveBurnings( + ctx sdk.Context, + denom string, + +) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.BurningsKeyPrefix)) + store.Delete(types.BurningsKey( + denom, + )) +} + +// GetAllBurnings returns all burnings +func (k Keeper) GetAllBurnings(ctx sdk.Context) (list []types.Burnings) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.BurningsKeyPrefix)) + iterator := sdk.KVStorePrefixIterator(store, []byte{}) + + defer iterator.Close() + + for ; iterator.Valid(); iterator.Next() { + var val types.Burnings + k.cdc.MustUnmarshal(iterator.Value(), &val) + list = append(list, val) + } + + return +} + +// GetBurned get the amount of NOM Burned by ONEX +func (k Keeper) GetBurned(ctx sdk.Context) (burned types.Burned) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), []byte{}) + byteKey := types.KeyPrefix(types.BurnedKey) + a := store.Get(byteKey) + + // Burned doesn't exist return zero int + if a == nil { + return types.Burned{ + Amount: sdk.ZeroInt(), + } + } + + k.cdc.MustUnmarshal(a, &burned) + + return +} + +// SetBurned set the amount of NOM Burned by ONEX +func (k Keeper) AddBurned(ctx sdk.Context, amount sdk.Int) (burned types.Burned) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), []byte{}) + byteKey := types.KeyPrefix(types.BurnedKey) + + a := store.Get(byteKey) + + // Burned doesn't exist then initialize with amount + if a == nil { + burned = 
types.Burned{ + Amount: amount, + } + } else { + k.cdc.MustUnmarshal(a, &burned) + burned.Amount = burned.Amount.Add(amount) + } + + b := k.cdc.MustMarshal(&burned) + store.Set(byteKey, b) + + return +} diff --git a/x/market/keeper/burnings_test.go b/x/market/keeper/burnings_test.go new file mode 100644 index 00000000..1b423d73 --- /dev/null +++ b/x/market/keeper/burnings_test.go @@ -0,0 +1,64 @@ +package keeper_test + +import ( + "strconv" + "testing" + + keepertest "market/testutil/keeper" + "market/testutil/nullify" + "market/x/market/keeper" + "market/x/market/types" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/stretchr/testify/require" +) + +// Prevent strconv unused error +var _ = strconv.IntSize + +func createNBurnings(keeper *keeper.Keeper, ctx sdk.Context, n int) []types.Burnings { + items := make([]types.Burnings, n) + for i := range items { + items[i].Denom = strconv.Itoa(i) + items[i].Amount = sdk.NewIntFromUint64(uint64(0)) + keeper.SetBurnings(ctx, items[i]) + } + return items +} + +func TestBurningsGet(t *testing.T) { + keeper := keepertest.CreateTestEnvironment(t) + items := createNBurnings(keeper.MarketKeeper, keeper.Context, 10) + for _, item := range items { + rst, found := keeper.MarketKeeper.GetBurnings(keeper.Context, + item.Denom, + ) + require.True(t, found) + require.Equal(t, + nullify.Fill(&item), + nullify.Fill(&rst), + ) + } +} +func TestBurningsRemove(t *testing.T) { + keeper := keepertest.CreateTestEnvironment(t) + items := createNBurnings(keeper.MarketKeeper, keeper.Context, 10) + for _, item := range items { + keeper.MarketKeeper.RemoveBurnings(keeper.Context, + item.Denom, + ) + _, found := keeper.MarketKeeper.GetBurnings(keeper.Context, + item.Denom, + ) + require.False(t, found) + } +} + +func TestBurningsGetAll(t *testing.T) { + keeper := keepertest.CreateTestEnvironment(t) + items := createNBurnings(keeper.MarketKeeper, keeper.Context, 10) + require.ElementsMatch(t, + nullify.Fill(items), + 
nullify.Fill(keeper.MarketKeeper.GetAllBurnings(keeper.Context)), + ) +} diff --git a/x/market/keeper/drop.go b/x/market/keeper/drop.go new file mode 100644 index 00000000..0bfcdcd2 --- /dev/null +++ b/x/market/keeper/drop.go @@ -0,0 +1,548 @@ +package keeper + +import ( + "math/big" + "sort" + "strings" + + "market/x/market/types" + + "github.com/cosmos/cosmos-sdk/store/prefix" + sdk "github.com/cosmos/cosmos-sdk/types" + + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +// SetDrop set a specific drop in the store from its index +func (k Keeper) SetDrop(ctx sdk.Context, drop types.Drop) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.DropKeyPrefix)) + a := k.cdc.MustMarshal(&drop) + store.Set(types.DropKey( + drop.Uid, + ), a) +} + +// GetDrop returns a drop from its index +func (k Keeper) GetDrop( + ctx sdk.Context, + uid uint64, +) (val types.Drop, found bool) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.DropKeyPrefix)) + + b := store.Get(types.DropKey( + uid, + )) + if b == nil { + return val, false + } + + k.cdc.MustUnmarshal(b, &val) + return val, true +} + +// GetDrop returns a drop from its index +func (k Keeper) GetDropPairs( + ctx sdk.Context, + address string, +) (val types.DropPairs, found bool) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.DropPairsKeyPrefix)) + + a := store.Get(types.DropPairsKey( + address, + )) + if a == nil { + return val, false + } + + k.cdc.MustUnmarshal(a, &val) + return val, true +} + +// GetOwnerDrops returns drops from a single owner +func (k Keeper) GetDropsOwnerPairDetail( + ctx sdk.Context, + owner string, + pair string, +) (list []types.Drop) { + store1 := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.DropsKeyPrefix)) + + b := store1.Get(types.DropsKey( + owner, + pair, + )) + if b == nil { + return list + } + + store2 := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.DropKeyPrefix)) + + 
var drops types.Drops + var drop types.Drop + + k.cdc.MustUnmarshal(b, &drops) + + for _, uid := range drops.Uids { + + b := store2.Get(types.DropKey( + uid, + )) + + if b != nil { + k.cdc.MustUnmarshal(b, &drop) + list = append(list, drop) + } + } + + return +} + +// GetOwnerDrops returns drops from a single owner +func (k Keeper) GetDropsOwnerPair( + ctx sdk.Context, + owner string, + pair string, +) (drops types.Drops, found bool) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.DropsKeyPrefix)) + + b := store.Get(types.DropsKey( + owner, + pair, + )) + if b == nil { + return drops, false + } + + k.cdc.MustUnmarshal(b, &drops) + + return drops, true +} + +// RemoveDrop removes a drop from the store +func (k Keeper) RemoveDrop( + ctx sdk.Context, + uid uint64, +) { + store1 := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.DropKeyPrefix)) + + b := store1.Get(types.DropKey( + uid, + )) + + if b == nil { + return + } + + store1.Delete(types.DropKey( + uid, + )) +} + +// SetDrop set a specific drop in the store from its index +func (k Keeper) SetDropOwner( + ctx sdk.Context, + drop types.Drop, +) { + store1 := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.DropsKeyPrefix)) + + var drops types.Drops + + a := store1.Get(types.DropsKey( + drop.Owner, + drop.Pair, + )) + if a == nil { + drops.Sum = drop.Drops + drops.Uids = []uint64{drop.Uid} + } else { + k.cdc.MustUnmarshal(a, &drops) + + uids, _ := addUid(drops.Uids, drop.Uid) + + drops = types.Drops{ + Uids: uids, + Sum: drops.Sum.Add(drop.Drops), + } + } + + b := k.cdc.MustMarshal(&drops) + + store1.Set(types.DropsKey( + drop.Owner, + drop.Pair, + ), b) + + // Add drop pair to owner + store2 := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.DropPairsKeyPrefix)) + + var dropPairs types.DropPairs + + c := store2.Get(types.DropPairsKey( + drop.Owner, + )) + + if c == nil { + dropPairs.Pairs = []string{drop.Pair} + } else { + 
k.cdc.MustUnmarshal(c, &dropPairs) + dropPairs.Pairs, _ = addPair(dropPairs.Pairs, drop.Pair) + } + + d := k.cdc.MustMarshal(&dropPairs) + + store2.Set(types.DropPairsKey( + drop.Owner, + ), d) +} + +// RemoveDrop removes a drop from the store +func (k Keeper) RemoveDropOwner( + ctx sdk.Context, + drop types.Drop, +) { + // Remove uid from owner drop list + store1 := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.DropsKeyPrefix)) + + var drops types.Drops + + a := store1.Get(types.DropsKey( + drop.Owner, + drop.Pair, + )) + if a == nil { + return + } + + k.cdc.MustUnmarshal(a, &drops) + + drops.Uids, _ = removeUid(drops.Uids, drop.Uid) + drops.Sum = drops.Sum.Sub(drop.Drops) + + b := k.cdc.MustMarshal(&drops) + + store1.Set(types.DropsKey( + drop.Owner, + drop.Pair, + ), b) + + if !(len(drops.Uids) > 0) { + // Remove uid from owner drop list + store2 := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.DropPairsKeyPrefix)) + + var dropPairs types.DropPairs + + c := store2.Get(types.DropPairsKey( + drop.Owner, + )) + + k.cdc.MustUnmarshal(c, &dropPairs) + + dropPairs.Pairs, _ = removePair(dropPairs.Pairs, drop.Pair) + + d := k.cdc.MustMarshal(&dropPairs) + + store2.Set(types.DropPairsKey( + drop.Owner, + ), d) + } +} + +// GetAllDrop returns all drop +func (k Keeper) GetAllDrop(ctx sdk.Context) (list []types.Drop) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.DropKeyPrefix)) + iterator := sdk.KVStorePrefixIterator(store, []byte{}) + + defer iterator.Close() + + for ; iterator.Valid(); iterator.Next() { + var val types.Drop + k.cdc.MustUnmarshal(iterator.Value(), &val) + list = append(list, val) + } + + return +} + +// GetOwnerDrops returns drops from a single owner +func (k Keeper) GetPairs( + ctx sdk.Context, + owner string, +) (pairs types.DropPairs, found bool) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.DropPairsKeyPrefix)) + + b := store.Get(types.DropPairsKey( + owner, + 
)) + if b == nil { + return pairs, false + } + + k.cdc.MustUnmarshal(b, &pairs) + + return pairs, true +} + +// GetOrderOwner returns orders from a single owner +func (k Keeper) GetDropOwnerPair( + ctx sdk.Context, + owner string, + pair string, +) (list []types.Drop, found bool) { + store1 := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.DropsKeyPrefix)) + + a := store1.Get(types.DropsKey( + owner, + pair, + )) + if a == nil { + return list, false + } + + var drops types.Drops + + k.cdc.MustUnmarshal(a, &drops) + + store2 := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.DropKeyPrefix)) + + for _, uid := range drops.Uids { + var drop types.Drop + + b := store2.Get(types.DropKey( + uid, + )) + + if b != nil { + k.cdc.MustUnmarshal(b, &drop) + list = append(list, drop) + } + } + + return list, true +} + +// GetOrderOwner returns orders from a single owner +func (k Keeper) GetDropAmounts( + ctx sdk.Context, + uid uint64, +) (denom1 string, denom2 string, amount1 sdk.Int, amount2 sdk.Int, found bool) { + dropStore := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.DropKeyPrefix)) + + a := dropStore.Get(types.DropKey( + uid, + )) + if a == nil { + return denom1, denom2, amount1, amount2, false + } + + var drop types.Drop + k.cdc.MustUnmarshal(a, &drop) + + pair := strings.Split(drop.Pair, ",") + + denom1 = pair[0] + denom2 = pair[1] + + memberStore := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.MemberKeyPrefix)) + + b := memberStore.Get(types.MemberKey( + denom2, + denom1, + )) + if b == nil { + return denom1, denom2, amount1, amount2, false + } + + var member1 types.Member + k.cdc.MustUnmarshal(b, &member1) + + c := memberStore.Get(types.MemberKey( + denom1, + denom2, + )) + if c == nil { + return denom1, denom2, amount1, amount2, false + } + + var member2 types.Member + k.cdc.MustUnmarshal(c, &member2) + + poolStore := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.PoolKeyPrefix)) + + 
d := poolStore.Get(types.PoolKey( + drop.Pair, + )) + if d == nil { + return denom1, denom2, amount1, amount2, false + } + + var pool types.Pool + k.cdc.MustUnmarshal(d, &pool) + + amount1, amount2, error := dropAmounts(drop.Drops, pool, member1, member2) + if error != nil { + return denom1, denom2, amount1, amount2, false + } + + found = true + + return +} + +func dropAmounts(drops sdk.Int, pool types.Pool, member1 types.Member, member2 types.Member) (sdk.Int, sdk.Int, error) { + // see `msg_server_redeem_drop` for our bigint strategy + // `dropAmtMember1 = (drops * member1.Balance) / pool.Drops` + tmp := big.NewInt(0) + tmp.Mul(drops.BigInt(), member1.Balance.BigInt()) + tmp.Quo(tmp, pool.Drops.BigInt()) + dropAmtMember1 := sdk.NewIntFromBigInt(tmp) + tmp = big.NewInt(0) + + if dropAmtMember1.LTE(sdk.ZeroInt()) { + return sdk.ZeroInt(), sdk.ZeroInt(), sdkerrors.Wrapf(types.ErrAmtZero, "%s", member1.DenomB) + } + + // `dropAmtMember2 = (drops * member2.Balance) / pool.Drops` + tmp.Mul(drops.BigInt(), member2.Balance.BigInt()) + tmp.Quo(tmp, pool.Drops.BigInt()) + dropAmtMember2 := sdk.NewIntFromBigInt(tmp) + //tmp = big.NewInt(0) + + if dropAmtMember2.LTE(sdk.ZeroInt()) { + return sdk.ZeroInt(), sdk.ZeroInt(), sdkerrors.Wrapf(types.ErrAmtZero, "%s", member2.DenomB) + } + + return dropAmtMember1, dropAmtMember2, nil +} + +// GetOrderOwner returns orders from a single owner +func (k Keeper) GetDropCoin( + ctx sdk.Context, + denomA string, + denomB string, + amountA sdk.Int, +) (amountB sdk.Int, drops sdk.Int, found bool) { + + memberStore := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.MemberKeyPrefix)) + + b := memberStore.Get(types.MemberKey( + denomB, + denomA, + )) + if b == nil { + return amountB, drops, false + } + + var member1 types.Member + k.cdc.MustUnmarshal(b, &member1) + + c := memberStore.Get(types.MemberKey( + denomA, + denomB, + )) + if c == nil { + return amountB, drops, false + } + + var member2 types.Member + 
k.cdc.MustUnmarshal(c, &member2) + + poolStore := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.PoolKeyPrefix)) + + prePair := []string{denomA, denomB} + sort.Strings(prePair) + pair := strings.Join(prePair, ",") + + d := poolStore.Get(types.PoolKey(pair)) + if d == nil { + return amountB, drops, false + } + + var pool types.Pool + k.cdc.MustUnmarshal(d, &pool) + + amountB, drops, error := dropCoin(amountA, pool, member1, member2) + if error != nil { + return amountB, drops, false + } + + found = true + + return +} + +func dropCoin(amountA sdk.Int, pool types.Pool, memberA types.Member, memberB types.Member) (sdk.Int, sdk.Int, error) { + // see `msg_server_redeem_drop` for our bigint strategy + // `dropAmtMember1 = (drops * member1.Balance) / pool.Drops` + tmp := big.NewInt(0) + tmp.Mul(amountA.BigInt(), pool.Drops.BigInt()) + tmp.Quo(tmp, memberA.Balance.BigInt()) + drops := sdk.NewIntFromBigInt(tmp) + tmp2 := big.NewInt(0) + tmp2.Mul(tmp, memberB.Balance.BigInt()) + tmp2.Quo(tmp2, pool.Drops.BigInt()) + amountB := sdk.NewIntFromBigInt(tmp2) + + return amountB, drops, nil +} + +// GetOrderOwner returns orders from a single owner +func (k Keeper) GetDropsToCoins( + ctx sdk.Context, + pair string, + drops string, +) (denom1 string, denom2 string, amount1 sdk.Int, amount2 sdk.Int, found bool) { + + dropsInt, ok := sdk.NewIntFromString(drops) + if !ok { + return denom1, denom2, amount1, amount2, false + } + + pairArray := strings.Split(pair, ",") + + denom1 = pairArray[0] + denom2 = pairArray[1] + + memberStore := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.MemberKeyPrefix)) + + b := memberStore.Get(types.MemberKey( + denom2, + denom1, + )) + if b == nil { + return denom1, denom2, amount1, amount2, false + } + + var member1 types.Member + k.cdc.MustUnmarshal(b, &member1) + + c := memberStore.Get(types.MemberKey( + denom1, + denom2, + )) + if c == nil { + return denom1, denom2, amount1, amount2, false + } + + var member2 types.Member 
+ k.cdc.MustUnmarshal(c, &member2) + + poolStore := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.PoolKeyPrefix)) + + d := poolStore.Get(types.PoolKey( + pair, + )) + if d == nil { + return denom1, denom2, amount1, amount2, false + } + + var pool types.Pool + k.cdc.MustUnmarshal(d, &pool) + + amount1, amount2, error := dropAmounts(dropsInt, pool, member1, member2) + if error != nil { + return denom1, denom2, amount1, amount2, false + } + + found = true + + return +} diff --git a/x/market/keeper/drop_test.go b/x/market/keeper/drop_test.go new file mode 100644 index 00000000..cd6c418e --- /dev/null +++ b/x/market/keeper/drop_test.go @@ -0,0 +1,68 @@ +package keeper_test + +import ( + "strconv" + "testing" + + keepertest "market/testutil/keeper" + "market/testutil/nullify" + "market/x/market/keeper" + "market/x/market/types" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/stretchr/testify/require" +) + +// Prevent strconv unused error +var _ = strconv.IntSize + +func createNDrop(keeper *keeper.Keeper, ctx sdk.Context, n int) []types.Drop { + items := make([]types.Drop, n) + for i := range items { + items[i].Uid = uint64(i) + items[i].Owner = strconv.Itoa(i) + items[i].Pair = strconv.Itoa(i) + items[i].Drops = sdk.NewIntFromUint64(uint64(i)) + items[i].Product = sdk.NewIntFromUint64(uint64(i)) + + keeper.SetDrop(ctx, items[i]) + } + return items +} + +func TestDropGet(t *testing.T) { + keeper := keepertest.CreateTestEnvironment(t) + items := createNDrop(keeper.MarketKeeper, keeper.Context, 10) + for _, item := range items { + rst, found := keeper.MarketKeeper.GetDrop(keeper.Context, + item.Uid, + ) + require.True(t, found) + require.Equal(t, + nullify.Fill(&item), + nullify.Fill(&rst), + ) + } +} +func TestDropRemove(t *testing.T) { + keeper := keepertest.CreateTestEnvironment(t) + items := createNDrop(keeper.MarketKeeper, keeper.Context, 10) + for _, item := range items { + keeper.MarketKeeper.RemoveDrop(keeper.Context, + item.Uid, + ) + _, 
found := keeper.MarketKeeper.GetDrop(keeper.Context,
+			item.Uid,
+		)
+		require.False(t, found)
+	}
+}
+
+func TestDropGetAll(t *testing.T) {
+	keeper := keepertest.CreateTestEnvironment(t)
+	items := createNDrop(keeper.MarketKeeper, keeper.Context, 10)
+	require.ElementsMatch(t,
+		nullify.Fill(items),
+		nullify.Fill(keeper.MarketKeeper.GetAllDrop(keeper.Context)),
+	)
+}
diff --git a/x/market/keeper/grpc_query_bookends.go b/x/market/keeper/grpc_query_bookends.go
new file mode 100644
index 00000000..2973f114
--- /dev/null
+++ b/x/market/keeper/grpc_query_bookends.go
@@ -0,0 +1,32 @@
+package keeper
+
+import (
+	"context"
+
+	sdk "github.com/cosmos/cosmos-sdk/types"
+
+	"market/x/market/types"
+
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+func (k Keeper) Bookends(goCtx context.Context, req *types.QueryBookendsRequest) (*types.QueryBookendsResponse, error) {
+	if req == nil {
+		return nil, status.Error(codes.InvalidArgument, "invalid request")
+	}
+
+	ctx := sdk.UnwrapSDKContext(goCtx)
+
+	rate, err := types.RateStringToInt(req.Rate)
+	if err != nil {
+		return nil, err
+	}
+
+	ends := k.BookEnds(ctx, req.GetCoinA(), req.GetCoinB(), req.GetOrderType(), rate)
+
+	// NOTE(review): scaffolding leftover — ctx is already used above, so the `_ = ctx` below is dead code; drop it when regenerating this patch
+	_ = ctx
+
+	return &types.QueryBookendsResponse{CoinA: req.CoinA, CoinB: req.CoinB, OrderType: req.OrderType, Rate: req.Rate, Prev: ends[0], Next: ends[1]}, nil
+}
diff --git a/x/market/keeper/grpc_query_burnings.go b/x/market/keeper/grpc_query_burnings.go
new file mode 100644
index 00000000..80527687
--- /dev/null
+++ b/x/market/keeper/grpc_query_burnings.go
@@ -0,0 +1,74 @@
+package keeper
+
+import (
+	"context"
+
+	"market/x/market/types"
+
+	"github.com/cosmos/cosmos-sdk/store/prefix"
+	sdk "github.com/cosmos/cosmos-sdk/types"
+	"github.com/cosmos/cosmos-sdk/types/query"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+func (k Keeper) BurningsAll(c context.Context, req *types.QueryAllBurningsRequest)
(*types.QueryAllBurningsResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + + var burningss []types.Burnings + ctx := sdk.UnwrapSDKContext(c) + + store := ctx.KVStore(k.storeKey) + burningsStore := prefix.NewStore(store, types.KeyPrefix(types.BurningsKeyPrefix)) + + pageRes, err := query.Paginate(burningsStore, req.Pagination, func(key []byte, value []byte) error { + var burnings types.Burnings + if err := k.cdc.Unmarshal(value, &burnings); err != nil { + return err + } + + burningss = append(burningss, burnings) + return nil + }) + + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + + return &types.QueryAllBurningsResponse{Burnings: burningss, Pagination: pageRes}, nil +} + +func (k Keeper) Burnings(c context.Context, req *types.QueryGetBurningsRequest) (*types.QueryGetBurningsResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + ctx := sdk.UnwrapSDKContext(c) + + val, found := k.GetBurnings( + ctx, + req.Denom, + ) + if !found { + return nil, status.Error(codes.InvalidArgument, "not found") + } + + return &types.QueryGetBurningsResponse{Burnings: val}, nil +} + +func (k Keeper) Burned(c context.Context, req *types.QueryBurnedRequest) (*types.QueryBurnedResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + ctx := sdk.UnwrapSDKContext(c) + + // Coin that will be burned + burnCoin := k.BurnCoin(ctx) + + val := k.GetBurned( + ctx, + ) + + return &types.QueryBurnedResponse{Denom: burnCoin, Amount: val.Amount.String()}, nil +} diff --git a/x/market/keeper/grpc_query_burnings_test.go b/x/market/keeper/grpc_query_burnings_test.go new file mode 100644 index 00000000..0abf4367 --- /dev/null +++ b/x/market/keeper/grpc_query_burnings_test.go @@ -0,0 +1,126 @@ +package keeper_test + +import ( + "strconv" + "testing" + + sdk "github.com/cosmos/cosmos-sdk/types" + 
"github.com/cosmos/cosmos-sdk/types/query" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + keepertest "market/testutil/keeper" + "market/testutil/nullify" + "market/x/market/types" +) + +// Prevent strconv unused error +var _ = strconv.IntSize + +func TestBurningsQuerySingle(t *testing.T) { + keeper := keepertest.CreateTestEnvironment(t) + wctx := sdk.WrapSDKContext(keeper.Context) + msgs := createNBurnings(keeper.MarketKeeper, keeper.Context, 2) + for _, tc := range []struct { + desc string + request *types.QueryGetBurningsRequest + response *types.QueryGetBurningsResponse + err error + }{ + { + desc: "First", + request: &types.QueryGetBurningsRequest{ + Denom: msgs[0].Denom, + }, + response: &types.QueryGetBurningsResponse{Burnings: msgs[0]}, + }, + { + desc: "Second", + request: &types.QueryGetBurningsRequest{ + Denom: msgs[1].Denom, + }, + response: &types.QueryGetBurningsResponse{Burnings: msgs[1]}, + }, + { + desc: "KeyNotFound", + request: &types.QueryGetBurningsRequest{ + Denom: strconv.Itoa(100000), + }, + err: status.Error(codes.InvalidArgument, "not found"), + }, + { + desc: "InvalidRequest", + err: status.Error(codes.InvalidArgument, "invalid request"), + }, + } { + t.Run(tc.desc, func(t *testing.T) { + response, err := keeper.MarketKeeper.Burnings(wctx, tc.request) + if tc.err != nil { + require.ErrorIs(t, err, tc.err) + } else { + require.NoError(t, err) + require.Equal(t, + nullify.Fill(tc.response), + nullify.Fill(response), + ) + } + }) + } +} + +func TestBurningsQueryPaginated(t *testing.T) { + keeper := keepertest.CreateTestEnvironment(t) + wctx := sdk.WrapSDKContext(keeper.Context) + msgs := createNBurnings(keeper.MarketKeeper, keeper.Context, 5) + + request := func(next []byte, offset, limit uint64, total bool) *types.QueryAllBurningsRequest { + return &types.QueryAllBurningsRequest{ + Pagination: &query.PageRequest{ + Key: next, + Offset: offset, + Limit: limit, + CountTotal: 
total, + }, + } + } + t.Run("ByOffset", func(t *testing.T) { + step := 2 + for i := 0; i < len(msgs); i += step { + resp, err := keeper.MarketKeeper.BurningsAll(wctx, request(nil, uint64(i), uint64(step), false)) + require.NoError(t, err) + require.LessOrEqual(t, len(resp.Burnings), step) + require.Subset(t, + nullify.Fill(msgs), + nullify.Fill(resp.Burnings), + ) + } + }) + t.Run("ByKey", func(t *testing.T) { + step := 2 + var next []byte + for i := 0; i < len(msgs); i += step { + resp, err := keeper.MarketKeeper.BurningsAll(wctx, request(next, 0, uint64(step), false)) + require.NoError(t, err) + require.LessOrEqual(t, len(resp.Burnings), step) + require.Subset(t, + nullify.Fill(msgs), + nullify.Fill(resp.Burnings), + ) + next = resp.Pagination.NextKey + } + }) + t.Run("Total", func(t *testing.T) { + resp, err := keeper.MarketKeeper.BurningsAll(wctx, request(nil, 0, 0, true)) + require.NoError(t, err) + require.Equal(t, len(msgs), int(resp.Pagination.Total)) + require.ElementsMatch(t, + nullify.Fill(msgs), + nullify.Fill(resp.Burnings), + ) + }) + t.Run("InvalidRequest", func(t *testing.T) { + _, err := keeper.MarketKeeper.BurningsAll(wctx, nil) + require.ErrorIs(t, err, status.Error(codes.InvalidArgument, "invalid request")) + }) +} diff --git a/x/market/keeper/grpc_query_drop.go b/x/market/keeper/grpc_query_drop.go new file mode 100644 index 00000000..ce751726 --- /dev/null +++ b/x/market/keeper/grpc_query_drop.go @@ -0,0 +1,178 @@ +package keeper + +import ( + "context" + + "market/x/market/types" + + "github.com/cosmos/cosmos-sdk/store/prefix" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/query" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func (k Keeper) DropAll(c context.Context, req *types.QueryAllDropRequest) (*types.QueryDropsResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + + var drops []types.Drop + ctx := sdk.UnwrapSDKContext(c) 
+ + store := ctx.KVStore(k.storeKey) + dropStore := prefix.NewStore(store, types.KeyPrefix(types.DropKeyPrefix)) + + pageRes, err := query.Paginate(dropStore, req.Pagination, func(key []byte, value []byte) error { + var drop types.Drop + if err := k.cdc.Unmarshal(value, &drop); err != nil { + return err + } + + drops = append(drops, drop) + return nil + }) + + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + + return &types.QueryDropsResponse{Drops: drops, Pagination: pageRes}, nil +} + +func (k Keeper) Drop(c context.Context, req *types.QueryDropRequest) (*types.QueryDropResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + ctx := sdk.UnwrapSDKContext(c) + + val, found := k.GetDrop( + ctx, + req.Uid, + ) + if !found { + return nil, status.Error(codes.InvalidArgument, "not found") + } + + return &types.QueryDropResponse{Drop: val}, nil +} + +func (k Keeper) DropPairs(c context.Context, req *types.QueryDropPairsRequest) (*types.QueryDropPairsResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + ctx := sdk.UnwrapSDKContext(c) + + val, found := k.GetDropPairs( + ctx, + req.Address, + ) + if !found { + return nil, status.Error(codes.InvalidArgument, "not found") + } + + return &types.QueryDropPairsResponse{Pairs: val.Pairs}, nil +} + +func (k Keeper) DropOwnerPair(c context.Context, req *types.QueryDropOwnerPairRequest) (*types.QueryDropsResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + ctx := sdk.UnwrapSDKContext(c) + + drops, found := k.GetDropOwnerPair( + ctx, + req.Address, + req.Pair, + ) + if !found { + return nil, status.Error(codes.InvalidArgument, "not found") + } + + return &types.QueryDropsResponse{Drops: drops}, nil +} + +func (k Keeper) DropAmounts(c context.Context, req *types.QueryDropAmountsRequest) (*types.QueryDropAmountsResponse, error) { + if req 
== nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + ctx := sdk.UnwrapSDKContext(c) + + denom1, denom2, amount1, amount2, found := k.GetDropAmounts( + ctx, + req.Uid, + ) + if !found { + return nil, status.Error(codes.InvalidArgument, "not found") + } + + return &types.QueryDropAmountsResponse{ + Denom1: denom1, + Denom2: denom2, + Amount1: amount1.String(), + Amount2: amount2.String(), + }, nil +} + +func (k Keeper) DropCoin(c context.Context, req *types.QueryDropCoinRequest) (*types.QueryDropCoinResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + ctx := sdk.UnwrapSDKContext(c) + + err := sdk.ValidateDenom(req.DenomA) + if err != nil { + return nil, status.Error(codes.InvalidArgument, "invalid denomA") + } + + err = sdk.ValidateDenom(req.DenomB) + if err != nil { + return nil, status.Error(codes.InvalidArgument, "invalid denomB") + } + + amountA, ok := sdk.NewIntFromString(req.AmountA) + if !ok { + return nil, status.Error(codes.InvalidArgument, "invalid amountA") + } + if amountA.LTE(sdk.ZeroInt()) { + return nil, status.Error(codes.InvalidArgument, "invalid amountA") + } + + amountB, drops, found := k.GetDropCoin( + ctx, + req.DenomA, + req.DenomB, + amountA, + ) + if !found { + return nil, status.Error(codes.InvalidArgument, "not found") + } + + return &types.QueryDropCoinResponse{ + AmountB: amountB.String(), + Drops: drops.String(), + }, nil +} + +func (k Keeper) DropsToCoins(c context.Context, req *types.QueryDropsToCoinsRequest) (*types.QueryDropAmountsResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + ctx := sdk.UnwrapSDKContext(c) + + denom1, denom2, amount1, amount2, found := k.GetDropsToCoins( + ctx, + req.Pair, + req.Drops, + ) + if !found { + return nil, status.Error(codes.InvalidArgument, "not found") + } + + return &types.QueryDropAmountsResponse{ + Denom1: denom1, + Denom2: denom2, + Amount1: 
amount1.String(), + Amount2: amount2.String(), + }, nil +} diff --git a/x/market/keeper/grpc_query_drop_test.go b/x/market/keeper/grpc_query_drop_test.go new file mode 100644 index 00000000..88f27dcc --- /dev/null +++ b/x/market/keeper/grpc_query_drop_test.go @@ -0,0 +1,126 @@ +package keeper_test + +import ( + "strconv" + "testing" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/query" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + keepertest "market/testutil/keeper" + "market/testutil/nullify" + "market/x/market/types" +) + +// Prevent strconv unused error +var _ = strconv.IntSize + +func TestDropQuerySingle(t *testing.T) { + keeper := keepertest.CreateTestEnvironment(t) + wctx := sdk.WrapSDKContext(keeper.Context) + msgs := createNDrop(keeper.MarketKeeper, keeper.Context, 2) + for _, tc := range []struct { + desc string + request *types.QueryDropRequest + response *types.QueryDropResponse + err error + }{ + { + desc: "First", + request: &types.QueryDropRequest{ + Uid: msgs[0].Uid, + }, + response: &types.QueryDropResponse{Drop: msgs[0]}, + }, + { + desc: "Second", + request: &types.QueryDropRequest{ + Uid: msgs[1].Uid, + }, + response: &types.QueryDropResponse{Drop: msgs[1]}, + }, + { + desc: "KeyNotFound", + request: &types.QueryDropRequest{ + Uid: 100000, + }, + err: status.Error(codes.InvalidArgument, "not found"), + }, + { + desc: "InvalidRequest", + err: status.Error(codes.InvalidArgument, "invalid request"), + }, + } { + t.Run(tc.desc, func(t *testing.T) { + response, err := keeper.MarketKeeper.Drop(wctx, tc.request) + if tc.err != nil { + require.ErrorIs(t, err, tc.err) + } else { + require.NoError(t, err) + require.Equal(t, + nullify.Fill(tc.response), + nullify.Fill(response), + ) + } + }) + } +} + +func TestDropQueryPaginated(t *testing.T) { + keeper := keepertest.CreateTestEnvironment(t) + wctx := sdk.WrapSDKContext(keeper.Context) + msgs := 
createNDrop(keeper.MarketKeeper, keeper.Context, 5) + + request := func(next []byte, offset, limit uint64, total bool) *types.QueryAllDropRequest { + return &types.QueryAllDropRequest{ + Pagination: &query.PageRequest{ + Key: next, + Offset: offset, + Limit: limit, + CountTotal: total, + }, + } + } + t.Run("ByOffset", func(t *testing.T) { + step := 2 + for i := 0; i < len(msgs); i += step { + resp, err := keeper.MarketKeeper.DropAll(wctx, request(nil, uint64(i), uint64(step), false)) + require.NoError(t, err) + require.LessOrEqual(t, len(resp.Drops), step) + require.Subset(t, + nullify.Fill(msgs), + nullify.Fill(resp.Drops), + ) + } + }) + t.Run("ByKey", func(t *testing.T) { + step := 2 + var next []byte + for i := 0; i < len(msgs); i += step { + resp, err := keeper.MarketKeeper.DropAll(wctx, request(next, 0, uint64(step), false)) + require.NoError(t, err) + require.LessOrEqual(t, len(resp.Drops), step) + require.Subset(t, + nullify.Fill(msgs), + nullify.Fill(resp.Drops), + ) + next = resp.Pagination.NextKey + } + }) + t.Run("Total", func(t *testing.T) { + resp, err := keeper.MarketKeeper.DropAll(wctx, request(nil, 0, 0, true)) + require.NoError(t, err) + require.Equal(t, len(msgs), int(resp.Pagination.Total)) + require.ElementsMatch(t, + nullify.Fill(msgs), + nullify.Fill(resp.Drops), + ) + }) + t.Run("InvalidRequest", func(t *testing.T) { + _, err := keeper.MarketKeeper.DropAll(wctx, nil) + require.ErrorIs(t, err, status.Error(codes.InvalidArgument, "invalid request")) + }) +} diff --git a/x/market/keeper/grpc_query_member.go b/x/market/keeper/grpc_query_member.go new file mode 100644 index 00000000..01535130 --- /dev/null +++ b/x/market/keeper/grpc_query_member.go @@ -0,0 +1,59 @@ +package keeper + +import ( + "context" + + "market/x/market/types" + + "github.com/cosmos/cosmos-sdk/store/prefix" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/query" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func (k 
Keeper) MemberAll(c context.Context, req *types.QueryAllMemberRequest) (*types.QueryAllMemberResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + + var members []types.Member + ctx := sdk.UnwrapSDKContext(c) + + store := ctx.KVStore(k.storeKey) + memberStore := prefix.NewStore(store, types.KeyPrefix(types.MemberKeyPrefix)) + + pageRes, err := query.Paginate(memberStore, req.Pagination, func(key []byte, value []byte) error { + var member types.Member + if err := k.cdc.Unmarshal(value, &member); err != nil { + return err + } + + members = append(members, member) + return nil + }) + + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + + return &types.QueryAllMemberResponse{Member: members, Pagination: pageRes}, nil +} + +func (k Keeper) Member(c context.Context, req *types.QueryGetMemberRequest) (*types.QueryGetMemberResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + ctx := sdk.UnwrapSDKContext(c) + + val, found := k.GetMember( + ctx, + req.DenomA, + req.DenomB, + ) + if !found { + return nil, status.Error(codes.InvalidArgument, "not found") + } + + return &types.QueryGetMemberResponse{Member: val}, nil +} diff --git a/x/market/keeper/grpc_query_member_test.go b/x/market/keeper/grpc_query_member_test.go new file mode 100644 index 00000000..035500e8 --- /dev/null +++ b/x/market/keeper/grpc_query_member_test.go @@ -0,0 +1,129 @@ +package keeper_test + +import ( + "strconv" + "testing" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/query" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + keepertest "market/testutil/keeper" + "market/testutil/nullify" + "market/x/market/types" +) + +// Prevent strconv unused error +var _ = strconv.IntSize + +func TestMemberQuerySingle(t *testing.T) { + keeper := keepertest.CreateTestEnvironment(t) + 
wctx := sdk.WrapSDKContext(keeper.Context) + msgs := createNMember(keeper.MarketKeeper, keeper.Context, 2) + for _, tc := range []struct { + desc string + request *types.QueryGetMemberRequest + response *types.QueryGetMemberResponse + err error + }{ + { + desc: "First", + request: &types.QueryGetMemberRequest{ + DenomA: msgs[0].DenomA, + DenomB: msgs[0].DenomB, + }, + response: &types.QueryGetMemberResponse{Member: msgs[0]}, + }, + { + desc: "Second", + request: &types.QueryGetMemberRequest{ + DenomA: msgs[1].DenomA, + DenomB: msgs[1].DenomB, + }, + response: &types.QueryGetMemberResponse{Member: msgs[1]}, + }, + { + desc: "KeyNotFound", + request: &types.QueryGetMemberRequest{ + DenomA: strconv.Itoa(100000), + DenomB: strconv.Itoa(100000), + }, + err: status.Error(codes.InvalidArgument, "not found"), + }, + { + desc: "InvalidRequest", + err: status.Error(codes.InvalidArgument, "invalid request"), + }, + } { + t.Run(tc.desc, func(t *testing.T) { + response, err := keeper.MarketKeeper.Member(wctx, tc.request) + if tc.err != nil { + require.ErrorIs(t, err, tc.err) + } else { + require.NoError(t, err) + require.Equal(t, + nullify.Fill(tc.response), + nullify.Fill(response), + ) + } + }) + } +} + +func TestMemberQueryPaginated(t *testing.T) { + keeper := keepertest.CreateTestEnvironment(t) + wctx := sdk.WrapSDKContext(keeper.Context) + msgs := createNMember(keeper.MarketKeeper, keeper.Context, 5) + + request := func(next []byte, offset, limit uint64, total bool) *types.QueryAllMemberRequest { + return &types.QueryAllMemberRequest{ + Pagination: &query.PageRequest{ + Key: next, + Offset: offset, + Limit: limit, + CountTotal: total, + }, + } + } + t.Run("ByOffset", func(t *testing.T) { + step := 2 + for i := 0; i < len(msgs); i += step { + resp, err := keeper.MarketKeeper.MemberAll(wctx, request(nil, uint64(i), uint64(step), false)) + require.NoError(t, err) + require.LessOrEqual(t, len(resp.Member), step) + require.Subset(t, + nullify.Fill(msgs), + 
nullify.Fill(resp.Member), + ) + } + }) + t.Run("ByKey", func(t *testing.T) { + step := 2 + var next []byte + for i := 0; i < len(msgs); i += step { + resp, err := keeper.MarketKeeper.MemberAll(wctx, request(next, 0, uint64(step), false)) + require.NoError(t, err) + require.LessOrEqual(t, len(resp.Member), step) + require.Subset(t, + nullify.Fill(msgs), + nullify.Fill(resp.Member), + ) + next = resp.Pagination.NextKey + } + }) + t.Run("Total", func(t *testing.T) { + resp, err := keeper.MarketKeeper.MemberAll(wctx, request(nil, 0, 0, true)) + require.NoError(t, err) + require.Equal(t, len(msgs), int(resp.Pagination.Total)) + require.ElementsMatch(t, + nullify.Fill(msgs), + nullify.Fill(resp.Member), + ) + }) + t.Run("InvalidRequest", func(t *testing.T) { + _, err := keeper.MarketKeeper.MemberAll(wctx, nil) + require.ErrorIs(t, err, status.Error(codes.InvalidArgument, "invalid request")) + }) +} diff --git a/x/market/keeper/grpc_query_order.go b/x/market/keeper/grpc_query_order.go new file mode 100644 index 00000000..a24c3a4c --- /dev/null +++ b/x/market/keeper/grpc_query_order.go @@ -0,0 +1,131 @@ +package keeper + +import ( + "context" + + "market/x/market/types" + + "github.com/cosmos/cosmos-sdk/store/prefix" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + "github.com/cosmos/cosmos-sdk/types/query" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func (k Keeper) OrderAll(c context.Context, req *types.QueryAllOrderRequest) (*types.QueryOrdersResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + + var orders []types.Order + ctx := sdk.UnwrapSDKContext(c) + + store := ctx.KVStore(k.storeKey) + orderStore := prefix.NewStore(store, types.KeyPrefix(types.OrderKeyPrefix)) + + pageRes, err := query.Paginate(orderStore, req.Pagination, func(key []byte, value []byte) error { + var order types.Order + if err := k.cdc.Unmarshal(value, &order); 
err != nil { + return err + } + + orders = append(orders, order) + return nil + }) + + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + + return &types.QueryOrdersResponse{Orders: orders, Pagination: pageRes}, nil +} + +func (k Keeper) Order(c context.Context, req *types.QueryOrderRequest) (*types.QueryOrderResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + ctx := sdk.UnwrapSDKContext(c) + + val, found := k.GetOrder( + ctx, + req.Uid, + ) + if !found { + return nil, status.Error(codes.InvalidArgument, "not found") + } + + return &types.QueryOrderResponse{Order: val}, nil +} + +func (k Keeper) Book(goCtx context.Context, req *types.QueryBookRequest) (*types.QueryBookResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + + ctx := sdk.UnwrapSDKContext(goCtx) + + book := k.GetBook(ctx, req.DenomA, req.DenomB, req.OrderType) + + return &types.QueryBookResponse{Book: book}, nil +} + +func (k Keeper) OrderOwner(c context.Context, req *types.QueryOrderOwnerRequest) (*types.QueryOrdersResponse, error) { + + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + + ctx := sdk.UnwrapSDKContext(c) + + orders := k.GetOrderOwner(ctx, req.Address) + + return &types.QueryOrdersResponse{Orders: orders}, nil +} + +func (k Keeper) OrderOwnerUids(c context.Context, req *types.QueryOrderOwnerRequest) (*types.QueryOrderOwnerUidsResponse, error) { + + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + + ctx := sdk.UnwrapSDKContext(c) + + orders := k.GetOrderOwnerUids(ctx, req.Address) + + return &types.QueryOrderOwnerUidsResponse{Orders: orders}, nil +} + +func (k Keeper) Quote(goCtx context.Context, req *types.QueryQuoteRequest) (*types.QueryQuoteResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + + ctx := 
sdk.UnwrapSDKContext(goCtx) + + if req.DenomAsk != req.DenomAmount && req.DenomBid != req.DenomAmount { + return nil, sdkerrors.Wrapf(types.ErrDenomMismatch, "Denom %s not ask or bid", req.DenomAmount) + } + + amount, ok := sdk.NewIntFromString(req.Amount) + if !ok { + return nil, sdkerrors.Wrap(sdkerrors.ErrInvalidRequest, "invalid amount integer") + } + + memberAsk, found := k.GetMember(ctx, req.DenomBid, req.DenomAsk) + if !found { + return nil, sdkerrors.Wrapf(types.ErrMemberNotFound, "Member %s", req.DenomAsk) + } + + memberBid, found := k.GetMember(ctx, req.DenomAsk, req.DenomBid) + if !found { + return nil, sdkerrors.Wrapf(types.ErrMemberNotFound, "Member %s", req.DenomBid) + } + + denomResp, amountResp, error := k.GetQuote(ctx, memberAsk, memberBid, req.DenomAmount, amount) + if error != nil { + return nil, error + } + + return &types.QueryQuoteResponse{Denom: denomResp, Amount: amountResp.String()}, nil +} diff --git a/x/market/keeper/grpc_query_order_test.go b/x/market/keeper/grpc_query_order_test.go new file mode 100644 index 00000000..da7de78e --- /dev/null +++ b/x/market/keeper/grpc_query_order_test.go @@ -0,0 +1,126 @@ +package keeper_test + +import ( + "strconv" + "testing" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/query" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + keepertest "market/testutil/keeper" + "market/testutil/nullify" + "market/x/market/types" +) + +// Prevent strconv unused error +var _ = strconv.IntSize + +func TestOrderQuerySingle(t *testing.T) { + keeper := keepertest.CreateTestEnvironment(t) + wctx := sdk.WrapSDKContext(keeper.Context) + msgs := createNOrder(keeper.MarketKeeper, keeper.Context, 2) + for _, tc := range []struct { + desc string + request *types.QueryOrderRequest + response *types.QueryOrderResponse + err error + }{ + { + desc: "First", + request: &types.QueryOrderRequest{ + Uid: msgs[0].Uid, + }, + response: 
&types.QueryOrderResponse{Order: msgs[0]}, + }, + { + desc: "Second", + request: &types.QueryOrderRequest{ + Uid: msgs[1].Uid, + }, + response: &types.QueryOrderResponse{Order: msgs[1]}, + }, + { + desc: "KeyNotFound", + request: &types.QueryOrderRequest{ + Uid: 100000, + }, + err: status.Error(codes.InvalidArgument, "not found"), + }, + { + desc: "InvalidRequest", + err: status.Error(codes.InvalidArgument, "invalid request"), + }, + } { + t.Run(tc.desc, func(t *testing.T) { + response, err := keeper.MarketKeeper.Order(wctx, tc.request) + if tc.err != nil { + require.ErrorIs(t, err, tc.err) + } else { + require.NoError(t, err) + require.Equal(t, + nullify.Fill(tc.response), + nullify.Fill(response), + ) + } + }) + } +} + +func TestOrderQueryPaginated(t *testing.T) { + keeper := keepertest.CreateTestEnvironment(t) + wctx := sdk.WrapSDKContext(keeper.Context) + msgs := createNOrder(keeper.MarketKeeper, keeper.Context, 5) + + request := func(next []byte, offset, limit uint64, total bool) *types.QueryAllOrderRequest { + return &types.QueryAllOrderRequest{ + Pagination: &query.PageRequest{ + Key: next, + Offset: offset, + Limit: limit, + CountTotal: total, + }, + } + } + t.Run("ByOffset", func(t *testing.T) { + step := 2 + for i := 0; i < len(msgs); i += step { + resp, err := keeper.MarketKeeper.OrderAll(wctx, request(nil, uint64(i), uint64(step), false)) + require.NoError(t, err) + require.LessOrEqual(t, len(resp.Orders), step) + require.Subset(t, + nullify.Fill(msgs), + nullify.Fill(resp.Orders), + ) + } + }) + t.Run("ByKey", func(t *testing.T) { + step := 2 + var next []byte + for i := 0; i < len(msgs); i += step { + resp, err := keeper.MarketKeeper.OrderAll(wctx, request(next, 0, uint64(step), false)) + require.NoError(t, err) + require.LessOrEqual(t, len(resp.Orders), step) + require.Subset(t, + nullify.Fill(msgs), + nullify.Fill(resp.Orders), + ) + next = resp.Pagination.NextKey + } + }) + t.Run("Total", func(t *testing.T) { + resp, err := 
keeper.MarketKeeper.OrderAll(wctx, request(nil, 0, 0, true)) + require.NoError(t, err) + require.Equal(t, len(msgs), int(resp.Pagination.Total)) + require.ElementsMatch(t, + nullify.Fill(msgs), + nullify.Fill(resp.Orders), + ) + }) + t.Run("InvalidRequest", func(t *testing.T) { + _, err := keeper.MarketKeeper.OrderAll(wctx, nil) + require.ErrorIs(t, err, status.Error(codes.InvalidArgument, "invalid request")) + }) +} diff --git a/x/market/keeper/grpc_query_params.go b/x/market/keeper/grpc_query_params.go index 09ea57a5..a0f33684 100644 --- a/x/market/keeper/grpc_query_params.go +++ b/x/market/keeper/grpc_query_params.go @@ -3,10 +3,11 @@ package keeper import ( "context" + "market/x/market/types" + sdk "github.com/cosmos/cosmos-sdk/types" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - "market/x/market/types" ) func (k Keeper) Params(c context.Context, req *types.QueryParamsRequest) (*types.QueryParamsResponse, error) { diff --git a/x/market/keeper/grpc_query_params_test.go b/x/market/keeper/grpc_query_params_test.go index 91a8d8bd..4dbb47c9 100644 --- a/x/market/keeper/grpc_query_params_test.go +++ b/x/market/keeper/grpc_query_params_test.go @@ -3,19 +3,20 @@ package keeper_test import ( "testing" - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/stretchr/testify/require" testkeeper "market/testutil/keeper" "market/x/market/types" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/stretchr/testify/require" ) func TestParamsQuery(t *testing.T) { - keeper, ctx := testkeeper.MarketKeeper(t) - wctx := sdk.WrapSDKContext(ctx) + keeper := testkeeper.CreateTestEnvironment(t) + wctx := sdk.WrapSDKContext(keeper.Context) params := types.DefaultParams() - keeper.SetParams(ctx, params) + keeper.MarketKeeper.SetParams(keeper.Context, params) - response, err := keeper.Params(wctx, &types.QueryParamsRequest{}) + response, err := keeper.MarketKeeper.Params(wctx, &types.QueryParamsRequest{}) require.NoError(t, err) require.Equal(t, 
&types.QueryParamsResponse{Params: params}, response) } diff --git a/x/market/keeper/grpc_query_pool.go b/x/market/keeper/grpc_query_pool.go new file mode 100644 index 00000000..b78f44fb --- /dev/null +++ b/x/market/keeper/grpc_query_pool.go @@ -0,0 +1,120 @@ +package keeper + +import ( + "context" + + "market/x/market/types" + + "github.com/cosmos/cosmos-sdk/store/prefix" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/query" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func (k Keeper) PoolAll(c context.Context, req *types.QueryAllPoolRequest) (*types.QueryAllPoolResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + + var pools []types.Pool + ctx := sdk.UnwrapSDKContext(c) + + store := ctx.KVStore(k.storeKey) + poolStore := prefix.NewStore(store, types.KeyPrefix(types.PoolKeyPrefix)) + + pageRes, err := query.Paginate(poolStore, req.Pagination, func(key []byte, value []byte) error { + var pool types.Pool + if err := k.cdc.Unmarshal(value, &pool); err != nil { + return err + } + + pools = append(pools, pool) + return nil + }) + + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + + return &types.QueryAllPoolResponse{Pool: pools, Pagination: pageRes}, nil +} + +func (k Keeper) Pool(c context.Context, req *types.QueryGetPoolRequest) (*types.QueryGetPoolResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + ctx := sdk.UnwrapSDKContext(c) + + val, found := k.GetPool( + ctx, + req.Pair, + ) + if !found { + return nil, status.Error(codes.InvalidArgument, "not found") + } + + return &types.QueryGetPoolResponse{Pool: val}, nil +} + +func (k Keeper) History(c context.Context, req *types.QueryHistoryRequest) (*types.QueryHistoryResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + ctx := sdk.UnwrapSDKContext(c) + + 
val, found := k.GetHistory( + ctx, + req.Pair, + req.Length, + ) + if !found { + return nil, status.Error(codes.InvalidArgument, "not found") + } + + return &types.QueryHistoryResponse{History: val}, nil +} + +func (k Keeper) VolumeAll(c context.Context, req *types.QueryAllVolumeRequest) (*types.QueryAllVolumeResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + + var volumes []types.Volume + ctx := sdk.UnwrapSDKContext(c) + + store := ctx.KVStore(k.storeKey) + volumeStore := prefix.NewStore(store, types.KeyPrefix(types.VolumeKeyPrefix)) + + pageRes, err := query.Paginate(volumeStore, req.Pagination, func(key []byte, value []byte) error { + var volume types.Volume + if err := k.cdc.Unmarshal(value, &volume); err != nil { + return err + } + + volumes = append(volumes, volume) + return nil + }) + + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + return &types.QueryAllVolumeResponse{Volumes: volumes, Pagination: pageRes}, nil +} + +func (k Keeper) Volume(c context.Context, req *types.QueryVolumeRequest) (*types.QueryVolumeResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + ctx := sdk.UnwrapSDKContext(c) + + val, found := k.GetVolume( + ctx, + req.Denom, + ) + if !found { + return nil, status.Error(codes.InvalidArgument, "not found") + } + + return &types.QueryVolumeResponse{Amount: val.Amount.String()}, nil +} diff --git a/x/market/keeper/grpc_query_pool_test.go b/x/market/keeper/grpc_query_pool_test.go new file mode 100644 index 00000000..434dfe1a --- /dev/null +++ b/x/market/keeper/grpc_query_pool_test.go @@ -0,0 +1,126 @@ +package keeper_test + +import ( + "strconv" + "testing" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/query" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + keepertest "market/testutil/keeper" + 
"market/testutil/nullify" + "market/x/market/types" +) + +// Prevent strconv unused error +var _ = strconv.IntSize + +func TestPoolQuerySingle(t *testing.T) { + keeper := keepertest.CreateTestEnvironment(t) + wctx := sdk.WrapSDKContext(keeper.Context) + msgs := createNPool(keeper.MarketKeeper, keeper.Context, 2) + for _, tc := range []struct { + desc string + request *types.QueryGetPoolRequest + response *types.QueryGetPoolResponse + err error + }{ + { + desc: "First", + request: &types.QueryGetPoolRequest{ + Pair: msgs[0].Pair, + }, + response: &types.QueryGetPoolResponse{Pool: msgs[0]}, + }, + { + desc: "Second", + request: &types.QueryGetPoolRequest{ + Pair: msgs[1].Pair, + }, + response: &types.QueryGetPoolResponse{Pool: msgs[1]}, + }, + { + desc: "KeyNotFound", + request: &types.QueryGetPoolRequest{ + Pair: strconv.Itoa(100000), + }, + err: status.Error(codes.InvalidArgument, "not found"), + }, + { + desc: "InvalidRequest", + err: status.Error(codes.InvalidArgument, "invalid request"), + }, + } { + t.Run(tc.desc, func(t *testing.T) { + response, err := keeper.MarketKeeper.Pool(wctx, tc.request) + if tc.err != nil { + require.ErrorIs(t, err, tc.err) + } else { + require.NoError(t, err) + require.Equal(t, + nullify.Fill(tc.response), + nullify.Fill(response), + ) + } + }) + } +} + +func TestPoolQueryPaginated(t *testing.T) { + keeper := keepertest.CreateTestEnvironment(t) + wctx := sdk.WrapSDKContext(keeper.Context) + msgs := createNPool(keeper.MarketKeeper, keeper.Context, 5) + + request := func(next []byte, offset, limit uint64, total bool) *types.QueryAllPoolRequest { + return &types.QueryAllPoolRequest{ + Pagination: &query.PageRequest{ + Key: next, + Offset: offset, + Limit: limit, + CountTotal: total, + }, + } + } + t.Run("ByOffset", func(t *testing.T) { + step := 2 + for i := 0; i < len(msgs); i += step { + resp, err := keeper.MarketKeeper.PoolAll(wctx, request(nil, uint64(i), uint64(step), false)) + require.NoError(t, err) + require.LessOrEqual(t, 
len(resp.Pool), step) + require.Subset(t, + nullify.Fill(msgs), + nullify.Fill(resp.Pool), + ) + } + }) + t.Run("ByKey", func(t *testing.T) { + step := 2 + var next []byte + for i := 0; i < len(msgs); i += step { + resp, err := keeper.MarketKeeper.PoolAll(wctx, request(next, 0, uint64(step), false)) + require.NoError(t, err) + require.LessOrEqual(t, len(resp.Pool), step) + require.Subset(t, + nullify.Fill(msgs), + nullify.Fill(resp.Pool), + ) + next = resp.Pagination.NextKey + } + }) + t.Run("Total", func(t *testing.T) { + resp, err := keeper.MarketKeeper.PoolAll(wctx, request(nil, 0, 0, true)) + require.NoError(t, err) + require.Equal(t, len(msgs), int(resp.Pagination.Total)) + require.ElementsMatch(t, + nullify.Fill(msgs), + nullify.Fill(resp.Pool), + ) + }) + t.Run("InvalidRequest", func(t *testing.T) { + _, err := keeper.MarketKeeper.PoolAll(wctx, nil) + require.ErrorIs(t, err, status.Error(codes.InvalidArgument, "invalid request")) + }) +} diff --git a/x/market/keeper/keeper.go b/x/market/keeper/keeper.go index ada57510..01ca3ae7 100644 --- a/x/market/keeper/keeper.go +++ b/x/market/keeper/keeper.go @@ -5,10 +5,12 @@ import ( "github.com/tendermint/tendermint/libs/log" + "market/x/market/types" + "github.com/cosmos/cosmos-sdk/codec" sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" paramtypes "github.com/cosmos/cosmos-sdk/x/params/types" - "market/x/market/types" ) type ( @@ -17,6 +19,8 @@ type ( storeKey sdk.StoreKey memKey sdk.StoreKey paramstore paramtypes.Subspace + + bankKeeper types.BankKeeper } ) @@ -26,6 +30,7 @@ func NewKeeper( memKey sdk.StoreKey, ps paramtypes.Subspace, + bankKeeper types.BankKeeper, ) *Keeper { // set KeyTable if it has not already been set if !ps.HasKeyTable() { @@ -38,9 +43,60 @@ func NewKeeper( storeKey: storeKey, memKey: memKey, paramstore: ps, + bankKeeper: bankKeeper, } } func (k Keeper) Logger(ctx sdk.Context) log.Logger { return ctx.Logger().With("module", fmt.Sprintf("x/%s", 
types.ModuleName)) } + +func (k Keeper) validateSenderBalance(ctx sdk.Context, senderAddress sdk.AccAddress, coins sdk.Coins) error { + for _, coin := range coins { + balance := k.bankKeeper.GetBalance(ctx, senderAddress, coin.Denom) + if balance.IsLT(coin) { + return sdkerrors.Wrapf( + types.ErrInsufficientBalance, "%s is smaller than %s", balance, coin) + } + } + + return nil +} + +func addUid(s []uint64, r uint64) ([]uint64, bool) { + for _, v := range s { + if v == r { + return s, false + } + } + + return append(s, r), true +} + +func removeUid(s []uint64, r uint64) ([]uint64, bool) { + for i, v := range s { + if v == r { + return append(s[:i], s[i+1:]...), true + } + } + return s, false +} + +func addPair(s []string, r string) ([]string, bool) { + for _, v := range s { + if v == r { + return s, false + } + } + + return append(s, r), true +} + +func removePair(s []string, r string) ([]string, bool) { + for i, v := range s { + if v == r { + return append(s[:i], s[i+1:]...), true + } + } + return s, false +} diff --git a/x/market/keeper/member.go b/x/market/keeper/member.go new file mode 100644 index 00000000..b2017bef --- /dev/null +++ b/x/market/keeper/member.go @@ -0,0 +1,96 @@ +package keeper + +import ( + "market/x/market/types" + + "github.com/cosmos/cosmos-sdk/store/prefix" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// SetMember set a specific member in the store from its index +func (k Keeper) SetMember(ctx sdk.Context, member types.Member) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.MemberKeyPrefix)) + b := k.cdc.MustMarshal(&member) + store.Set(types.MemberSetKey( + member.DenomA, + member.DenomB, + //member.Balance, + //member.Previous, + //member.Limit, + //member.Stop, + //member.Protect, + ), b) +} + +// GetMember returns a member from its index +func (k Keeper) GetMember( + ctx sdk.Context, + denomA string, + denomB string, + +) (val types.Member, found bool) { + store := 
prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.MemberKeyPrefix)) + + b := store.Get(types.MemberKey( + denomA, + denomB, + )) + if b == nil { + return val, false + } + + k.cdc.MustUnmarshal(b, &val) + return val, true +} + +func (k Keeper) GetMemberWithPair( + ctx sdk.Context, + pair string, + denomA string, + denomB string, + +) (val types.Member, found bool) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.MemberKeyPrefix)) + + b := store.Get(types.MemberKeyPair( + pair, + denomA, + denomB, + )) + if b == nil { + return val, false + } + + k.cdc.MustUnmarshal(b, &val) + return val, true +} + +// RemoveMember removes a member from the store +func (k Keeper) RemoveMember( + ctx sdk.Context, + denomA string, + denomB string, + +) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.MemberKeyPrefix)) + store.Delete(types.MemberKey( + denomA, + denomB, + )) +} + +// GetAllMember returns all member +func (k Keeper) GetAllMember(ctx sdk.Context) (list []types.Member) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.MemberKeyPrefix)) + iterator := sdk.KVStorePrefixIterator(store, []byte{}) + + defer iterator.Close() + + for ; iterator.Valid(); iterator.Next() { + var val types.Member + k.cdc.MustUnmarshal(iterator.Value(), &val) + list = append(list, val) + } + + return +} diff --git a/x/market/keeper/member_test.go b/x/market/keeper/member_test.go new file mode 100644 index 00000000..ce997ef9 --- /dev/null +++ b/x/market/keeper/member_test.go @@ -0,0 +1,71 @@ +package keeper_test + +import ( + "strconv" + "testing" + + keepertest "market/testutil/keeper" + "market/testutil/nullify" + "market/x/market/keeper" + "market/x/market/types" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/stretchr/testify/require" +) + +// Prevent strconv unused error +var _ = strconv.IntSize + +func createNMember(keeper *keeper.Keeper, ctx sdk.Context, n int) []types.Member { + items := 
make([]types.Member, n) + for i := range items { + items[i].Pair = strconv.Itoa(i) + items[i].DenomA = strconv.Itoa(i) + items[i].DenomB = strconv.Itoa(i) + items[i].Balance = sdk.NewIntFromUint64(uint64(0)) + items[i].Previous = sdk.NewIntFromUint64(uint64(0)) + + keeper.SetMember(ctx, items[i]) + } + return items +} + +func TestMemberGet(t *testing.T) { + keeper := keepertest.CreateTestEnvironment(t) + items := createNMember(keeper.MarketKeeper, keeper.Context, 10) + for _, item := range items { + rst, found := keeper.MarketKeeper.GetMember(keeper.Context, + item.DenomA, + item.DenomB, + ) + require.True(t, found) + require.Equal(t, + nullify.Fill(&item), + nullify.Fill(&rst), + ) + } +} +func TestMemberRemove(t *testing.T) { + keeper := keepertest.CreateTestEnvironment(t) + items := createNMember(keeper.MarketKeeper, keeper.Context, 10) + for _, item := range items { + keeper.MarketKeeper.RemoveMember(keeper.Context, + item.DenomA, + item.DenomB, + ) + _, found := keeper.MarketKeeper.GetMember(keeper.Context, + item.DenomA, + item.DenomB, + ) + require.False(t, found) + } +} + +func TestMemberGetAll(t *testing.T) { + keeper := keepertest.CreateTestEnvironment(t) + items := createNMember(keeper.MarketKeeper, keeper.Context, 10) + require.ElementsMatch(t, + nullify.Fill(items), + nullify.Fill(keeper.MarketKeeper.GetAllMember(keeper.Context)), + ) +} diff --git a/x/market/keeper/msg_server_cancel_order.go b/x/market/keeper/msg_server_cancel_order.go new file mode 100644 index 00000000..0fabb542 --- /dev/null +++ b/x/market/keeper/msg_server_cancel_order.go @@ -0,0 +1,109 @@ +package keeper + +import ( + "context" + "strconv" + + "market/x/market/types" + + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +func (k msgServer) CancelOrder(goCtx context.Context, msg *types.MsgCancelOrder) (*types.MsgCancelOrderResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + + uid, _ := strconv.ParseUint(msg.Uid, 10, 64) + + 
order, found := k.GetOrder(ctx, uid) + if !found { + return nil, sdkerrors.Wrapf(types.ErrOrderNotFound, "%s", msg.Uid) + } + + if order.Owner != msg.Creator { + return nil, sdkerrors.Wrapf(types.ErrNotOrderOwner, "%s", msg.Uid) + } + + memberBid, found := k.GetMember(ctx, order.DenomAsk, order.DenomBid) + if !found { + return nil, sdkerrors.Wrapf(types.ErrMemberNotFound, "%s", order.DenomBid) + } + + if order.Prev == 0 { + if memberBid.Stop != order.Uid && memberBid.Limit != order.Uid { + return nil, sdkerrors.Wrapf(types.ErrInvalidOrder, "%c", order.Uid) + } + + if order.Next == 0 { + if order.OrderType == "stop" { + memberBid.Stop = 0 + } + + if order.OrderType == "limit" { + memberBid.Limit = 0 + } + + k.SetMember(ctx, memberBid) + } else { + nextOrder, found := k.GetOrder(ctx, order.Next) + if !found { + return nil, sdkerrors.Wrapf(types.ErrOrderNotFound, "%c", order.Next) + } + + nextOrder.Prev = 0 + + if order.OrderType == "stop" { + memberBid.Stop = order.Next + } + + if order.OrderType == "limit" { + memberBid.Limit = order.Next + } + + k.SetMember(ctx, memberBid) + k.SetOrder(ctx, nextOrder) + } + } else { + prevOrder, found := k.GetOrder(ctx, order.Prev) + if !found { + return nil, sdkerrors.Wrapf(types.ErrOrderNotFound, "%c", order.Prev) + } + + if order.Next == 0 { + prevOrder.Next = 0 + k.SetOrder(ctx, prevOrder) + } else { + nextOrder, found := k.GetOrder(ctx, order.Next) + if !found { + return nil, sdkerrors.Wrapf(types.ErrOrderNotFound, "%c", order.Next) + } + + nextOrder.Prev = order.Prev + prevOrder.Next = order.Next + + k.SetOrder(ctx, prevOrder) + k.SetOrder(ctx, nextOrder) + } + } + + coinBid := sdk.NewCoin(order.DenomBid, order.Amount) + coinsBid := sdk.NewCoins(coinBid) + + owner, err := sdk.AccAddressFromBech32(order.Owner) + if err != nil { + return nil, err + } + + // Transfer order amount to module + sdkError := k.bankKeeper.SendCoinsFromModuleToAccount(ctx, types.ModuleName, owner, coinsBid) + if sdkError != nil { + return nil, sdkError 
+ } + + order.Status = "canceled" + order.UpdTime = ctx.BlockHeader().Time.Unix() + k.RemoveOrderOwner(ctx, order.Owner, order.Uid) + k.SetOrder(ctx, order) + + return &types.MsgCancelOrderResponse{}, nil +} diff --git a/x/market/keeper/msg_server_cancel_order_test.go b/x/market/keeper/msg_server_cancel_order_test.go new file mode 100644 index 00000000..67e90353 --- /dev/null +++ b/x/market/keeper/msg_server_cancel_order_test.go @@ -0,0 +1,332 @@ +package keeper_test + +import ( + "strconv" + "strings" + "testing" + + keepertest "market/testutil/keeper" + "market/testutil/sample" + "market/x/market/keeper" + "market/x/market/types" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/stretchr/testify/require" +) + +func TestCancelOrder_case1_stop(t *testing.T) { + testInput := keepertest.CreateTestEnvironment(t) + // TestData + testdata := testData{coinAStr: "30CoinA", coinBStr: "40CoinB", RateAstrArray: []string{"60", "70"}, RateBstrArray: []string{"80", "90"}} + coinPair, _ := sample.SampleCoins("70CoinA", "70CoinB") + denomA, denomB := sample.SampleDenoms(coinPair) + pair := strings.Join([]string{denomA, denomB}, ",") + + // MintCoins + require.NoError(t, testInput.BankKeeper.MintCoins(testInput.Context, types.ModuleName, coinPair)) + + // SendCoinsFromModuleToAccount + requestAddress, err := sdk.AccAddressFromBech32(addr) + require.NoError(t, err) + require.NoError(t, testInput.BankKeeper.SendCoinsFromModuleToAccount(testInput.Context, types.ModuleName, requestAddress, coinPair)) + + // Create Pool + var p = types.MsgCreatePool{CoinA: testdata.coinAStr, CoinB: testdata.coinBStr, Creator: addr} + _, err = keeper.NewMsgServerImpl(*testInput.MarketKeeper).CreatePool(sdk.WrapSDKContext(testInput.Context), &p) + require.NoError(t, err) + + // CreateDrop + var d = types.MsgCreateDrop{Creator: addr, Pair: pair, Drops: "120"} + _, err = keeper.NewMsgServerImpl(*testInput.MarketKeeper).CreateDrop(sdk.WrapSDKContext(testInput.Context), &d) + require.NoError(t, err) 
+ + beforecount := testInput.MarketKeeper.GetUidCount(testInput.Context) + + // Create Order + var o = types.MsgCreateOrder{Creator: addr, DenomAsk: denomA, DenomBid: denomB, Rate: testdata.RateAstrArray, OrderType: "stop", Amount: "0", Prev: "0", Next: "0"} + _, err = keeper.NewMsgServerImpl(*testInput.MarketKeeper).CreateOrder(sdk.WrapSDKContext(testInput.Context), &o) + require.NoError(t, err) + aftercount := testInput.MarketKeeper.GetUidCount(testInput.Context) + require.Equal(t, beforecount+1, aftercount) + + // Validate Order + orders, orderfound := testInput.MarketKeeper.GetOrder(testInput.Context, beforecount) + require.True(t, orderfound) + require.Equal(t, orders.DenomBid, denomB) + require.Equal(t, orders.DenomAsk, denomA) + require.Equal(t, orders.Amount.String(), o.Amount) + + // Validate GetMember + memberA, memberfound := testInput.MarketKeeper.GetMember(testInput.Context, orders.DenomBid, orders.DenomAsk) + + require.True(t, memberfound) + require.Equal(t, memberA.DenomA, denomB) + require.Equal(t, memberA.DenomB, denomA) + require.Equal(t, "33", memberA.Balance.String()) + require.Equal(t, memberA.Stop, uint64(0)) + + // Cancel Order + Uid := strconv.FormatUint(orders.Uid, 10) + var co = types.MsgCancelOrder{Creator: addr, Uid: Uid} + _, err = keeper.NewMsgServerImpl(*testInput.MarketKeeper).CancelOrder(sdk.WrapSDKContext(testInput.Context), &co) + require.NoError(t, err) + + // Validate GetMember + memberA, memberfound = testInput.MarketKeeper.GetMember(testInput.Context, orders.DenomBid, orders.DenomAsk) + require.True(t, memberfound) + require.Equal(t, memberA.DenomA, denomB) + require.Equal(t, memberA.DenomB, denomA) + require.Equal(t, "33", memberA.Balance.String()) + require.Equal(t, memberA.Stop, uint64(0)) + + //Validate Order + orders, orderfound = testInput.MarketKeeper.GetOrder(testInput.Context, beforecount) + require.True(t, orderfound) + require.True(t, orders.Status == "canceled") + require.Equal(t, orders.DenomBid, denomB) + 
require.Equal(t, orders.DenomAsk, denomA) + require.Equal(t, orders.Amount.String(), o.Amount) + require.Equal(t, o.OrderType, "stop") + +} + +func TestCancelOrder_case1_limit(t *testing.T) { + + testInput := keepertest.CreateTestEnvironment(t) + // TestData + testdata := testData{coinAStr: "30CoinA", coinBStr: "40CoinB", RateAstrArray: []string{"60", "70"}, RateBstrArray: []string{"80", "90"}} + coinPair, _ := sample.SampleCoins("70CoinA", "70CoinB") + denomA, denomB := sample.SampleDenoms(coinPair) + pair := strings.Join([]string{denomA, denomB}, ",") + + // MintCoins + require.NoError(t, testInput.BankKeeper.MintCoins(testInput.Context, types.ModuleName, coinPair)) + + // SendCoinsFromModuleToAccount + requestAddress, err := sdk.AccAddressFromBech32(addr) + require.NoError(t, err) + require.NoError(t, testInput.BankKeeper.SendCoinsFromModuleToAccount(testInput.Context, types.ModuleName, requestAddress, coinPair)) + + // Create Pool + var p = types.MsgCreatePool{CoinA: testdata.coinAStr, CoinB: testdata.coinBStr, Creator: addr} + _, err = keeper.NewMsgServerImpl(*testInput.MarketKeeper).CreatePool(sdk.WrapSDKContext(testInput.Context), &p) + require.NoError(t, err) + + // CreateDrop + var d = types.MsgCreateDrop{Creator: addr, Pair: pair, Drops: "120"} + _, err = keeper.NewMsgServerImpl(*testInput.MarketKeeper).CreateDrop(sdk.WrapSDKContext(testInput.Context), &d) + require.NoError(t, err) + + beforecount := testInput.MarketKeeper.GetUidCount(testInput.Context) + + balanceBefore := testInput.BankKeeper.GetBalance(testInput.Context, sdk.AccAddress(addr), denomB) + + // Create Order + var o = types.MsgCreateOrder{Creator: addr, DenomAsk: denomA, DenomBid: denomB, Rate: testdata.RateAstrArray, OrderType: "limit", Amount: "0", Prev: "0", Next: "0"} + _, err = keeper.NewMsgServerImpl(*testInput.MarketKeeper).CreateOrder(sdk.WrapSDKContext(testInput.Context), &o) + require.NoError(t, err) + + aftercount := testInput.MarketKeeper.GetUidCount(testInput.Context) + 
require.Equal(t, beforecount+1, aftercount) + + // Validate Order + orders, orderfound := testInput.MarketKeeper.GetOrder(testInput.Context, beforecount) + require.True(t, orderfound) + require.Equal(t, orders.DenomBid, denomB) + require.Equal(t, orders.DenomAsk, denomA) + require.Equal(t, orders.Amount.String(), o.Amount) + + //Validate GetMember + memberBid, memberfoundBid := testInput.MarketKeeper.GetMember(testInput.Context, orders.DenomAsk, orders.DenomBid) + require.True(t, memberfoundBid) + require.Equal(t, memberBid.DenomA, denomA) + require.Equal(t, memberBid.DenomB, denomB) + require.Equal(t, "44", memberBid.Balance.String()) + require.Equal(t, memberBid.Stop, uint64(0)) + + memberBid.Stop = orders.Uid + testInput.MarketKeeper.SetMember(testInput.Context, memberBid) + + //Cancel Order + Uid := strconv.FormatUint(orders.Uid, 10) + var co = types.MsgCancelOrder{Creator: addr, Uid: Uid} + _, err = keeper.NewMsgServerImpl(*testInput.MarketKeeper).CancelOrder(sdk.WrapSDKContext(testInput.Context), &co) + require.NoError(t, err) + + balanceAfter := testInput.BankKeeper.GetBalance(testInput.Context, sdk.AccAddress(addr), denomB) + require.True(t, balanceBefore.Amount.Equal(balanceAfter.Amount)) + + memberBid, memberfoundBid = testInput.MarketKeeper.GetMember(testInput.Context, orders.DenomAsk, orders.DenomBid) + require.True(t, memberfoundBid) + require.Equal(t, memberBid.DenomA, denomA) + require.Equal(t, memberBid.DenomB, denomB) + require.Equal(t, "44", memberBid.Balance.String()) + require.Equal(t, memberBid.Stop, uint64(orders.Uid)) + + //Validate Order + orders, orderfound = testInput.MarketKeeper.GetOrder(testInput.Context, beforecount) + require.True(t, orderfound) + require.True(t, orders.Status == "canceled") + require.Equal(t, orders.DenomBid, denomB) + require.Equal(t, orders.DenomAsk, denomA) + require.Equal(t, orders.Amount.String(), o.Amount) + require.Equal(t, orders.OrderType, "limit") + +} + +func TestCancelOrder_case2_stop(t *testing.T) { + + 
testInput := keepertest.CreateTestEnvironment(t) + // TestData + testdata := testData{coinAStr: "30CoinA", coinBStr: "40CoinB", RateAstrArray: []string{"60", "70"}, RateBstrArray: []string{"80", "90"}} + coinPair, _ := sample.SampleCoins("70CoinA", "70CoinB") + denomA, denomB := sample.SampleDenoms(coinPair) + pair := strings.Join([]string{denomA, denomB}, ",") + + // MintCoins + require.NoError(t, testInput.BankKeeper.MintCoins(testInput.Context, types.ModuleName, coinPair)) + + // SendCoinsFromModuleToAccount + requestAddress, err := sdk.AccAddressFromBech32(addr) + require.NoError(t, err) + require.NoError(t, testInput.BankKeeper.SendCoinsFromModuleToAccount(testInput.Context, types.ModuleName, requestAddress, coinPair)) + + // Create Pool + var p = types.MsgCreatePool{CoinA: testdata.coinAStr, CoinB: testdata.coinBStr, Creator: addr} + _, err = keeper.NewMsgServerImpl(*testInput.MarketKeeper).CreatePool(sdk.WrapSDKContext(testInput.Context), &p) + require.NoError(t, err) + + // CreateDrop + var d = types.MsgCreateDrop{Creator: addr, Pair: pair, Drops: "120"} + _, err = keeper.NewMsgServerImpl(*testInput.MarketKeeper).CreateDrop(sdk.WrapSDKContext(testInput.Context), &d) + require.NoError(t, err) + + beforecount := testInput.MarketKeeper.GetUidCount(testInput.Context) + + balanceBefore := testInput.BankKeeper.GetBalance(testInput.Context, sdk.AccAddress(addr), denomB) + + //Create Order + var o = types.MsgCreateOrder{Creator: addr, DenomAsk: denomA, DenomBid: denomB, Rate: testdata.RateAstrArray, OrderType: "stop", Amount: "0", Prev: "0", Next: "0"} + _, err = keeper.NewMsgServerImpl(*testInput.MarketKeeper).CreateOrder(sdk.WrapSDKContext(testInput.Context), &o) + require.NoError(t, err) + aftercount := testInput.MarketKeeper.GetUidCount(testInput.Context) + require.Equal(t, beforecount+1, aftercount) + + // Validate Order + orders, orderfound := testInput.MarketKeeper.GetOrder(testInput.Context, beforecount) + require.True(t, orderfound) + require.Equal(t, 
orders.DenomBid, denomB) + require.Equal(t, orders.DenomAsk, denomA) + require.Equal(t, orders.Amount.String(), o.Amount) + + // Validate GetMember + memberAsk, memberAskfound := testInput.MarketKeeper.GetMember(testInput.Context, orders.DenomBid, orders.DenomAsk) + + require.True(t, memberAskfound) + require.Equal(t, memberAsk.DenomA, denomB) + require.Equal(t, memberAsk.DenomB, denomA) + require.Equal(t, "33", memberAsk.Balance.String()) + require.Equal(t, memberAsk.Stop, uint64(0)) + + o.Next = strconv.FormatUint(beforecount, 10) + o.Rate = []string{"70", "80"} + _, err = keeper.NewMsgServerImpl(*testInput.MarketKeeper).CreateOrder(sdk.WrapSDKContext(testInput.Context), &o) + require.NoError(t, err) + + orders, orderfound = testInput.MarketKeeper.GetOrder(testInput.Context, aftercount) + require.True(t, orderfound) + require.Equal(t, orders.Next, beforecount) + + // Cancel Order + Uid := strconv.FormatUint(orders.Uid, 10) + var co = types.MsgCancelOrder{Creator: addr, Uid: Uid} + _, err = keeper.NewMsgServerImpl(*testInput.MarketKeeper).CancelOrder(sdk.WrapSDKContext(testInput.Context), &co) + require.NoError(t, err) + + balanceAfter := testInput.BankKeeper.GetBalance(testInput.Context, sdk.AccAddress(addr), denomB) + require.True(t, balanceBefore.Amount.Equal(balanceAfter.Amount)) + + memberBid, memberBidfound := testInput.MarketKeeper.GetMember(testInput.Context, orders.DenomAsk, orders.DenomBid) + require.True(t, memberBidfound) + require.Equal(t, memberBid.DenomA, denomA) + require.Equal(t, memberBid.DenomB, denomB) + require.Equal(t, "44", memberBid.Balance.String()) + require.Equal(t, memberBid.Stop, beforecount) + + // Validate Order + orders, orderfound = testInput.MarketKeeper.GetOrder(testInput.Context, aftercount) + require.True(t, orderfound) + require.True(t, orders.Status == "canceled") + require.Equal(t, orders.DenomBid, denomB) + require.Equal(t, orders.DenomAsk, denomA) + require.Equal(t, orders.Amount.String(), o.Amount) + require.Equal(t, 
orders.OrderType, "stop") +} + +func TestCancelOrderEmptyPool(t *testing.T) { + testInput := keepertest.CreateTestEnvironment(t) + + testdata, _, denomA, denomB, _ := common(t, testInput) + + beforecount := testInput.MarketKeeper.GetUidCount(testInput.Context) + + balanceBefore := testInput.BankKeeper.GetBalance(testInput.Context, sdk.AccAddress(addr), denomB) + + //Create Order + var o = types.MsgCreateOrder{Creator: addr, DenomAsk: denomA, DenomBid: denomB, Rate: testdata.RateAstrArray, OrderType: "stop", Amount: "0", Prev: "0", Next: "0"} + rate, _ := types.RateStringToInt(o.Rate) + bookends := testInput.MarketKeeper.BookEnds(testInput.Context, o.DenomAsk, o.DenomBid, o.OrderType, rate) + o.Prev = strconv.FormatUint(bookends[0], 10) + o.Next = strconv.FormatUint(bookends[1], 10) + _, err := keeper.NewMsgServerImpl(*testInput.MarketKeeper).CreateOrder(sdk.WrapSDKContext(testInput.Context), &o) + require.NoError(t, err) + + aftercount := testInput.MarketKeeper.GetUidCount(testInput.Context) + + require.Equal(t, beforecount+1, aftercount) + + //Validate Order + orders, orderfound := testInput.MarketKeeper.GetOrder(testInput.Context, beforecount) + require.True(t, orderfound) + require.Equal(t, orders.DenomBid, denomB) + require.Equal(t, orders.DenomAsk, denomA) + require.Equal(t, orders.Amount.String(), o.Amount) + + // Validate GetMember + memberAsk, memberAskfound := testInput.MarketKeeper.GetMember(testInput.Context, orders.DenomBid, orders.DenomAsk) + + require.True(t, memberAskfound) + require.Equal(t, memberAsk.DenomA, denomB) + require.Equal(t, memberAsk.DenomB, denomA) + require.Equal(t, "33", memberAsk.Balance.String()) + require.Equal(t, memberAsk.Stop, uint64(0)) + + // Validate RedeemDrop + Uid := strconv.FormatUint(1, 10) + var rd = types.MsgRedeemDrop{Creator: addr, Uid: Uid} + createRedeemDropResponse, redeemdropErr := keeper.NewMsgServerImpl(*testInput.MarketKeeper).RedeemDrop(sdk.WrapSDKContext(testInput.Context), &rd) + require.NoError(t, 
redeemdropErr) + require.Contains(t, rd.GetCreator(), createRedeemDropResponse.String()) + + // Validate RedeemDrop + Uid = strconv.FormatUint(2, 10) + rd = types.MsgRedeemDrop{Creator: addr, Uid: Uid} + createRedeemDropResponse, redeemdropErr = keeper.NewMsgServerImpl(*testInput.MarketKeeper).RedeemDrop(sdk.WrapSDKContext(testInput.Context), &rd) + require.NoError(t, redeemdropErr) + require.Contains(t, rd.GetCreator(), createRedeemDropResponse.String()) + + // Create Pool + var p = types.MsgCreatePool{CoinA: testdata.coinAStr, CoinB: testdata.coinBStr, Creator: addr} + _, err = keeper.NewMsgServerImpl(*testInput.MarketKeeper).CreatePool(sdk.WrapSDKContext(testInput.Context), &p) + require.NoError(t, err) + + // Cancel Order + Uid = strconv.FormatUint(beforecount, 10) + var co = types.MsgCancelOrder{Creator: addr, Uid: Uid} + _, err = keeper.NewMsgServerImpl(*testInput.MarketKeeper).CancelOrder(sdk.WrapSDKContext(testInput.Context), &co) + require.NoError(t, err) + + balanceAfter := testInput.BankKeeper.GetBalance(testInput.Context, sdk.AccAddress(addr), denomB) + require.True(t, balanceBefore.Amount.Equal(balanceAfter.Amount)) + +} diff --git a/x/market/keeper/msg_server_create_drop.go b/x/market/keeper/msg_server_create_drop.go new file mode 100644 index 00000000..46e156d9 --- /dev/null +++ b/x/market/keeper/msg_server_create_drop.go @@ -0,0 +1,171 @@ +package keeper + +import ( + "context" + "sort" + "strings" + + "market/x/market/types" + + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +func (k msgServer) CreateDrop(goCtx context.Context, msg *types.MsgCreateDrop) (*types.MsgCreateDropResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + + pairMsg := strings.Split(msg.Pair, ",") + sort.Strings(pairMsg) + + denom1 := pairMsg[0] + denom2 := pairMsg[1] + + pair := strings.Join(pairMsg, ",") + + pool, found := k.GetPool(ctx, pair) + if !found { + return nil, sdkerrors.Wrapf(types.ErrPoolNotFound, "%s", 
pair) + } + + if pool.Drops.Equal(sdk.ZeroInt()) { + return nil, sdkerrors.Wrapf(types.ErrPoolInactive, "%s", pair) + } + + member1, found := k.GetMember(ctx, denom2, denom1) + if !found { + return nil, sdkerrors.Wrapf(types.ErrMemberNotFound, "%s", pair) + } + + member2, found := k.GetMember(ctx, denom1, denom2) + if !found { + return nil, sdkerrors.Wrapf(types.ErrMemberNotFound, "%s", pair) + } + + if member1.Balance.Equal(sdk.ZeroInt()) { + return nil, sdkerrors.Wrapf(types.ErrMemberBalanceZero, "Member %s", member1.DenomB) + } + + if member2.Balance.Equal(sdk.ZeroInt()) { + return nil, sdkerrors.Wrapf(types.ErrMemberBalanceZero, "Member %s", member2.DenomB) + } + + // Create the uid + uid := k.GetUidCount(ctx) + + drops, _ := sdk.NewIntFromString(msg.Drops) + + dropAmtMember1, dropAmtMember2, error := dropAmounts(drops, pool, member1, member2) + if error != nil { + return nil, error + } + + dropProduct := dropAmtMember1.Mul(dropAmtMember2) + + coin1 := sdk.NewCoin(denom1, dropAmtMember1) + coin2 := sdk.NewCoin(denom2, dropAmtMember2) + + coinPair := sdk.NewCoins(coin1, coin2) + + // moduleAcc := sdk.AccAddress(crypto.AddressHash([]byte(types.ModuleName))) + // Get the borrower address + creator, _ := sdk.AccAddressFromBech32(msg.Creator) + + if err := k.validateSenderBalance(ctx, creator, coinPair); err != nil { + return nil, err + } + + // Use the module account as pool account + sdkError := k.bankKeeper.SendCoinsFromAccountToModule(ctx, creator, types.ModuleName, coinPair) + if sdkError != nil { + return nil, sdkError + } + + // Deposit into Pool + member1.Balance = member1.Balance.Add(dropAmtMember1) + k.SetMember(ctx, member1) + + member2.Balance = member2.Balance.Add(dropAmtMember2) + k.SetMember(ctx, member2) + + dropCreatorSum := drops + dropOwner, ok := k.GetDropsOwnerPair(ctx, msg.Creator, pair) + + if ok { + dropCreatorSum = dropCreatorSum.Add(dropOwner.Sum) + } + + pool = k.updateLeaders(ctx, pool, msg.Creator, dropCreatorSum) + + pool.Drops = 
pool.Drops.Add(drops) + + k.SetPool(ctx, pool) + + var drop = types.Drop{ + Uid: uid, + Owner: msg.Creator, + Pair: pair, + Drops: drops, + Product: dropProduct, + Active: true, + } + + // Add the drop to the keeper + k.SetDrop( + ctx, + drop, + ) + + k.SetDropOwner( + ctx, + drop, + ) + + // Update drop uid count + k.SetUidCount(ctx, uid+1) + + return &types.MsgCreateDropResponse{}, nil +} + +func (k msgServer) updateLeaders(ctx sdk.Context, pool types.Pool, dropCreator string, dropCreatorSum sdk.Int) types.Pool { + maxLeaders := len(strings.Split(k.EarnRates(ctx), ",")) + + for i := 0; i < len(pool.Leaders); i++ { + if pool.Leaders[i].Address == dropCreator { + pool.Leaders = pool.Leaders[:i+copy(pool.Leaders[i:], pool.Leaders[i+1:])] + } + } + + if dropCreatorSum.Equal(sdk.ZeroInt()) { + return pool + } + + if len(pool.Leaders) == 0 { + pool.Leaders = append(pool.Leaders, &types.Leader{ + Address: dropCreator, + Drops: dropCreatorSum, + }) + } else { + for i := 0; i < len(pool.Leaders); i++ { + if dropCreatorSum.GT(pool.Leaders[i].Drops) { + if len(pool.Leaders) < maxLeaders { + pool.Leaders = append(pool.Leaders, pool.Leaders[len(pool.Leaders)-1]) + } + copy(pool.Leaders[i+1:], pool.Leaders[i:]) + pool.Leaders[i] = &types.Leader{ + Address: dropCreator, + Drops: dropCreatorSum, + } + break + } else { + if (i == len(pool.Leaders)-1) && len(pool.Leaders) < maxLeaders { + pool.Leaders = append(pool.Leaders, &types.Leader{ + Address: dropCreator, + Drops: dropCreatorSum, + }) + break + } + } + } + } + return pool +} diff --git a/x/market/keeper/msg_server_create_drop_test.go b/x/market/keeper/msg_server_create_drop_test.go new file mode 100644 index 00000000..8c6c35a2 --- /dev/null +++ b/x/market/keeper/msg_server_create_drop_test.go @@ -0,0 +1,547 @@ +package keeper_test + +import ( + "strconv" + "strings" + "testing" + + keepertest "market/testutil/keeper" + "market/testutil/sample" + + sdk "github.com/cosmos/cosmos-sdk/types" + + "market/x/market/keeper" + 
"market/x/market/types" + + "github.com/stretchr/testify/require" +) + +func TestCreateDrop(t *testing.T) { + testInput := keepertest.CreateTestEnvironment(t) + + // TestData + testdata := testData{coinAStr: "30CoinA", coinBStr: "40CoinB", RateAstrArray: []string{"60", "70"}, RateBstrArray: []string{"80", "90"}} + coinPair, _ := sample.SampleCoins("70CoinA", "70CoinB") + denomA, denomB := sample.SampleDenoms(coinPair) + pair := strings.Join([]string{denomA, denomB}, ",") + + // MintCoins + require.NoError(t, testInput.BankKeeper.MintCoins(testInput.Context, types.ModuleName, coinPair)) + + // SendCoinsFromModuleToAccount + requestAddress, err := sdk.AccAddressFromBech32(addr) + require.NoError(t, err) + require.NoError(t, testInput.BankKeeper.SendCoinsFromModuleToAccount(testInput.Context, types.ModuleName, requestAddress, coinPair)) + + // MintCoins + require.NoError(t, testInput.BankKeeper.MintCoins(testInput.Context, types.ModuleName, coinPair)) + + // SendCoinsFromModuleToAccount + requestAddress2, err := sdk.AccAddressFromBech32(addr2) + require.NoError(t, err) + require.NoError(t, testInput.BankKeeper.SendCoinsFromModuleToAccount(testInput.Context, types.ModuleName, requestAddress2, coinPair)) + + // MintCoins + require.NoError(t, testInput.BankKeeper.MintCoins(testInput.Context, types.ModuleName, coinPair)) + + // SendCoinsFromModuleToAccount + requestAddress3, err := sdk.AccAddressFromBech32(addr3) + require.NoError(t, err) + require.NoError(t, testInput.BankKeeper.SendCoinsFromModuleToAccount(testInput.Context, types.ModuleName, requestAddress3, coinPair)) + + // GetUidCount before CreatePool + beforecount := testInput.MarketKeeper.GetUidCount(testInput.Context) + + // Create Pool + var p = types.MsgCreatePool{CoinA: testdata.coinAStr, CoinB: testdata.coinBStr, Creator: addr} + response, err := keeper.NewMsgServerImpl(*testInput.MarketKeeper).CreatePool(sdk.WrapSDKContext(testInput.Context), &p) + + // Validate CreatePool + require.NoError(t, err) + 
require.Contains(t, p.GetCreator(), response.String()) + require.Contains(t, p.GetCoinA(), response.String()) + require.Contains(t, p.GetCoinB(), response.String()) + + // Validate SetUidCount function. + aftercount := testInput.MarketKeeper.GetUidCount(testInput.Context) + require.Equal(t, beforecount+1, aftercount) + + // Validate GetDrop + drops, dropFound := testInput.MarketKeeper.GetDrop(testInput.Context, beforecount) + require.True(t, dropFound) + require.Equal(t, drops.Pair, pair) + + owner, ok := testInput.MarketKeeper.GetDropsOwnerPair(testInput.Context, addr, pair) + require.True(t, ok) + require.Truef(t, owner.Sum.Equal(sdk.NewInt(1200)), owner.Sum.String()) + + // Validate GetPool + rst1, found := testInput.MarketKeeper.GetPool(testInput.Context, pair) + require.True(t, found) + require.Equal(t, rst1.Pair, pair) + require.Equal(t, "1200", rst1.Drops.String()) + require.Equal(t, 1, len(rst1.Leaders)) + require.Equal(t, "1200", rst1.Leaders[0].Drops.String()) + + beforecount = aftercount + + // Validate CreateDrop + var d = types.MsgCreateDrop{Creator: addr2, Pair: pair, Drops: "120"} + createDropResponse, err := keeper.NewMsgServerImpl(*testInput.MarketKeeper).CreateDrop(sdk.WrapSDKContext(testInput.Context), &d) + require.NoError(t, err) + + // Validate SetUidCount function. 
+ aftercount = testInput.MarketKeeper.GetUidCount(testInput.Context) + require.Equal(t, beforecount+1, aftercount) + + pairs, ok := testInput.MarketKeeper.GetDropPairs(testInput.Context, addr) + require.True(t, ok) + require.Truef(t, pairs.Pairs[0] == pair, pairs.String()) + + // Validate GetPool + rst, found := testInput.MarketKeeper.GetPool(testInput.Context, pair) + require.True(t, found) + require.Equal(t, rst.Pair, pair) + require.Equal(t, "1320", rst.Drops.String()) + require.Equalf(t, addr, rst.Leaders[0].Address, rst.Leaders[0].Address) + require.Equalf(t, 2, len(rst.Leaders), rst.Leaders[1].Address) + require.Equal(t, "1200", rst.Leaders[0].Drops.String()) + + // Validate GetMember + members, memberfound := testInput.MarketKeeper.GetMember(testInput.Context, denomB, denomA) + members1, memberfound1 := testInput.MarketKeeper.GetMember(testInput.Context, denomA, denomB) + require.True(t, memberfound) + require.Equal(t, members.DenomA, denomB) + require.Equal(t, members.DenomB, denomA) + require.Equal(t, "33", members.Balance.String()) + + require.True(t, memberfound1) + require.Equal(t, members1.DenomA, denomA) + require.Equal(t, members1.DenomB, denomB) + require.Equal(t, "44", members1.Balance.String()) + + owner, ok = testInput.MarketKeeper.GetDropsOwnerPair(testInput.Context, addr, pair) + require.True(t, ok) + require.Truef(t, owner.Sum.Equal(sdk.NewInt(1200)), owner.Sum.String()) + + // Validate GetDrop + drops, dropFound = testInput.MarketKeeper.GetDrop(testInput.Context, beforecount) + require.True(t, dropFound) + require.Equal(t, drops.Pair, pair) + require.Equal(t, drops.Drops.String(), d.Drops) + require.Contains(t, d.GetCreator(), createDropResponse.String()) + + beforecount = aftercount + + // Validate CreateDrop + var e = types.MsgCreateDrop{Creator: addr, Pair: pair, Drops: "120"} + createDropResponse, err = keeper.NewMsgServerImpl(*testInput.MarketKeeper).CreateDrop(sdk.WrapSDKContext(testInput.Context), &e) + require.NoError(t, err) + + // 
Validate SetUidCount function. + aftercount = testInput.MarketKeeper.GetUidCount(testInput.Context) + require.Equal(t, beforecount+1, aftercount) + + pairs, ok = testInput.MarketKeeper.GetDropPairs(testInput.Context, addr) + require.True(t, ok) + require.Truef(t, pairs.Pairs[0] == pair, pairs.String()) + + // Validate GetPool + rst2, found := testInput.MarketKeeper.GetPool(testInput.Context, pair) + require.True(t, found) + require.Equal(t, rst2.Pair, pair) + require.Equal(t, "1440", rst2.Drops.String()) + require.Equal(t, "1320", rst2.Leaders[0].Drops.String()) + require.Equal(t, rst2.Leaders[0].Address, addr) + require.Equal(t, rst2.Leaders[1].Address, addr2) + require.Equal(t, 2, len(rst2.Leaders)) + require.Equal(t, "1320", rst2.Leaders[0].Drops.String()) + + // Validate GetDrop + drops, dropFound = testInput.MarketKeeper.GetDrop(testInput.Context, beforecount) + require.True(t, dropFound) + require.Equal(t, drops.Pair, pair) + require.Equal(t, drops.Drops.String(), e.Drops) + require.Contains(t, d.GetCreator(), createDropResponse.String()) + + beforecount = aftercount + + // Validate CreateDrop + var f = types.MsgCreateDrop{Creator: addr3, Pair: pair, Drops: "1000"} + createDropResponse, err = keeper.NewMsgServerImpl(*testInput.MarketKeeper).CreateDrop(sdk.WrapSDKContext(testInput.Context), &f) + require.NoError(t, err) + + // Validate SetUidCount function. 
+ aftercount = testInput.MarketKeeper.GetUidCount(testInput.Context) + require.Equal(t, beforecount+1, aftercount) + + pairs, ok = testInput.MarketKeeper.GetDropPairs(testInput.Context, addr3) + require.True(t, ok) + require.Truef(t, pairs.Pairs[0] == pair, pairs.String()) + + // Validate GetPool + rst, found = testInput.MarketKeeper.GetPool(testInput.Context, pair) + require.True(t, found) + require.Equal(t, rst.Pair, pair) + require.Equal(t, "2440", rst.Drops.String()) + require.Equal(t, 3, len(rst.Leaders)) + require.Equal(t, "1320", rst.Leaders[0].Drops.String()) + + // Validate GetDrop + drops, dropFound = testInput.MarketKeeper.GetDrop(testInput.Context, beforecount) + require.True(t, dropFound) + require.Equal(t, drops.Pair, pair) + require.Equal(t, drops.Drops.String(), f.Drops) + require.Contains(t, d.GetCreator(), createDropResponse.String()) + + beforecount = aftercount + + // Validate CreateDrop + var g = types.MsgCreateDrop{Creator: addr3, Pair: pair, Drops: "400"} + createDropResponse, err = keeper.NewMsgServerImpl(*testInput.MarketKeeper).CreateDrop(sdk.WrapSDKContext(testInput.Context), &g) + require.NoError(t, err) + + // Validate SetUidCount function. 
+ aftercount = testInput.MarketKeeper.GetUidCount(testInput.Context) + require.Equal(t, beforecount+1, aftercount) + + // Validate GetPool + rst, found = testInput.MarketKeeper.GetPool(testInput.Context, pair) + require.True(t, found) + require.Equal(t, rst.Pair, pair) + require.Equal(t, "2840", rst.Drops.String()) + require.Equal(t, 3, len(rst.Leaders)) + require.Equal(t, "1400", rst.Leaders[0].Drops.String()) + require.Equal(t, "1320", rst.Leaders[1].Drops.String()) + require.Equal(t, "120", rst.Leaders[2].Drops.String()) + require.Equalf(t, addr3, rst.Leaders[0].Address, rst.Leaders[0].Address) + require.Equalf(t, addr, rst.Leaders[1].Address, addr3) + require.Equalf(t, addr2, rst.Leaders[2].Address, rst.Leaders[2].Address) + + // Validate GetDrop + drops, dropFound = testInput.MarketKeeper.GetDrop(testInput.Context, beforecount) + require.True(t, dropFound) + require.Equal(t, drops.Pair, pair) + require.Equal(t, drops.Drops.String(), g.Drops) + require.Contains(t, d.GetCreator(), createDropResponse.String()) + +} + +func TestCreateDrop_Pool_Not_Found(t *testing.T) { + testInput := keepertest.CreateTestEnvironment(t) + + // TestData + testdata := testData{coinAStr: "30CoinA", coinBStr: "40CoinB", RateAstrArray: []string{"60", "70"}, RateBstrArray: []string{"80", "90"}} + coinPair, _ := sample.SampleCoins("70CoinA", "70CoinB") + denomA, denomB := sample.SampleDenoms(coinPair) + pair := strings.Join([]string{denomA, denomB}, ",") + + // MintCoins + require.NoError(t, testInput.BankKeeper.MintCoins(testInput.Context, types.ModuleName, coinPair)) + + // SendCoinsFromModuleToAccount + requestAddress, err := sdk.AccAddressFromBech32(addr) + require.NoError(t, err) + require.NoError(t, testInput.BankKeeper.SendCoinsFromModuleToAccount(testInput.Context, types.ModuleName, requestAddress, coinPair)) + + // GetUidCount before CreatePool + beforecount := testInput.MarketKeeper.GetUidCount(testInput.Context) + + // Create Pool + var p = types.MsgCreatePool{CoinA: 
testdata.coinAStr, CoinB: testdata.coinBStr, Creator: addr} + response, err := keeper.NewMsgServerImpl(*testInput.MarketKeeper).CreatePool(sdk.WrapSDKContext(testInput.Context), &p) + + // Validate CreatePool + require.NoError(t, err) + require.Contains(t, p.GetCreator(), response.String()) + require.Contains(t, p.GetCoinA(), response.String()) + require.Contains(t, p.GetCoinB(), response.String()) + + // Validate SetUidCount function. + aftercount := testInput.MarketKeeper.GetUidCount(testInput.Context) + require.Equal(t, beforecount+1, aftercount) + + // Validate GetDrop + drops, dropFound := testInput.MarketKeeper.GetDrop(testInput.Context, beforecount) + require.True(t, dropFound) + require.Equal(t, drops.Pair, pair) + + // Validate CreateDrop + scenarios := []struct { + coinAStr string + coinBStr string + RateAstrArray []string + RateBstrArray []string + Creator string + }{ + {coinAStr: "20CoinC", coinBStr: "20CoinD", RateAstrArray: []string{"10", "20"}, RateBstrArray: []string{"20", "30"}, Creator: addr}, + {coinAStr: "20CoinD", coinBStr: "20CoinA", RateAstrArray: []string{"10", "20"}, RateBstrArray: []string{"20", "30"}, Creator: sample.AccAddress()}, + } + for _, s := range scenarios { + coinPair, _ = sample.SampleCoins(s.coinAStr, s.coinBStr) + denomA, denomB = sample.SampleDenoms(coinPair) + pair = strings.Join([]string{denomA, denomB}, ",") + var d = types.MsgCreateDrop{Creator: s.Creator, Pair: pair, Drops: "70"} + _, err := keeper.NewMsgServerImpl(*testInput.MarketKeeper).CreateDrop(sdk.WrapSDKContext(testInput.Context), &d) + require.Error(t, err) + require.ErrorContains(t, err, "the pool not found") + + } +} + +func TestCreateDrop_Pool_Not_Active(t *testing.T) { + testInput := keepertest.CreateTestEnvironment(t) + //TestData + testdata := testData{coinAStr: "30CoinA", coinBStr: "40CoinB", RateAstrArray: []string{"60", "70"}, RateBstrArray: []string{"80", "90"}} + coinPair, _ := sample.SampleCoins("70CoinA", "70CoinB") + denomA, denomB := 
sample.SampleDenoms(coinPair) + pair := strings.Join([]string{denomA, denomB}, ",") + + //MintCoins + require.NoError(t, testInput.BankKeeper.MintCoins(testInput.Context, types.ModuleName, coinPair)) + //SendCoinsFromModuleToAccount + requestAddress, err := sdk.AccAddressFromBech32(addr) + require.NoError(t, err) + require.NoError(t, testInput.BankKeeper.SendCoinsFromModuleToAccount(testInput.Context, types.ModuleName, requestAddress, coinPair)) + // GetUidCount before CreatePool + beforecount := testInput.MarketKeeper.GetUidCount(testInput.Context) + //Create Pool + var p = types.MsgCreatePool{CoinA: testdata.coinAStr, CoinB: testdata.coinBStr, Creator: addr} + response, err := keeper.NewMsgServerImpl(*testInput.MarketKeeper).CreatePool(sdk.WrapSDKContext(testInput.Context), &p) + //validate CreatePool + require.NoError(t, err) + require.Contains(t, p.GetCreator(), response.String()) + require.Contains(t, p.GetCoinA(), response.String()) + require.Contains(t, p.GetCoinB(), response.String()) + //validate SetUidCount function. 
+ aftercount := testInput.MarketKeeper.GetUidCount(testInput.Context) + require.Equal(t, beforecount+1, aftercount) + + //validate GetDrop + drops, dropFound := testInput.MarketKeeper.GetDrop(testInput.Context, beforecount) + require.True(t, dropFound) + require.Equal(t, drops.Pair, pair) + + //Validate RedeemDrop + Uid := strconv.FormatUint(drops.Uid, 10) + var rd = types.MsgRedeemDrop{Creator: addr, Uid: Uid} + createRedeemDropResponse, redeemdropErr := keeper.NewMsgServerImpl(*testInput.MarketKeeper).RedeemDrop(sdk.WrapSDKContext(testInput.Context), &rd) + require.NoError(t, redeemdropErr) + require.Contains(t, rd.GetCreator(), createRedeemDropResponse.String()) + + //validate CreateDrop (Inactive Pool) + scenarios := []struct { + coinAStr string + coinBStr string + RateAstrArray []string + RateBstrArray []string + Creator string + }{ + {coinAStr: "20CoinA", coinBStr: "20CoinB", RateAstrArray: []string{"10", "20"}, RateBstrArray: []string{"20", "30"}, Creator: addr}, + {coinAStr: "20CoinB", coinBStr: "20CoinA", RateAstrArray: []string{"10", "20"}, RateBstrArray: []string{"20", "30"}, Creator: sample.AccAddress()}, + } + for _, s := range scenarios { + coinPair, _ = sample.SampleCoins(s.coinAStr, s.coinBStr) + denomA, denomB = sample.SampleDenoms(coinPair) + pair = strings.Join([]string{denomA, denomB}, ",") + var d = types.MsgCreateDrop{Creator: s.Creator, Pair: pair, Drops: "70"} + _, err := keeper.NewMsgServerImpl(*testInput.MarketKeeper).CreateDrop(sdk.WrapSDKContext(testInput.Context), &d) + require.Error(t, err) + require.ErrorContains(t, err, "the pool is inactive") + + } + + // GetUidCount before CreatePool + beforecount = testInput.MarketKeeper.GetUidCount(testInput.Context) + //Create Pool + p = types.MsgCreatePool{CoinA: testdata.coinAStr, CoinB: testdata.coinBStr, Creator: addr} + response, err = keeper.NewMsgServerImpl(*testInput.MarketKeeper).CreatePool(sdk.WrapSDKContext(testInput.Context), &p) + //validate CreatePool + require.NoError(t, err) + 
require.Contains(t, p.GetCreator(), response.String()) + require.Contains(t, p.GetCoinA(), response.String()) + require.Contains(t, p.GetCoinB(), response.String()) + //validate SetUidCount function. + aftercount = testInput.MarketKeeper.GetUidCount(testInput.Context) + require.Equal(t, beforecount+1, aftercount) + + //validate CreateDrop (Active Pool) + scenarios = []struct { + coinAStr string + coinBStr string + RateAstrArray []string + RateBstrArray []string + Creator string + }{ + {coinAStr: "20CoinA", coinBStr: "20CoinB", RateAstrArray: []string{"10", "20"}, RateBstrArray: []string{"20", "30"}, Creator: addr}, + } + for _, s := range scenarios { + coinPair, _ = sample.SampleCoins(s.coinAStr, s.coinBStr) + denomA, denomB = sample.SampleDenoms(coinPair) + pair = strings.Join([]string{denomA, denomB}, ",") + var d = types.MsgCreateDrop{Creator: s.Creator, Pair: pair, Drops: "70"} + _, err := keeper.NewMsgServerImpl(*testInput.MarketKeeper).CreateDrop(sdk.WrapSDKContext(testInput.Context), &d) + + require.NoError(t, err) + } +} + +func TestCreateDrop_Negative(t *testing.T) { + testInput := keepertest.CreateTestEnvironment(t) + //TestData + testdata := testData{coinAStr: "30CoinA", coinBStr: "40CoinB"} + coinPair, _ := sample.SampleCoins("140CoinA", "140CoinB") + denomA, denomB := sample.SampleDenoms(coinPair) + pair := strings.Join([]string{denomA, denomB}, ",") + + //MintCoins + require.NoError(t, testInput.BankKeeper.MintCoins(testInput.Context, types.ModuleName, coinPair)) + //SendCoinsFromModuleToAccount + requestAddress, err := sdk.AccAddressFromBech32(addr) + require.NoError(t, err) + require.NoError(t, testInput.BankKeeper.SendCoinsFromModuleToAccount(testInput.Context, types.ModuleName, requestAddress, coinPair)) + // GetUidCount before CreatePool + beforecount := testInput.MarketKeeper.GetUidCount(testInput.Context) + //Create Pool + var p = types.MsgCreatePool{CoinA: testdata.coinAStr, CoinB: testdata.coinBStr, Creator: addr} + response, err := 
keeper.NewMsgServerImpl(*testInput.MarketKeeper).CreatePool(sdk.WrapSDKContext(testInput.Context), &p) + //validate CreatePool + require.NoError(t, err) + require.Contains(t, p.GetCreator(), response.String()) + require.Contains(t, p.GetCoinA(), response.String()) + require.Contains(t, p.GetCoinB(), response.String()) + //validate SetUidCount function. + aftercount := testInput.MarketKeeper.GetUidCount(testInput.Context) + require.Equal(t, beforecount+1, aftercount) + + //validate GetDrop + drops, dropFound := testInput.MarketKeeper.GetDrop(testInput.Context, beforecount) + require.True(t, dropFound) + require.Equal(t, drops.Pair, pair) + //validate CreateDrop + var d = types.MsgCreateDrop{Creator: addr, Pair: pair, Drops: "120"} + createDropResponse, err := keeper.NewMsgServerImpl(*testInput.MarketKeeper).CreateDrop(sdk.WrapSDKContext(testInput.Context), &d) + require.NoError(t, err) + require.Contains(t, d.GetCreator(), createDropResponse.String()) + + //validate GetMember + members, memberfound := testInput.MarketKeeper.GetMember(testInput.Context, denomB, denomA) + members1, memberfound1 := testInput.MarketKeeper.GetMember(testInput.Context, denomA, denomB) + require.True(t, memberfound) + require.Equal(t, members.DenomA, denomB) + require.Equal(t, members.DenomB, denomA) + require.Equal(t, "33", members.Balance.String()) + + require.True(t, memberfound1) + require.Equal(t, members1.DenomA, denomA) + require.Equal(t, members1.DenomB, denomB) + require.Equal(t, "44", members1.Balance.String()) + +} + +func TestCreateDrop_ValidateSenderBalance(t *testing.T) { + testInput := keepertest.CreateTestEnvironment(t) + //TestData + testdata := testData{coinAStr: "30CoinA", coinBStr: "40CoinB", RateAstrArray: []string{"30", "40"}, RateBstrArray: []string{"50", "60"}} + coinPair, _ := sample.SampleCoins("35CoinA", "45CoinB") + denomA, denomB := sample.SampleDenoms(coinPair) + pair := strings.Join([]string{denomA, denomB}, ",") + + //MintCoins + require.NoError(t, 
testInput.BankKeeper.MintCoins(testInput.Context, types.ModuleName, coinPair)) + //SendCoinsFromModuleToAccount + requestAddress, err := sdk.AccAddressFromBech32(addr) + require.NoError(t, err) + require.NoError(t, testInput.BankKeeper.SendCoinsFromModuleToAccount(testInput.Context, types.ModuleName, requestAddress, coinPair)) + // GetUidCount before CreatePool + beforecount := testInput.MarketKeeper.GetUidCount(testInput.Context) + //Create Pool + var p = types.MsgCreatePool{CoinA: testdata.coinAStr, CoinB: testdata.coinBStr, Creator: addr} + response, err := keeper.NewMsgServerImpl(*testInput.MarketKeeper).CreatePool(sdk.WrapSDKContext(testInput.Context), &p) + //validate CreatePool + require.NoError(t, err) + require.Contains(t, p.GetCreator(), response.String()) + require.Contains(t, p.GetCoinA(), response.String()) + require.Contains(t, p.GetCoinB(), response.String()) + //validate SetUidCount function. + aftercount := testInput.MarketKeeper.GetUidCount(testInput.Context) + require.Equal(t, beforecount+1, aftercount) + + //validate GetDrop + drops, dropFound := testInput.MarketKeeper.GetDrop(testInput.Context, beforecount) + require.True(t, dropFound) + require.Equal(t, drops.Pair, pair) + //validate CreateDrop + var d = types.MsgCreateDrop{Creator: addr, Pair: pair, Drops: "2000"} + _, err = keeper.NewMsgServerImpl(*testInput.MarketKeeper).CreateDrop(sdk.WrapSDKContext(testInput.Context), &d) + require.Error(t, err) + require.ErrorContains(t, err, "insufficient balance") + +} + +func TestCreateDrop_InvalidDrop(t *testing.T) { + + coinPair, _ := sample.SampleCoins("35CoinA", "45CoinB") + denomA, denomB := sample.SampleDenoms(coinPair) + pair := strings.Join([]string{denomA, denomB}, ",") + + // Validate CreateDrop + dropTest := types.NewMsgCreateDrop(addr, pair, "-1") + err := dropTest.ValidateBasic() + require.Error(t, err) + +} + +func TestZeroAmtPaid(t *testing.T) { + + testInput := keepertest.CreateTestEnvironment(t) + + // TestData + testdata := 
testData{coinAStr: "1CoinA", coinBStr: "4000CoinB", RateAstrArray: []string{"60", "70"}, RateBstrArray: []string{"80", "90"}} + coinPair, _ := sample.SampleCoins("70CoinA", "7000CoinB") + denomA, denomB := sample.SampleDenoms(coinPair) + pair := strings.Join([]string{denomA, denomB}, ",") + + // MintCoins + require.NoError(t, testInput.BankKeeper.MintCoins(testInput.Context, types.ModuleName, coinPair)) + + // SendCoinsFromModuleToAccount + requestAddress, err := sdk.AccAddressFromBech32(addr) + require.NoError(t, err) + require.NoError(t, testInput.BankKeeper.SendCoinsFromModuleToAccount(testInput.Context, types.ModuleName, requestAddress, coinPair)) + + // GetUidCount before CreatePool + beforecount := testInput.MarketKeeper.GetUidCount(testInput.Context) + + // Create Pool + var p = types.MsgCreatePool{CoinA: testdata.coinAStr, CoinB: testdata.coinBStr, Creator: addr} + response, err := keeper.NewMsgServerImpl(*testInput.MarketKeeper).CreatePool(sdk.WrapSDKContext(testInput.Context), &p) + + // Validate CreatePool + require.NoError(t, err) + require.Contains(t, p.GetCreator(), response.String()) + require.Contains(t, p.GetCoinA(), response.String()) + require.Contains(t, p.GetCoinB(), response.String()) + + // Validate SetUidCount function. 
+ aftercount := testInput.MarketKeeper.GetUidCount(testInput.Context) + require.Equal(t, beforecount+1, aftercount) + + // Validate GetDrop + drops, dropFound := testInput.MarketKeeper.GetDrop(testInput.Context, beforecount) + require.True(t, dropFound) + require.Equal(t, drops.Pair, pair) + + // Validate GetPool + rst1, found := testInput.MarketKeeper.GetPool(testInput.Context, pair) + require.True(t, found) + require.Equal(t, rst1.Pair, pair) + require.Equal(t, "4000", rst1.Drops.String()) + require.Equal(t, 1, len(rst1.Leaders)) + require.Equal(t, "4000", rst1.Leaders[0].Drops.String()) + + // Validate CreateDrop + var d = types.MsgCreateDrop{Creator: addr, Pair: pair, Drops: "1"} + _, err = keeper.NewMsgServerImpl(*testInput.MarketKeeper).CreateDrop(sdk.WrapSDKContext(testInput.Context), &d) + require.Error(t, err) +} diff --git a/x/market/keeper/msg_server_create_order.go b/x/market/keeper/msg_server_create_order.go new file mode 100644 index 00000000..fa13c0eb --- /dev/null +++ b/x/market/keeper/msg_server_create_order.go @@ -0,0 +1,956 @@ +package keeper + +import ( + "context" + "math/big" + "strconv" + + "market/x/market/types" + + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +func (k msgServer) CreateOrder(goCtx context.Context, msg *types.MsgCreateOrder) (*types.MsgCreateOrderResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + + amount, _ := sdk.NewIntFromString(msg.Amount) + + coinBid := sdk.NewCoin(msg.DenomBid, amount) + + coinsBid := sdk.NewCoins(coinBid) + + creator, _ := sdk.AccAddressFromBech32(msg.Creator) + + // Check if order creator has available balance + if err := k.validateSenderBalance(ctx, creator, coinsBid); err != nil { + return nil, err + } + + memberAsk, found := k.GetMember(ctx, msg.DenomBid, msg.DenomAsk) + if !found { + return nil, sdkerrors.Wrapf(types.ErrMemberNotFound, "Member %s", msg.DenomAsk) + } + + memberBid, found := k.GetMember(ctx, msg.DenomAsk, msg.DenomBid) 
+ if !found { + return nil, sdkerrors.Wrapf(types.ErrMemberNotFound, "Member %s", msg.DenomBid) + } + + productBeg := memberAsk.Balance.Mul(memberBid.Balance) + + rate, err := types.RateStringToInt(msg.Rate) + if err != nil { + return nil, err + } + + prev, _ := strconv.ParseUint(msg.Prev, 10, 64) + + next, _ := strconv.ParseUint(msg.Next, 10, 64) + + // Create the uid + uid := k.GetUidCount(ctx) + + var order = types.Order{ + Uid: uid, + Owner: msg.Creator, + Status: "active", + DenomAsk: msg.DenomAsk, + DenomBid: msg.DenomBid, + OrderType: msg.OrderType, + Amount: amount, + Rate: rate, + Prev: prev, + Next: next, + BegTime: ctx.BlockHeader().Time.Unix(), + UpdTime: 0, + } + + // Case 1 + // Only order in book + if prev == 0 && next == 0 { + + /********************************************************************** + * THEN Member[AskCoin, BidCoin] stop/limit field must be 0 * + * Stop / Limit = 0 means that the book is empty * + **********************************************************************/ + if msg.OrderType == "stop" { + if memberBid.Stop != 0 { + return nil, sdkerrors.Wrapf(types.ErrInvalidOrder, "Bid Member stop field not 0") + } + + // Update MemberBid Stop Head + memberBid.Stop = uid + } + + if msg.OrderType == "limit" { + if memberBid.Limit != 0 { + return nil, sdkerrors.Wrapf(types.ErrInvalidOrder, "Bid Member limit field not 0") + } + + // Update MemberBid Limit Head + memberBid.Limit = uid + } + + k.SetMember(ctx, memberBid) + + } + + // Case 2 + // New head of the book + if order.Prev == 0 && order.Next > 0 { + + nextOrder, _ := k.GetOrder(ctx, next) + if !(nextOrder.Status == "active") { + return nil, sdkerrors.Wrapf(types.ErrInvalidOrder, "Next order not active") + } + if !(nextOrder.DenomAsk == order.DenomAsk && nextOrder.DenomBid == order.DenomBid) { + return nil, sdkerrors.Wrapf(types.ErrInvalidOrder, "Incorrect book") + } + if nextOrder.Prev != 0 { + return nil, sdkerrors.Wrapf(types.ErrInvalidOrder, "Next order not currently head of 
book") + } + + if msg.OrderType == "stop" { + + if types.LTE(order.Rate, nextOrder.Rate) { + return nil, sdkerrors.Wrapf(types.ErrInvalidOrder, "Order rate less than or equal Next") + } + + // Set order as new head of MemberBid Stop + memberBid.Stop = uid + + } + + if msg.OrderType == "limit" { + + if types.GTE(order.Rate, nextOrder.Rate) { + return nil, sdkerrors.Wrapf(types.ErrInvalidOrder, "Order rate greater than or equal Next") + } + + // Set order as new head of MemberBid Limit + memberBid.Limit = uid + + } + + // Set nextOrder prev field to order + nextOrder.Prev = uid + + // Update Next Order + k.SetOrder(ctx, nextOrder) + + // Update Member Bid + k.SetMember(ctx, memberBid) + } + + // Case 3 + // New tail of book + if order.Prev > 0 && order.Next == 0 { + + prevOrder, _ := k.GetOrder(ctx, prev) + + if !(prevOrder.Status == "active") { + return nil, sdkerrors.Wrapf(types.ErrInvalidOrder, "Prev order not active") + } + if prevOrder.Next != 0 { + return nil, sdkerrors.Wrapf(types.ErrInvalidOrder, "Prev order not currently tail of book") + } + if !(prevOrder.DenomAsk == order.DenomAsk && prevOrder.DenomBid == order.DenomBid) { + return nil, sdkerrors.Wrapf(types.ErrInvalidOrder, "Incorrect book") + } + + if msg.OrderType == "stop" { + + if types.GT(order.Rate, prevOrder.Rate) { + return nil, sdkerrors.Wrapf(types.ErrInvalidOrder, "Order rate greater than Prev") + } + + } + + if msg.OrderType == "limit" { + + if types.LT(order.Rate, prevOrder.Rate) { + return nil, sdkerrors.Wrapf(types.ErrInvalidOrder, "Order rate less than Prev") + } + + } + + // Set nextOrder Next field to Order + prevOrder.Next = uid + + // Update Previous Order + k.SetOrder(ctx, prevOrder) + } + + // Case 4 + // IF next position and prev position are stated + if order.Prev > 0 && order.Next > 0 { + prevOrder, _ := k.GetOrder(ctx, prev) + nextOrder, _ := k.GetOrder(ctx, next) + + if !(prevOrder.Status == "active") { + return nil, sdkerrors.Wrapf(types.ErrInvalidOrder, "Prev order not 
active") + } + if !(prevOrder.DenomAsk == order.DenomAsk && prevOrder.DenomBid == order.DenomBid) { + return nil, sdkerrors.Wrapf(types.ErrInvalidOrder, "Incorrect book") + } + + if !(nextOrder.Status == "active") { + return nil, sdkerrors.Wrapf(types.ErrInvalidOrder, "Next order not active") + } + if !(nextOrder.DenomAsk == order.DenomAsk && nextOrder.DenomBid == order.DenomBid) { + return nil, sdkerrors.Wrapf(types.ErrInvalidOrder, "Incorrect book") + } + + if !(nextOrder.Prev == prevOrder.Uid && prevOrder.Next == nextOrder.Uid) { + return nil, sdkerrors.Wrapf(types.ErrInvalidOrder, "Prev and Next are not adjacent") + } + + if msg.OrderType == "stop" { + + if types.GT(order.Rate, prevOrder.Rate) { + return nil, sdkerrors.Wrapf(types.ErrInvalidOrder, "Order rate greater than Prev") + } + + if types.LTE(order.Rate, nextOrder.Rate) { + return nil, sdkerrors.Wrapf(types.ErrInvalidOrder, "Order rate less than or equal to Next") + } + + prevOrder.Next = uid + nextOrder.Prev = uid + + // Update Previous and Next Orders + k.SetOrder(ctx, prevOrder) + k.SetOrder(ctx, nextOrder) + } + + if msg.OrderType == "limit" { + + if types.LT(order.Rate, prevOrder.Rate) { + return nil, sdkerrors.Wrapf(types.ErrInvalidOrder, "Order rate less than Prev") + } + + if types.GTE(order.Rate, nextOrder.Rate) { + return nil, sdkerrors.Wrapf(types.ErrInvalidOrder, "Order rate greater than or equal to Next") + } + + prevOrder.Next = uid + nextOrder.Prev = uid + + // Update Previous and Next Orders + k.SetOrder(ctx, prevOrder) + k.SetOrder(ctx, nextOrder) + } + } + + // Transfer order amount to module + sdkError := k.bankKeeper.SendCoinsFromAccountToModule(ctx, creator, types.ModuleName, coinsBid) + if sdkError != nil { + return nil, sdkError + } + + // Increment UID Counter + k.SetUidCount(ctx, uid+1) + + k.SetOrder(ctx, order) + k.SetOrderOwner(ctx, order.Owner, order.Uid) + + if msg.OrderType == "stop" { + // Execute Ask Limit first which will check stops + // if there are no Ask Limits 
enabled. This is a safe + // guard in the case there is a stop run. + // Stop run would potentially take place if + // stop book is checked first repeatedly during price fall + memberBid, memberAsk, err := ExecuteLimit(k, ctx, msg.DenomBid, msg.DenomAsk, memberBid, memberAsk) + if err != nil { + return nil, err + } + memberAsk, memberBid, err = ExecuteLimit(k, ctx, msg.DenomAsk, msg.DenomBid, memberAsk, memberBid) + if err != nil { + return nil, err + } + _, _, err = ExecuteOverlap(k, ctx, msg.DenomBid, msg.DenomAsk, memberBid, memberAsk) + if err != nil { + return nil, err + } + } else if msg.OrderType == "limit" { + + memberAsk, memberBid, err := ExecuteLimit(k, ctx, msg.DenomAsk, msg.DenomBid, memberAsk, memberBid) + if err != nil { + return nil, err + } + memberBid, memberAsk, err = ExecuteLimit(k, ctx, msg.DenomBid, msg.DenomAsk, memberBid, memberAsk) + if err != nil { + return nil, err + } + + if memberBid.Limit != 0 && memberAsk.Limit != 0 { + limitHeadBid, _ := k.GetOrder(ctx, memberBid.Limit) + limitHeadAsk, _ := k.GetOrder(ctx, memberAsk.Limit) + + for types.LTE(limitHeadBid.Rate, []sdk.Int{limitHeadAsk.Rate[1], limitHeadAsk.Rate[0]}) { + + memberBid, memberAsk, err = ExecuteOverlap(k, ctx, msg.DenomBid, msg.DenomAsk, memberBid, memberAsk) + if err != nil { + return nil, err + } + + memberAsk, memberBid, err = ExecuteLimit(k, ctx, msg.DenomAsk, msg.DenomBid, memberAsk, memberBid) + if err != nil { + return nil, err + } + + memberBid, memberAsk, err = ExecuteLimit(k, ctx, msg.DenomBid, msg.DenomAsk, memberBid, memberAsk) + if err != nil { + return nil, err + } + + if memberBid.Limit == 0 || memberAsk.Limit == 0 { + break + } + + limitHeadBid, found = k.GetOrder(ctx, memberBid.Limit) + if !found { + return nil, sdkerrors.Wrapf(types.ErrInvalidOrder, "No order found") + } + + limitHeadAsk, found = k.GetOrder(ctx, memberAsk.Limit) + if !found { + return nil, sdkerrors.Wrapf(types.ErrInvalidOrder, "No order found") + } + } + } + } + + memberAsk, found = 
k.GetMember(ctx, msg.DenomBid, msg.DenomAsk) + if !found { + return nil, sdkerrors.Wrapf(types.ErrMemberNotFound, "Member %s", msg.DenomAsk) + } + + memberBid, found = k.GetMember(ctx, msg.DenomAsk, msg.DenomBid) + if !found { + return nil, sdkerrors.Wrapf(types.ErrMemberNotFound, "Member %s", msg.DenomBid) + } + + if memberAsk.Balance.Mul(memberBid.Balance).Equal(productBeg) { + return &types.MsgCreateOrderResponse{Uid: order.Uid}, nil + } + + if memberAsk.Balance.Mul(memberBid.Balance).LT(productBeg) { + return nil, sdkerrors.Wrapf(types.ErrProductInvalid, "Pool product lower after Trade %s", memberAsk.Pair) + } + + profitAsk, profitBid := k.Profit(productBeg, memberAsk, memberBid) + + pool, _ := k.GetPool(ctx, memberAsk.Pair) + if !found { + return nil, sdkerrors.Wrapf(types.ErrPoolNotFound, "Pool %s", memberAsk.Pair) + } + + memberAsk, err = k.Payout(ctx, profitAsk, memberAsk, pool) + if err != nil { + return nil, err + } + + memberBid, err = k.Payout(ctx, profitBid, memberBid, pool) + if err != nil { + return nil, err + } + + if memberAsk.Balance.Mul(memberBid.Balance).LT(productBeg) { + return nil, sdkerrors.Wrapf(types.ErrProductInvalid, "Pool product lower after Payout %s", memberAsk.Pair) + } + + memberAsk, err = k.Burn(ctx, profitAsk, memberAsk) + if err != nil { + return nil, err + } + + memberBid, err = k.Burn(ctx, profitBid, memberBid) + if err != nil { + return nil, err + } + + if memberAsk.Balance.Mul(memberBid.Balance).LT(productBeg) { + return nil, sdkerrors.Wrapf(types.ErrProductInvalid, "Pool product lower after Burn %s", memberAsk.Pair) + } + + k.SetMember(ctx, memberAsk) + k.SetMember(ctx, memberBid) + + return &types.MsgCreateOrderResponse{Uid: order.Uid}, nil +} + +func ExecuteOverlap(k msgServer, ctx sdk.Context, denomAsk string, denomBid string, memberAsk types.Member, memberBid types.Member) (types.Member, types.Member, error) { + // Added for error reversion + memberAskInit := memberAsk + memberBidInit := memberBid + + if 
memberAsk.Balance.Equal(sdk.ZeroInt()) { + return memberAskInit, memberBidInit, nil + } + + if memberBid.Balance.Equal(sdk.ZeroInt()) { + return memberAskInit, memberBidInit, nil + } + + // IF Limit Head is equal to 0 THEN the Limit Book is EMPTY + if memberBid.Limit == 0 || memberAsk.Limit == 0 { + return memberAsk, memberBid, nil + } + + limitHeadBid, _ := k.GetOrder(ctx, memberBid.Limit) + limitHeadAsk, _ := k.GetOrder(ctx, memberAsk.Limit) + + if types.GT(limitHeadBid.Rate, []sdk.Int{limitHeadAsk.Rate[1], limitHeadAsk.Rate[0]}) { + return memberAskInit, memberBidInit, nil + } + + pool, _ := k.GetPool(ctx, memberAsk.Pair) + + amountDenomBidAskOrder := (limitHeadAsk.Amount.Mul(limitHeadAsk.Rate[0])).Quo(limitHeadAsk.Rate[1]) + amountDenomAskBidOrder := (limitHeadBid.Amount.Mul(limitHeadBid.Rate[0])).Quo(limitHeadBid.Rate[1]) + + if limitHeadBid.Amount.GTE(amountDenomBidAskOrder) && limitHeadAsk.Amount.GTE(amountDenomAskBidOrder) { + // Both orders may be filled + limitHeadAsk.Status = "filled" + limitHeadAsk.UpdTime = ctx.BlockHeader().Time.Unix() + memberAsk.Limit = limitHeadAsk.Next + + if limitHeadAsk.Next != 0 { + limitHeadAskNext, _ := k.GetOrder(ctx, limitHeadAsk.Next) + limitHeadAskNext.Prev = 0 + k.SetOrder(ctx, limitHeadAskNext) + } + + limitHeadBid.Status = "filled" + limitHeadBid.UpdTime = ctx.BlockHeader().Time.Unix() + memberBid.Limit = limitHeadBid.Next + + if limitHeadBid.Next != 0 { + limitHeadBidNext, _ := k.GetOrder(ctx, limitHeadBid.Next) + limitHeadBidNext.Prev = 0 + k.SetOrder(ctx, limitHeadBidNext) + } + + limitHeadBid.Prev = uint64(0) + limitHeadBid.Next = limitHeadAsk.Uid + limitHeadAsk.Prev = limitHeadBid.Uid + limitHeadAsk.Next = pool.History + + pool.History = limitHeadBid.Uid + if pool.Denom1 == limitHeadBid.DenomBid { + pool.Volume1.Amount = pool.Volume1.Amount.Add(limitHeadBid.Amount) + pool.Volume2.Amount = pool.Volume2.Amount.Add(limitHeadAsk.Amount) + } else { + pool.Volume1.Amount = pool.Volume1.Amount.Add(limitHeadAsk.Amount) + 
pool.Volume2.Amount = pool.Volume2.Amount.Add(limitHeadBid.Amount) + } + + k.IncVolume(ctx, limitHeadBid.DenomBid, limitHeadBid.Amount) + k.IncVolume(ctx, limitHeadAsk.DenomBid, limitHeadAsk.Amount) + + ownerAsk, _ := sdk.AccAddressFromBech32(limitHeadAsk.Owner) + ownerBid, _ := sdk.AccAddressFromBech32(limitHeadBid.Owner) + + coinAskOrder := sdk.NewCoin(denomBid, amountDenomBidAskOrder) + coinsAskOrder := sdk.NewCoins(coinAskOrder) + + coinBidOrder := sdk.NewCoin(denomAsk, amountDenomAskBidOrder) + coinsBidOrder := sdk.NewCoins(coinBidOrder) + + profitAskCoin := limitHeadAsk.Amount.Sub(amountDenomAskBidOrder) + memberAsk.Balance = memberAsk.Balance.Add(profitAskCoin) + + profitBidCoin := limitHeadBid.Amount.Sub(amountDenomBidAskOrder) + memberBid.Balance = memberBid.Balance.Add(profitBidCoin) + + sdkError := k.bankKeeper.SendCoinsFromModuleToAccount(ctx, types.ModuleName, ownerAsk, coinsAskOrder) + if sdkError != nil { + return memberAskInit, memberBidInit, sdkError + } + + sdkError = k.bankKeeper.SendCoinsFromModuleToAccount(ctx, types.ModuleName, ownerBid, coinsBidOrder) + if sdkError != nil { + return memberAskInit, memberBidInit, sdkError + } + + k.RemoveOrderOwner(ctx, limitHeadAsk.Owner, limitHeadAsk.Uid) + k.RemoveOrderOwner(ctx, limitHeadBid.Owner, limitHeadBid.Uid) + + k.SetOrder(ctx, limitHeadBid) + k.SetOrder(ctx, limitHeadAsk) + + k.SetPool(ctx, pool) + + k.SetMember(ctx, memberAsk) + k.SetMember(ctx, memberBid) + + return memberAsk, memberBid, nil + } + + if limitHeadBid.Amount.GTE(amountDenomBidAskOrder) { + + // Add partially filled order to history + // Keep remainder of order into book + partialFillOrder := limitHeadBid + + partialFillOrder.Uid = k.GetUidCount(ctx) + k.SetUidCount(ctx, partialFillOrder.Uid+1) + + partialFillOrder.Amount = amountDenomBidAskOrder + partialFillOrder.Status = "filled" + partialFillOrder.UpdTime = ctx.BlockHeader().Time.Unix() + partialFillOrder.Next = limitHeadAsk.Uid + partialFillOrder.Prev = uint64(0) + + // 
Complete fill of Ask Order + limitHeadAsk.Status = "filled" + limitHeadAsk.UpdTime = ctx.BlockHeader().Time.Unix() + memberAsk.Limit = limitHeadAsk.Next + + if limitHeadAsk.Next != 0 { + limitHeadAskNext, _ := k.GetOrder(ctx, limitHeadAsk.Next) + limitHeadAskNext.Prev = 0 + k.SetOrder(ctx, limitHeadAskNext) + } + + limitHeadAsk.Prev = partialFillOrder.Uid + limitHeadAsk.Next = pool.History + + pool.History = partialFillOrder.Uid + + if pool.Denom1 == partialFillOrder.DenomBid { + pool.Volume1.Amount = pool.Volume1.Amount.Add(partialFillOrder.Amount) + pool.Volume2.Amount = pool.Volume2.Amount.Add(limitHeadAsk.Amount) + } else { + pool.Volume1.Amount = pool.Volume1.Amount.Add(limitHeadAsk.Amount) + pool.Volume2.Amount = pool.Volume2.Amount.Add(partialFillOrder.Amount) + } + + k.IncVolume(ctx, partialFillOrder.DenomBid, partialFillOrder.Amount) + k.IncVolume(ctx, limitHeadAsk.DenomBid, limitHeadAsk.Amount) + + limitHeadBid.Amount = limitHeadBid.Amount.Sub(amountDenomBidAskOrder) + + ownerAsk, _ := sdk.AccAddressFromBech32(limitHeadAsk.Owner) + ownerBid, _ := sdk.AccAddressFromBech32(limitHeadBid.Owner) + + coinAskOrder := sdk.NewCoin(denomBid, amountDenomBidAskOrder) + coinsAskOrder := sdk.NewCoins(coinAskOrder) + + amountDenomAskBidPartialOrder := (amountDenomBidAskOrder.Mul(limitHeadBid.Rate[0])).Quo(limitHeadBid.Rate[1]) + + coinBidOrder := sdk.NewCoin(denomAsk, amountDenomAskBidPartialOrder) + coinsBidOrder := sdk.NewCoins(coinBidOrder) + + profitAskCoin := limitHeadAsk.Amount.Sub(amountDenomAskBidPartialOrder) + memberAsk.Balance = memberAsk.Balance.Add(profitAskCoin) + + sdkError := k.bankKeeper.SendCoinsFromModuleToAccount(ctx, types.ModuleName, ownerAsk, coinsAskOrder) + if sdkError != nil { + return memberAskInit, memberBidInit, sdkError + } + + sdkError = k.bankKeeper.SendCoinsFromModuleToAccount(ctx, types.ModuleName, ownerBid, coinsBidOrder) + if sdkError != nil { + return memberAskInit, memberBidInit, sdkError + } + + k.RemoveOrderOwner(ctx, 
limitHeadAsk.Owner, limitHeadAsk.Uid) + k.SetOrderOwner(ctx, limitHeadBid.Owner, limitHeadBid.Uid) + + k.SetOrder(ctx, limitHeadBid) + k.SetOrder(ctx, limitHeadAsk) + k.SetOrder(ctx, partialFillOrder) + + k.SetPool(ctx, pool) + + k.SetMember(ctx, memberAsk) + k.SetMember(ctx, memberBid) + return memberAsk, memberBid, nil + } + + if limitHeadAsk.Amount.GTE(amountDenomAskBidOrder) { + // Add partially filled order to history + // Keep remainder of order into book + partialFillOrder := limitHeadAsk + + partialFillOrder.Uid = k.GetUidCount(ctx) + k.SetUidCount(ctx, partialFillOrder.Uid+1) + + partialFillOrder.Amount = amountDenomAskBidOrder + partialFillOrder.Status = "filled" + partialFillOrder.UpdTime = ctx.BlockHeader().Time.Unix() + partialFillOrder.Next = limitHeadBid.Uid + partialFillOrder.Prev = uint64(0) + + // Complete fill of Bid Order + limitHeadBid.Status = "filled" + limitHeadBid.UpdTime = ctx.BlockHeader().Time.Unix() + memberBid.Limit = limitHeadBid.Next + + if limitHeadBid.Next != 0 { + limitHeadBidNext, _ := k.GetOrder(ctx, limitHeadBid.Next) + limitHeadBidNext.Prev = 0 + k.SetOrder(ctx, limitHeadBidNext) + } + + limitHeadBid.Prev = partialFillOrder.Uid + limitHeadBid.Next = pool.History + + pool.History = partialFillOrder.Uid + + if pool.Denom1 == limitHeadBid.DenomBid { + pool.Volume1.Amount = pool.Volume1.Amount.Add(limitHeadBid.Amount) + pool.Volume2.Amount = pool.Volume2.Amount.Add(partialFillOrder.Amount) + } else { + pool.Volume1.Amount = pool.Volume1.Amount.Add(partialFillOrder.Amount) + pool.Volume2.Amount = pool.Volume2.Amount.Add(limitHeadBid.Amount) + } + + k.IncVolume(ctx, limitHeadBid.DenomBid, limitHeadBid.Amount) + k.IncVolume(ctx, partialFillOrder.DenomBid, partialFillOrder.Amount) + + limitHeadAsk.Amount = limitHeadAsk.Amount.Sub(amountDenomAskBidOrder) + + ownerAsk, _ := sdk.AccAddressFromBech32(limitHeadAsk.Owner) + ownerBid, _ := sdk.AccAddressFromBech32(limitHeadBid.Owner) + + amountDenomBidAskPartialOrder := 
(amountDenomAskBidOrder.Mul(limitHeadAsk.Rate[0])).Quo(limitHeadAsk.Rate[1]) + + coinAskOrder := sdk.NewCoin(denomBid, amountDenomBidAskPartialOrder) + coinsAskOrder := sdk.NewCoins(coinAskOrder) + + coinBidOrder := sdk.NewCoin(denomAsk, amountDenomAskBidOrder) + coinsBidOrder := sdk.NewCoins(coinBidOrder) + + profitBidCoin := limitHeadBid.Amount.Sub(amountDenomBidAskPartialOrder) + memberBid.Balance = memberBid.Balance.Add(profitBidCoin) + + sdkError := k.bankKeeper.SendCoinsFromModuleToAccount(ctx, types.ModuleName, ownerAsk, coinsAskOrder) + if sdkError != nil { + return memberAskInit, memberBidInit, sdkError + } + + sdkError = k.bankKeeper.SendCoinsFromModuleToAccount(ctx, types.ModuleName, ownerBid, coinsBidOrder) + if sdkError != nil { + return memberAskInit, memberBidInit, sdkError + } + + k.RemoveOrderOwner(ctx, limitHeadBid.Owner, limitHeadBid.Uid) + + k.SetOrder(ctx, limitHeadBid) + k.SetOrder(ctx, limitHeadAsk) + k.SetOrder(ctx, partialFillOrder) + + k.SetPool(ctx, pool) + + k.SetMember(ctx, memberAsk) + k.SetMember(ctx, memberBid) + return memberAsk, memberBid, nil + } + + return memberAskInit, memberBidInit, nil +} + +func ExecuteLimit(k msgServer, ctx sdk.Context, denomAsk string, denomBid string, memberAsk types.Member, memberBid types.Member) (types.Member, types.Member, error) { + // Added for error reversion + memberAskInit := memberAsk + memberBidInit := memberBid + + if memberAsk.Balance.Equal(sdk.ZeroInt()) { + return memberAskInit, memberBidInit, nil + } + + if memberBid.Balance.Equal(sdk.ZeroInt()) { + return memberAskInit, memberBidInit, nil + } + + // IF Limit Head is equal to 0 THEN the Limit Book is EMPTY + if memberBid.Limit == 0 { + memberBid, memberAsk, sdkError := ExecuteStop(k, ctx, denomBid, denomAsk, memberBid, memberAsk) + if sdkError != nil { + return memberAskInit, memberBidInit, sdkError + } + return memberAsk, memberBid, nil + } + + limitHead, _ := k.GetOrder(ctx, memberBid.Limit) + + if types.LTE([]sdk.Int{memberAsk.Balance, 
memberBid.Balance}, limitHead.Rate) { + memberBid, memberAsk, sdkError := ExecuteStop(k, ctx, denomBid, denomAsk, memberBid, memberAsk) + if sdkError != nil { + return memberAskInit, memberBidInit, sdkError + } + return memberAsk, memberBid, nil + } + + // Execute Head Limit + + // Max Member(Bid) Balance B(f) + // The AMM Balance of the Bid Coin corresponding to Limit Order Exchange Rate + // Model: Constant Product + // A(i): Initial Balance of Ask Coin in AMM Pool + // B(f): Final Balance of Bid Coin in AMM Pool + // Exch(f): + // A(i)*B(i)=A(f)*B(f) + // Exch(f)=A(f)/B(f) -> B(f)*Exch(f)=A(f) + // A(i)*B(i)=B(f)*Exch(f)*B(f) -> A(i)*B(i)=Exch(f)*B(f)^2 + // (A(i)*B(i))/Exch(f)=B(f)^2 + // B(f)=SQRT((A(i)*B(i))/Exch(f)) + // `maxMemberBidBalance = sqrt(((memberAsk.Balance * memberBid.Balance) * limitHead.Rate[1]) + // / limitHead.Rate[0])` + // `memberAsk.Balance.Mul(memberBid.Balance)` is allowed, but multiplying further + // by the rate would be a limiting factor on orders, use a bigint multiplication for + // that step. The `Rate`s are limited to 64 bits by the `ValidateBasic`. 
+ tmp := big.NewInt(0) + tmp.Mul(memberAsk.Balance.Mul(memberBid.Balance).BigInt(), limitHead.Rate[1].BigInt()) + tmp.Quo(tmp, limitHead.Rate[0].BigInt()) + tmp.Sqrt(tmp) + maxMemberBidBalance := sdk.NewIntFromBigInt(tmp) + + // Maximum amountBid of the Bid Coin that the AMM may accept at Limit Order Exchange Rate + maxAmountBid := maxMemberBidBalance.Sub(memberBid.Balance) + + // Strike Bid Amount: The amountBid of the bid coin exchanged + var strikeAmountBid sdk.Int + + // Strike Bid Amount given by the user exchange account and received by the + // Pair AMM Pool B Member is the lesser of maxPoolBid or limit amountBid + if limitHead.Amount.LTE(maxAmountBid) { + strikeAmountBid = limitHead.Amount + memberBid.Limit = limitHead.Next + if limitHead.Next != 0 { + limitNext, _ := k.GetOrder(ctx, limitHead.Next) + limitNext.Prev = 0 + k.SetOrder(ctx, limitNext) + } + } else { + strikeAmountBid = maxAmountBid + } + + // StrikeAmountAsk = StrikeAmountBid * ExchangeRate(A/B) + // Exchange Rate is held constant at initial AMM balances + strikeAmountAsk := (strikeAmountBid.Mul(limitHead.Rate[0])).Quo(limitHead.Rate[1]) + + // Edge case where strikeAskAmount rounds to 0 + // Rounding favors AMM vs Order + if strikeAmountAsk.Equal(sdk.ZeroInt()) { + return memberAskInit, memberBidInit, nil + } + + pool, _ := k.GetPool(ctx, memberAsk.Pair) + + if limitHead.Amount.Equal(strikeAmountBid) { + limitHead.Status = "filled" + limitHead.UpdTime = ctx.BlockHeader().Time.Unix() + limitHead.Prev = 0 + k.RemoveOrderOwner(ctx, limitHead.Owner, limitHead.Uid) + + if pool.History == 0 { + limitHead.Next = 0 + } else { + prevFilledOrder, _ := k.GetOrder(ctx, pool.History) + prevFilledOrder.Prev = limitHead.Uid + limitHead.Next = prevFilledOrder.Uid + k.SetOrder(ctx, prevFilledOrder) + } + + pool.History = limitHead.Uid + + if pool.Denom1 == limitHead.DenomBid { + pool.Volume1.Amount = pool.Volume1.Amount.Add(limitHead.Amount) + pool.Volume2.Amount = pool.Volume2.Amount.Add(strikeAmountAsk) + } 
else { + pool.Volume1.Amount = pool.Volume1.Amount.Add(strikeAmountAsk) + pool.Volume2.Amount = pool.Volume2.Amount.Add(limitHead.Amount) + } + + k.IncVolume(ctx, limitHead.DenomBid, limitHead.Amount) + k.IncVolume(ctx, limitHead.DenomAsk, strikeAmountAsk) + + } else { + // Add partially filled order to history + // Keep remainder of order into book + partialFillOrder := limitHead + + partialFillOrder.Uid = k.GetUidCount(ctx) + k.SetUidCount(ctx, partialFillOrder.Uid+1) + + partialFillOrder.Amount = strikeAmountBid + partialFillOrder.Status = "filled" + partialFillOrder.UpdTime = ctx.BlockHeader().Time.Unix() + + limitHead.Amount = limitHead.Amount.Sub(strikeAmountBid) + + if pool.History == 0 { + partialFillOrder.Prev = 0 + partialFillOrder.Next = 0 + } else { + prevFilledOrder, _ := k.GetOrder(ctx, pool.History) + prevFilledOrder.Prev = partialFillOrder.Uid + k.SetOrder(ctx, prevFilledOrder) + partialFillOrder.Prev = 0 + partialFillOrder.Next = prevFilledOrder.Uid + } + + pool.History = partialFillOrder.Uid + + if pool.Denom1 == partialFillOrder.DenomBid { + pool.Volume1.Amount = pool.Volume1.Amount.Add(partialFillOrder.Amount) + pool.Volume2.Amount = pool.Volume2.Amount.Add(strikeAmountAsk) + } else { + pool.Volume1.Amount = pool.Volume1.Amount.Add(strikeAmountAsk) + pool.Volume2.Amount = pool.Volume2.Amount.Add(partialFillOrder.Amount) + } + + k.IncVolume(ctx, limitHead.DenomBid, limitHead.Amount) + k.IncVolume(ctx, limitHead.DenomAsk, strikeAmountAsk) + + k.SetOrder(ctx, partialFillOrder) + k.SetOrderOwner(ctx, limitHead.Owner, limitHead.Uid) + } + + k.SetOrder(ctx, limitHead) + k.SetPool(ctx, pool) + + // moduleAcc := sdk.AccAddress(crypto.AddressHash([]byte(types.ModuleName))) + // Get the borrower address + owner, _ := sdk.AccAddressFromBech32(limitHead.Owner) + + coinAsk := sdk.NewCoin(denomAsk, strikeAmountAsk) + coinsAsk := sdk.NewCoins(coinAsk) + + // Transfer ask order amount to owner account + sdkError := k.bankKeeper.SendCoinsFromModuleToAccount(ctx, 
types.ModuleName, owner, coinsAsk) + if sdkError != nil { + return memberAskInit, memberBidInit, sdkError + } + + memberBid.Previous = memberBid.Balance + memberAsk.Previous = memberAsk.Balance + + memberBid.Balance = memberBid.Balance.Add(strikeAmountBid) + memberAsk.Balance = memberAsk.Balance.Sub(strikeAmountAsk) + + k.SetMember(ctx, memberAsk) + k.SetMember(ctx, memberBid) + return memberAsk, memberBid, nil +} + +func ExecuteStop(k msgServer, ctx sdk.Context, denomAsk string, denomBid string, memberAsk types.Member, memberBid types.Member) (types.Member, types.Member, error) { + // Added for error reversion + memberAskInit := memberAsk + memberBidInit := memberBid + + if memberAsk.Balance.Equal(sdk.ZeroInt()) { + return memberAskInit, memberBidInit, nil + } + + if memberBid.Balance.Equal(sdk.ZeroInt()) { + return memberAskInit, memberBidInit, nil + } + + // Checking for existence of stop order at the memberBid head + if memberBid.Stop == 0 { + return memberAskInit, memberBidInit, nil + } + + stopHead, _ := k.GetOrder(ctx, memberBid.Stop) + + if types.GTE([]sdk.Int{memberAsk.Balance, memberBid.Balance}, stopHead.Rate) { + return memberAskInit, memberBidInit, nil + } + + // Strike Bid Amount: The amountBid of the bid coin exchanged + strikeAmountBid := stopHead.Amount + + // A(i)*B(i) = A(f)*B(f) + // A(f) = A(i)*B(i)/B(f) + // strikeAmountAsk = A(i) - A(f) = A(i) - A(i)*B(i)/B(f) + // Compensate for rounding: strikeAmountAsk = A(i) - A(f) = A(i) - [A(i)*B(i)/B(f)+1] + strikeAmountAsk := memberAsk.Balance.Sub(((memberAsk.Balance.Mul(memberBid.Balance)).Quo(memberBid.Balance.Add(strikeAmountBid))).Add(sdk.NewInt(1))) + + // Edge case where strikeAskAmount rounds to 0 + // Rounding favors AMM vs Order + if strikeAmountAsk.LTE(sdk.ZeroInt()) { + return memberAskInit, memberBidInit, nil + } + + // THEN set Head(Stop).Status to filled as entire order will be filled + stopHead.Status = "filled" + stopHead.UpdTime = ctx.BlockHeader().Time.Unix() + 
k.RemoveOrderOwner(ctx, stopHead.Owner, stopHead.Uid) + + // Set Next Position as Head of Stop Book + memberBid.Stop = stopHead.Next + + if stopHead.Next != 0 { + stopNext, _ := k.GetOrder(ctx, stopHead.Next) + stopNext.Prev = 0 + k.SetOrder(ctx, stopNext) + } + + // At this point the Head(Stop) position has been deactivated and the Next + // Stop position has been set as the Head Stop + + // moduleAcc := sdk.AccAddress(crypto.AddressHash([]byte(types.ModuleName))) + // Get the borrower address + owner, _ := sdk.AccAddressFromBech32(stopHead.Owner) + + coinAsk := sdk.NewCoin(denomAsk, strikeAmountAsk) + coinsAsk := sdk.NewCoins(coinAsk) + + // Transfer ask order amount to owner account + sdkError := k.bankKeeper.SendCoinsFromModuleToAccount(ctx, types.ModuleName, owner, coinsAsk) + if sdkError != nil { + return memberAskInit, memberBidInit, sdkError + } + + // Update pool order history + pool, _ := k.GetPool(ctx, memberAsk.Pair) + pool.History = stopHead.Uid + + if pool.Denom1 == stopHead.DenomBid { + pool.Volume1.Amount = pool.Volume1.Amount.Add(stopHead.Amount) + pool.Volume2.Amount = pool.Volume2.Amount.Add(strikeAmountAsk) + } else { + pool.Volume1.Amount = pool.Volume1.Amount.Add(strikeAmountAsk) + pool.Volume2.Amount = pool.Volume2.Amount.Add(stopHead.Amount) + } + + k.IncVolume(ctx, stopHead.DenomBid, stopHead.Amount) + k.IncVolume(ctx, stopHead.DenomAsk, strikeAmountAsk) + + // order filled + // just add the order to history + if pool.History == 0 { + stopHead.Prev = 0 + stopHead.Next = 0 + } else { + prevFilledOrder, _ := k.GetOrder(ctx, pool.History) + prevFilledOrder.Prev = stopHead.Uid + stopHead.Prev = 0 + stopHead.Next = prevFilledOrder.Uid + k.SetOrder(ctx, prevFilledOrder) + k.SetOrderOwner(ctx, stopHead.Owner, stopHead.Uid) + } + + k.SetOrder(ctx, stopHead) + k.SetPool(ctx, pool) + + memberBid.Previous = memberBid.Balance + memberAsk.Previous = memberAsk.Balance + + memberBid.Balance = memberBid.Balance.Add(strikeAmountBid) + memberAsk.Balance = 
memberAsk.Balance.Sub(strikeAmountAsk) + + if sdkError != nil { + return memberAskInit, memberBidInit, sdkError + } + + k.SetMember(ctx, memberAsk) + k.SetMember(ctx, memberBid) + return memberAsk, memberBid, nil +} diff --git a/x/market/keeper/msg_server_create_order_test.go b/x/market/keeper/msg_server_create_order_test.go new file mode 100644 index 00000000..a2fdaf49 --- /dev/null +++ b/x/market/keeper/msg_server_create_order_test.go @@ -0,0 +1,385 @@ +package keeper_test + +import ( + "strconv" + "strings" + "testing" + "time" + + keepertest "market/testutil/keeper" + "market/testutil/sample" + "market/x/market/keeper" + "market/x/market/types" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/stretchr/testify/require" +) + +func common(t *testing.T, testInput keepertest.TestInput) ( + testdata testData, + coinPair sdk.Coins, + denomA string, + denomB string, + pair string, +) { + + // TestData + testdata = testData{coinAStr: "30CoinA", coinBStr: "40CoinB", RateAstrArray: []string{"60", "70"}, RateBstrArray: []string{"80", "90"}} + coinPair, _ = sample.SampleCoins("140CoinA", "140CoinB") + denomA, denomB = sample.SampleDenoms(coinPair) + pair = strings.Join([]string{denomA, denomB}, ",") + + // MintCoins + require.NoError(t, testInput.BankKeeper.MintCoins(testInput.Context, types.ModuleName, coinPair)) + + // SendCoinsFromModuleToAccount + requestAddress, err := sdk.AccAddressFromBech32(addr) + require.NoError(t, err) + require.NoError(t, testInput.BankKeeper.SendCoinsFromModuleToAccount(testInput.Context, types.ModuleName, requestAddress, coinPair)) + + // Create Pool + var p = types.MsgCreatePool{CoinA: testdata.coinAStr, CoinB: testdata.coinBStr, Creator: addr} + _, err = keeper.NewMsgServerImpl(*testInput.MarketKeeper).CreatePool(sdk.WrapSDKContext(testInput.Context), &p) + require.NoError(t, err) + + // CreateDrop + var d = types.MsgCreateDrop{Creator: addr, Pair: pair, Drops: "120"} + _, err = 
keeper.NewMsgServerImpl(*testInput.MarketKeeper).CreateDrop(sdk.WrapSDKContext(testInput.Context), &d) + require.NoError(t, err) + + return +} + +func TestCreateOrder(t *testing.T) { + testInput := keepertest.CreateTestEnvironment(t) + + testdata, _, denomA, denomB, _ := common(t, testInput) + + beforecount := testInput.MarketKeeper.GetUidCount(testInput.Context) + + //Create Order + var o = types.MsgCreateOrder{Creator: addr, DenomAsk: denomA, DenomBid: denomB, Rate: testdata.RateAstrArray, OrderType: "stop", Amount: "0", Prev: "0", Next: "0"} + rate, _ := types.RateStringToInt(o.Rate) + bookends := testInput.MarketKeeper.BookEnds(testInput.Context, o.DenomAsk, o.DenomBid, o.OrderType, rate) + o.Prev = strconv.FormatUint(bookends[0], 10) + o.Next = strconv.FormatUint(bookends[1], 10) + _, err := keeper.NewMsgServerImpl(*testInput.MarketKeeper).CreateOrder(sdk.WrapSDKContext(testInput.Context), &o) + require.NoError(t, err) + + aftercount := testInput.MarketKeeper.GetUidCount(testInput.Context) + + require.Equal(t, beforecount+1, aftercount) + + //Validate Order + orders, orderfound := testInput.MarketKeeper.GetOrder(testInput.Context, beforecount) + require.True(t, orderfound) + require.Equal(t, orders.DenomBid, denomB) + require.Equal(t, orders.DenomAsk, denomA) + require.Equal(t, orders.Amount.String(), o.Amount) + + // Validate GetMember + memberAsk, memberAskfound := testInput.MarketKeeper.GetMember(testInput.Context, orders.DenomBid, orders.DenomAsk) + + require.True(t, memberAskfound) + require.Equal(t, memberAsk.DenomA, denomB) + require.Equal(t, memberAsk.DenomB, denomA) + require.Equal(t, "33", memberAsk.Balance.String()) + require.Equal(t, memberAsk.Stop, uint64(0)) + +} + +func TestBookEnds(t *testing.T) { + testInput := keepertest.CreateTestEnvironment(t) + + testdata, _, denomA, denomB, _ := common(t, testInput) + + beforecount := testInput.MarketKeeper.GetUidCount(testInput.Context) + + orderType1 := "limit" + + //Create Order + var o = 
types.MsgCreateOrder{Creator: addr, DenomAsk: denomA, DenomBid: denomB, Rate: testdata.RateAstrArray, OrderType: orderType1, Amount: "10", Prev: "0", Next: "0"} + rate, err := types.RateStringToInt(o.Rate) + require.NoError(t, err) + ends := testInput.MarketKeeper.BookEnds(testInput.Context, o.DenomAsk, o.DenomBid, o.OrderType, rate) + require.NoError(t, err) + o.Prev = strconv.FormatUint(ends[0], 10) + o.Next = strconv.FormatUint(ends[1], 10) + _, err = keeper.NewMsgServerImpl(*testInput.MarketKeeper).CreateOrder(sdk.WrapSDKContext(testInput.Context), &o) + require.NoError(t, err) + aftercount := testInput.MarketKeeper.GetUidCount(testInput.Context) + require.Equal(t, beforecount+1, aftercount) + + // Validate Order + orders, orderfound := testInput.MarketKeeper.GetOrder(testInput.Context, beforecount) + require.True(t, orderfound) + require.Equal(t, orders.DenomBid, denomB) + require.Equal(t, orders.DenomAsk, denomA) + require.Equal(t, orders.Amount.String(), o.Amount) + + // Create Order Msg Type + beforecount = aftercount + var q = types.MsgCreateOrder{Creator: addr, DenomAsk: denomA, DenomBid: denomB, Rate: testdata.RateAstrArray, OrderType: orderType1, Amount: "10", Prev: "0", Next: "0"} + rate, err = types.RateStringToInt(q.Rate) + require.NoError(t, err) + + // Get Bookends + ends = testInput.MarketKeeper.BookEnds(testInput.Context, q.DenomAsk, q.DenomBid, q.OrderType, rate) + require.NoError(t, err) + q.Prev = strconv.FormatUint(ends[0], 10) + q.Next = strconv.FormatUint(ends[1], 10) + + // Create Order + _, err = keeper.NewMsgServerImpl(*testInput.MarketKeeper).CreateOrder(sdk.WrapSDKContext(testInput.Context), &q) + require.NoError(t, err) + aftercount = testInput.MarketKeeper.GetUidCount(testInput.Context) + require.Equal(t, beforecount+1, aftercount) + + // Validate Order + orders2, orderfound2 := testInput.MarketKeeper.GetOrder(testInput.Context, beforecount) + require.True(t, orderfound2) + require.Equal(t, orders2.DenomBid, denomB) + 
require.Equal(t, orders2.DenomAsk, denomA) + require.Equal(t, orders2.Amount.String(), o.Amount) + + // Create Order Msg Type + beforecount = aftercount + var r = types.MsgCreateOrder{Creator: addr, DenomAsk: denomA, DenomBid: denomB, Rate: []string{"1", "1000"}, OrderType: orderType1, Amount: "10", Prev: "0", Next: "0"} + rate, err = types.RateStringToInt(r.Rate) + require.NoError(t, err) + + timeout := time.After(10 * time.Second) + done := make(chan bool) + go func() { + // Get Bookends + ends = testInput.MarketKeeper.BookEnds(testInput.Context, r.DenomAsk, r.DenomBid, r.OrderType, rate) + require.NoError(t, err) + r.Prev = strconv.FormatUint(ends[0], 10) + r.Next = strconv.FormatUint(ends[1], 10) + time.Sleep(5 * time.Second) + done <- true + }() + + select { + case <-timeout: + t.Fatal("Test didn't finish in time") + case <-done: + } + + // Create Order + _, err = keeper.NewMsgServerImpl(*testInput.MarketKeeper).CreateOrder(sdk.WrapSDKContext(testInput.Context), &r) + require.NoError(t, err) + aftercount = testInput.MarketKeeper.GetUidCount(testInput.Context) + require.Equal(t, beforecount+1, aftercount) + + // Validate Order + orders3, orderfound3 := testInput.MarketKeeper.GetOrder(testInput.Context, beforecount) + require.True(t, orderfound3) + require.Equal(t, orders3.DenomBid, denomB) + require.Equal(t, orders3.DenomAsk, denomA) + require.Equal(t, orders3.Amount.String(), o.Amount) + + // Validate GetMember + memberAsk, memberAskfound := testInput.MarketKeeper.GetMember(testInput.Context, orders.DenomBid, orders.DenomAsk) + + require.True(t, memberAskfound) + require.Equal(t, memberAsk.DenomA, denomB) + require.Equal(t, memberAsk.DenomB, denomA) + require.Equal(t, "33", memberAsk.Balance.String()) + require.Equal(t, memberAsk.Stop, uint64(0)) + + // Create Order Msg Type + beforecount = aftercount + var s = types.MsgCreateOrder{Creator: addr, DenomAsk: denomA, DenomBid: denomB, Rate: []string{"65", "70"}, OrderType: orderType1, Amount: "10", Prev: "0", 
Next: "0"}

	// NOTE(review): the lines above/below belong to a test whose opening
	// statements fall outside this chunk; kept verbatim.
	rate, err = types.RateStringToInt(s.Rate)
	require.NoError(t, err)

	timeout = time.After(10 * time.Second)
	done = make(chan bool)
	go func() {
		// Get Bookends
		ends = testInput.MarketKeeper.BookEnds(testInput.Context, s.DenomAsk, s.DenomBid, s.OrderType, rate)
		require.NoError(t, err)
		s.Prev = strconv.FormatUint(ends[0], 10)
		s.Next = strconv.FormatUint(ends[1], 10)
		time.Sleep(5 * time.Second)
		done <- true
	}()

	select {
	case <-timeout:
		t.Fatal("Test didn't finish in time")
	case <-done:
	}

	// Create Order
	_, err = keeper.NewMsgServerImpl(*testInput.MarketKeeper).CreateOrder(sdk.WrapSDKContext(testInput.Context), &s)
	require.NoError(t, err)
	aftercount = testInput.MarketKeeper.GetUidCount(testInput.Context)
	require.Equal(t, beforecount+1, aftercount)

	// Validate Order
	orders4, orderfound4 := testInput.MarketKeeper.GetOrder(testInput.Context, beforecount)
	require.True(t, orderfound4)
	require.Equal(t, orders4.DenomBid, denomB)
	require.Equal(t, orders4.DenomAsk, denomA)
	require.Equal(t, orders4.Amount.String(), o.Amount)

}

// TestCreateOrder_BothFillOverlap places two opposing limit orders (40 bid
// vs 30 bid) that cross completely: the assertions below expect both orders
// to end up "filled", traded amounts 40/30 in history, and no residual
// active order owned by addr.
func TestCreateOrder_BothFillOverlap(t *testing.T) {

	testInput := keepertest.CreateTestEnvironment(t)

	_, _, _, _, pair := common(t, testInput)

	// beforecount := testInput.MarketKeeper.GetUidCount(testInput.Context)

	scenarios := []types.MsgCreateOrder{
		{Creator: addr, DenomAsk: "CoinA", DenomBid: "CoinB", Rate: []string{"3", "4"}, OrderType: "limit", Amount: "40", Prev: "0", Next: "0"},
		{Creator: addr, DenomAsk: "CoinB", DenomBid: "CoinA", Rate: []string{"4", "3"}, OrderType: "limit", Amount: "30", Prev: "0", Next: "0"},
	}

	var uid uint64

	for _, s := range scenarios {

		orderresponse, err := keeper.NewMsgServerImpl(*testInput.MarketKeeper).CreateOrder(sdk.WrapSDKContext(testInput.Context), &s)
		require.NoError(t, err)
		uid = orderresponse.Uid
	}

	order, found := testInput.MarketKeeper.GetOrder(testInput.Context, uid)
	require.True(t, found)
	require.True(t, order.Status == "filled")

	allorders := testInput.MarketKeeper.GetAllOrder(testInput.Context)
	require.Truef(t, allorders[0].Uid == 3, strconv.FormatUint(allorders[0].Uid, 10))
	require.Truef(t, allorders[0].Status == "filled", allorders[0].Status)
	require.Equal(t, sdk.NewInt(70).String(), allorders[0].Amount.Add(allorders[1].Amount).String())

	history, _ := testInput.MarketKeeper.GetHistory(testInput.Context, "CoinA,CoinB", "10")
	require.Equal(t, "40", history[0].Amount)
	require.Equal(t, "30", history[1].Amount)

	volumes := testInput.MarketKeeper.GetAllVolumes(testInput.Context)
	require.Equalf(t, "30", volumes[0].Amount.String(), volumes[0].Amount.String())
	require.Equalf(t, "40", volumes[1].Amount.String(), volumes[0].Amount.String())

	require.True(t, len(allorders) == 2)

	// Validate Order
	orderowner := testInput.MarketKeeper.GetOrderOwner(testInput.Context, addr)
	require.True(t, len(orderowner) == 0)

	// Validate GetPool
	pool, _ := testInput.MarketKeeper.GetPool(testInput.Context, pair)
	require.Equal(t, strconv.FormatUint(pool.History, 10), strconv.FormatUint(allorders[0].Uid, 10))
}

// TestCreateOrder_OneSide1FillOverlap crosses a 50 bid against a 30 bid; the
// assertions expect the larger side to remain "active" with a 10-unit
// remainder while the smaller side fills, and the remainder to become the
// new limit head of the CoinA/CoinB member.
func TestCreateOrder_OneSide1FillOverlap(t *testing.T) {

	testInput := keepertest.CreateTestEnvironment(t)

	_, _, _, _, pair := common(t, testInput)

	// beforecount := testInput.MarketKeeper.GetUidCount(testInput.Context)

	scenarios := []types.MsgCreateOrder{
		{Creator: addr, DenomAsk: "CoinA", DenomBid: "CoinB", Rate: []string{"3", "4"}, OrderType: "limit", Amount: "50", Prev: "0", Next: "0"},
		{Creator: addr, DenomAsk: "CoinB", DenomBid: "CoinA", Rate: []string{"4", "3"}, OrderType: "limit", Amount: "30", Prev: "0", Next: "0"},
	}

	var uid uint64

	for _, s := range scenarios {

		orderresponse, err := keeper.NewMsgServerImpl(*testInput.MarketKeeper).CreateOrder(sdk.WrapSDKContext(testInput.Context), &s)
		require.NoError(t, err)
		uid = orderresponse.Uid
	}

	order, found := testInput.MarketKeeper.GetOrder(testInput.Context, uid)
	require.True(t, found)
	require.True(t, order.Status == "filled")

	allorders := testInput.MarketKeeper.GetAllOrder(testInput.Context)
	require.Truef(t, allorders[0].Uid == 3, strconv.FormatUint(allorders[0].Uid, 10))
	require.Truef(t, allorders[0].Status == "active", allorders[0].Status)
	require.Equal(t, sdk.NewInt(10).String(), allorders[0].Amount.String())

	history, _ := testInput.MarketKeeper.GetHistory(testInput.Context, "CoinA,CoinB", "10")
	require.Equal(t, "40", history[0].Amount)
	require.Equal(t, "30", history[1].Amount)

	volumes := testInput.MarketKeeper.GetAllVolumes(testInput.Context)
	require.Equalf(t, "30", volumes[0].Amount.String(), volumes[0].Amount.String())
	require.Equalf(t, "40", volumes[1].Amount.String(), volumes[0].Amount.String())

	require.Equal(t, 3, len(allorders))

	// Validate Order
	orderowner := testInput.MarketKeeper.GetOrderOwner(testInput.Context, addr)
	require.True(t, len(orderowner) == 1)

	// Validate GetPool
	pool, _ := testInput.MarketKeeper.GetPool(testInput.Context, pair)
	require.Equal(t, strconv.FormatUint(pool.History, 10), strconv.FormatUint(allorders[2].Uid, 10))

	member, found := testInput.MarketKeeper.GetMember(testInput.Context, "CoinA", "CoinB")
	require.True(t, found)
	require.Equal(t, member.Limit, allorders[0].Uid)
}

// TestCreateOrder_OneSide2FillOverlap crosses two equal 40/40 opposing
// orders; per the assertions that follow (continued in the next chunk) the
// second order retains a 10-unit active remainder on the CoinB/CoinA side.
func TestCreateOrder_OneSide2FillOverlap(t *testing.T) {

	testInput := keepertest.CreateTestEnvironment(t)

	_, _, _, _, pair := common(t, testInput)

	// beforecount := testInput.MarketKeeper.GetUidCount(testInput.Context)

	scenarios := []types.MsgCreateOrder{
		{Creator: addr, DenomAsk: "CoinA", DenomBid: "CoinB", Rate: []string{"3", "4"}, OrderType: "limit", Amount: "40", Prev: "0", Next: "0"},
		{Creator: addr, DenomAsk: "CoinB", DenomBid: "CoinA", Rate: []string{"4", "3"}, OrderType: "limit", Amount: "40", Prev: "0", Next: "0"},
	}

	var uid uint64

	for _, s := range scenarios {

orderresponse, err := keeper.NewMsgServerImpl(*testInput.MarketKeeper).CreateOrder(sdk.WrapSDKContext(testInput.Context), &s) + require.NoError(t, err) + uid = orderresponse.Uid + } + + order, found := testInput.MarketKeeper.GetOrder(testInput.Context, uid) + require.True(t, found) + require.True(t, order.Status == "active") + + allorders := testInput.MarketKeeper.GetAllOrder(testInput.Context) + require.Truef(t, allorders[0].Uid == 3, strconv.FormatUint(allorders[0].Uid, 10)) + require.Truef(t, allorders[1].Status == "active", allorders[1].Status) + require.Equal(t, sdk.NewInt(10).String(), allorders[1].Amount.String()) + + history, _ := testInput.MarketKeeper.GetHistory(testInput.Context, "CoinA,CoinB", "10") + require.Equal(t, "30", history[0].Amount) + require.Equal(t, "40", history[1].Amount) + + require.Equal(t, 3, len(allorders)) + + // Validate Order + orderowner := testInput.MarketKeeper.GetOrderOwner(testInput.Context, addr) + require.True(t, len(orderowner) == 1) + + // Validate GetPool + pool, _ := testInput.MarketKeeper.GetPool(testInput.Context, pair) + require.Equal(t, strconv.FormatUint(pool.History, 10), strconv.FormatUint(allorders[2].Uid, 10)) + + member, found := testInput.MarketKeeper.GetMember(testInput.Context, "CoinB", "CoinA") + require.True(t, found) + require.Equal(t, member.Limit, allorders[1].Uid) + + bookends := testInput.MarketKeeper.BookEnds(testInput.Context, "CoinB", "CoinA", "limit", []sdk.Int{sdk.NewInt(3), sdk.NewInt(3)}) + require.Equal(t, strconv.FormatUint(uint64(0), 10), strconv.FormatUint(bookends[0], 10)) +} diff --git a/x/market/keeper/msg_server_create_pool.go b/x/market/keeper/msg_server_create_pool.go new file mode 100644 index 00000000..541e0545 --- /dev/null +++ b/x/market/keeper/msg_server_create_pool.go @@ -0,0 +1,157 @@ +package keeper + +import ( + "context" + "strings" + + "market/x/market/types" + + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +func (k 
msgServer) CreatePool(goCtx context.Context, msg *types.MsgCreatePool) (*types.MsgCreatePoolResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + + // CoinAmsg and CoinBmsg pre-sort from raw msg + coinA, err := sdk.ParseCoinNormalized(msg.CoinA) + if err != nil { + panic(err) + } + + coinB, err := sdk.ParseCoinNormalized(msg.CoinB) + if err != nil { + panic(err) + } + + coinPair := sdk.NewCoins(coinA, coinB) + + // NewCoins sorts denoms. + // The sorted pair joined by "," is used as the key for the pool. + denom1 := coinPair.GetDenomByIndex(0) + denom2 := coinPair.GetDenomByIndex(1) + pair := strings.Join([]string{denom1, denom2}, ",") + + // Test if pool either exists and active or exists and inactive + // Inactive pool will be dry or have no drops + member1, _ := k.GetMember(ctx, denom2, denom1) + + member2, _ := k.GetMember(ctx, denom1, denom2) + + pool, found := k.GetPool(ctx, pair) + if found { + if !member1.Balance.Equal(sdk.ZeroInt()) { + return nil, sdkerrors.Wrapf(types.ErrPoolAlreadyExists, "%s", pair) + } + + if !member2.Balance.Equal(sdk.ZeroInt()) { + return nil, sdkerrors.Wrapf(types.ErrPoolAlreadyExists, "%s", pair) + } + + if !pool.Drops.Equal(sdk.ZeroInt()) { + return nil, sdkerrors.Wrapf(types.ErrPoolAlreadyExists, "%s", pair) + } + } + + // Get the borrower address + creator, _ := sdk.AccAddressFromBech32(msg.Creator) + + // All coins added to pools are deposited into the module account until redemption + sdkError := k.bankKeeper.SendCoinsFromAccountToModule(ctx, creator, types.ModuleName, coinPair) + if sdkError != nil { + return nil, sdkError + } + + // Drops define proportional ownership to the liquidity in the pool + drops := coinPair.AmountOf(denom1).Mul(coinPair.AmountOf(denom2)) + + leader := types.Leader{ + Address: msg.Creator, + Drops: drops, + } + + if found { + pool.Drops = drops + pool.Leaders = []*types.Leader{&leader} + member1.Balance = coinPair.AmountOf(denom1) + member2.Balance = coinPair.AmountOf(denom2) + } else { + pool = 
types.Pool{ + Pair: pair, + Leaders: []*types.Leader{&leader}, + Denom1: coinPair.GetDenomByIndex(0), + Denom2: coinPair.GetDenomByIndex(1), + Volume1: &types.Volume{ + Denom: coinPair.GetDenomByIndex(0), + Amount: sdk.ZeroInt(), + }, + Volume2: &types.Volume{ + Denom: coinPair.GetDenomByIndex(1), + Amount: sdk.ZeroInt(), + }, + Drops: drops, + History: uint64(0), + } + + member1 = types.Member{ + Pair: pair, + DenomA: denom2, + DenomB: denom1, + Balance: coinPair.AmountOf(denom1), + Limit: 0, + Stop: 0, + } + + member2 = types.Member{ + Pair: pair, + DenomA: denom1, + DenomB: denom2, + Balance: coinPair.AmountOf(denom2), + Limit: 0, + Stop: 0, + } + } + + // Create the uid + count := k.GetUidCount(ctx) + + var drop = types.Drop{ + Uid: count, + Owner: msg.Creator, + Pair: pair, + Drops: drops, + Product: drops, + Active: true, + } + + k.SetPool( + ctx, + pool, + ) + + k.SetMember( + ctx, + member1, + ) + + k.SetMember( + ctx, + member2, + ) + + // Add the drop to the keeper + k.SetDrop( + ctx, + drop, + ) + + k.SetDropOwner( + ctx, + drop, + ) + + // Update drop uid count + k.SetUidCount(ctx, count+1) + + return &types.MsgCreatePoolResponse{}, nil +} diff --git a/x/market/keeper/msg_server_create_pool_test.go b/x/market/keeper/msg_server_create_pool_test.go new file mode 100644 index 00000000..a98a9262 --- /dev/null +++ b/x/market/keeper/msg_server_create_pool_test.go @@ -0,0 +1,271 @@ +package keeper_test + +import ( + "strings" + "testing" + + keepertest "market/testutil/keeper" + "market/testutil/sample" + + sdk "github.com/cosmos/cosmos-sdk/types" + + "market/x/market/keeper" + "market/x/market/types" + + "github.com/stretchr/testify/require" +) + +type testData struct { + coinAStr string + coinBStr string + RateAstrArray []string + RateBstrArray []string +} + +var addr string = sample.AccAddress() +var addr2 string = sample.AccAddress() +var addr3 string = sample.AccAddress() + +func TestCreatePool(t *testing.T) { + testInput := 
keepertest.CreateTestEnvironment(t)
	//TestData
	testdata := testData{coinAStr: "20CoinA", coinBStr: "20CoinB", RateAstrArray: []string{"10", "20"}, RateBstrArray: []string{"20", "30"}}
	coinPair, _ := sample.SampleCoins(testdata.coinAStr, testdata.coinBStr)
	denomA, denomB := sample.SampleDenoms(coinPair)
	pair := strings.Join([]string{denomA, denomB}, ",")

	//MintCoins
	require.NoError(t, testInput.BankKeeper.MintCoins(testInput.Context, types.ModuleName, coinPair))
	//SendCoinsFromModuleToAccount
	requestAddress, err := sdk.AccAddressFromBech32(addr)
	require.NoError(t, err)
	require.NoError(t, testInput.BankKeeper.SendCoinsFromModuleToAccount(testInput.Context, types.ModuleName, requestAddress, coinPair))
	// GetUidCount before CreatePool
	beforecount := testInput.MarketKeeper.GetUidCount(testInput.Context)
	//Create Pool
	var p = types.MsgCreatePool{CoinA: testdata.coinAStr, CoinB: testdata.coinBStr, Creator: addr}
	response, err := keeper.NewMsgServerImpl(*testInput.MarketKeeper).CreatePool(sdk.WrapSDKContext(testInput.Context), &p)
	//validate CreatePool
	require.NoError(t, err)
	require.Contains(t, p.GetCreator(), response.String())
	require.Contains(t, p.GetCoinA(), response.String())
	require.Contains(t, p.GetCoinB(), response.String())
	//validate SetUidCount function.
	aftercount := testInput.MarketKeeper.GetUidCount(testInput.Context)
	require.Equal(t, beforecount+1, aftercount)
	//validate GetPool

	rst, found := testInput.MarketKeeper.GetPool(testInput.Context, pair)
	require.True(t, found)
	require.Equal(t, rst.Pair, pair)
	//validate GetMember
	members, memberfound := testInput.MarketKeeper.GetMember(testInput.Context, denomB, denomA)
	members1, memberfound1 := testInput.MarketKeeper.GetMember(testInput.Context, denomA, denomB)
	require.True(t, memberfound)
	require.Equal(t, members.DenomA, denomB)
	require.Equal(t, members.DenomB, denomA)
	require.True(t, memberfound1)
	require.Equal(t, members1.DenomA, denomA)
	require.Equal(t, members1.DenomB, denomB)
	//validate GetDrop
	drops, dropFound := testInput.MarketKeeper.GetDrop(testInput.Context, beforecount)
	require.True(t, dropFound)
	require.Equal(t, drops.Pair, pair)

}

// TestCreatePool_PoolAlreadyExist re-submits the same pair (and the reversed
// pair) and expects "pool already exists" for every attempt after the first.
func TestCreatePool_PoolAlreadyExist(t *testing.T) {
	testInput := keepertest.CreateTestEnvironment(t)
	count := 0
	scenarios := []struct {
		coinAStr      string
		coinBStr      string
		RateAstrArray []string
		RateBstrArray []string
	}{
		{coinAStr: "20CoinA", coinBStr: "20CoinB", RateAstrArray: []string{"10", "20"}, RateBstrArray: []string{"20", "30"}},
		{coinAStr: "20CoinA", coinBStr: "20CoinB", RateAstrArray: []string{"10", "20"}, RateBstrArray: []string{"20", "30"}},
		// check reversed
		{coinAStr: "20CoinB", coinBStr: "20CoinA", RateAstrArray: []string{"20", "30"}, RateBstrArray: []string{"10", "20"}},
	}
	for _, s := range scenarios {
		coinPair, _ := sample.SampleCoins("20CoinA", "20CoinB")

		require.NoError(t, testInput.BankKeeper.MintCoins(testInput.Context, types.ModuleName, coinPair))
		requestAddress, _ := sdk.AccAddressFromBech32(addr)
		require.NoError(t, testInput.BankKeeper.SendCoinsFromModuleToAccount(testInput.Context, types.ModuleName, requestAddress, coinPair))
		var p = types.MsgCreatePool{CoinA: s.coinAStr, CoinB: s.coinBStr, Creator: addr}
		response, err := keeper.NewMsgServerImpl(*testInput.MarketKeeper).CreatePool(sdk.WrapSDKContext(testInput.Context), &p)
		if count == 0 {
			require.NoError(t, err)
			require.Contains(t, p.GetCreator(), response.String())

		} else {
			require.Error(t, err) //Pool Already exists
			require.ErrorContains(t, err, "pool already exists")
			require.NotContains(t, p.GetCreator(), response.String())
		}

		count++

	}

}

// TestCreatePool_Insufficient_Funds funds the creator with 10 of each coin
// but asks to deposit 15 of each, expecting an "insufficient funds" error.
func TestCreatePool_Insufficient_Funds(t *testing.T) {
	testInput := keepertest.CreateTestEnvironment(t)
	//TestData
	testdata := testData{coinAStr: "15CoinA", coinBStr: "15CoinB", RateAstrArray: []string{"10", "20"}, RateBstrArray: []string{"20", "30"}}
	coinPair, _ := sample.SampleCoins("10CoinA", "10CoinB")

	require.NoError(t, testInput.BankKeeper.MintCoins(testInput.Context, types.ModuleName, coinPair))
	requestAddress, _ := sdk.AccAddressFromBech32(addr)
	require.NoError(t, testInput.BankKeeper.SendCoinsFromModuleToAccount(testInput.Context, types.ModuleName, requestAddress, coinPair))
	var p = types.MsgCreatePool{CoinA: testdata.coinAStr, CoinB: testdata.coinBStr, Creator: addr}
	response, err := keeper.NewMsgServerImpl(*testInput.MarketKeeper).CreatePool(sdk.WrapSDKContext(testInput.Context), &p)
	require.Error(t, err)
	require.ErrorContains(t, err, "insufficient funds")
	require.NotContains(t, p.GetCreator(), response.String())

}

// TestCreatePool_PoolAlready_Exists_ReSubmit creates a pool then immediately
// re-submits a second MsgCreatePool for the same pair with different
// amounts; the second call must fail with "pool already exists".
func TestCreatePool_PoolAlready_Exists_ReSubmit(t *testing.T) {
	testInput := keepertest.CreateTestEnvironment(t)
	coinPair, _ := sample.SampleCoins("20CoinA", "20CoinB")

	require.NoError(t, testInput.BankKeeper.MintCoins(testInput.Context, types.ModuleName, coinPair))
	requestAddress, _ := sdk.AccAddressFromBech32(addr)
	require.NoError(t, testInput.BankKeeper.SendCoinsFromModuleToAccount(testInput.Context, types.ModuleName, requestAddress, coinPair))
	var p = types.MsgCreatePool{CoinA: "15CoinA", CoinB: "15CoinB", Creator: addr}
	var p1 = types.MsgCreatePool{CoinA: "30CoinA", CoinB: "30CoinB", Creator: addr}
	response, err := keeper.NewMsgServerImpl(*testInput.MarketKeeper).CreatePool(sdk.WrapSDKContext(testInput.Context), &p)
	response1, err1 := keeper.NewMsgServerImpl(*testInput.MarketKeeper).CreatePool(sdk.WrapSDKContext(testInput.Context), &p1)
	require.NoError(t, err)
	require.Error(t, err1)
	require.ErrorContains(t, err1, "pool already exists")
	require.Contains(t, p.GetCreator(), response.String())
	require.NotContains(t, p.GetCreator(), response1.String())

}

// TestCreatePool_With_New_Creator uses a freshly sampled (unfunded) creator
// address, so the module-account deposit must fail with insufficient funds.
func TestCreatePool_With_New_Creator(t *testing.T) {
	testInput := keepertest.CreateTestEnvironment(t)
	//TestData
	testdata := testData{coinAStr: "15CoinA", coinBStr: "15CoinB", RateAstrArray: []string{"10", "20"}, RateBstrArray: []string{"20", "30"}}
	coinPair, _ := sample.SampleCoins("10CoinA", "10CoinB")

	require.NoError(t, testInput.BankKeeper.MintCoins(testInput.Context, types.ModuleName, coinPair))
	requestAddress, _ := sdk.AccAddressFromBech32(addr)
	require.NoError(t, testInput.BankKeeper.SendCoinsFromModuleToAccount(testInput.Context, types.ModuleName, requestAddress, coinPair))
	var p = types.MsgCreatePool{CoinA: testdata.coinAStr, CoinB: testdata.coinBStr, Creator: sample.AccAddress()}
	response, err := keeper.NewMsgServerImpl(*testInput.MarketKeeper).CreatePool(sdk.WrapSDKContext(testInput.Context), &p)
	require.Error(t, err)
	require.ErrorContains(t, err, "insufficient funds")
	require.NotContains(t, p.GetCreator(), response.String())

}

// TestCreatePool_With_Empty_Rates shows pool creation succeeds even when the
// rate fixtures are all "0" (rates are not part of MsgCreatePool).
func TestCreatePool_With_Empty_Rates(t *testing.T) {
	testInput := keepertest.CreateTestEnvironment(t)
	//TestData
	testdata := testData{coinAStr: "15CoinA", coinBStr: "15CoinB", RateAstrArray: []string{"0", "0"}, RateBstrArray: []string{"0", "0"}}
	coinPair, _ := sample.SampleCoins("20CoinA", "20CoinB")
	denomA, denomB := sample.SampleDenoms(coinPair)
	pair := strings.Join([]string{denomA, denomB}, ",")
	require.NoError(t, testInput.BankKeeper.MintCoins(testInput.Context, types.ModuleName, coinPair))
	requestAddress, _ := sdk.AccAddressFromBech32(addr)
	require.NoError(t, testInput.BankKeeper.SendCoinsFromModuleToAccount(testInput.Context, types.ModuleName, requestAddress, coinPair))
	//validate SetUidCount function.
	beforecount := testInput.MarketKeeper.GetUidCount(testInput.Context)

	var p = types.MsgCreatePool{CoinA: testdata.coinAStr, CoinB: testdata.coinBStr, Creator: addr}
	response, err := keeper.NewMsgServerImpl(*testInput.MarketKeeper).CreatePool(sdk.WrapSDKContext(testInput.Context), &p)
	require.NoError(t, err)
	require.Contains(t, p.GetCreator(), response.String())
	//validate SetUidCount function.
	aftercount := testInput.MarketKeeper.GetUidCount(testInput.Context)
	require.Equal(t, beforecount+1, aftercount)
	rst, found := testInput.MarketKeeper.GetPool(testInput.Context, pair)
	require.True(t, found)
	require.Equal(t, rst.Pair, pair)
	//validate GetMember
	members, memberfound := testInput.MarketKeeper.GetMember(testInput.Context, denomB, denomA)
	members1, memberfound1 := testInput.MarketKeeper.GetMember(testInput.Context, denomA, denomB)
	require.True(t, memberfound)
	require.Equal(t, members.DenomA, denomB)
	require.Equal(t, members.DenomB, denomA)
	require.True(t, memberfound1)
	require.Equal(t, members1.DenomA, denomA)
	require.Equal(t, members1.DenomB, denomB)
	//validate GetDrop
	drops, dropFound := testInput.MarketKeeper.GetDrop(testInput.Context, beforecount)
	require.True(t, dropFound)
	require.Equal(t, drops.Pair, pair)

}

// TestCreatePool_With_Swap_Coins submits CoinB/CoinA in reversed order and
// verifies the pool key and members still come out in sorted denom order.
func TestCreatePool_With_Swap_Coins(t *testing.T) {
	testInput := keepertest.CreateTestEnvironment(t)
	//TestData
	testdata := testData{coinAStr: "15CoinB", coinBStr: "15CoinA", RateAstrArray: []string{"0", "0"}, RateBstrArray: []string{"0", "0"}}
	coinPair, _ := sample.SampleCoins("20CoinA", "20CoinB")
	denomA, denomB := sample.SampleDenoms(coinPair)
	pair := strings.Join([]string{denomA, denomB}, ",")
	require.NoError(t, testInput.BankKeeper.MintCoins(testInput.Context, types.ModuleName, coinPair))
	requestAddress, _ := sdk.AccAddressFromBech32(addr)
	require.NoError(t, testInput.BankKeeper.SendCoinsFromModuleToAccount(testInput.Context, types.ModuleName, requestAddress, coinPair))
	//validate SetUidCount function.
	beforecount := testInput.MarketKeeper.GetUidCount(testInput.Context)

	var p = types.MsgCreatePool{CoinA: testdata.coinAStr, CoinB: testdata.coinBStr, Creator: addr}
	response, err := keeper.NewMsgServerImpl(*testInput.MarketKeeper).CreatePool(sdk.WrapSDKContext(testInput.Context), &p)
	require.NoError(t, err)
	require.Contains(t, p.GetCreator(), response.String())
	//validate SetUidCount function.
	aftercount := testInput.MarketKeeper.GetUidCount(testInput.Context)
	require.Equal(t, beforecount+1, aftercount)
	rst, found := testInput.MarketKeeper.GetPool(testInput.Context, pair)
	require.True(t, found)
	require.Equal(t, rst.Pair, pair)
	//validate GetMember
	members, memberfound := testInput.MarketKeeper.GetMember(testInput.Context, denomB, denomA)
	members1, memberfound1 := testInput.MarketKeeper.GetMember(testInput.Context, denomA, denomB)
	require.True(t, memberfound)
	require.Equal(t, members.DenomA, denomB)
	require.Equal(t, members.DenomB, denomA)
	require.True(t, memberfound1)
	require.Equal(t, members1.DenomA, denomA)
	require.Equal(t, members1.DenomB, denomB)
	//validate GetDrop
	drops, dropFound := testInput.MarketKeeper.GetDrop(testInput.Context, beforecount)
	require.True(t, dropFound)
	require.Equal(t, drops.Pair, pair)

}

// TestCreatePool_Invalid_Coins funds the creator with CoinA/CoinB only, then
// submits pools referencing a denom ("Coin") the creator does not hold, so
// every scenario must error. (Continues in the next chunk.)
func TestCreatePool_Invalid_Coins(t *testing.T) {
	testInput := keepertest.CreateTestEnvironment(t)

	scenarios := []struct {
		coinAStr      string
		coinBStr      string
		RateAstrArray []string
		RateBstrArray []string
	}{
		{coinAStr: "20Coin", coinBStr: "20CoinB", RateAstrArray: []string{"10", "20"}, RateBstrArray: []string{"20", "30"}},
		{coinAStr: "20CoinA", coinBStr: "20Coin", RateAstrArray: []string{"10", "20"}, RateBstrArray: []string{"20", "30"}},
		//{coinAStr: "20CoinA", coinBStr: "20", RateAstrArray: []string{"10", "20"}, RateBstrArray: []string{"20", "30"}},
	}
	for _, s := range scenarios {
		coinPair, _ := sample.SampleCoins("20CoinA", "20CoinB")
		require.NoError(t, testInput.BankKeeper.MintCoins(testInput.Context, types.ModuleName, coinPair))
		requestAddress, _ := sdk.AccAddressFromBech32(addr)
		require.NoError(t, testInput.BankKeeper.SendCoinsFromModuleToAccount(testInput.Context, types.ModuleName, requestAddress, coinPair))
		var p = types.MsgCreatePool{CoinA: s.coinAStr, CoinB: s.coinBStr, Creator: addr}
		response, err := keeper.NewMsgServerImpl(*testInput.MarketKeeper).CreatePool(sdk.WrapSDKContext(testInput.Context), &p)
		require.Error(t, err)
		require.NotContains(t, p.GetCreator(), response.String())

	}

}

// ---------------------------------------------------------------------------
// file: x/market/keeper/msg_server_market_order.go (new file in this patch)
// ---------------------------------------------------------------------------

package keeper

import (
	"context"

	"market/x/market/types"

	sdk "github.com/cosmos/cosmos-sdk/types"
	sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
)

// MarketOrder swaps msg.AmountBid of DenomBid for DenomAsk directly against
// pool liquidity at the constant-product price, deducts the market fee,
// enforces the caller's slippage limit, records the trade as a "filled"
// order in the pool history, then triggers limit-order execution in both
// directions and distributes/burns any product surplus.
func (k msgServer) MarketOrder(goCtx context.Context, msg *types.MsgMarketOrder) (*types.MsgMarketOrderResponse, error) {
	ctx := sdk.UnwrapSDKContext(goCtx)

	// NOTE(review): parse errors from NewIntFromString and
	// AccAddressFromBech32 are ignored throughout — presumably the msg was
	// checked in ValidateBasic; confirm, otherwise a bad string yields a
	// zero Int here.
	amountBid, _ := sdk.NewIntFromString(msg.AmountBid)

	coinBid := sdk.NewCoin(msg.DenomBid, amountBid)

	coinsBid := sdk.NewCoins(coinBid)

	trader, _ := sdk.AccAddressFromBech32(msg.Creator)

	// Check if order creator has available balance
	if err := k.validateSenderBalance(ctx, trader, coinsBid); err != nil {
		return nil, err
	}

	memberAsk, found := k.GetMember(ctx, msg.DenomBid, msg.DenomAsk)
	if !found {
		return nil, sdkerrors.Wrapf(types.ErrMemberNotFound, "Member %s", msg.DenomAsk)
	}

	memberBid, found := k.GetMember(ctx, msg.DenomAsk, msg.DenomBid)
	if !found {
		return nil, sdkerrors.Wrapf(types.ErrMemberNotFound, "Member %s", msg.DenomBid)
	}

	// Invariant baseline: the product A*B must never decrease below this.
	productBeg := memberAsk.Balance.Mul(memberBid.Balance)

	// A(i)*B(i) = A(f)*B(f)
	// A(f) = A(i)*B(i)/B(f)
	// amountAsk = A(i) - A(f) = A(i) - A(i)*B(i)/B(f)
	// Compensate for rounding: strikeAmountAsk = A(i) - A(f) = A(i) - [A(i)*B(i)/B(f)+1]
	amountAsk := memberAsk.Balance.Sub(((memberAsk.Balance.Mul(memberBid.Balance)).Quo(memberBid.Balance.Add(amountBid))).Add(sdk.NewInt(1)))

	// Market Order Fee (basis points, deducted from the ask side)
	fee, _ := sdk.NewIntFromString(k.getParams(ctx).MarketFee)
	amountAsk = amountAsk.Sub((amountAsk.Mul(fee)).Quo(sdk.NewInt(10000)))

	// Edge case where strikeAskAmount rounds to 0
	// Rounding favors AMM vs Order
	if amountAsk.LTE(sdk.ZeroInt()) {
		return nil, sdkerrors.Wrapf(types.ErrAmtZero, "amount ask equal or less than zero")
	}

	// Slippage is initialized at zero
	slippage := sdk.ZeroInt()

	amountAskExpected, _ := sdk.NewIntFromString(msg.AmountAsk)

	// Slippage is only updated if amount expected is greater than received
	if amountAskExpected.GT(amountAsk) {
		slippage = ((amountAskExpected.Sub(amountAsk)).Mul(sdk.NewInt(10000))).Quo(amountAskExpected)

		slipLimit, _ := sdk.NewIntFromString(msg.Slippage)

		if slippage.GT(slipLimit) {
			return nil, sdkerrors.Wrapf(types.ErrSlippageTooGreat, "Slippage %s", slippage)
		}
	}

	// Transfer bid amount from trader account to module
	sdkError := k.bankKeeper.SendCoinsFromAccountToModule(ctx, trader, types.ModuleName, coinsBid)
	if sdkError != nil {
		return nil, sdkError
	}

	coinAsk := sdk.NewCoin(msg.DenomAsk, amountAsk)
	coinsAsk := sdk.NewCoins(coinAsk)

	// Transfer ask amount from module to trader account
	sdkError = k.bankKeeper.SendCoinsFromModuleToAccount(ctx, types.ModuleName, trader, coinsAsk)
	if sdkError != nil {
		return nil, sdkError
	}

	memberAsk.Balance = memberAsk.Balance.Sub(amountAsk)
	memberBid.Balance = memberBid.Balance.Add(amountBid)

	k.SetMember(ctx, memberAsk)
	k.SetMember(ctx, memberBid)

	uid := k.GetUidCount(ctx)

	pool, _ := k.GetPool(ctx, memberBid.Pair)
	prevOrder, _ := k.GetOrder(ctx, pool.History)

	// NOTE(review): prevOrder.Prev is updated but prevOrder is never passed
	// to k.SetOrder, so this back-link is not persisted — confirm intent.
	prevOrder.Prev = uid

	// The market trade is recorded as an already-filled order at the head of
	// the pool's history list.
	var order = types.Order{
		Uid:       uid,
		Owner:     msg.Creator,
		Status:    "filled",
		DenomAsk:  msg.DenomAsk,
		DenomBid:  msg.DenomBid,
		OrderType: "market",
		Amount:    amountBid,
		Rate:      []sdk.Int{amountAsk, amountBid},
		Prev:      0,
		Next:      pool.History,
		BegTime:   ctx.BlockHeader().Time.Unix(),
		UpdTime:   ctx.BlockHeader().Time.Unix(),
	}

	pool.History = uid

	if pool.Denom1 == msg.DenomBid {
		pool.Volume1.Amount = pool.Volume1.Amount.Add(amountBid)
		pool.Volume2.Amount = pool.Volume2.Amount.Add(amountAsk)
	} else {
		pool.Volume1.Amount = pool.Volume1.Amount.Add(amountAsk)
		pool.Volume2.Amount = pool.Volume2.Amount.Add(amountBid)
	}

	k.IncVolume(ctx, msg.DenomBid, amountBid)
	k.IncVolume(ctx, msg.DenomAsk, amountAsk)

	k.SetPool(ctx, pool)
	k.SetUidCount(ctx, uid+1)
	k.SetOrder(ctx, order)

	// Execute resting limit orders in both directions now that the price
	// moved. NOTE(review): `error` shadows the builtin type; also the second
	// call passes (memberAsk, memberBid) but assigns to (memberBid,
	// memberAsk) in the same order as the first — verify the intended
	// variable pairing.
	memberBid, memberAsk, error := ExecuteLimit(k, ctx, coinBid.Denom, coinAsk.Denom, memberBid, memberAsk)
	if error != nil {
		return nil, error
	}
	memberBid, memberAsk, error = ExecuteLimit(k, ctx, coinAsk.Denom, coinBid.Denom, memberAsk, memberBid)
	if error != nil {
		return nil, error
	}

	// No surplus: nothing to pay out or burn.
	if memberAsk.Balance.Mul(memberBid.Balance).Equal(productBeg) {
		return &types.MsgMarketOrderResponse{AmountBid: msg.AmountBid, AmountAsk: amountAsk.String(), Slippage: slippage.String()}, nil
	}

	if memberAsk.Balance.Mul(memberBid.Balance).LT(productBeg) {
		return nil, sdkerrors.Wrapf(types.ErrProductInvalid, "Pool product lower after Trade %s", memberAsk.Pair)
	}

	profitAsk, profitBid := k.Profit(productBeg, memberAsk, memberBid)

	memberAsk, error = k.Payout(ctx, profitAsk, memberAsk, pool)
	if error != nil {
		return nil, error
	}

	memberBid, error = k.Payout(ctx, profitBid, memberBid, pool)
	if error != nil {
		return nil, error
	}

	if memberAsk.Balance.Mul(memberBid.Balance).LT(productBeg) {
		return nil, sdkerrors.Wrapf(types.ErrProductInvalid, "Pool product lower after Payout %s", memberAsk.Pair)
	}

	memberAsk, error = k.Burn(ctx, profitAsk, memberAsk)
	if error != nil {
		return nil, error
	}

	memberBid, error = k.Burn(ctx, profitBid, memberBid)
	if error != nil {
		return nil, error
	}

	if memberAsk.Balance.Mul(memberBid.Balance).LT(productBeg) {
		return nil, sdkerrors.Wrapf(types.ErrProductInvalid, "Pool product lower after Burn %s", memberAsk.Pair)
	}

	k.SetMember(ctx, memberAsk)
	k.SetMember(ctx, memberBid)

	return &types.MsgMarketOrderResponse{AmountBid: msg.AmountBid, AmountAsk: amountAsk.String(), Slippage: slippage.String()}, nil
}

// ---------------------------------------------------------------------------
// file: x/market/keeper/msg_server_market_order_test.go (new file in this patch)
// ---------------------------------------------------------------------------

package keeper_test

import (
	"testing"

	keepertest "market/testutil/keeper"
	"market/x/market/keeper"
	"market/x/market/types"

	sdk "github.com/cosmos/cosmos-sdk/types"
	"github.com/stretchr/testify/require"
)

// TestMarketOrder quotes a market order, executes it, and checks the
// resulting member balance and the rate recorded in the pool history.
func TestMarketOrder(t *testing.T) {
	testInput := keepertest.CreateTestEnvironment(t)
	wctx := sdk.WrapSDKContext(testInput.Context)

	_, _, denomA, denomB, pair := common(t, testInput)

	// beforecount := testInput.MarketKeeper.GetUidCount(testInput.Context)

	//Create Order
	var o = types.MsgMarketOrder{Creator: addr, DenomAsk: denomA, AmountAsk: "15", DenomBid: denomB, AmountBid: "10", Slippage: "700"}

	quoteBid, error := testInput.MarketKeeper.Quote(wctx, &types.QueryQuoteRequest{
		DenomAsk:    o.DenomAsk,
		DenomBid:    o.DenomBid,
		DenomAmount: o.DenomAsk,
		Amount:      o.AmountAsk,
	})

	o.AmountBid = quoteBid.Amount

	require.NoError(t, error)

	_, err := keeper.NewMsgServerImpl(*testInput.MarketKeeper).MarketOrder(wctx, &o)
	require.NoError(t, err)
	/*
		aftercount := testInput.MarketKeeper.GetUidCount(testInput.Context)
		require.Equal(t, beforecount+1, aftercount)

		//Validate Order
		orders, orderfound := testInput.MarketKeeper.GetOrder(testInput.Context, beforecount)
		require.True(t, orderfound)
		require.Equal(t, orders.DenomBid, denomB)
		require.Equal(t, orders.DenomAsk, denomA)
		require.Equal(t, orders.Amount.String(), o.Amount)

	*/

	// Validate GetMember
	memberAsk, memberAskfound := testInput.MarketKeeper.GetMember(testInput.Context, denomB, denomA)

	require.True(t, memberAskfound)
	require.Equal(t, memberAsk.DenomA, denomB)
	require.Equal(t, memberAsk.DenomB, denomA)
	require.Equal(t, "18", memberAsk.Balance.String())
	require.Equal(t, memberAsk.Stop, uint64(0))

	// Validate order estimation

	pool, poolFound := testInput.MarketKeeper.GetPool(testInput.Context, pair)
	require.True(t, poolFound)
	order, orderFound := testInput.MarketKeeper.GetOrder(testInput.Context, pool.History)
	require.True(t, orderFound)
	amountAskInt, ok := sdk.NewIntFromString(o.AmountAsk)
	require.True(t, ok)
	amountBidInt, ok := sdk.NewIntFromString(o.AmountBid)
	require.True(t, ok)
	require.True(t, types.EQ(order.Rate, []sdk.Int{amountAskInt, amountBidInt}))

}

// ---------------------------------------------------------------------------
// file: x/market/keeper/msg_server_redeem_drop.go (new file in this patch)
// ---------------------------------------------------------------------------

package keeper

import (
	"context"
	"math/big"
	"strconv"
	"strings"

	"market/x/market/types"

	sdk "github.com/cosmos/cosmos-sdk/types"
	sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
)

// RedeemDrop burns a liquidity drop owned by msg.Creator: it withdraws the
// drop's proportional share of both pool members
// (total = drop.Drops * member.Balance / pool.Drops), pays the two coins out
// to the owner from the module account, updates the leader board, and marks
// the drop inactive.
func (k msgServer) RedeemDrop(goCtx context.Context, msg *types.MsgRedeemDrop) (*types.MsgRedeemDropResponse, error) {
	ctx := sdk.UnwrapSDKContext(goCtx)

	// NOTE(review): a non-numeric msg.Uid parses to 0 here (error ignored);
	// the subsequent GetDrop lookup is what rejects it — confirm uid 0 can
	// never be a real drop.
	uid, _ := strconv.ParseUint(msg.Uid, 10, 64)

	drop, found := k.GetDrop(ctx, uid)
	if !found {
		return nil, sdkerrors.Wrapf(types.ErrDropNotFound, "%s", msg.Uid)
	}

	// Only the drop's owner may redeem it.
	if drop.Owner != msg.Creator {
		return nil, sdkerrors.Wrapf(types.ErrNotDrops, "%s", msg.Uid)
	}

	pair := strings.Split(drop.Pair, ",")

	denom1 := pair[0]
	denom2 := pair[1]

	pool, found := k.GetPool(ctx, drop.Pair)
	if !found {
		return nil, sdkerrors.Wrapf(types.ErrPoolNotFound, "%s", drop.Pair)
	}

	member1, found := k.GetMember(ctx, denom2, denom1)
	if !found {
		return nil, sdkerrors.Wrapf(types.ErrMemberNotFound, "%s", drop.Pair)
	}

	member2, found := k.GetMember(ctx, denom1, denom2)
	if !found {
		return nil, sdkerrors.Wrapf(types.ErrMemberNotFound, "%s", drop.Pair)
	}

	// `total1 = (drop.Drops * member1.Balance) / pool.Drops`
	tmp := big.NewInt(0)
	tmp.Mul(drop.Drops.BigInt(), member1.Balance.BigInt())
	tmp.Quo(tmp, pool.Drops.BigInt())
	total1 := sdk.NewIntFromBigInt(tmp)
	// note: because of https://github.com/cosmos/cosmos-sdk/issues/17342
	// always run this after a call to `NewIntFromBigInt`
	tmp = big.NewInt(0)

	// `total2 = (drop.Drops * member2.Balance) / pool.Drops`
	tmp.Mul(drop.Drops.BigInt(), member2.Balance.BigInt())
	tmp.Quo(tmp, pool.Drops.BigInt())
	total2 := sdk.NewIntFromBigInt(tmp)
	// tmp = big.NewInt(0)

	// The redeemer's running drop total for this pair must exist; shrink it
	// by the redeemed drop before recomputing the leader board.
	dropRedeemer, ok := k.GetDropsOwnerPair(ctx, msg.Creator, drop.Pair)
	var sumDropRedeemer sdk.Int
	if ok {
		sumDropRedeemer = dropRedeemer.Sum
	} else {
		return nil, sdkerrors.Wrapf(types.ErrDropSumNotFound, "%s", msg.Creator)
	}

	sumDropRedeemer = sumDropRedeemer.Sub(drop.Drops)

	pool = k.updateLeaders(ctx, pool, msg.Creator, sumDropRedeemer)

	var sdkError error

	// Update Pool Total Drops
	pool.Drops = pool.Drops.Sub(drop.Drops)

	// Withdraw from Pool
	member1.Balance = member1.Balance.Sub(total1)
	member2.Balance = member2.Balance.Sub(total2)

	// moduleAcc := sdk.AccAddress(crypto.AddressHash([]byte(types.ModuleName)))
	// Get the owner address.
	// NOTE(review): bech32 error ignored — presumably validated upstream.
	owner, _ := sdk.AccAddressFromBech32(msg.Creator)

	coinOwner1 := sdk.NewCoin(denom1, total1)
	coinOwner2 := sdk.NewCoin(denom2, total2)
	coinsOwner := sdk.NewCoins(coinOwner1, coinOwner2)

	// Payout Owner
	sdkError = k.bankKeeper.SendCoinsFromModuleToAccount(ctx, types.ModuleName, owner, coinsOwner)
	if sdkError != nil {
		return nil, sdkError
	}

	// Deactivate drop
	drop.Active = false

	// Set Pool Member and Drop
	k.SetDrop(
		ctx,
		drop,
	)

	k.RemoveDropOwner(
		ctx,
		drop,
	)

	k.SetPool(
		ctx,
		pool,
	)

	k.SetMember(
		ctx,
		member1,
	)

	k.SetMember(
		ctx,
		member2,
	)

	return &types.MsgRedeemDropResponse{}, nil
}

// ---------------------------------------------------------------------------
// file: x/market/keeper/msg_server_redeem_drop_test.go (new file in this patch)
// ---------------------------------------------------------------------------

package keeper_test

import (
	"strconv"
	"strings"
	"testing"

	keepertest "market/testutil/keeper"
	"market/testutil/sample"
	"market/x/market/keeper"
	"market/x/market/types"

	sdk "github.com/cosmos/cosmos-sdk/types"
	"github.com/stretchr/testify/require"
)

// TestRedeemDrop funds three accounts, creates a pool plus an extra drop,
// and (in the next chunk) redeems and verifies balances and leader state.
func TestRedeemDrop(t *testing.T) {
	testInput := keepertest.CreateTestEnvironment(t)

	// TestData
	testdata := testData{coinAStr: "30CoinA", coinBStr: "40CoinB", RateAstrArray: []string{"60", "70"}, RateBstrArray: []string{"80", "90"}}
	coinPair, _ := sample.SampleCoins("70CoinA", "70CoinB")
	denomA, denomB := sample.SampleDenoms(coinPair)
	pair := strings.Join([]string{denomA, denomB}, ",")

	// MintCoins
	require.NoError(t, testInput.BankKeeper.MintCoins(testInput.Context, types.ModuleName, coinPair))

	// SendCoinsFromModuleToAccount
	requestAddress, err := sdk.AccAddressFromBech32(addr)
	require.NoError(t, err)
	require.NoError(t, testInput.BankKeeper.SendCoinsFromModuleToAccount(testInput.Context, types.ModuleName, requestAddress, coinPair))

	// MintCoins
	require.NoError(t,
testInput.BankKeeper.MintCoins(testInput.Context, types.ModuleName, coinPair)) + + // SendCoinsFromModuleToAccount + requestAddress2, err := sdk.AccAddressFromBech32(addr2) + require.NoError(t, err) + require.NoError(t, testInput.BankKeeper.SendCoinsFromModuleToAccount(testInput.Context, types.ModuleName, requestAddress2, coinPair)) + + // MintCoins + require.NoError(t, testInput.BankKeeper.MintCoins(testInput.Context, types.ModuleName, coinPair)) + + // SendCoinsFromModuleToAccount + requestAddress3, err := sdk.AccAddressFromBech32(addr3) + require.NoError(t, err) + require.NoError(t, testInput.BankKeeper.SendCoinsFromModuleToAccount(testInput.Context, types.ModuleName, requestAddress3, coinPair)) + + // GetUidCount before CreatePool + beforecount := testInput.MarketKeeper.GetUidCount(testInput.Context) + + // Create Pool + var p = types.MsgCreatePool{CoinA: testdata.coinAStr, CoinB: testdata.coinBStr, Creator: addr} + response, err := keeper.NewMsgServerImpl(*testInput.MarketKeeper).CreatePool(sdk.WrapSDKContext(testInput.Context), &p) + + // Validate CreatePool + require.NoError(t, err) + require.Contains(t, p.GetCreator(), response.String()) + require.Contains(t, p.GetCoinA(), response.String()) + require.Contains(t, p.GetCoinB(), response.String()) + + // Validate SetUidCount function. 
+ aftercount := testInput.MarketKeeper.GetUidCount(testInput.Context) + require.Equal(t, beforecount+1, aftercount) + + // Validate GetDrop + drops, dropFound := testInput.MarketKeeper.GetDrop(testInput.Context, beforecount) + require.True(t, dropFound) + require.Equal(t, drops.Pair, pair) + + // Validate GetPool + rst1, found := testInput.MarketKeeper.GetPool(testInput.Context, pair) + require.True(t, found) + require.Equal(t, rst1.Pair, pair) + require.Equal(t, "1200", rst1.Drops.String()) + require.Equal(t, 1, len(rst1.Leaders)) + require.Equal(t, "1200", rst1.Leaders[0].Drops.String()) + + // Validate CreateDrop + var d = types.MsgCreateDrop{Creator: addr2, Pair: pair, Drops: "120"} + createDropResponse, err := keeper.NewMsgServerImpl(*testInput.MarketKeeper).CreateDrop(sdk.WrapSDKContext(testInput.Context), &d) + require.NoError(t, err) + + // Validate GetMember + members, memberfound := testInput.MarketKeeper.GetMember(testInput.Context, denomB, denomA) + members1, memberfound1 := testInput.MarketKeeper.GetMember(testInput.Context, denomA, denomB) + require.True(t, memberfound) + require.Equal(t, members.DenomA, denomB) + require.Equal(t, members.DenomB, denomA) + require.Equal(t, "33", members.Balance.String()) + + require.True(t, memberfound1) + require.Equal(t, members1.DenomA, denomA) + require.Equal(t, members1.DenomB, denomB) + require.Equal(t, "44", members1.Balance.String()) + + // Validate GetPool + rst, found := testInput.MarketKeeper.GetPool(testInput.Context, pair) + require.True(t, found) + require.Equal(t, rst.Pair, pair) + require.Equal(t, "1320", rst.Drops.String()) + require.Equal(t, 2, len(rst.Leaders)) + require.Equal(t, addr2, rst.Leaders[1].Address) + require.Equal(t, "1200", rst.Leaders[0].Drops.String()) + + // Validate GetDrop + drops1, drop1Found := testInput.MarketKeeper.GetDrop(testInput.Context, aftercount) + require.True(t, drop1Found) + require.Equal(t, drops1.Pair, pair) + require.Equal(t, drops1.Drops.String(), d.Drops) + 
require.Contains(t, d.GetCreator(), createDropResponse.String()) + + // Validate RedeemDrop + Uid := strconv.FormatUint(drops1.Uid, 10) + var rd = types.MsgRedeemDrop{Creator: addr2, Uid: Uid} + createRedeemDropResponse, redeemdropErr := keeper.NewMsgServerImpl(*testInput.MarketKeeper).RedeemDrop(sdk.WrapSDKContext(testInput.Context), &rd) + require.NoError(t, redeemdropErr) + require.Contains(t, rd.GetCreator(), createRedeemDropResponse.String()) + + // Validate Drop After Redeem Drop + drops1, drop1Found = testInput.MarketKeeper.GetDrop(testInput.Context, aftercount) + require.True(t, drop1Found) + require.Equal(t, drops1.Pair, pair) + require.Equal(t, drops1.Drops.String(), d.Drops) + require.Contains(t, d.GetCreator(), createDropResponse.String()) + require.False(t, drops1.Active) + + // Validate GetPool After Redeem Drop + rst, found = testInput.MarketKeeper.GetPool(testInput.Context, pair) + require.True(t, found) + require.Equal(t, rst.Pair, pair) + require.Equal(t, "1200", rst.Drops.String()) + require.Equal(t, "1200", rst.Leaders[0].Drops.String()) + require.Equal(t, 1, len(rst.Leaders)) + require.Equal(t, addr, rst.Leaders[0].Address) + + owner, ok := testInput.MarketKeeper.GetDropsOwnerPair(testInput.Context, addr, pair) + require.True(t, ok) + require.Truef(t, owner.Sum.Equal(sdk.NewInt(1200)), owner.Sum.String()) + + pairs, ok := testInput.MarketKeeper.GetPairs(testInput.Context, addr) + require.True(t, ok) + require.Truef(t, pairs.Pairs[0] == pair, pairs.String()) + + // Validate GetMember After Redeem Drop + members, memberfound = testInput.MarketKeeper.GetMember(testInput.Context, denomB, denomA) + members1, memberfound1 = testInput.MarketKeeper.GetMember(testInput.Context, denomA, denomB) + require.True(t, memberfound) + require.Equal(t, members.DenomA, denomB) + require.Equal(t, members.DenomB, denomA) + require.Equal(t, members.Balance.String(), "30") + require.True(t, memberfound1) + require.Equal(t, members1.DenomA, denomA) + require.Equal(t, 
members1.DenomB, denomB) + require.Equal(t, members1.Balance.String(), "40") + + // Validate RedeemDrop + Uid2 := strconv.FormatUint(beforecount, 10) + var rd2 = types.MsgRedeemDrop{Creator: addr, Uid: Uid2} + createRedeemDropResponse2, redeemdropErr2 := keeper.NewMsgServerImpl(*testInput.MarketKeeper).RedeemDrop(sdk.WrapSDKContext(testInput.Context), &rd2) + require.NoError(t, redeemdropErr2) + require.Contains(t, rd2.GetCreator(), createRedeemDropResponse2.String()) + + // Validate GetPool After Redeem Drop + rst, found = testInput.MarketKeeper.GetPool(testInput.Context, pair) + require.True(t, found) + require.Equal(t, rst.Pair, pair) + require.Equal(t, rst.Drops.String(), "0") + require.Equal(t, 0, len(rst.Leaders)) + + pairs, ok = testInput.MarketKeeper.GetPairs(testInput.Context, addr) + require.True(t, ok) + require.Truef(t, len(pairs.Pairs) == 0, pairs.String()) +} + +func TestRedeemDrop_WithBurnCoin(t *testing.T) { + testInput := keepertest.CreateTestEnvironment(t) + + require.Equal(t, testInput.MarketKeeper.BurnCoin(testInput.Context), "stake") + + // TestData + testdata := testData{coinAStr: "100stake", coinBStr: "700CoinB"} + coinPair, _ := sample.SampleCoins("1000000000stake", "1000000000CoinB") + denomA, denomB := sample.SampleDenoms(coinPair) + pair := strings.Join([]string{denomA, denomB}, ",") + + // MintCoins + require.NoError(t, testInput.BankKeeper.MintCoins(testInput.Context, types.ModuleName, coinPair)) + + // SendCoinsFromModuleToAccount + requestAddress, err := sdk.AccAddressFromBech32(addr) + require.NoError(t, err) + require.NoError(t, testInput.BankKeeper.SendCoinsFromModuleToAccount(testInput.Context, types.ModuleName, requestAddress, coinPair)) + + // Create Pool + var p = types.MsgCreatePool{CoinA: testdata.coinAStr, CoinB: testdata.coinBStr, Creator: addr} + _, err = keeper.NewMsgServerImpl(*testInput.MarketKeeper).CreatePool(sdk.WrapSDKContext(testInput.Context), &p) + require.NoError(t, err) + + // Create Drop + var d = 
types.MsgCreateDrop{Creator: addr, Pair: pair, Drops: "123450000"} + _, err = keeper.NewMsgServerImpl(*testInput.MarketKeeper).CreateDrop(sdk.WrapSDKContext(testInput.Context), &d) + require.NoError(t, err) + + // redeem the drop + Uid := strconv.FormatUint(2, 10) + var rd = types.MsgRedeemDrop{Creator: addr, Uid: Uid} + _, redeemdropErr := keeper.NewMsgServerImpl(*testInput.MarketKeeper).RedeemDrop(sdk.WrapSDKContext(testInput.Context), &rd) + require.NoError(t, redeemdropErr) + + rst1, found := testInput.MarketKeeper.GetPool(testInput.Context, pair) + require.True(t, found) + require.Equal(t, rst1.Pair, pair) + require.Equal(t, "70000", rst1.Drops.String()) +} + +func TestRedeemDrop_NumericalLimits(t *testing.T) { + testInput := keepertest.CreateTestEnvironment(t) + + require.Equal(t, testInput.MarketKeeper.BurnCoin(testInput.Context), "stake") + + // TestData + testdata := testData{coinAStr: keepertest.MaxSupportedCoin("stake"), coinBStr: keepertest.MaxSupportedCoin("CoinB")} + coinPair, _ := sample.SampleCoins(keepertest.FundMaxSupported("stake"), keepertest.FundMaxSupported("CoinB")) + denomA, denomB := sample.SampleDenoms(coinPair) + pair := strings.Join([]string{denomA, denomB}, ",") + + // MintCoins + require.NoError(t, testInput.BankKeeper.MintCoins(testInput.Context, types.ModuleName, coinPair)) + + // SendCoinsFromModuleToAccount + requestAddress, err := sdk.AccAddressFromBech32(addr) + require.NoError(t, err) + require.NoError(t, testInput.BankKeeper.SendCoinsFromModuleToAccount(testInput.Context, types.ModuleName, requestAddress, coinPair)) + + // Create Pool + var p = types.MsgCreatePool{CoinA: testdata.coinAStr, CoinB: testdata.coinBStr, Creator: addr} + _, err = keeper.NewMsgServerImpl(*testInput.MarketKeeper).CreatePool(sdk.WrapSDKContext(testInput.Context), &p) + require.NoError(t, err) + + // Create Drop + var d = types.MsgCreateDrop{Creator: addr, Pair: pair, Drops: keepertest.MaxSupportedDrop("")} + _, err = 
keeper.NewMsgServerImpl(*testInput.MarketKeeper).CreateDrop(sdk.WrapSDKContext(testInput.Context), &d) + require.NoError(t, err) + + // redeem the drop + Uid := strconv.FormatUint(2, 10) + var rd = types.MsgRedeemDrop{Creator: addr, Uid: Uid} + _, redeemdropErr := keeper.NewMsgServerImpl(*testInput.MarketKeeper).RedeemDrop(sdk.WrapSDKContext(testInput.Context), &rd) + require.NoError(t, redeemdropErr) + + rst1, found := testInput.MarketKeeper.GetPool(testInput.Context, pair) + require.True(t, found) + require.Equal(t, rst1.Pair, pair) + require.Equal(t, keepertest.MaxSupportedDrop(""), rst1.Drops.String()) +} diff --git a/x/market/keeper/msg_server_test.go b/x/market/keeper/msg_server_test.go index 47548d05..f88ceefe 100644 --- a/x/market/keeper/msg_server_test.go +++ b/x/market/keeper/msg_server_test.go @@ -4,13 +4,14 @@ import ( "context" "testing" - sdk "github.com/cosmos/cosmos-sdk/types" keepertest "market/testutil/keeper" "market/x/market/keeper" "market/x/market/types" + + sdk "github.com/cosmos/cosmos-sdk/types" ) func setupMsgServer(t testing.TB) (types.MsgServer, context.Context) { - k, ctx := keepertest.MarketKeeper(t) - return keeper.NewMsgServerImpl(*k), sdk.WrapSDKContext(ctx) + k := keepertest.CreateTestEnvironment(t) + return keeper.NewMsgServerImpl(*k.MarketKeeper), sdk.WrapSDKContext(k.Context) } diff --git a/x/market/keeper/order.go b/x/market/keeper/order.go new file mode 100644 index 00000000..12080de8 --- /dev/null +++ b/x/market/keeper/order.go @@ -0,0 +1,368 @@ +package keeper + +import ( + "strconv" + "strings" + + "market/x/market/types" + + "github.com/cosmos/cosmos-sdk/store/prefix" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +// SetOrder set a specific order in the store from its index +func (k Keeper) SetOrder(ctx sdk.Context, order types.Order) { + + // order event + ctx.EventManager().EmitEvent( + sdk.NewEvent( + types.EventTypeOrder, + 
sdk.NewAttribute(types.AttributeKeyUid, strconv.FormatUint(order.Uid, 10)), + sdk.NewAttribute(types.AttributeKeyOwner, order.Owner), + sdk.NewAttribute(types.AttributeKeyStatus, order.Status), + sdk.NewAttribute(types.AttributeKeyOrderType, order.OrderType), + sdk.NewAttribute(types.AttributeKeyDenomAsk, order.DenomAsk), + sdk.NewAttribute(types.AttributeKeyDenomBid, order.DenomBid), + sdk.NewAttribute(types.AttributeKeyAmount, order.Amount.String()), + sdk.NewAttribute(types.AttributeKeyRate, strings.Join([]string{order.Rate[0].String(), order.Rate[1].String()}, ",")), + sdk.NewAttribute(types.AttributeKeyPrev, strconv.FormatUint(order.Prev, 10)), + sdk.NewAttribute(types.AttributeKeyNext, strconv.FormatUint(order.Next, 10)), + sdk.NewAttribute(types.AttributeKeyBeginTime, strconv.FormatInt(order.BegTime, 10)), + sdk.NewAttribute(types.AttributeKeyUpdateTime, strconv.FormatInt(order.UpdTime, 10)), + ), + ) + + store1 := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.OrderKeyPrefix)) + + b := k.cdc.MustMarshal(&order) + store1.Set(types.OrderKey( + order.Uid, + ), b) +} + +// RemoveOrder removes a order from the store +func (k Keeper) RemoveOrder( + ctx sdk.Context, + uid uint64, +) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.OrderKeyPrefix)) + store.Delete(types.OrderKey( + uid, + )) +} + +// GetOrder returns a order from its index +func (k Keeper) GetOrder( + ctx sdk.Context, + uid uint64, +) (val types.Order, found bool) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.OrderKeyPrefix)) + + b := store.Get(types.OrderKey( + uid, + )) + if b == nil { + return val, false + } + + k.cdc.MustUnmarshal(b, &val) + return val, true +} + +// SetOrderOwner adds an order to owner's open orders +func (k Keeper) SetOrderOwner( + ctx sdk.Context, + owner string, + uid uint64, +) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.OrderOwnerKeyPrefix)) + + var orders types.Orders 
+ + a := store.Get(types.OrderOwnerKey( + owner, + )) + if a == nil { + orders.Uids = []uint64{uid} + b := k.cdc.MustMarshal(&orders) + store.Set(types.OrderOwnerKey(owner), b) + return + } + + k.cdc.MustUnmarshal(a, &orders) + + // First remove uid if present + // Allows the order, if changed, to be at top of list + orders.Uids, _ = removeUid(orders.Uids, uid) + + // Append uid in the front + orders.Uids = append(orders.Uids, uid) + b := k.cdc.MustMarshal(&orders) + store.Set(types.OrderOwnerKey(owner), b) +} + +// GetOrderOwner returns order uids from a single owner +func (k Keeper) GetOrderOwnerUids( + ctx sdk.Context, + owner string, +) (orders types.Orders) { + store1 := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.OrderOwnerKeyPrefix)) + + a := store1.Get(types.OrderOwnerKey( + owner, + )) + if a == nil { + return orders + } + + k.cdc.MustUnmarshal(a, &orders) + + return orders +} + +// GetOrderOwner returns orders from a single owner +func (k Keeper) GetOrderOwner( + ctx sdk.Context, + owner string, +) (list []types.Order) { + store1 := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.OrderOwnerKeyPrefix)) + + a := store1.Get(types.OrderOwnerKey( + owner, + )) + if a == nil { + return list + } + + var orders types.Orders + + k.cdc.MustUnmarshal(a, &orders) + + store2 := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.OrderKeyPrefix)) + + for _, uid := range orders.Uids { + var order types.Order + + b := store2.Get(types.OrderKey( + uid, + )) + + if b != nil { + k.cdc.MustUnmarshal(b, &order) + list = append(list, order) + } + } + + return +} + +// RemoveOrderOwner removes an order from owner's open orders +func (k Keeper) RemoveOrderOwner( + ctx sdk.Context, + owner string, + uid uint64, +) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.OrderOwnerKeyPrefix)) + + a := store.Get(types.OrderOwnerKey( + owner, + )) + if a == nil { + return + } + + var orders types.Orders + 
k.cdc.MustUnmarshal(a, &orders) + + orders.Uids, _ = removeUid(orders.Uids, uid) + + b := k.cdc.MustMarshal(&orders) + store.Set(types.OrderOwnerKey(owner), b) +} + +// GetAllOrder returns all order +func (k Keeper) GetAllOrder(ctx sdk.Context) (list []types.Order) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.OrderKeyPrefix)) + iterator := sdk.KVStorePrefixIterator(store, []byte{}) + + defer iterator.Close() + + for ; iterator.Valid(); iterator.Next() { + var val types.Order + k.cdc.MustUnmarshal(iterator.Value(), &val) + list = append(list, val) + } + + return +} + +// GetOrder returns a order from its index +func (k Keeper) GetBook( + ctx sdk.Context, + denomA string, + denomB string, + orderType string, +) (list []types.OrderResponse) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.OrderKeyPrefix)) + + member, _ := k.GetMember(ctx, denomA, denomB) + + var uid uint64 + + if orderType == "limit" { + uid = member.Limit + } else { + uid = member.Stop + } + + if uid == 0 { + return nil + } + + i := 0 + + for uid > 0 && i < 100 { + b := store.Get(types.OrderKey( + uid, + )) + var order types.Order + k.cdc.MustUnmarshal(b, &order) + orderResponse := types.OrderResponse{ + Uid: order.Uid, + Owner: order.Owner, + Status: order.Status, + OrderType: order.OrderType, + DenomAsk: order.DenomAsk, + DenomBid: order.DenomBid, + Amount: order.Amount.String(), + Rate: []string{order.Rate[0].String(), order.Rate[1].String()}, + Prev: order.Prev, + Next: order.Next, + BegTime: order.BegTime, + } + list = append(list, orderResponse) + i++ + uid = order.Next + } + + return +} + +// BookEnds returns adjacent orders determined by rate +func (k Keeper) BookEnds( + ctx sdk.Context, + denomA string, + denomB string, + orderType string, + rate []sdk.Int, +) (ends [2]uint64) { + + member, _ := k.GetMember(ctx, denomA, denomB) + var order types.Order + + if orderType == "limit" { + + if member.Limit == 0 { + return [2]uint64{0, 0} + 
} + + order, _ = k.GetOrder(ctx, member.Limit) + + for types.GTE(rate, order.Rate) { + + if order.Next == 0 { + break + } + + order, _ = k.GetOrder(ctx, order.Next) + + } + + if order.Next == 0 { + if types.GTE(rate, order.Rate) { + return [2]uint64{order.Uid, 0} + } + } + + return [2]uint64{order.Prev, order.Uid} + + } else { + + if member.Stop == 0 { + return [2]uint64{0, 0} + } + + order, _ = k.GetOrder(ctx, member.Stop) + + for types.LTE(rate, order.Rate) { + + if order.Next == 0 { + break + } + + order, _ = k.GetOrder(ctx, order.Next) + + } + + if order.Next == 0 { + if types.LTE(rate, order.Rate) { + return [2]uint64{order.Uid, 0} + } + } + + return [2]uint64{order.Prev, order.Uid} + } +} + +// BookEnds returns adjacent orders determined by rate +func (k Keeper) GetQuote( + ctx sdk.Context, + memberAsk types.Member, + memberBid types.Member, + denomAmount string, + amount sdk.Int, +) (string, sdk.Int, error) { + + denom := memberAsk.DenomB + var amountResp sdk.Int + + if denomAmount == memberBid.DenomB { + + // A(i)*B(i) = A(f)*B(f) + // A(f) = A(i)*B(i)/B(f) + // amountAsk = A(i) - A(f) = A(i) - A(i)*B(i)/B(f) + amountResp = memberAsk.Balance.Sub(((memberAsk.Balance.Mul(memberBid.Balance)).Quo(memberBid.Balance.Add(amount))).Add(sdk.NewInt(1))) + + // Market Order Fee + fee, _ := sdk.NewIntFromString(k.getParams(ctx).MarketFee) + amountResp = amountResp.Sub((amountResp.Mul(fee)).Quo(sdk.NewInt(10000))) + + // Edge case where strikeAskAmount rounds to 0 + // Rounding favors AMM vs Order + if amountResp.Equal(sdk.ZeroInt()) { + return denom, sdk.ZeroInt(), sdkerrors.Wrapf(types.ErrAmtZero, "amount ask equal to zero") + } + + } else { + denom = memberBid.DenomB + + // Market Order Fee + fee, _ := sdk.NewIntFromString(k.getParams(ctx).MarketFee) + amountPlusFee := amount.Add((amount.Mul(fee)).Quo(sdk.NewInt(10000))).Add(sdk.NewInt(1)) + + // A(i)*B(i) = A(f)*B(f) + // B(f) = A(i)*B(i)/A(f) + // amountBid = B(f) - B(i) = A(i)*B(i)/A(f) - B(i) = A(i)*B(i)/(A(i) - 
amountAskPlusFee) - B(i) + amountResp = ((memberAsk.Balance.Mul(memberBid.Balance)).Quo(memberAsk.Balance.Sub(amountPlusFee))).Sub(memberBid.Balance) + + // Edge case where strikeAskAmount rounds to 0 + // Rounding favors AMM vs Order + if amountResp.LTE(sdk.ZeroInt()) { + return denom, sdk.ZeroInt(), sdkerrors.Wrapf(types.ErrLiquidityLow, "not enough liquidity") + } + + } + + return denom, amountResp, nil +} diff --git a/x/market/keeper/order_test.go b/x/market/keeper/order_test.go new file mode 100644 index 00000000..7f13f3c1 --- /dev/null +++ b/x/market/keeper/order_test.go @@ -0,0 +1,71 @@ +package keeper_test + +import ( + "strconv" + "testing" + + keepertest "market/testutil/keeper" + "market/testutil/nullify" + "market/x/market/keeper" + "market/x/market/types" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/stretchr/testify/require" +) + +// Prevent strconv unused error +var _ = strconv.IntSize + +func createNOrder(keeper *keeper.Keeper, ctx sdk.Context, n int) []types.Order { + items := make([]types.Order, n) + for i := range items { + items[i].Uid = uint64(i) + items[i].Owner = strconv.Itoa(i) + items[i].Status = "active" + items[i].OrderType = strconv.Itoa(i) + items[i].DenomAsk = strconv.Itoa(i) + items[i].DenomBid = strconv.Itoa(i) + items[i].Amount = sdk.NewInt(int64(i)) + items[i].Rate = []sdk.Int{sdk.NewInt(int64(i)), sdk.NewInt(int64(i))} + + keeper.SetOrder(ctx, items[i]) + } + return items +} + +func TestOrderGet(t *testing.T) { + keeper := keepertest.CreateTestEnvironment(t) + items := createNOrder(keeper.MarketKeeper, keeper.Context, 10) + for _, item := range items { + rst, found := keeper.MarketKeeper.GetOrder(keeper.Context, + item.Uid, + ) + require.True(t, found) + require.Equal(t, + nullify.Fill(&item), + nullify.Fill(&rst), + ) + } +} +func TestOrderRemove(t *testing.T) { + keeper := keepertest.CreateTestEnvironment(t) + items := createNOrder(keeper.MarketKeeper, keeper.Context, 10) + for _, item := range items { + 
keeper.MarketKeeper.RemoveOrder(keeper.Context, + item.Uid, + ) + _, found := keeper.MarketKeeper.GetOrder(keeper.Context, + item.Uid, + ) + require.False(t, found) + } +} + +func TestOrderGetAll(t *testing.T) { + keeper := keepertest.CreateTestEnvironment(t) + items := createNOrder(keeper.MarketKeeper, keeper.Context, 10) + require.ElementsMatch(t, + nullify.Fill(items), + nullify.Fill(keeper.MarketKeeper.GetAllOrder(keeper.Context)), + ) +} diff --git a/x/market/keeper/params.go b/x/market/keeper/params.go index 2903d080..18e1be39 100644 --- a/x/market/keeper/params.go +++ b/x/market/keeper/params.go @@ -1,16 +1,40 @@ package keeper import ( - sdk "github.com/cosmos/cosmos-sdk/types" "market/x/market/types" + + sdk "github.com/cosmos/cosmos-sdk/types" ) // GetParams get all parameters as types.Params func (k Keeper) GetParams(ctx sdk.Context) types.Params { - return types.NewParams() + return k.getParams(ctx) } // SetParams set the params func (k Keeper) SetParams(ctx sdk.Context, params types.Params) { k.paramstore.SetParamSet(ctx, ¶ms) } + +func (k Keeper) getParams(ctx sdk.Context) (params types.Params) { + k.paramstore.GetParamSet(ctx, ¶ms) + return params +} + +// EarnRate - the earning rate of the pool leader +func (k Keeper) EarnRates(ctx sdk.Context) (res string) { + k.paramstore.Get(ctx, types.KeyEarnRates, &res) + return +} + +// BurnRate - the burning rate of the burn coin +func (k Keeper) BurnRate(ctx sdk.Context) (res string) { + k.paramstore.Get(ctx, types.KeyBurnRate, &res) + return +} + +// BurnCoin - the burn coin +func (k Keeper) BurnCoin(ctx sdk.Context) (res string) { + k.paramstore.Get(ctx, types.KeyBurnCoin, &res) + return +} diff --git a/x/market/keeper/params_test.go b/x/market/keeper/params_test.go index 8ff96fcc..dfed24e6 100644 --- a/x/market/keeper/params_test.go +++ b/x/market/keeper/params_test.go @@ -3,16 +3,17 @@ package keeper_test import ( "testing" - "github.com/stretchr/testify/require" testkeeper "market/testutil/keeper" 
"market/x/market/types" + + "github.com/stretchr/testify/require" ) func TestGetParams(t *testing.T) { - k, ctx := testkeeper.MarketKeeper(t) + k := testkeeper.CreateTestEnvironment(t) params := types.DefaultParams() - k.SetParams(ctx, params) + k.MarketKeeper.SetParams(k.Context, params) - require.EqualValues(t, params, k.GetParams(ctx)) + require.EqualValues(t, params, k.MarketKeeper.GetParams(k.Context)) } diff --git a/x/market/keeper/pool.go b/x/market/keeper/pool.go new file mode 100644 index 00000000..9f1ec573 --- /dev/null +++ b/x/market/keeper/pool.go @@ -0,0 +1,128 @@ +package keeper + +import ( + "strconv" + + "market/x/market/types" + + "github.com/cosmos/cosmos-sdk/store/prefix" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// SetPool set a specific pool in the store from its index +func (k Keeper) SetPool(ctx sdk.Context, pool types.Pool) { + /* + var leaders []string + + for i := 0; i < len(pool.Leaders); i++ { + leaders = append(leaders, "{"+strings.Join([]string{pool.Leaders[i].Address, pool.Leaders[i].Drops.String()}, ", ")+"}") + } + */ + + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.PoolKeyPrefix)) + b := k.cdc.MustMarshal(&pool) + store.Set(types.PoolSetKey( + pool.Pair, + ), b) +} + +// GetPool returns a pool from its index +func (k Keeper) GetPool( + ctx sdk.Context, + pair string, +) (val types.Pool, found bool) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.PoolKeyPrefix)) + + b := store.Get(types.PoolKey( + pair, + )) + if b == nil { + return val, false + } + + k.cdc.MustUnmarshal(b, &val) + return val, true +} + +// RemovePool removes a pool from the store +func (k Keeper) RemovePool( + ctx sdk.Context, + pair string, +) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.PoolKeyPrefix)) + store.Delete(types.PoolKey( + pair, + )) +} + +// GetAllPool returns all pool +func (k Keeper) GetAllPool(ctx sdk.Context) (list []types.Pool) { + store := 
prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.PoolKeyPrefix)) + iterator := sdk.KVStorePrefixIterator(store, []byte{}) + + defer iterator.Close() + + for ; iterator.Valid(); iterator.Next() { + var val types.Pool + k.cdc.MustUnmarshal(iterator.Value(), &val) + list = append(list, val) + } + + return +} + +// GetHistory returns history of pool trades +func (k Keeper) GetHistory( + ctx sdk.Context, + pair string, + length string, +) (list []types.OrderResponse, found bool) { + + len, err := strconv.ParseUint(length, 10, 64) + if err != nil { + len = 0 + } + + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.OrderKeyPrefix)) + + pool, found := k.GetPool(ctx, pair) + + if !found { + return nil, found + } + + uid := pool.History + + if uid == 0 { + return nil, found + } + + counter := uint64(0) + + for uid > 0 && (counter < len || len == 0) { + b := store.Get(types.OrderKey( + uid, + )) + var order types.Order + k.cdc.MustUnmarshal(b, &order) + orderResponse := types.OrderResponse{ + Uid: order.Uid, + Owner: order.Owner, + Status: order.Status, + OrderType: order.OrderType, + DenomAsk: order.DenomAsk, + DenomBid: order.DenomBid, + Amount: order.Amount.String(), + Rate: []string{order.Rate[0].String(), order.Rate[1].String()}, + Prev: order.Prev, + Next: order.Next, + BegTime: order.BegTime, + UpdTime: order.UpdTime, + } + list = append(list, orderResponse) + counter = counter + 1 + uid = order.Next + } + + return +} diff --git a/x/market/keeper/pool_test.go b/x/market/keeper/pool_test.go new file mode 100644 index 00000000..a699021c --- /dev/null +++ b/x/market/keeper/pool_test.go @@ -0,0 +1,73 @@ +package keeper_test + +import ( + "strconv" + "testing" + + keepertest "market/testutil/keeper" + "market/testutil/nullify" + "market/x/market/keeper" + "market/x/market/types" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/stretchr/testify/require" +) + +// Prevent strconv unused error +var _ = strconv.IntSize + +func 
createNPool(keeper *keeper.Keeper, ctx sdk.Context, n int) []types.Pool { + items := make([]types.Pool, n) + for i := range items { + items[i].Pair = strconv.Itoa(i) + items[i].Denom1 = strconv.Itoa(i) + items[i].Denom2 = strconv.Itoa(i) + items[i].Leaders = []*types.Leader{ + { + Address: strconv.Itoa(i), + Drops: sdk.NewInt(int64(i)), + }, + } + items[i].Drops = sdk.NewIntFromUint64(uint64(0)) + + keeper.SetPool(ctx, items[i]) + } + return items +} + +func TestPoolGet(t *testing.T) { + keeper := keepertest.CreateTestEnvironment(t) + items := createNPool(keeper.MarketKeeper, keeper.Context, 10) + for _, item := range items { + rst, found := keeper.MarketKeeper.GetPool(keeper.Context, + item.Pair, + ) + require.True(t, found) + require.Equal(t, + nullify.Fill(&item), + nullify.Fill(&rst), + ) + } +} +func TestPoolRemove(t *testing.T) { + keeper := keepertest.CreateTestEnvironment(t) + items := createNPool(keeper.MarketKeeper, keeper.Context, 10) + for _, item := range items { + keeper.MarketKeeper.RemovePool(keeper.Context, + item.Pair, + ) + _, found := keeper.MarketKeeper.GetPool(keeper.Context, + item.Pair, + ) + require.False(t, found) + } +} + +func TestPoolGetAll(t *testing.T) { + keeper := keepertest.CreateTestEnvironment(t) + items := createNPool(keeper.MarketKeeper, keeper.Context, 10) + require.ElementsMatch(t, + nullify.Fill(items), + nullify.Fill(keeper.MarketKeeper.GetAllPool(keeper.Context)), + ) +} diff --git a/x/market/keeper/profit.go b/x/market/keeper/profit.go new file mode 100644 index 00000000..50dfa55c --- /dev/null +++ b/x/market/keeper/profit.go @@ -0,0 +1,233 @@ +package keeper + +import ( + "math/big" + "strings" + + "market/x/market/types" + + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +func (k Keeper) Profit(productBeg sdk.Int, memberA types.Member, memberB types.Member) (profitA sdk.Int, profitB sdk.Int) { + + principalA := big.NewInt(0) + principalA.Mul(productBeg.BigInt(), 
memberA.Balance.BigInt()) + principalA.Quo(principalA, memberB.Balance.BigInt()) + principalA.Sqrt(principalA) + profitA = memberA.Balance.Sub(sdk.NewIntFromBigInt(principalA)) + + principalB := big.NewInt(0) + principalB.Mul(productBeg.BigInt(), memberB.Balance.BigInt()) + principalB.Quo(principalB, memberA.Balance.BigInt()) + principalB.Sqrt(principalB) + profitB = memberB.Balance.Sub(sdk.NewIntFromBigInt(principalB)) + + return +} + +func (k Keeper) Payout(ctx sdk.Context, profit sdk.Int, member types.Member, pool types.Pool) (types.Member, error) { + if profit == sdk.ZeroInt() { + return member, nil + } + + earnRatesStringSlice := strings.Split(k.EarnRates(ctx), ",") + var earnRate sdk.Int + var earnings sdk.Int + var earningsCoin sdk.Coin + var earningsCoins sdk.Coins + + for i, v := range pool.Leaders { + + earnRate, _ = sdk.NewIntFromString(earnRatesStringSlice[i]) + + earnings = (profit.Mul(earnRate)).Quo(sdk.NewInt(10000)) + + earningsCoin = sdk.NewCoin(member.DenomB, earnings) + + earningsCoins = sdk.NewCoins(earningsCoin) + + leader, _ := sdk.AccAddressFromBech32(v.Address) + + // Payout Leader + sdkError := k.bankKeeper.SendCoinsFromModuleToAccount(ctx, types.ModuleName, leader, earningsCoins) + if sdkError != nil { + return member, sdkError + } + + member.Balance = member.Balance.Sub(earnings) + } + + return member, nil +} + +func (k Keeper) Burn(ctx sdk.Context, profit sdk.Int, member types.Member) (types.Member, error) { + + if profit == sdk.ZeroInt() { + return member, nil + } + + burnRate, _ := sdk.NewIntFromString(k.BurnRate(ctx)) + + denominator := sdk.NewInt(10000) + + burningsAmount := (profit.Mul(burnRate)).Quo(denominator) + + member.Balance = member.Balance.Sub(burningsAmount) + + burnings, found := k.GetBurnings(ctx, member.DenomB) + if found { + burnings.Amount = burnings.Amount.Add(burningsAmount) + } else { + burnings.Denom = member.DenomB + burnings.Amount = burningsAmount + } + + burnings, err := k.BurnTrade(ctx, burnings) + if err != 
nil { + return member, err + } + + if found && burnings.Amount == sdk.ZeroInt() { + k.RemoveBurnings(ctx, burnings.Denom) + return member, nil + } + + if burnings.Amount.GT(sdk.ZeroInt()) { + k.SetBurnings(ctx, burnings) + } + + return member, nil +} + +// Input Burnings - Output New Burnings +func (k Keeper) BurnTrade(ctx sdk.Context, burnings types.Burnings) (types.Burnings, error) { + + burnDenom := k.BurnCoin(ctx) + + burnCoin := sdk.NewCoin(burnDenom, burnings.Amount) + + if burnings.Denom != burnDenom { + + // Ask -> Burn Coin, Bid -> Coin traded for Burn Coin + amountBid := burnings.Amount + + memberAsk, found := k.GetMember(ctx, burnings.Denom, burnDenom) + if !found { + return burnings, nil + } + + memberBid, found := k.GetMember(ctx, burnDenom, burnings.Denom) + if !found { + return burnings, nil + } + + productBeg := memberAsk.Balance.Mul(memberBid.Balance) + + // Market Order + // A(i)*B(i) = A(f)*B(f) + // A(f) = A(i)*B(i)/B(f) + // strikeAmountAsk = A(i) - A(f) = A(i) - A(i)*B(i)/B(f) + // Compensate for rounding: strikeAmountAsk = A(i) - A(f) = A(i) - [A(i)*B(i)/B(f)+1] + amountAsk := memberAsk.Balance.Sub(((memberAsk.Balance.Mul(memberBid.Balance)).Quo(memberBid.Balance.Add(amountBid))).Add(sdk.NewInt(1))) + + // Market Order Fee + marketRate, _ := sdk.NewIntFromString(k.getParams(ctx).MarketFee) + + // Burn trades still payout fees + fee := (amountAsk.Mul(marketRate)).Quo(sdk.NewInt(10000)) + amountAsk = amountAsk.Sub(fee) + + // Edge case where strikeAskAmount rounds to 0 + // Rounding favors AMM vs Order + if amountAsk.LTE(sdk.ZeroInt()) { + return burnings, nil + } + + memberAsk.Balance = memberAsk.Balance.Sub(amountAsk) + memberBid.Balance = memberBid.Balance.Add(amountBid) + + if memberAsk.Balance.Mul(memberBid.Balance).LT(productBeg) { + return burnings, sdkerrors.Wrapf(types.ErrProductInvalid, "Pool product lower after Burn Trade %s", memberAsk.Pair) + } + + pool, found := k.GetPool(ctx, memberAsk.Pair) + if !found { + return burnings, nil + 
} + + // Distribute to Leaders portion of the Burn Trade + // Burn Trade does not Burn further or will create loop + memberAsk, err := k.Payout(ctx, fee, memberAsk, pool) + if err != nil { + return burnings, err + } + + if memberAsk.Balance.Mul(memberBid.Balance).LT(productBeg) { + return burnings, sdkerrors.Wrapf(types.ErrProductInvalid, "Pool product lower after Burn Payout %s", memberAsk.Pair) + } + + k.SetMember(ctx, memberAsk) + k.SetMember(ctx, memberBid) + + uid := k.GetUidCount(ctx) + + prevOrder, _ := k.GetOrder(ctx, pool.History) + + prevOrder.Prev = uid + + var order = types.Order{ + Uid: uid, + Owner: "system", + Status: "filled", + DenomAsk: burnDenom, + DenomBid: burnings.Denom, + OrderType: "burn", + Amount: amountBid, + Rate: []sdk.Int{amountAsk, amountBid}, + Prev: 0, + Next: pool.History, + BegTime: ctx.BlockHeader().Time.Unix(), + UpdTime: ctx.BlockHeader().Time.Unix(), + } + + pool.History = uid + + if pool.Denom1 == burnings.Denom { + pool.Volume1.Amount = pool.Volume1.Amount.Add(amountBid) + pool.Volume2.Amount = pool.Volume2.Amount.Add(amountAsk) + } else { + pool.Volume1.Amount = pool.Volume1.Amount.Add(amountAsk) + pool.Volume2.Amount = pool.Volume2.Amount.Add(amountBid) + } + + k.IncVolume(ctx, burnings.Denom, amountBid) + k.IncVolume(ctx, burnDenom, amountAsk) + + k.SetPool(ctx, pool) + k.SetUidCount(ctx, uid+1) + k.SetOrder(ctx, order) + + burnCoin = sdk.NewCoin(burnDenom, amountAsk) + + } + + if burnCoin.Amount.GT(sdk.ZeroInt()) { + + burnCoins := sdk.NewCoins(burnCoin) + + // Burn Ask Amount of Burn Coin + sdkError := k.bankKeeper.BurnCoins(ctx, types.ModuleName, burnCoins) + if sdkError != nil { + return burnings, sdkError + } + + k.AddBurned(ctx, burnCoins.AmountOf(burnDenom)) + + burnings.Amount = sdk.ZeroInt() + + } + + return burnings, nil +} diff --git a/x/market/keeper/uid.go b/x/market/keeper/uid.go new file mode 100644 index 00000000..59eb8553 --- /dev/null +++ b/x/market/keeper/uid.go @@ -0,0 +1,34 @@ +package keeper + 
+import ( + "encoding/binary" + + "market/x/market/types" + + "github.com/cosmos/cosmos-sdk/store/prefix" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// GetUidCount get the next uid +func (k Keeper) GetUidCount(ctx sdk.Context) uint64 { + store := prefix.NewStore(ctx.KVStore(k.storeKey), []byte{}) + byteKey := types.KeyPrefix(types.UidKey) + bz := store.Get(byteKey) + + // Count doesn't exist: no element + if bz == nil { + return 1 + } + + // Parse bytes + return binary.BigEndian.Uint64(bz) +} + +// SetUidCount set the next uid +func (k Keeper) SetUidCount(ctx sdk.Context, count uint64) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), []byte{}) + byteKey := types.KeyPrefix(types.UidKey) + bz := make([]byte, 8) + binary.BigEndian.PutUint64(bz, count) + store.Set(byteKey, bz) +} diff --git a/x/market/keeper/volume.go b/x/market/keeper/volume.go new file mode 100644 index 00000000..18a3df05 --- /dev/null +++ b/x/market/keeper/volume.go @@ -0,0 +1,82 @@ +package keeper + +import ( + "market/x/market/types" + + "github.com/cosmos/cosmos-sdk/store/prefix" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// SetVolume set a specific volume in the store from its index +func (k Keeper) SetVolume(ctx sdk.Context, volume types.Volume) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.VolumeKeyPrefix)) + b := k.cdc.MustMarshal(&volume) + store.Set(types.VolumeKey( + volume.Denom, + ), b) +} + +// GetVolume returns a volume from its index +func (k Keeper) GetVolume( + ctx sdk.Context, + denom string, +) (val types.Volume, found bool) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.VolumeKeyPrefix)) + + b := store.Get(types.VolumeKey( + denom, + )) + if b == nil { + return val, false + } + + k.cdc.MustUnmarshal(b, &val) + return val, true +} + +// GetVolume returns a volume from its index +func (k Keeper) IncVolume( + ctx sdk.Context, + denom string, + amount sdk.Int, +) types.Volume { + volume, found := 
k.GetVolume(ctx, denom) + + if found { + volume.Amount = volume.Amount.Add(amount) + } else { + volume.Denom = denom + volume.Amount = amount + } + + k.SetVolume(ctx, volume) + + return volume +} + +// RemoveVolume removes a volume from the store +func (k Keeper) RemoveVolume( + ctx sdk.Context, + denom string, +) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.VolumeKeyPrefix)) + store.Delete(types.VolumeKey( + denom, + )) +} + +// GetAllVolumes returns all volumes +func (k Keeper) GetAllVolumes(ctx sdk.Context) (list []types.Volume) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.VolumeKeyPrefix)) + iterator := sdk.KVStorePrefixIterator(store, []byte{}) + + defer iterator.Close() + + for ; iterator.Valid(); iterator.Next() { + var val types.Volume + k.cdc.MustUnmarshal(iterator.Value(), &val) + list = append(list, val) + } + + return +} diff --git a/x/market/module.go b/x/market/module.go index dafc35b7..86613e6d 100644 --- a/x/market/module.go +++ b/x/market/module.go @@ -1,9 +1,9 @@ package market import ( + "context" "encoding/json" "fmt" - // this line is used by starport scaffolding # 1 "github.com/gorilla/mux" "github.com/grpc-ecosystem/grpc-gateway/runtime" @@ -11,14 +11,15 @@ import ( abci "github.com/tendermint/tendermint/abci/types" + "market/x/market/client/cli" + "market/x/market/keeper" + "market/x/market/types" + "github.com/cosmos/cosmos-sdk/client" "github.com/cosmos/cosmos-sdk/codec" cdctypes "github.com/cosmos/cosmos-sdk/codec/types" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/cosmos-sdk/types/module" - "market/x/market/client/cli" - "market/x/market/keeper" - "market/x/market/types" ) var ( @@ -77,7 +78,7 @@ func (AppModuleBasic) RegisterRESTRoutes(clientCtx client.Context, rtr *mux.Rout // RegisterGRPCGatewayRoutes registers the gRPC Gateway routes for the module. 
func (AppModuleBasic) RegisterGRPCGatewayRoutes(clientCtx client.Context, mux *runtime.ServeMux) { - // this line is used by starport scaffolding # 2 + types.RegisterQueryHandlerClient(context.Background(), mux, types.NewQueryClient(clientCtx)) } // GetTxCmd returns the capability module's root tx command. diff --git a/x/market/module_simulation.go b/x/market/module_simulation.go index 3a793910..4c8679ad 100644 --- a/x/market/module_simulation.go +++ b/x/market/module_simulation.go @@ -3,15 +3,16 @@ package market import ( "math/rand" + "market/testutil/sample" + marketsimulation "market/x/market/simulation" + "market/x/market/types" + "github.com/cosmos/cosmos-sdk/baseapp" simappparams "github.com/cosmos/cosmos-sdk/simapp/params" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/cosmos-sdk/types/module" simtypes "github.com/cosmos/cosmos-sdk/types/simulation" "github.com/cosmos/cosmos-sdk/x/simulation" - "market/testutil/sample" - marketsimulation "market/x/market/simulation" - "market/x/market/types" ) // avoid unused import issue @@ -24,7 +25,31 @@ var ( ) const ( -// this line is used by starport scaffolding # simapp/module/const + opWeightMsgCreatePool = "op_weight_msg_create_chain" + // TODO: Determine the simulation weight value + defaultWeightMsgCreatePool int = 1 + + opWeightMsgCreateDrop = "op_weight_msg_create_chain" + // TODO: Determine the simulation weight value + defaultWeightMsgCreateDrop int = 0 + + opWeightMsgRedeemDrop = "op_weight_msg_create_chain" + // TODO: Determine the simulation weight value + defaultWeightMsgRedeemDrop int = 0 + + opWeightMsgCreateOrder = "op_weight_msg_create_chain" + // TODO: Determine the simulation weight value + defaultWeightMsgCreateOrder int = 0 + + opWeightMsgCancelOrder = "op_weight_msg_create_chain" + // TODO: Determine the simulation weight value + defaultWeightMsgCancelOrder int = 0 + + opWeightMsgMarketOrder = "op_weight_msg_create_chain" + // TODO: Determine the simulation weight value + 
defaultWeightMsgMarketOrder int = 0 + + // this line is used by starport scaffolding # simapp/module/const ) // GenerateGenesisState creates a randomized GenState of the module @@ -33,11 +58,8 @@ func (AppModule) GenerateGenesisState(simState *module.SimulationState) { for i, acc := range simState.Accounts { accs[i] = acc.Address.String() } - marketGenesis := types.GenesisState{ - Params: types.DefaultParams(), - // this line is used by starport scaffolding # simapp/module/genesisState - } - simState.GenState[types.ModuleName] = simState.Cdc.MustMarshalJSON(&marketGenesis) + marketGenesis := types.DefaultGenesis() + simState.GenState[types.ModuleName] = simState.Cdc.MustMarshalJSON(marketGenesis) } // ProposalContents doesn't return any content functions for governance proposals @@ -58,6 +80,72 @@ func (am AppModule) RegisterStoreDecoder(_ sdk.StoreDecoderRegistry) {} func (am AppModule) WeightedOperations(simState module.SimulationState) []simtypes.WeightedOperation { operations := make([]simtypes.WeightedOperation, 0) + var weightMsgCreatePool int + simState.AppParams.GetOrGenerate(simState.Cdc, opWeightMsgCreatePool, &weightMsgCreatePool, nil, + func(_ *rand.Rand) { + weightMsgCreatePool = defaultWeightMsgCreatePool + }, + ) + operations = append(operations, simulation.NewWeightedOperation( + weightMsgCreatePool, + marketsimulation.SimulateMsgCreatePool(am.accountKeeper, am.bankKeeper, am.keeper), + )) + + var weightMsgCreateDrop int + simState.AppParams.GetOrGenerate(simState.Cdc, opWeightMsgCreateDrop, &weightMsgCreateDrop, nil, + func(_ *rand.Rand) { + weightMsgCreateDrop = defaultWeightMsgCreateDrop + }, + ) + operations = append(operations, simulation.NewWeightedOperation( + weightMsgCreateDrop, + marketsimulation.SimulateMsgCreateDrop(am.accountKeeper, am.bankKeeper, am.keeper), + )) + + var weightMsgRedeemDrop int + simState.AppParams.GetOrGenerate(simState.Cdc, opWeightMsgRedeemDrop, &weightMsgRedeemDrop, nil, + func(_ *rand.Rand) { + weightMsgRedeemDrop 
= defaultWeightMsgRedeemDrop + }, + ) + operations = append(operations, simulation.NewWeightedOperation( + weightMsgRedeemDrop, + marketsimulation.SimulateMsgRedeemDrop(am.accountKeeper, am.bankKeeper, am.keeper), + )) + + var weightMsgCreateOrder int + simState.AppParams.GetOrGenerate(simState.Cdc, opWeightMsgCreateOrder, &weightMsgCreateOrder, nil, + func(_ *rand.Rand) { + weightMsgCreateOrder = defaultWeightMsgCreateOrder + }, + ) + operations = append(operations, simulation.NewWeightedOperation( + weightMsgCreateOrder, + marketsimulation.SimulateMsgCreateOrder(am.accountKeeper, am.bankKeeper, am.keeper), + )) + + var weightMsgCancelOrder int + simState.AppParams.GetOrGenerate(simState.Cdc, opWeightMsgCancelOrder, &weightMsgCancelOrder, nil, + func(_ *rand.Rand) { + weightMsgCancelOrder = defaultWeightMsgCancelOrder + }, + ) + operations = append(operations, simulation.NewWeightedOperation( + weightMsgCancelOrder, + marketsimulation.SimulateMsgCancelOrder(am.accountKeeper, am.bankKeeper, am.keeper), + )) + + var weightMsgMarketOrder int + simState.AppParams.GetOrGenerate(simState.Cdc, opWeightMsgMarketOrder, &weightMsgMarketOrder, nil, + func(_ *rand.Rand) { + weightMsgMarketOrder = defaultWeightMsgMarketOrder + }, + ) + operations = append(operations, simulation.NewWeightedOperation( + weightMsgMarketOrder, + marketsimulation.SimulateMsgMarketOrder(am.accountKeeper, am.bankKeeper, am.keeper), + )) + // this line is used by starport scaffolding # simapp/module/operation return operations diff --git a/x/market/simulation/cancel_order.go b/x/market/simulation/cancel_order.go new file mode 100644 index 00000000..359e814c --- /dev/null +++ b/x/market/simulation/cancel_order.go @@ -0,0 +1,30 @@ +package simulation + +import ( + "math/rand" + + "market/x/market/keeper" + "market/x/market/types" + + "github.com/cosmos/cosmos-sdk/baseapp" + sdk "github.com/cosmos/cosmos-sdk/types" + simtypes "github.com/cosmos/cosmos-sdk/types/simulation" +) + +func 
SimulateMsgCancelOrder( + ak types.AccountKeeper, + bk types.BankKeeper, + k keeper.Keeper, +) simtypes.Operation { + return func(r *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context, accs []simtypes.Account, chainID string, + ) (simtypes.OperationMsg, []simtypes.FutureOperation, error) { + simAccount, _ := simtypes.RandomAcc(r, accs) + msg := &types.MsgCancelOrder{ + Creator: simAccount.Address.String(), + } + + // TODO: Handling the CancelOrder simulation + + return simtypes.NoOpMsg(types.ModuleName, msg.Type(), "CancelOrder simulation not implemented"), nil, nil + } +} diff --git a/x/market/simulation/create_drop.go b/x/market/simulation/create_drop.go new file mode 100644 index 00000000..6bbffb0c --- /dev/null +++ b/x/market/simulation/create_drop.go @@ -0,0 +1,30 @@ +package simulation + +import ( + "math/rand" + + "market/x/market/keeper" + "market/x/market/types" + + "github.com/cosmos/cosmos-sdk/baseapp" + sdk "github.com/cosmos/cosmos-sdk/types" + simtypes "github.com/cosmos/cosmos-sdk/types/simulation" +) + +func SimulateMsgCreateDrop( + ak types.AccountKeeper, + bk types.BankKeeper, + k keeper.Keeper, +) simtypes.Operation { + return func(r *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context, accs []simtypes.Account, chainID string, + ) (simtypes.OperationMsg, []simtypes.FutureOperation, error) { + simAccount, _ := simtypes.RandomAcc(r, accs) + msg := &types.MsgCreateDrop{ + Creator: simAccount.Address.String(), + } + + // TODO: Handling the CreateDrop simulation + + return simtypes.NoOpMsg(types.ModuleName, msg.Type(), "CreateDrop simulation not implemented"), nil, nil + } +} diff --git a/x/market/simulation/create_order.go b/x/market/simulation/create_order.go new file mode 100644 index 00000000..27178e85 --- /dev/null +++ b/x/market/simulation/create_order.go @@ -0,0 +1,30 @@ +package simulation + +import ( + "math/rand" + + "market/x/market/keeper" + "market/x/market/types" + + "github.com/cosmos/cosmos-sdk/baseapp" + sdk 
"github.com/cosmos/cosmos-sdk/types" + simtypes "github.com/cosmos/cosmos-sdk/types/simulation" +) + +func SimulateMsgCreateOrder( + ak types.AccountKeeper, + bk types.BankKeeper, + k keeper.Keeper, +) simtypes.Operation { + return func(r *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context, accs []simtypes.Account, chainID string, + ) (simtypes.OperationMsg, []simtypes.FutureOperation, error) { + simAccount, _ := simtypes.RandomAcc(r, accs) + msg := &types.MsgCreateOrder{ + Creator: simAccount.Address.String(), + } + + // TODO: Handling the CreateOrder simulation + + return simtypes.NoOpMsg(types.ModuleName, msg.Type(), "CreateOrder simulation not implemented"), nil, nil + } +} diff --git a/x/market/simulation/create_pool.go b/x/market/simulation/create_pool.go new file mode 100644 index 00000000..2d91b9de --- /dev/null +++ b/x/market/simulation/create_pool.go @@ -0,0 +1,95 @@ +package simulation + +import ( + "math/rand" + + "market/testutil/sample" + + "github.com/cosmos/cosmos-sdk/baseapp" + sdk "github.com/cosmos/cosmos-sdk/types" + simtypes "github.com/cosmos/cosmos-sdk/types/simulation" + + //"market/testutil/sample" + + "market/x/market/keeper" + "market/x/market/types" +) + +func SimulateMsgCreatePool( + ak types.AccountKeeper, + bk types.BankKeeper, + k keeper.Keeper, +) simtypes.Operation { + return func(r *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context, accs []simtypes.Account, chainID string, + ) (simtypes.OperationMsg, []simtypes.FutureOperation, error) { + //addr := sample.AccAddress() + simAccount, _ := simtypes.RandomAcc(r, accs) + addr := simAccount.Address + //requestAddress, _ := sdk.AccAddressFromBech32(addr) + + msg := &types.MsgCreatePool{ + Creator: addr.String(), + CoinA: sdk.NewCoin("CoinA", sdk.NewInt(160)).String(), + CoinB: sdk.NewCoin("CoinB", sdk.NewInt(170)).String(), + } + coins, _ := sample.SampleCoins(msg.CoinA, msg.CoinB) + + bk.MintCoins(ctx, types.ModuleName, coins) + + bk.SendCoinsFromModuleToAccount(ctx, types.ModuleName, 
addr, coins) + + _, err := keeper.NewMsgServerImpl(k).CreatePool(sdk.WrapSDKContext(ctx), msg) + + //simtypes. + //err := sendMsgSend(r, app, bk, ak, k, *msg, ctx, chainID, []cryptotypes.PrivKey{creatorAccount.PrivKey}) + if err != nil { + return simtypes.NoOpMsg(types.ModuleName, msg.Type(), "invalid transfers"), nil, err + } + + return simtypes.NoOpMsg(types.ModuleName, msg.Type(), "CreatePool simulation"), nil, nil + } +} + +/* +func sendMsgSend( + r *rand.Rand, app *baseapp.BaseApp, bk types.BankKeeper, ak types.AccountKeeper, k keeper.Keeper, + msg types.MsgCreatePool, ctx sdk.Context, chainID string, privkeys []cryptotypes.PrivKey, +) error { + addr := sample.AccAddress() + requestAddress, err := sdk.AccAddressFromBech32(addr) + //coina := msg.GetCoinA() + //coinb := msg.GetCoinB() + coins,_ := sample.SampleCoins(msg.GetCoinA(), msg.GetCoinB()) + //fees, err := simtypes.RandomFees(r, ctx, coins) + + bk.MintCoins(ctx, types.ModuleName, coins) + + err = bk.SendCoinsFromModuleToAccount(ctx, types.ModuleName, requestAddress, coins) + if err != nil { + return err + } + + _, err = keeper.NewMsgServerImpl(k).CreatePool(sdk.WrapSDKContext(ctx), &msg) + if err != nil { + return err + } + txGen := simappparams.MakeTestEncodingConfig().TxConfig + tx, err := helpers.GenTx( + txGen, + []sdk.Msg{&msg}, + coins, + helpers.DefaultGenTxGas, + chainID, + []uint64{requestAddress.GetAccountNumber()}, + []uint64{requestAddress.GetSequence()}, + privkeys..., + ) + if err != nil { + return err + } + _, _, err = app.Deliver(txGen.TxEncoder(), tx) + if err != nil { + return err + } + return nil +}*/ diff --git a/x/market/simulation/market_order.go b/x/market/simulation/market_order.go new file mode 100644 index 00000000..dfd28c7c --- /dev/null +++ b/x/market/simulation/market_order.go @@ -0,0 +1,30 @@ +package simulation + +import ( + "math/rand" + + "market/x/market/keeper" + "market/x/market/types" + + "github.com/cosmos/cosmos-sdk/baseapp" + sdk 
"github.com/cosmos/cosmos-sdk/types" + simtypes "github.com/cosmos/cosmos-sdk/types/simulation" +) + +func SimulateMsgMarketOrder( + ak types.AccountKeeper, + bk types.BankKeeper, + k keeper.Keeper, +) simtypes.Operation { + return func(r *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context, accs []simtypes.Account, chainID string, + ) (simtypes.OperationMsg, []simtypes.FutureOperation, error) { + simAccount, _ := simtypes.RandomAcc(r, accs) + msg := &types.MsgMarketOrder{ + Creator: simAccount.Address.String(), + } + + // TODO: Handling the MarketOrder simulation + + return simtypes.NoOpMsg(types.ModuleName, msg.Type(), "MarketOrder simulation not implemented"), nil, nil + } +} diff --git a/x/market/simulation/redeem_drop.go b/x/market/simulation/redeem_drop.go new file mode 100644 index 00000000..dc5d378a --- /dev/null +++ b/x/market/simulation/redeem_drop.go @@ -0,0 +1,30 @@ +package simulation + +import ( + "math/rand" + + "market/x/market/keeper" + "market/x/market/types" + + "github.com/cosmos/cosmos-sdk/baseapp" + sdk "github.com/cosmos/cosmos-sdk/types" + simtypes "github.com/cosmos/cosmos-sdk/types/simulation" +) + +func SimulateMsgRedeemDrop( + ak types.AccountKeeper, + bk types.BankKeeper, + k keeper.Keeper, +) simtypes.Operation { + return func(r *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context, accs []simtypes.Account, chainID string, + ) (simtypes.OperationMsg, []simtypes.FutureOperation, error) { + simAccount, _ := simtypes.RandomAcc(r, accs) + msg := &types.MsgRedeemDrop{ + Creator: simAccount.Address.String(), + } + + // TODO: Handling the RedeemDrop simulation + + return simtypes.NoOpMsg(types.ModuleName, msg.Type(), "RedeemDrop simulation not implemented"), nil, nil + } +} diff --git a/x/market/types/burnings.pb.go b/x/market/types/burnings.pb.go new file mode 100644 index 00000000..3eab782a --- /dev/null +++ b/x/market/types/burnings.pb.go @@ -0,0 +1,527 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: market/burnings.proto + +package types + +import ( + fmt "fmt" + github_com_cosmos_cosmos_sdk_types "github.com/cosmos/cosmos-sdk/types" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type Burnings struct { + Denom string `protobuf:"bytes,1,opt,name=denom,proto3" json:"denom,omitempty"` + Amount github_com_cosmos_cosmos_sdk_types.Int `protobuf:"bytes,2,opt,name=amount,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Int" json:"amount"` +} + +func (m *Burnings) Reset() { *m = Burnings{} } +func (m *Burnings) String() string { return proto.CompactTextString(m) } +func (*Burnings) ProtoMessage() {} +func (*Burnings) Descriptor() ([]byte, []int) { + return fileDescriptor_9fc444423a2f29a3, []int{0} +} +func (m *Burnings) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Burnings) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Burnings.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Burnings) XXX_Merge(src proto.Message) { + xxx_messageInfo_Burnings.Merge(m, src) +} +func (m *Burnings) XXX_Size() int { + return m.Size() +} +func (m *Burnings) XXX_DiscardUnknown() { + xxx_messageInfo_Burnings.DiscardUnknown(m) +} + +var xxx_messageInfo_Burnings proto.InternalMessageInfo + +type Burned struct { + Amount 
github_com_cosmos_cosmos_sdk_types.Int `protobuf:"bytes,1,opt,name=amount,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Int" json:"amount"` +} + +func (m *Burned) Reset() { *m = Burned{} } +func (m *Burned) String() string { return proto.CompactTextString(m) } +func (*Burned) ProtoMessage() {} +func (*Burned) Descriptor() ([]byte, []int) { + return fileDescriptor_9fc444423a2f29a3, []int{1} +} +func (m *Burned) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Burned) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Burned.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Burned) XXX_Merge(src proto.Message) { + xxx_messageInfo_Burned.Merge(m, src) +} +func (m *Burned) XXX_Size() int { + return m.Size() +} +func (m *Burned) XXX_DiscardUnknown() { + xxx_messageInfo_Burned.DiscardUnknown(m) +} + +var xxx_messageInfo_Burned proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Burnings)(nil), "pendulumlabs.market.market.Burnings") + proto.RegisterType((*Burned)(nil), "pendulumlabs.market.market.Burned") +} + +func init() { proto.RegisterFile("market/burnings.proto", fileDescriptor_9fc444423a2f29a3) } + +var fileDescriptor_9fc444423a2f29a3 = []byte{ + // 228 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xcd, 0x4d, 0x2c, 0xca, + 0x4e, 0x2d, 0xd1, 0x4f, 0x2a, 0x2d, 0xca, 0xcb, 0xcc, 0x4b, 0x2f, 0xd6, 0x2b, 0x28, 0xca, 0x2f, + 0xc9, 0x17, 0x92, 0x2a, 0x48, 0xcd, 0x4b, 0x29, 0xcd, 0x29, 0xcd, 0xcd, 0x49, 0x4c, 0x2a, 0xd6, + 0x83, 0xa8, 0x81, 0x52, 0x52, 0x22, 0xe9, 0xf9, 0xe9, 0xf9, 0x60, 0x65, 0xfa, 0x20, 0x16, 0x44, + 0x87, 0x52, 0x06, 0x17, 0x87, 0x13, 0xd4, 0x0c, 0x21, 0x11, 0x2e, 0xd6, 0x94, 0xd4, 0xbc, 0xfc, + 0x5c, 0x09, 0x46, 0x05, 0x46, 0x0d, 0xce, 0x20, 0x08, 0x47, 0xc8, 0x8d, 0x8b, 
0x2d, 0x31, 0x37, + 0xbf, 0x34, 0xaf, 0x44, 0x82, 0x09, 0x24, 0xec, 0xa4, 0x77, 0xe2, 0x9e, 0x3c, 0xc3, 0xad, 0x7b, + 0xf2, 0x6a, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xc9, 0xf9, 0xc5, + 0xb9, 0xf9, 0xc5, 0x50, 0x4a, 0xb7, 0x38, 0x25, 0x5b, 0xbf, 0xa4, 0xb2, 0x20, 0xb5, 0x58, 0xcf, + 0x33, 0xaf, 0x24, 0x08, 0xaa, 0x5b, 0x29, 0x80, 0x8b, 0x0d, 0x64, 0x53, 0x6a, 0x0a, 0x92, 0x89, + 0x8c, 0x94, 0x98, 0xe8, 0x64, 0x7e, 0xe2, 0xa1, 0x1c, 0xc3, 0x8d, 0x87, 0x72, 0x8c, 0x27, 0x1e, + 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x84, 0xc7, 0x72, 0x0c, 0x17, + 0x1e, 0xcb, 0x31, 0xdc, 0x78, 0x2c, 0xc7, 0x10, 0x05, 0x0b, 0xa6, 0x0a, 0x7d, 0x28, 0x03, 0x6c, + 0x48, 0x12, 0x1b, 0xd8, 0xef, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0xfc, 0xc5, 0x27, 0x75, + 0x46, 0x01, 0x00, 0x00, +} + +func (m *Burnings) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Burnings) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Burnings) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size := m.Amount.Size() + i -= size + if _, err := m.Amount.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintBurnings(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Denom) > 0 { + i -= len(m.Denom) + copy(dAtA[i:], m.Denom) + i = encodeVarintBurnings(dAtA, i, uint64(len(m.Denom))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Burned) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Burned) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Burned) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size := m.Amount.Size() + i -= size + if _, err := m.Amount.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintBurnings(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintBurnings(dAtA []byte, offset int, v uint64) int { + offset -= sovBurnings(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Burnings) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Denom) + if l > 0 { + n += 1 + l + sovBurnings(uint64(l)) + } + l = m.Amount.Size() + n += 1 + l + sovBurnings(uint64(l)) + return n +} + +func (m *Burned) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Amount.Size() + n += 1 + l + sovBurnings(uint64(l)) + return n +} + +func sovBurnings(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozBurnings(x uint64) (n int) { + return sovBurnings(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Burnings) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBurnings + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Burnings: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Burnings: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Denom", wireType) + } + var stringLen 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBurnings + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBurnings + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBurnings + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Denom = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBurnings + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBurnings + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBurnings + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Amount.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBurnings(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthBurnings + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Burned) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBurnings + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + 
} + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Burned: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Burned: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBurnings + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBurnings + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBurnings + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Amount.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBurnings(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthBurnings + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipBurnings(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowBurnings + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowBurnings + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + 
break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowBurnings + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthBurnings + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupBurnings + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthBurnings + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthBurnings = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowBurnings = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupBurnings = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/market/types/codec.go b/x/market/types/codec.go index 844157a8..9e5ad3f4 100644 --- a/x/market/types/codec.go +++ b/x/market/types/codec.go @@ -3,15 +3,39 @@ package types import ( "github.com/cosmos/cosmos-sdk/codec" cdctypes "github.com/cosmos/cosmos-sdk/codec/types" - // this line is used by starport scaffolding # 1 + sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/cosmos-sdk/types/msgservice" ) func RegisterCodec(cdc *codec.LegacyAmino) { + cdc.RegisterConcrete(&MsgCreatePool{}, "market/CreatePool", nil) + cdc.RegisterConcrete(&MsgCreateDrop{}, "market/CreateDrop", nil) + cdc.RegisterConcrete(&MsgRedeemDrop{}, "market/RedeemDrop", nil) + cdc.RegisterConcrete(&MsgCreateOrder{}, "market/CreateOrder", nil) + cdc.RegisterConcrete(&MsgCancelOrder{}, "market/CancelOrder", nil) + cdc.RegisterConcrete(&MsgMarketOrder{}, "market/MarketOrder", nil) // this line is used by starport scaffolding # 2 } func RegisterInterfaces(registry cdctypes.InterfaceRegistry) { + 
registry.RegisterImplementations((*sdk.Msg)(nil), + &MsgCreatePool{}, + ) + registry.RegisterImplementations((*sdk.Msg)(nil), + &MsgCreateDrop{}, + ) + registry.RegisterImplementations((*sdk.Msg)(nil), + &MsgRedeemDrop{}, + ) + registry.RegisterImplementations((*sdk.Msg)(nil), + &MsgCreateOrder{}, + ) + registry.RegisterImplementations((*sdk.Msg)(nil), + &MsgCancelOrder{}, + ) + registry.RegisterImplementations((*sdk.Msg)(nil), + &MsgMarketOrder{}, + ) // this line is used by starport scaffolding # 3 msgservice.RegisterMsgServiceDesc(registry, &_Msg_serviceDesc) diff --git a/x/market/types/drop.pb.go b/x/market/types/drop.pb.go new file mode 100644 index 00000000..61149bdb --- /dev/null +++ b/x/market/types/drop.pb.go @@ -0,0 +1,955 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: market/drop.proto + +package types + +import ( + fmt "fmt" + github_com_cosmos_cosmos_sdk_types "github.com/cosmos/cosmos-sdk/types" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type Drop struct { + Uid uint64 `protobuf:"varint,1,opt,name=uid,proto3" json:"uid,omitempty"` + Owner string `protobuf:"bytes,2,opt,name=owner,proto3" json:"owner,omitempty"` + Pair string `protobuf:"bytes,3,opt,name=pair,proto3" json:"pair,omitempty"` + Drops github_com_cosmos_cosmos_sdk_types.Int `protobuf:"bytes,4,opt,name=drops,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Int" json:"drops"` + Product github_com_cosmos_cosmos_sdk_types.Int `protobuf:"bytes,5,opt,name=product,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Int" json:"product"` + Active bool `protobuf:"varint,6,opt,name=active,proto3" json:"active,omitempty"` +} + +func (m *Drop) Reset() { *m = Drop{} } +func (m *Drop) String() string { return proto.CompactTextString(m) } +func (*Drop) ProtoMessage() {} +func (*Drop) Descriptor() ([]byte, []int) { + return fileDescriptor_3961bee11a1276cb, []int{0} +} +func (m *Drop) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Drop) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Drop.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Drop) XXX_Merge(src proto.Message) { + xxx_messageInfo_Drop.Merge(m, src) +} +func (m *Drop) XXX_Size() int { + return m.Size() +} +func (m *Drop) XXX_DiscardUnknown() { + xxx_messageInfo_Drop.DiscardUnknown(m) +} + +var xxx_messageInfo_Drop proto.InternalMessageInfo + +type Drops struct { + Uids []uint64 `protobuf:"varint,1,rep,packed,name=uids,proto3" json:"uids,omitempty"` + Sum github_com_cosmos_cosmos_sdk_types.Int `protobuf:"bytes,2,opt,name=sum,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Int" json:"sum"` +} + +func (m *Drops) Reset() { *m = Drops{} } +func (m *Drops) String() string { return proto.CompactTextString(m) } 
+func (*Drops) ProtoMessage() {} +func (*Drops) Descriptor() ([]byte, []int) { + return fileDescriptor_3961bee11a1276cb, []int{1} +} +func (m *Drops) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Drops) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Drops.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Drops) XXX_Merge(src proto.Message) { + xxx_messageInfo_Drops.Merge(m, src) +} +func (m *Drops) XXX_Size() int { + return m.Size() +} +func (m *Drops) XXX_DiscardUnknown() { + xxx_messageInfo_Drops.DiscardUnknown(m) +} + +var xxx_messageInfo_Drops proto.InternalMessageInfo + +type DropPairs struct { + Pairs []string `protobuf:"bytes,1,rep,name=pairs,proto3" json:"pairs,omitempty"` +} + +func (m *DropPairs) Reset() { *m = DropPairs{} } +func (m *DropPairs) String() string { return proto.CompactTextString(m) } +func (*DropPairs) ProtoMessage() {} +func (*DropPairs) Descriptor() ([]byte, []int) { + return fileDescriptor_3961bee11a1276cb, []int{2} +} +func (m *DropPairs) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DropPairs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DropPairs.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DropPairs) XXX_Merge(src proto.Message) { + xxx_messageInfo_DropPairs.Merge(m, src) +} +func (m *DropPairs) XXX_Size() int { + return m.Size() +} +func (m *DropPairs) XXX_DiscardUnknown() { + xxx_messageInfo_DropPairs.DiscardUnknown(m) +} + +var xxx_messageInfo_DropPairs proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Drop)(nil), "pendulumlabs.market.market.Drop") + proto.RegisterType((*Drops)(nil), 
"pendulumlabs.market.market.Drops") + proto.RegisterType((*DropPairs)(nil), "pendulumlabs.market.market.DropPairs") +} + +func init() { proto.RegisterFile("market/drop.proto", fileDescriptor_3961bee11a1276cb) } + +var fileDescriptor_3961bee11a1276cb = []byte{ + // 325 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x91, 0xbf, 0x4e, 0xfb, 0x30, + 0x10, 0xc7, 0xe3, 0x5f, 0x92, 0xfe, 0xe8, 0x4d, 0x60, 0x15, 0x64, 0x75, 0x70, 0x43, 0x07, 0x94, + 0x85, 0x64, 0x60, 0x60, 0x45, 0x55, 0x07, 0xd8, 0x50, 0x46, 0x24, 0x86, 0x34, 0x89, 0x4a, 0xd4, + 0xa6, 0xb6, 0xfc, 0x87, 0x3f, 0x6f, 0xc1, 0x63, 0x75, 0xec, 0x58, 0x31, 0x54, 0xb4, 0xd9, 0x78, + 0x0a, 0x64, 0x3b, 0x3c, 0x00, 0x4c, 0xf7, 0xb9, 0xb3, 0xbf, 0xf7, 0xb5, 0xef, 0xe0, 0xa4, 0xc9, + 0xc5, 0xa2, 0x52, 0x69, 0x29, 0x18, 0x4f, 0xb8, 0x60, 0x8a, 0xe1, 0x21, 0xaf, 0x56, 0xa5, 0x5e, + 0xea, 0x66, 0x99, 0xcf, 0x64, 0xe2, 0xce, 0xbb, 0x30, 0x1c, 0xcc, 0xd9, 0x9c, 0xd9, 0x6b, 0xa9, + 0x21, 0xa7, 0x18, 0x7f, 0x21, 0x08, 0xa6, 0x82, 0x71, 0x7c, 0x0c, 0xbe, 0xae, 0x4b, 0x82, 0x22, + 0x14, 0x07, 0x99, 0x41, 0x3c, 0x80, 0x90, 0xbd, 0xac, 0x2a, 0x41, 0xfe, 0x45, 0x28, 0xee, 0x67, + 0x2e, 0xc1, 0x18, 0x02, 0x9e, 0xd7, 0x82, 0xf8, 0xb6, 0x68, 0x19, 0x4f, 0x21, 0x34, 0x8f, 0x90, + 0x24, 0x30, 0xc5, 0x49, 0xb2, 0xde, 0x8d, 0xbc, 0x8f, 0xdd, 0xe8, 0x62, 0x5e, 0xab, 0x27, 0x3d, + 0x4b, 0x0a, 0xd6, 0xa4, 0x05, 0x93, 0x0d, 0x93, 0x5d, 0xb8, 0x94, 0xe5, 0x22, 0x55, 0x6f, 0xbc, + 0x92, 0xc9, 0xdd, 0x4a, 0x65, 0x4e, 0x8c, 0x6f, 0xe1, 0x3f, 0x17, 0xac, 0xd4, 0x85, 0x22, 0xe1, + 0x9f, 0xfa, 0xfc, 0xc8, 0xf1, 0x19, 0xf4, 0xf2, 0x42, 0xd5, 0xcf, 0x15, 0xe9, 0x45, 0x28, 0x3e, + 0xca, 0xba, 0x6c, 0xfc, 0x08, 0xe1, 0xd4, 0x5a, 0x61, 0x08, 0x74, 0x5d, 0x4a, 0x82, 0x22, 0x3f, + 0x0e, 0x32, 0xcb, 0xf8, 0x06, 0x7c, 0xa9, 0x1b, 0xf7, 0xd9, 0x5f, 0x5b, 0x1b, 0xe9, 0xf8, 0x1c, + 0xfa, 0xa6, 0xfd, 0x7d, 0x5e, 0x0b, 0x69, 0xa6, 0x67, 0x66, 0xe3, 0x3c, 0xfa, 0x99, 0x4b, 0x26, + 0xd7, 0xeb, 0x3d, 0xf5, 
0xb6, 0x7b, 0x8a, 0xd6, 0x07, 0x8a, 0x36, 0x07, 0x8a, 0x3e, 0x0f, 0x14, + 0xbd, 0xb7, 0xd4, 0xdb, 0xb4, 0xd4, 0xdb, 0xb6, 0xd4, 0x7b, 0x38, 0xed, 0xb6, 0xfa, 0x9a, 0x76, + 0x60, 0x4d, 0x66, 0x3d, 0xbb, 0xae, 0xab, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc1, 0xd9, 0x33, + 0xae, 0xf5, 0x01, 0x00, 0x00, +} + +func (m *Drop) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Drop) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Drop) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Active { + i-- + if m.Active { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + { + size := m.Product.Size() + i -= size + if _, err := m.Product.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintDrop(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + { + size := m.Drops.Size() + i -= size + if _, err := m.Drops.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintDrop(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if len(m.Pair) > 0 { + i -= len(m.Pair) + copy(dAtA[i:], m.Pair) + i = encodeVarintDrop(dAtA, i, uint64(len(m.Pair))) + i-- + dAtA[i] = 0x1a + } + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintDrop(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0x12 + } + if m.Uid != 0 { + i = encodeVarintDrop(dAtA, i, uint64(m.Uid)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Drops) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Drops) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Drops) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size := m.Sum.Size() + i -= size + if _, err := m.Sum.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintDrop(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Uids) > 0 { + dAtA2 := make([]byte, len(m.Uids)*10) + var j1 int + for _, num := range m.Uids { + for num >= 1<<7 { + dAtA2[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA2[j1] = uint8(num) + j1++ + } + i -= j1 + copy(dAtA[i:], dAtA2[:j1]) + i = encodeVarintDrop(dAtA, i, uint64(j1)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DropPairs) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DropPairs) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DropPairs) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Pairs) > 0 { + for iNdEx := len(m.Pairs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Pairs[iNdEx]) + copy(dAtA[i:], m.Pairs[iNdEx]) + i = encodeVarintDrop(dAtA, i, uint64(len(m.Pairs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintDrop(dAtA []byte, offset int, v uint64) int { + offset -= sovDrop(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Drop) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Uid != 0 { + n += 1 + sovDrop(uint64(m.Uid)) + } + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovDrop(uint64(l)) + } + l = len(m.Pair) + if l > 0 { + n += 1 + l + sovDrop(uint64(l)) + } + l = m.Drops.Size() + n += 1 + l + 
sovDrop(uint64(l)) + l = m.Product.Size() + n += 1 + l + sovDrop(uint64(l)) + if m.Active { + n += 2 + } + return n +} + +func (m *Drops) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Uids) > 0 { + l = 0 + for _, e := range m.Uids { + l += sovDrop(uint64(e)) + } + n += 1 + sovDrop(uint64(l)) + l + } + l = m.Sum.Size() + n += 1 + l + sovDrop(uint64(l)) + return n +} + +func (m *DropPairs) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Pairs) > 0 { + for _, s := range m.Pairs { + l = len(s) + n += 1 + l + sovDrop(uint64(l)) + } + } + return n +} + +func sovDrop(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozDrop(x uint64) (n int) { + return sovDrop(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Drop) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDrop + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Drop: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Drop: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Uid", wireType) + } + m.Uid = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDrop + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Uid |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowDrop + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDrop + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDrop + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pair", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDrop + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDrop + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDrop + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pair = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Drops", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDrop + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDrop + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDrop + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Drops.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Product", wireType) + } + var stringLen 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDrop + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDrop + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDrop + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Product.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Active", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDrop + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Active = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipDrop(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDrop + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Drops) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDrop + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Drops: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Drops: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType == 0 { + var v 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDrop + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Uids = append(m.Uids, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDrop + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthDrop + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthDrop + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.Uids) == 0 { + m.Uids = make([]uint64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDrop + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Uids = append(m.Uids, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Uids", wireType) + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDrop + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDrop + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDrop + } + if postIndex > l { + return io.ErrUnexpectedEOF + 
} + if err := m.Sum.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDrop(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDrop + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DropPairs) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDrop + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DropPairs: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DropPairs: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pairs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDrop + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDrop + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDrop + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pairs = append(m.Pairs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDrop(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDrop + } + if (iNdEx + skippy) 
> l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipDrop(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDrop + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDrop + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDrop + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthDrop + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupDrop + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthDrop + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthDrop = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowDrop = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupDrop = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/market/types/errors.go b/x/market/types/errors.go index 65906590..6fa0fed4 100644 --- a/x/market/types/errors.go +++ b/x/market/types/errors.go @@ -8,5 +8,46 @@ import ( // x/market module sentinel errors var ( - ErrSample = sdkerrors.Register(ModuleName, 1100, "sample 
error") + // ErrInvalidCoins - coin/coins are invalid. + ErrInvalidCoins = sdkerrors.Register(ModuleName, 1, "coins are invalid") + // ErrInsufficientBalance - the user balance is insufficient for the operation. + ErrInsufficientBalance = sdkerrors.Register(ModuleName, 2, "insufficient balance") // nolint: gomnd + // ErrPoolAlreadyExists - the pool already exists. + ErrPoolAlreadyExists = sdkerrors.Register(ModuleName, 3, "the pool already exists") // nolint: gomnd + // ErrPoolNotFound - the pool not found. + ErrPoolNotFound = sdkerrors.Register(ModuleName, 4, "the pool not found") // nolint: gomnd + // ErrDropNotFound - the drop not found. + ErrDropNotFound = sdkerrors.Register(ModuleName, 5, "the pool not found") // nolint: gomnd + // ErrNotDrops - not drop owner. + ErrNotDrops = sdkerrors.Register(ModuleName, 6, "not order owner") // nolint: gomnd + // ErrMemberNotFound - the pool member not found. + ErrMemberNotFound = sdkerrors.Register(ModuleName, 7, "the pool member not found") // nolint: gomnd + // ErrInvalidDropAmount - the drop amount is invalid. + ErrInvalidDropAmount = sdkerrors.Register(ModuleName, 8, "invalid drop amount") // nolint: gomnd + // ErrInvalidDenomsPair - invalid denoms pair. + ErrInvalidDenomsPair = sdkerrors.Register(ModuleName, 9, "invalid demos pair") // nolint: gomnd + // ErrInvalidOrder - the order is invalid. + ErrInvalidOrder = sdkerrors.Register(ModuleName, 10, "invalid order") // nolint: gomnd + // ErrOrderNotFound - the order not found. + ErrOrderNotFound = sdkerrors.Register(ModuleName, 11, "order not found") // nolint: gomnd + // ErrNotOrderOwner - not order owner.
+ ErrNotOrderOwner = sdkerrors.Register(ModuleName, 12, "not order owner") // nolint: gomnd + // ErrInvalidOrderAmount - invalid bid amount + ErrInvalidOrderAmount = sdkerrors.Register(ModuleName, 13, "invalid order amount") // nolint: gomnd + // ErrSlippageTooGreat - slippage over limit + ErrSlippageTooGreat = sdkerrors.Register(ModuleName, 14, "slippage too great") // nolint: gomnd + // ErrPoolInactive - the pool is not active and has 0 drops. + ErrPoolInactive = sdkerrors.Register(ModuleName, 15, "the pool is inactive") // nolint: gomnd + // ErrDropSumNotFound - the drop sum of owner with drop not found + ErrDropSumNotFound = sdkerrors.Register(ModuleName, 16, "drop sum not found") // nolint: gomnd + // ErrAmtZero - Not greater than zero + ErrAmtZero = sdkerrors.Register(ModuleName, 17, "payment amount equal to zero") // nolint: gomnd + // ErrMemberBalanceZero - member balance is zero (pool empty) + ErrMemberBalanceZero = sdkerrors.Register(ModuleName, 18, "member balance zero - pool empty") // nolint: gomnd + // ErrDenomMismatch - mismatch of denoms entered + ErrDenomMismatch = sdkerrors.Register(ModuleName, 19, "denoms are not matching") // nolint: gomnd + // ErrLiquidityLow - liquidity is too low + ErrLiquidityLow = sdkerrors.Register(ModuleName, 20, "liquidity too low") // nolint: gomnd + // ErrProductInvalid - product less than beginning product + ErrProductInvalid = sdkerrors.Register(ModuleName, 21, "product less than beg") // nolint: gomnd ) diff --git a/x/market/types/events.go b/x/market/types/events.go new file mode 100644 index 00000000..e34912e9 --- /dev/null +++ b/x/market/types/events.go @@ -0,0 +1,48 @@ +package types + +// Market module event types +const ( + EventTypeCreatePool = "create_pool" + EventTypeUpdatePool = "update_pool" + + EventTypeCreateBurnings = "create_burnings" + EventTypeUpdateBurnings = "update_burnings" + EventTypeBurn = "burn" + + EventTypeCreateMember = "new_member" + EventTypeUpdateMember = "update_member" + + 
EventTypeCreateDrop = "create_drop" + EventTypeUpdateDrop = "update_drop" + EventTypeRedeemDrop = "redeem_drop" + + EventTypeOrder = "order" + + AttributeKeyActive = "active" + AttributeKeyAmount = "amount" + AttributeKeyBalance = "balance" + AttributeKeyDenom = "denom" + // Alpha-numeric ordered denom for pool pair + AttributeKeyDenom1 = "denom_1" + AttributeKeyDenom2 = "denom_2" + // Sequenced denom pair to identify member + AttributeKeyDenomA = "denom_a" + AttributeKeyDenomB = "denom_b" + AttributeKeyDenomAsk = "denom_ask" + AttributeKeyDenomBid = "denom_bid" + AttributeKeyDrops = "drops" + AttributeKeyLeaders = "leaders" + AttributeKeyLimit = "limit" + AttributeKeyNext = "next" + AttributeKeyOwner = "owner" + AttributeKeyOrderType = "order_type" + AttributeKeyPair = "pair" + AttributeKeyPrev = "prev" + AttributeKeyProduct = "product" + AttributeKeyRate = "rate" + AttributeKeyStatus = "status" + AttributeKeyStop = "stop" + AttributeKeyBeginTime = "begin-time" + AttributeKeyUpdateTime = "update-time" + AttributeKeyUid = "uid" +) diff --git a/x/market/types/expected_keepers.go b/x/market/types/expected_keepers.go index 6aa6e977..fc8b1d9e 100644 --- a/x/market/types/expected_keepers.go +++ b/x/market/types/expected_keepers.go @@ -13,6 +13,11 @@ type AccountKeeper interface { // BankKeeper defines the expected interface needed to retrieve account balances. 
type BankKeeper interface { + BurnCoins(ctx sdk.Context, name string, amt sdk.Coins) error + GetBalance(ctx sdk.Context, addr sdk.AccAddress, denom string) sdk.Coin + GetAllBalances(ctx sdk.Context, addr sdk.AccAddress) sdk.Coins SpendableCoins(ctx sdk.Context, addr sdk.AccAddress) sdk.Coins - // Methods imported from bank should be defined here + SendCoinsFromAccountToModule(ctx sdk.Context, senderAddr sdk.AccAddress, recipientModule string, amt sdk.Coins) error + SendCoinsFromModuleToAccount(ctx sdk.Context, senderModule string, recipientAddr sdk.AccAddress, amt sdk.Coins) error + MintCoins(ctx sdk.Context, moduleName string, amt sdk.Coins) error } diff --git a/x/market/types/genesis.go b/x/market/types/genesis.go index 8df94bae..d5a23618 100644 --- a/x/market/types/genesis.go +++ b/x/market/types/genesis.go @@ -1,7 +1,7 @@ package types import ( -// this line is used by starport scaffolding # genesis/types/import + "fmt" ) // DefaultIndex is the default capability global index @@ -10,6 +10,11 @@ const DefaultIndex uint64 = 1 // DefaultGenesis returns the default Capability genesis state func DefaultGenesis() *GenesisState { return &GenesisState{ + PoolList: []Pool{}, + DropList: []Drop{}, + MemberList: []Member{}, + BurningsList: []Burnings{}, + OrderList: []Order{}, // this line is used by starport scaffolding # genesis/types/default Params: DefaultParams(), } @@ -18,6 +23,57 @@ func DefaultGenesis() *GenesisState { // Validate performs basic genesis state validation returning an error upon any // failure. 
func (gs GenesisState) Validate() error { + // Check for duplicated index in pool + poolIndexMap := make(map[string]struct{}) + + for _, elem := range gs.PoolList { + index := string(PoolKey(elem.Pair)) + if _, ok := poolIndexMap[index]; ok { + return fmt.Errorf("duplicated index for pool") + } + poolIndexMap[index] = struct{}{} + } + // Check for duplicated index in drop + dropIndexMap := make(map[string]struct{}) + + for _, elem := range gs.DropList { + index := string(DropKey(elem.Uid)) + if _, ok := dropIndexMap[index]; ok { + return fmt.Errorf("duplicated index for drop") + } + dropIndexMap[index] = struct{}{} + } + // Check for duplicated index in member + memberIndexMap := make(map[string]struct{}) + + for _, elem := range gs.MemberList { + index := string(MemberKey(elem.DenomA, elem.DenomB)) + if _, ok := memberIndexMap[index]; ok { + return fmt.Errorf("duplicated index for member") + } + memberIndexMap[index] = struct{}{} + } + // Check for duplicated index in burnings + burningsIndexMap := make(map[string]struct{}) + + for _, elem := range gs.BurningsList { + index := string(BurningsKey(elem.Denom)) + if _, ok := burningsIndexMap[index]; ok { + return fmt.Errorf("duplicated index for burnings") + } + burningsIndexMap[index] = struct{}{} + } + // Check for duplicated index in order + orderIndexMap := make(map[string]struct{}) + + for _, elem := range gs.OrderList { + index := string(OrderKey(elem.Uid)) + if _, ok := orderIndexMap[index]; ok { + return fmt.Errorf("duplicated index for order") + } + orderIndexMap[index] = struct{}{} + } + // this line is used by starport scaffolding # genesis/types/validate return gs.Params.Validate() diff --git a/x/market/types/genesis.pb.go b/x/market/types/genesis.pb.go index 841db80d..ed5c5651 100644 --- a/x/market/types/genesis.pb.go +++ b/x/market/types/genesis.pb.go @@ -25,7 +25,12 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package // GenesisState defines the market module's genesis 
state. type GenesisState struct { - Params Params `protobuf:"bytes,1,opt,name=params,proto3" json:"params"` + Params Params `protobuf:"bytes,1,opt,name=params,proto3" json:"params"` + PoolList []Pool `protobuf:"bytes,2,rep,name=poolList,proto3" json:"poolList"` + DropList []Drop `protobuf:"bytes,3,rep,name=dropList,proto3" json:"dropList"` + MemberList []Member `protobuf:"bytes,4,rep,name=memberList,proto3" json:"memberList"` + BurningsList []Burnings `protobuf:"bytes,5,rep,name=burningsList,proto3" json:"burningsList"` + OrderList []Order `protobuf:"bytes,6,rep,name=orderList,proto3" json:"orderList"` } func (m *GenesisState) Reset() { *m = GenesisState{} } @@ -68,25 +73,70 @@ func (m *GenesisState) GetParams() Params { return Params{} } +func (m *GenesisState) GetPoolList() []Pool { + if m != nil { + return m.PoolList + } + return nil +} + +func (m *GenesisState) GetDropList() []Drop { + if m != nil { + return m.DropList + } + return nil +} + +func (m *GenesisState) GetMemberList() []Member { + if m != nil { + return m.MemberList + } + return nil +} + +func (m *GenesisState) GetBurningsList() []Burnings { + if m != nil { + return m.BurningsList + } + return nil +} + +func (m *GenesisState) GetOrderList() []Order { + if m != nil { + return m.OrderList + } + return nil +} + func init() { - proto.RegisterType((*GenesisState)(nil), "market.market.GenesisState") + proto.RegisterType((*GenesisState)(nil), "pendulumlabs.market.market.GenesisState") } func init() { proto.RegisterFile("market/genesis.proto", fileDescriptor_198e3e6486717af4) } var fileDescriptor_198e3e6486717af4 = []byte{ - // 164 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xc9, 0x4d, 0x2c, 0xca, - 0x4e, 0x2d, 0xd1, 0x4f, 0x4f, 0xcd, 0x4b, 0x2d, 0xce, 0x2c, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, - 0x17, 0xe2, 0x85, 0x88, 0xea, 0x41, 0x28, 0x29, 0x91, 0xf4, 0xfc, 0xf4, 0x7c, 0xb0, 0x8c, 0x3e, - 0x88, 0x05, 0x51, 0x24, 0x25, 0x0c, 0xd5, 0x5a, 
0x90, 0x58, 0x94, 0x98, 0x0b, 0xd5, 0xa9, 0xe4, - 0xcc, 0xc5, 0xe3, 0x0e, 0x31, 0x2a, 0xb8, 0x24, 0xb1, 0x24, 0x55, 0xc8, 0x98, 0x8b, 0x0d, 0x22, - 0x2f, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x6d, 0x24, 0xaa, 0x87, 0x62, 0xb4, 0x5e, 0x00, 0x58, 0xd2, - 0x89, 0xe5, 0xc4, 0x3d, 0x79, 0x86, 0x20, 0xa8, 0x52, 0x27, 0xfd, 0x13, 0x8f, 0xe4, 0x18, 0x2f, - 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0x71, 0xc2, 0x63, 0x39, 0x86, 0x0b, 0x8f, 0xe5, 0x18, - 0x6e, 0x3c, 0x96, 0x63, 0x88, 0x12, 0x85, 0xda, 0x59, 0xa1, 0x0f, 0x65, 0x94, 0x54, 0x16, 0xa4, - 0x16, 0x27, 0xb1, 0x81, 0x2d, 0x37, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x20, 0xd5, 0xb5, 0xd5, - 0xce, 0x00, 0x00, 0x00, + // 328 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0xd2, 0xcb, 0x4a, 0xf3, 0x40, + 0x14, 0x07, 0xf0, 0xe4, 0x6b, 0xbf, 0xa2, 0xd3, 0x6e, 0x1c, 0x2b, 0x94, 0x2c, 0xc6, 0x58, 0x5c, + 0x74, 0x95, 0x40, 0x7d, 0x01, 0x09, 0x8a, 0x2e, 0xbc, 0xa1, 0x3b, 0x77, 0x09, 0x19, 0x42, 0x30, + 0xc9, 0x0c, 0x93, 0x09, 0xe8, 0x5b, 0xf8, 0x3a, 0xbe, 0x41, 0x97, 0x5d, 0xba, 0x12, 0x49, 0x5e, + 0x44, 0x72, 0x72, 0xc6, 0x4b, 0xc1, 0xb8, 0xca, 0x70, 0xf8, 0xff, 0x7f, 0xcc, 0x25, 0x64, 0x9a, + 0x87, 0xea, 0x81, 0x6b, 0x3f, 0xe1, 0x05, 0x2f, 0xd3, 0xd2, 0x93, 0x4a, 0x68, 0x41, 0x1d, 0xc9, + 0x8b, 0xb8, 0xca, 0xaa, 0x3c, 0x0b, 0xa3, 0xd2, 0xeb, 0x22, 0xf8, 0x71, 0xa6, 0x89, 0x48, 0x04, + 0xc4, 0xfc, 0x76, 0xd5, 0x35, 0x9c, 0x5d, 0x74, 0x64, 0xa8, 0xc2, 0x1c, 0x19, 0x67, 0xc7, 0x0c, + 0x85, 0xc8, 0x36, 0x46, 0xb1, 0x12, 0x72, 0xa3, 0x9a, 0xf3, 0x3c, 0xe2, 0x0a, 0x87, 0x7b, 0x38, + 0x8c, 0x2a, 0x55, 0xa4, 0x45, 0x62, 0x44, 0x8a, 0x63, 0xa1, 0x62, 0x13, 0x9d, 0xbf, 0x0c, 0xc8, + 0xe4, 0xac, 0xdb, 0xfe, 0x9d, 0x0e, 0x35, 0xa7, 0xc7, 0x64, 0xd4, 0x6d, 0x63, 0x66, 0xbb, 0xf6, + 0x62, 0xbc, 0x9c, 0x7b, 0xbf, 0x1f, 0xc7, 0xbb, 0x81, 0x64, 0x30, 0x5c, 0xbd, 0xed, 0x5b, 0xb7, + 0xd8, 0xa3, 0x01, 0xd9, 0x6a, 0xf7, 0x7c, 0x91, 0x96, 0x7a, 0xf6, 0xcf, 0x1d, 0x2c, 0xc6, 0x4b, + 0xb7, 0xd7, 
0x10, 0x22, 0x43, 0xe1, 0xb3, 0xd7, 0x1a, 0xed, 0x21, 0xc1, 0x18, 0xfc, 0x6d, 0x9c, + 0x28, 0x21, 0x8d, 0x61, 0x7a, 0xf4, 0x9c, 0x90, 0xee, 0x56, 0x40, 0x19, 0x82, 0xd2, 0x7b, 0x9a, + 0x4b, 0x48, 0xa3, 0xf3, 0xad, 0x4b, 0xaf, 0xc8, 0xc4, 0x5c, 0x25, 0x58, 0xff, 0xc1, 0x3a, 0xec, + 0xb3, 0x02, 0xcc, 0xa3, 0xf6, 0xa3, 0x4f, 0x4f, 0xc9, 0x36, 0xbc, 0x01, 0x60, 0x23, 0xc0, 0x0e, + 0xfa, 0xb0, 0xeb, 0x36, 0x8c, 0xd2, 0x57, 0x33, 0xf0, 0x57, 0x35, 0xb3, 0xd7, 0x35, 0xb3, 0xdf, + 0x6b, 0x66, 0x3f, 0x37, 0xcc, 0x5a, 0x37, 0xcc, 0x7a, 0x6d, 0x98, 0x75, 0x6f, 0x7e, 0x80, 0x47, + 0x1f, 0x17, 0xfa, 0x49, 0xf2, 0x32, 0x1a, 0xc1, 0x9b, 0x1f, 0x7d, 0x04, 0x00, 0x00, 0xff, 0xff, + 0x3d, 0x90, 0xa2, 0xce, 0xb8, 0x02, 0x00, 0x00, } func (m *GenesisState) Marshal() (dAtA []byte, err error) { @@ -109,6 +159,76 @@ func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.OrderList) > 0 { + for iNdEx := len(m.OrderList) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.OrderList[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if len(m.BurningsList) > 0 { + for iNdEx := len(m.BurningsList) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.BurningsList[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if len(m.MemberList) > 0 { + for iNdEx := len(m.MemberList) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MemberList[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.DropList) > 0 { + for iNdEx := len(m.DropList) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.DropList[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.PoolList) > 0 { + for iNdEx := len(m.PoolList) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.PoolList[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } { size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -141,6 +261,36 @@ func (m *GenesisState) Size() (n int) { _ = l l = m.Params.Size() n += 1 + l + sovGenesis(uint64(l)) + if len(m.PoolList) > 0 { + for _, e := range m.PoolList { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + if len(m.DropList) > 0 { + for _, e := range m.DropList { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + if len(m.MemberList) > 0 { + for _, e := range m.MemberList { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + if len(m.BurningsList) > 0 { + for _, e := range m.BurningsList { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + if len(m.OrderList) > 0 { + for _, e := range m.OrderList { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } return n } @@ -212,6 +362,176 @@ func (m *GenesisState) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PoolList", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PoolList = append(m.PoolList, Pool{}) + if err := m.PoolList[len(m.PoolList)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err 
+ } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DropList", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DropList = append(m.DropList, Drop{}) + if err := m.DropList[len(m.DropList)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MemberList", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MemberList = append(m.MemberList, Member{}) + if err := m.MemberList[len(m.MemberList)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BurningsList", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BurningsList = append(m.BurningsList, Burnings{}) + if err := m.BurningsList[len(m.BurningsList)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OrderList", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OrderList = append(m.OrderList, Order{}) + if err := m.OrderList[len(m.OrderList)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenesis(dAtA[iNdEx:]) diff --git a/x/market/types/genesis_test.go b/x/market/types/genesis_test.go index 43c99a61..bfa838d8 100644 --- a/x/market/types/genesis_test.go +++ b/x/market/types/genesis_test.go @@ -3,8 +3,10 @@ package types_test import ( "testing" - "github.com/stretchr/testify/require" "market/x/market/types" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/stretchr/testify/require" ) func TestGenesisState_Validate(t *testing.T) { @@ -19,13 +21,233 @@ func TestGenesisState_Validate(t *testing.T) { valid: true, }, { - desc: "valid genesis state", + desc: "valid genesis state", genState: &types.GenesisState{ - + Params: types.DefaultParams(), + PoolList: []types.Pool{ + { + Pair: "0", + Denom1: "0", + Denom2: "0", + Leaders: []*types.Leader{ + { + Address: "0", + Drops: sdk.ZeroInt(), + }, + }, + Drops: sdk.NewIntFromUint64(uint64(0)), + }, + { + Pair: "1", + Denom1: "1", + Denom2: "1", + Leaders: 
[]*types.Leader{ + { + Address: "1", + Drops: sdk.NewInt(1), + }, + }, + Drops: sdk.NewIntFromUint64(uint64(1)), + }, + }, + DropList: []types.Drop{ + { + Uid: 0, + Owner: "0", + Pair: "0", + Drops: sdk.NewIntFromUint64(uint64(0)), + Product: sdk.NewIntFromUint64(uint64(0)), + Active: true, + }, + { + Uid: 1, + Owner: "1", + Pair: "1", + Drops: sdk.NewIntFromUint64(uint64(1)), + Product: sdk.NewIntFromUint64(uint64(0)), + Active: true, + }, + }, + MemberList: []types.Member{ + { + Pair: "0", + DenomA: "0", + DenomB: "0", + Balance: sdk.NewIntFromUint64(uint64(0)), + Previous: sdk.NewIntFromUint64(uint64(0)), + Limit: uint64(0), + Stop: uint64(0), + }, + { + Pair: "1", + DenomA: "1", + DenomB: "1", + Balance: sdk.NewIntFromUint64(uint64(1)), + Previous: sdk.NewIntFromUint64(uint64(1)), + Limit: uint64(1), + Stop: uint64(1), + }, + }, + BurningsList: []types.Burnings{ + { + Denom: "0", + Amount: sdk.NewIntFromUint64(uint64(0)), + }, + { + Denom: "1", + Amount: sdk.NewIntFromUint64(uint64(1)), + }, + }, + OrderList: []types.Order{ + { + Uid: 0, + Owner: "0", + Status: "active", + OrderType: "stop", + DenomAsk: "0", + DenomBid: "0", + Amount: sdk.NewIntFromUint64(uint64(0)), + Rate: []sdk.Int{sdk.NewInt(int64(0)), sdk.NewInt(int64(0))}, + Prev: uint64(0), + Next: uint64(0), + }, + { + Uid: 1, + Owner: "1", + Status: "active", + OrderType: "limit", + DenomAsk: "1", + DenomBid: "1", + Amount: sdk.NewIntFromUint64(uint64(1)), + Rate: []sdk.Int{sdk.NewInt(int64(1)), sdk.NewInt(int64(1))}, + Prev: uint64(1), + Next: uint64(1), + }, + }, // this line is used by starport scaffolding # types/genesis/validField }, valid: true, }, + { + desc: "duplicated pool", + genState: &types.GenesisState{ + PoolList: []types.Pool{ + { + Pair: "0", + Denom1: "0", + Denom2: "0", + Leaders: []*types.Leader{ + { + Address: "0", + Drops: sdk.ZeroInt(), + }, + }, + Drops: sdk.NewIntFromUint64(uint64(0)), + }, + { + Pair: "0", + Denom1: "0", + Denom2: "0", + Leaders: []*types.Leader{ + { + 
Address: "0", + Drops: sdk.ZeroInt(), + }, + }, + Drops: sdk.NewIntFromUint64(uint64(0)), + }, + }, + }, + valid: false, + }, + { + desc: "duplicated drop", + genState: &types.GenesisState{ + DropList: []types.Drop{ + { + Uid: 0, + Owner: "0", + Pair: "0", + Drops: sdk.NewIntFromUint64(uint64(0)), + }, + { + Uid: 0, + Owner: "0", + Pair: "0", + Drops: sdk.NewIntFromUint64(uint64(0)), + }, + }, + }, + valid: false, + }, + { + desc: "duplicated member", + genState: &types.GenesisState{ + MemberList: []types.Member{ + { + Pair: "0", + DenomA: "0", + DenomB: "0", + Balance: sdk.NewIntFromUint64(uint64(0)), + }, + { + Pair: "0", + DenomA: "0", + DenomB: "0", + Balance: sdk.NewIntFromUint64(uint64(0)), + }, + }, + }, + valid: false, + }, + { + desc: "duplicated burnings", + genState: &types.GenesisState{ + BurningsList: []types.Burnings{ + { + Denom: "0", + Amount: sdk.NewIntFromUint64(uint64(0)), + }, + { + Denom: "0", + Amount: sdk.NewIntFromUint64(uint64(0)), + }, + }, + }, + valid: false, + }, + { + desc: "duplicated order", + genState: &types.GenesisState{ + OrderList: []types.Order{ + { + Uid: 0, + Owner: "0", + Status: "active", + OrderType: "0", + DenomAsk: "0", + DenomBid: "0", + Amount: sdk.NewIntFromUint64(uint64(0)), + Rate: []sdk.Int{sdk.NewInt(int64(0)), sdk.NewInt(int64(0))}, + Prev: uint64(0), + Next: uint64(0), + }, + { + Uid: 0, + Owner: "0", + Status: "active", + OrderType: "0", + DenomAsk: "0", + DenomBid: "0", + Amount: sdk.NewIntFromUint64(uint64(0)), + Rate: []sdk.Int{sdk.NewInt(int64(0)), sdk.NewInt(int64(0))}, + Prev: uint64(0), + Next: uint64(0), + }, + }, + }, + valid: false, + }, // this line is used by starport scaffolding # types/genesis/testcase } { t.Run(tc.desc, func(t *testing.T) { diff --git a/x/market/types/key_burnings.go b/x/market/types/key_burnings.go new file mode 100644 index 00000000..ea7e1d13 --- /dev/null +++ b/x/market/types/key_burnings.go @@ -0,0 +1,27 @@ +package types + +import "encoding/binary" + +var _ binary.ByteOrder 
+ +const ( + // BurningsKeyPrefix is the prefix to retrieve all Burnings + BurningsKeyPrefix = "Burnings/value/" +) + +// BurningsKey returns the store key to retrieve a Burnings from the index fields +func BurningsKey( + denom string, +) []byte { + var key []byte + + denomBytes := []byte(denom) + key = append(key, denomBytes...) + key = append(key, []byte("/")...) + + return key +} + +const ( + BurnedKey = "Burned-value-" +) diff --git a/x/market/types/key_drop.go b/x/market/types/key_drop.go new file mode 100644 index 00000000..9e508738 --- /dev/null +++ b/x/market/types/key_drop.go @@ -0,0 +1,57 @@ +package types + +import "encoding/binary" + +var _ binary.ByteOrder + +const ( + // DropKeyPrefix is the prefix to retrieve all Drop + DropKeyPrefix = "Drop/value/" + // DropsKeyPrefix is the prefix to retrieve all Owner of Drops + DropsKeyPrefix = "Drop/Owner/Pair/" + // DropPairsKeyPrefix is the prefix to retrieve all Pairs an Owner owns Drops + DropPairsKeyPrefix = "Drop/Owner/" +) + +// DropKey returns the store key to retrieve a Drop from the index fields +func DropKey( + uid uint64, +) []byte { + var key []byte + + uidBytes := make([]byte, 8) + binary.BigEndian.PutUint64(uidBytes, uid) + key = append(key, uidBytes...) + key = append(key, []byte("/")...) + + return key +} + +// DropKey returns the store key to retrieve a Drop from the index fields +func DropsKey( + owner string, + pair string, +) []byte { + var key []byte + + ownerBytes := []byte(owner) + pairBytes := []byte(pair) + key = append(key, ownerBytes...) + key = append(key, pairBytes...) + key = append(key, []byte("/")...) + + return key +} + +// DropKey returns the store key to retrieve all Pairs and Owner has Drops +func DropPairsKey( + owner string, +) []byte { + var key []byte + + ownerBytes := []byte(owner) + key = append(key, ownerBytes...) + key = append(key, []byte("/")...) 
+ + return key +} diff --git a/x/market/types/key_member.go b/x/market/types/key_member.go new file mode 100644 index 00000000..6a27487f --- /dev/null +++ b/x/market/types/key_member.go @@ -0,0 +1,92 @@ +package types + +import ( + "encoding/binary" + //github_com_cosmos_cosmos_sdk_types "github.com/cosmos/cosmos-sdk/types" +) + +var _ binary.ByteOrder + +const ( + // MemberKeyPrefix is the prefix to retrieve all Member + MemberKeyPrefix = "Member/value/" +) + +// MemberKey returns the store key to retrieve a Member from the index fields +func MemberSetKey( + denomA string, + denomB string, + //balance github_com_cosmos_cosmos_sdk_types.Int, + //previous github_com_cosmos_cosmos_sdk_types.Int, + //limit uint64, + //stop uint64, + //protect uint64, + +) []byte { + var key []byte + + denomABytes := []byte(denomA) + key = append(key, denomABytes...) + key = append(key, []byte("/")...) + + denomBBytes := []byte(denomB) + key = append(key, denomBBytes...) + key = append(key, []byte("/")...) + /* + limitBytes := make([]byte, 8) + binary.BigEndian.PutUint64(limitBytes, limit) + key = append(key, limitBytes...) + key = append(key, []byte("/")...) + + stopBytes := make([]byte, 8) + binary.BigEndian.PutUint64(stopBytes, stop) + key = append(key, stopBytes...) + key = append(key, []byte("/")...) + + protectBytes := make([]byte, 8) + binary.BigEndian.PutUint64(protectBytes, protect) + key = append(key, stopBytes...) + key = append(key, []byte("/")...) + */ + return key +} + +// MemberKey returns the store key to retrieve a Member from the index fields +func MemberKey( + denomA string, + denomB string, +) []byte { + var key []byte + + denomABytes := []byte(denomA) + key = append(key, denomABytes...) + key = append(key, []byte("/")...) + + denomBBytes := []byte(denomB) + key = append(key, denomBBytes...) + key = append(key, []byte("/")...) 
+ + return key +} + +// MemberKey returns the store key to retrieve a Member from the index fields +func MemberKeyPair( + pair string, + denomA string, + denomB string, +) []byte { + var key []byte + + pairBytes := []byte(pair) + key = append(key, pairBytes...) + key = append(key, []byte("/")...) + + denomABytes := []byte(denomA) + key = append(key, denomABytes...) + key = append(key, []byte("/")...) + + denomBBytes := []byte(denomB) + key = append(key, denomBBytes...) + key = append(key, []byte("/")...) + return key +} diff --git a/x/market/types/key_order.go b/x/market/types/key_order.go new file mode 100644 index 00000000..32078869 --- /dev/null +++ b/x/market/types/key_order.go @@ -0,0 +1,38 @@ +package types + +import "encoding/binary" + +var _ binary.ByteOrder + +const ( + // OrderKeyPrefix is the prefix to retrieve all Order + OrderKeyPrefix = "Order/value/" + OrderOwnerKeyPrefix = "Order/owner/" +) + +// OrderKey returns the store key to retrieve a Order from the index fields +func OrderKey( + uid uint64, +) []byte { + var key []byte + + uidBytes := make([]byte, 8) + binary.BigEndian.PutUint64(uidBytes, uid) + key = append(key, uidBytes...) + key = append(key, []byte("/")...) + + return key +} + +// OrdersKey returns the store key to retrieve and Owner's Active Orders +func OrderOwnerKey( + owner string, +) []byte { + var key []byte + + ownerBytes := []byte(owner) + key = append(key, ownerBytes...) + key = append(key, []byte("/")...) 
+ + return key +} diff --git a/x/market/types/key_pool.go b/x/market/types/key_pool.go new file mode 100644 index 00000000..d7826540 --- /dev/null +++ b/x/market/types/key_pool.go @@ -0,0 +1,37 @@ +package types + +import "encoding/binary" + +var _ binary.ByteOrder + +const ( + // PoolKeyPrefix is the prefix to retrieve all Pool + PoolKeyPrefix = "Pool/value/" +) + +// PoolKey returns the store key to retrieve a Pool +func PoolKey( + pair string, +) []byte { + var key []byte + + pairBytes := []byte(pair) + key = append(key, pairBytes...) + key = append(key, []byte("/")...) + + return key +} + +// PoolSetKey returns the store key to set a Pool with the index fields +func PoolSetKey( + pair string, + +) []byte { + var key []byte + + pairBytes := []byte(pair) + key = append(key, pairBytes...) + key = append(key, []byte("/")...) + + return key +} diff --git a/x/market/types/key_volume.go b/x/market/types/key_volume.go new file mode 100644 index 00000000..2571b18e --- /dev/null +++ b/x/market/types/key_volume.go @@ -0,0 +1,26 @@ +package types + +import ( + "encoding/binary" + //github_com_cosmos_cosmos_sdk_types "github.com/cosmos/cosmos-sdk/types" +) + +var _ binary.ByteOrder + +const ( + // MemberKeyPrefix is the prefix to retrieve all Volumes + VolumeKeyPrefix = "Volume/value/" +) + +// MemberKey returns the store key to retrieve a Volume +func VolumeKey( + denom string, +) []byte { + var key []byte + + denomBytes := []byte(denom) + key = append(key, denomBytes...) + key = append(key, []byte("/")...) 
+ + return key +} diff --git a/x/market/types/keys.go b/x/market/types/keys.go index 9fe0ff9e..1c0eed8b 100644 --- a/x/market/types/keys.go +++ b/x/market/types/keys.go @@ -20,3 +20,7 @@ const ( func KeyPrefix(p string) []byte { return []byte(p) } + +const ( + UidKey = "Uid-value-" +) diff --git a/x/market/types/market_math.go b/x/market/types/market_math.go new file mode 100644 index 00000000..b3e17c92 --- /dev/null +++ b/x/market/types/market_math.go @@ -0,0 +1,52 @@ +package types + +import ( + "strconv" + + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +func EQ(a []sdk.Int, b []sdk.Int) bool { + return (a[0].Mul(b[1])).Equal(a[1].Mul(b[0])) +} + +func GT(a []sdk.Int, b []sdk.Int) bool { + return (a[0].Mul(b[1])).GT(a[1].Mul(b[0])) +} + +func GTE(a []sdk.Int, b []sdk.Int) bool { + return (a[0].Mul(b[1])).GTE(a[1].Mul(b[0])) +} + +func LT(a []sdk.Int, b []sdk.Int) bool { + return (a[0].Mul(b[1])).LT(a[1].Mul(b[0])) +} + +func LTE(a []sdk.Int, b []sdk.Int) bool { + return (a[0].Mul(b[1])).LTE(a[1].Mul(b[0])) +} + +func RateStringToInt(rateString []string) ([]sdk.Int, error) { + var rateUint64 [2]uint64 + var err error + + // Rate[0] needs to fit into uint64 to avoid numerical errors + rateUint64[0], err = strconv.ParseUint(rateString[0], 10, 64) + if err != nil { + return nil, sdkerrors.Wrap(sdkerrors.ErrInvalidRequest, "invalid rate format") + } + + // Rate[1] needs to fit into uint64 to avoid numerical errors + rateUint64[1], err = strconv.ParseUint(rateString[1], 10, 64) + if err != nil { + return nil, sdkerrors.Wrap(sdkerrors.ErrInvalidRequest, "invalid rate format") + } + + var rate []sdk.Int + + rate = append(rate, sdk.NewIntFromUint64(rateUint64[0])) + rate = append(rate, sdk.NewIntFromUint64(rateUint64[1])) + + return rate, nil +} diff --git a/x/market/types/member.pb.go b/x/market/types/member.pb.go new file mode 100644 index 00000000..8a20cf28 --- /dev/null +++ b/x/market/types/member.pb.go @@ -0,0 
+1,555 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: market/member.proto + +package types + +import ( + fmt "fmt" + github_com_cosmos_cosmos_sdk_types "github.com/cosmos/cosmos-sdk/types" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type Member struct { + Pair string `protobuf:"bytes,1,opt,name=pair,proto3" json:"pair,omitempty"` + DenomA string `protobuf:"bytes,2,opt,name=denomA,proto3" json:"denomA,omitempty"` + DenomB string `protobuf:"bytes,3,opt,name=denomB,proto3" json:"denomB,omitempty"` + Balance github_com_cosmos_cosmos_sdk_types.Int `protobuf:"bytes,4,opt,name=balance,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Int" json:"balance"` + Previous github_com_cosmos_cosmos_sdk_types.Int `protobuf:"bytes,5,opt,name=previous,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Int" json:"previous"` + Limit uint64 `protobuf:"varint,6,opt,name=limit,proto3" json:"limit,omitempty"` + Stop uint64 `protobuf:"varint,7,opt,name=stop,proto3" json:"stop,omitempty"` +} + +func (m *Member) Reset() { *m = Member{} } +func (m *Member) String() string { return proto.CompactTextString(m) } +func (*Member) ProtoMessage() {} +func (*Member) Descriptor() ([]byte, []int) { + return fileDescriptor_f075dd79c09a0a2c, []int{0} +} +func (m *Member) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Member) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + 
return xxx_messageInfo_Member.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Member) XXX_Merge(src proto.Message) { + xxx_messageInfo_Member.Merge(m, src) +} +func (m *Member) XXX_Size() int { + return m.Size() +} +func (m *Member) XXX_DiscardUnknown() { + xxx_messageInfo_Member.DiscardUnknown(m) +} + +var xxx_messageInfo_Member proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Member)(nil), "pendulumlabs.market.market.Member") +} + +func init() { proto.RegisterFile("market/member.proto", fileDescriptor_f075dd79c09a0a2c) } + +var fileDescriptor_f075dd79c09a0a2c = []byte{ + // 284 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x90, 0x3f, 0x4f, 0xf3, 0x30, + 0x10, 0xc6, 0xe3, 0xbe, 0x69, 0xfa, 0xe2, 0xd1, 0x14, 0x64, 0x75, 0x70, 0x2b, 0x06, 0xd4, 0x85, + 0x64, 0x60, 0x60, 0x26, 0x13, 0x20, 0xb1, 0x64, 0x64, 0xcb, 0x1f, 0x2b, 0x44, 0x8d, 0x73, 0x56, + 0xec, 0x20, 0xf8, 0x0e, 0x0c, 0x7c, 0xac, 0x8c, 0x1d, 0x2b, 0x86, 0x8a, 0x26, 0x5f, 0x04, 0xc5, + 0x31, 0x15, 0x33, 0xd3, 0x3d, 0xf7, 0xf3, 0x3d, 0x67, 0xdd, 0x83, 0x4f, 0x45, 0x5c, 0x6f, 0xb8, + 0x0e, 0x04, 0x17, 0x09, 0xaf, 0x7d, 0x59, 0x83, 0x06, 0xb2, 0x90, 0xbc, 0xca, 0x9a, 0xb2, 0x11, + 0x65, 0x9c, 0x28, 0x7f, 0x9c, 0xb0, 0x65, 0x31, 0xcf, 0x21, 0x07, 0x33, 0x16, 0x0c, 0x6a, 0x74, + 0x5c, 0xbc, 0x4f, 0xb0, 0xf7, 0x68, 0x56, 0x10, 0x82, 0x5d, 0x19, 0x17, 0x35, 0x45, 0x2b, 0xb4, + 0x3e, 0x89, 0x8c, 0x26, 0xe7, 0xd8, 0xcb, 0x78, 0x05, 0xe2, 0x96, 0x4e, 0x0c, 0xb5, 0xdd, 0x91, + 0x87, 0xf4, 0xdf, 0x2f, 0x1e, 0x92, 0x3b, 0x3c, 0x4b, 0xe2, 0x32, 0xae, 0x52, 0x4e, 0xdd, 0xe1, + 0x21, 0xf4, 0xdb, 0xfd, 0xd2, 0xf9, 0xdc, 0x2f, 0x2f, 0xf3, 0x42, 0x3f, 0x37, 0x89, 0x9f, 0x82, + 0x08, 0x52, 0x50, 0x02, 0x94, 0x2d, 0x57, 0x2a, 0xdb, 0x04, 0xfa, 0x4d, 0x72, 0xe5, 0xdf, 0x57, + 0x3a, 0xfa, 0xb1, 0x93, 0x07, 0xfc, 0x5f, 0xd6, 0xfc, 
0xa5, 0x80, 0x46, 0xd1, 0xe9, 0x9f, 0x56, + 0x1d, 0xfd, 0x64, 0x8e, 0xa7, 0x65, 0x21, 0x0a, 0x4d, 0xbd, 0x15, 0x5a, 0xbb, 0xd1, 0xd8, 0x0c, + 0xf7, 0x2a, 0x0d, 0x92, 0xce, 0x0c, 0x34, 0x3a, 0xbc, 0x69, 0x0f, 0xcc, 0xd9, 0x1d, 0x18, 0x6a, + 0x3b, 0x86, 0xb6, 0x1d, 0x43, 0x5f, 0x1d, 0x43, 0x1f, 0x3d, 0x73, 0xb6, 0x3d, 0x73, 0x76, 0x3d, + 0x73, 0x9e, 0xce, 0x6c, 0xee, 0xaf, 0x81, 0x15, 0xe6, 0xc3, 0xc4, 0x33, 0x71, 0x5e, 0x7f, 0x07, + 0x00, 0x00, 0xff, 0xff, 0xb1, 0xee, 0x70, 0x56, 0x97, 0x01, 0x00, 0x00, +} + +func (m *Member) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Member) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Member) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Stop != 0 { + i = encodeVarintMember(dAtA, i, uint64(m.Stop)) + i-- + dAtA[i] = 0x38 + } + if m.Limit != 0 { + i = encodeVarintMember(dAtA, i, uint64(m.Limit)) + i-- + dAtA[i] = 0x30 + } + { + size := m.Previous.Size() + i -= size + if _, err := m.Previous.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintMember(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + { + size := m.Balance.Size() + i -= size + if _, err := m.Balance.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintMember(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if len(m.DenomB) > 0 { + i -= len(m.DenomB) + copy(dAtA[i:], m.DenomB) + i = encodeVarintMember(dAtA, i, uint64(len(m.DenomB))) + i-- + dAtA[i] = 0x1a + } + if len(m.DenomA) > 0 { + i -= len(m.DenomA) + copy(dAtA[i:], m.DenomA) + i = encodeVarintMember(dAtA, i, uint64(len(m.DenomA))) + i-- + dAtA[i] = 0x12 + } + if len(m.Pair) > 0 { + i -= len(m.Pair) + copy(dAtA[i:], m.Pair) + i = encodeVarintMember(dAtA, i, uint64(len(m.Pair))) + i-- 
+ dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintMember(dAtA []byte, offset int, v uint64) int { + offset -= sovMember(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Member) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Pair) + if l > 0 { + n += 1 + l + sovMember(uint64(l)) + } + l = len(m.DenomA) + if l > 0 { + n += 1 + l + sovMember(uint64(l)) + } + l = len(m.DenomB) + if l > 0 { + n += 1 + l + sovMember(uint64(l)) + } + l = m.Balance.Size() + n += 1 + l + sovMember(uint64(l)) + l = m.Previous.Size() + n += 1 + l + sovMember(uint64(l)) + if m.Limit != 0 { + n += 1 + sovMember(uint64(m.Limit)) + } + if m.Stop != 0 { + n += 1 + sovMember(uint64(m.Stop)) + } + return n +} + +func sovMember(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozMember(x uint64) (n int) { + return sovMember(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Member) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMember + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Member: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Member: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pair", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMember + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMember + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMember + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pair = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DenomA", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMember + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMember + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMember + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DenomA = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DenomB", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMember + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMember + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMember + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DenomB = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Balance", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMember + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMember + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMember + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Balance.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Previous", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMember + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMember + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMember + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Previous.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + } + m.Limit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMember + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Limit |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stop", wireType) + } + m.Stop = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMember + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Stop |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMember(dAtA[iNdEx:]) + 
if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMember + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipMember(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMember + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMember + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMember + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthMember + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupMember + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthMember + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthMember = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowMember = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupMember = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/market/types/message_cancel_order.go b/x/market/types/message_cancel_order.go new file mode 100644 index 
00000000..4161734b --- /dev/null +++ b/x/market/types/message_cancel_order.go @@ -0,0 +1,54 @@ +package types + +import ( + "strconv" + + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +const TypeMsgCancelOrder = "cancel_order" + +var _ sdk.Msg = &MsgCancelOrder{} + +func NewMsgCancelOrder(creator string, uid string) *MsgCancelOrder { + return &MsgCancelOrder{ + Creator: creator, + Uid: uid, + } +} + +func (msg *MsgCancelOrder) Route() string { + return RouterKey +} + +func (msg *MsgCancelOrder) Type() string { + return TypeMsgCancelOrder +} + +func (msg *MsgCancelOrder) GetSigners() []sdk.AccAddress { + creator, err := sdk.AccAddressFromBech32(msg.Creator) + if err != nil { + panic(err) + } + return []sdk.AccAddress{creator} +} + +func (msg *MsgCancelOrder) GetSignBytes() []byte { + bz := ModuleCdc.MustMarshalJSON(msg) + return sdk.MustSortJSON(bz) +} + +func (msg *MsgCancelOrder) ValidateBasic() error { + _, err := sdk.AccAddressFromBech32(msg.Creator) + if err != nil { + return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "invalid creator address (%s)", err) + } + + _, err = strconv.ParseUint(msg.Uid, 10, 64) + if err != nil { + return sdkerrors.Wrapf(sdkerrors.ErrInvalidRequest, "drop uid is not an integer") + } + + return nil +} diff --git a/x/market/types/message_cancel_order_test.go b/x/market/types/message_cancel_order_test.go new file mode 100644 index 00000000..da2c73be --- /dev/null +++ b/x/market/types/message_cancel_order_test.go @@ -0,0 +1,52 @@ +package types + +import ( + "testing" + + "market/testutil/sample" + + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + "github.com/stretchr/testify/require" +) + +func TestMsgCancelOrder_ValidateBasic(t *testing.T) { + tests := []struct { + name string + msg MsgCancelOrder + err error + }{ + { + name: "invalid address", + msg: MsgCancelOrder{ + Creator: "invalid_address", + Uid: "2", + }, + err: sdkerrors.ErrInvalidAddress, + }, + { + name: "valid 
address", + msg: MsgCancelOrder{ + Creator: sample.AccAddress(), + Uid: "0", + }, + }, + { + name: "negative uid", + msg: MsgCancelOrder{ + Creator: sample.AccAddress(), + Uid: "-1", + }, + err: sdkerrors.ErrInvalidRequest, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.msg.ValidateBasic() + if tt.err != nil { + require.ErrorIs(t, err, tt.err) + return + } + require.NoError(t, err) + }) + } +} diff --git a/x/market/types/message_create_drop.go b/x/market/types/message_create_drop.go new file mode 100644 index 00000000..962a4128 --- /dev/null +++ b/x/market/types/message_create_drop.go @@ -0,0 +1,67 @@ +package types + +import ( + "sort" + "strings" + + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +const TypeMsgCreateDrop = "create_drop" + +var _ sdk.Msg = &MsgCreateDrop{} + +func NewMsgCreateDrop(creator string, pair string, drops string) *MsgCreateDrop { + return &MsgCreateDrop{ + Creator: creator, + Pair: pair, + Drops: drops, + } +} + +func (msg *MsgCreateDrop) Route() string { + return RouterKey +} + +func (msg *MsgCreateDrop) Type() string { + return TypeMsgCreateDrop +} + +func (msg *MsgCreateDrop) GetSigners() []sdk.AccAddress { + creator, err := sdk.AccAddressFromBech32(msg.Creator) + if err != nil { + panic(err) + } + return []sdk.AccAddress{creator} +} + +func (msg *MsgCreateDrop) GetSignBytes() []byte { + bz := ModuleCdc.MustMarshalJSON(msg) + return sdk.MustSortJSON(bz) +} + +func (msg *MsgCreateDrop) ValidateBasic() error { + _, err := sdk.AccAddressFromBech32(msg.Creator) + if err != nil { + return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "invalid creator address (%s)", err) + } + + pairMsg := strings.Split(msg.Pair, ",") + sort.Strings(pairMsg) + + if len(pairMsg) != 2 { + return sdkerrors.Wrap(sdkerrors.ErrInvalidRequest, "pair not a valid denom pair") + } + + drops, ok := sdk.NewIntFromString(msg.Drops) + if !ok { + return 
sdkerrors.Wrap(sdkerrors.ErrInvalidRequest, "drops not a valid integer") + } + + if !drops.GT(sdk.ZeroInt()) { + return sdkerrors.Wrap(sdkerrors.ErrInvalidRequest, "drops not >0") + } + + return nil +} diff --git a/x/market/types/message_create_drop_test.go b/x/market/types/message_create_drop_test.go new file mode 100644 index 00000000..088af128 --- /dev/null +++ b/x/market/types/message_create_drop_test.go @@ -0,0 +1,80 @@ +package types + +import ( + "testing" + + "market/testutil/sample" + + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + "github.com/stretchr/testify/require" +) + +func TestMsgCreateDrop_ValidateBasic(t *testing.T) { + tests := []struct { + name string + msg MsgCreateDrop + err error + }{ + { + name: "invalid address", + msg: MsgCreateDrop{ + Creator: "invalid_address", + }, + err: sdkerrors.ErrInvalidAddress, + }, + { + name: "valid address", + msg: MsgCreateDrop{ + Creator: sample.AccAddress(), + Pair: "CoinA,CoinB", + Drops: "70", + }, + }, + { + name: "not a pair", + msg: MsgCreateDrop{ + Creator: sample.AccAddress(), + Pair: "CoinACoinB", + Drops: "70", + }, + err: sdkerrors.ErrInvalidRequest, + }, + { + name: "not a pair", + msg: MsgCreateDrop{ + Creator: sample.AccAddress(), + Pair: "CoinA,CoinB,CoinC", + Drops: "70", + }, + err: sdkerrors.ErrInvalidRequest, + }, + { + name: "negative drops", + msg: MsgCreateDrop{ + Creator: sample.AccAddress(), + Pair: "CoinA,CoinB", + Drops: "-1", + }, + err: sdkerrors.ErrInvalidRequest, + }, + { + name: "zero drops", + msg: MsgCreateDrop{ + Creator: sample.AccAddress(), + Pair: "CoinA,CoinB", + Drops: "0", + }, + err: sdkerrors.ErrInvalidRequest, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.msg.ValidateBasic() + if tt.err != nil { + require.ErrorIs(t, err, tt.err) + return + } + require.NoError(t, err) + }) + } +} diff --git a/x/market/types/message_create_order.go b/x/market/types/message_create_order.go new file mode 100644 index 00000000..e2e8573f 
--- /dev/null +++ b/x/market/types/message_create_order.go @@ -0,0 +1,113 @@ +package types + +import ( + "strconv" + + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +const TypeMsgCreateOrder = "create_order" + +var _ sdk.Msg = &MsgCreateOrder{} + +func NewMsgCreateOrder(creator string, denomAsk string, denomBid string, orderType string, amount string, rate []string, prev string, next string) *MsgCreateOrder { + return &MsgCreateOrder{ + Creator: creator, + DenomAsk: denomAsk, + DenomBid: denomBid, + OrderType: orderType, + Amount: amount, + Rate: rate, + Prev: prev, + Next: next, + } +} + +func (msg *MsgCreateOrder) Route() string { + return RouterKey +} + +func (msg *MsgCreateOrder) Type() string { + return TypeMsgCreateOrder +} + +func (msg *MsgCreateOrder) GetSigners() []sdk.AccAddress { + creator, err := sdk.AccAddressFromBech32(msg.Creator) + if err != nil { + panic(err) + } + return []sdk.AccAddress{creator} +} + +func (msg *MsgCreateOrder) GetSignBytes() []byte { + bz := ModuleCdc.MustMarshalJSON(msg) + return sdk.MustSortJSON(bz) +} + +func (msg *MsgCreateOrder) ValidateBasic() error { + _, err := sdk.AccAddressFromBech32(msg.Creator) + if err != nil { + return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "invalid creator address (%s)", err) + } + + if msg.OrderType != "stop" && msg.OrderType != "limit" { + return sdkerrors.Wrapf(sdkerrors.ErrInvalidRequest, "invalid order type") + } + + err = sdk.ValidateDenom(msg.DenomAsk) + if err != nil { + return sdkerrors.Wrap(sdkerrors.ErrInvalidRequest, "denomAsk is not a valid denom") + } + + err = sdk.ValidateDenom(msg.DenomBid) + if err != nil { + return sdkerrors.Wrap(sdkerrors.ErrInvalidRequest, "denomBid is not a valid denom") + } + + amount, ok := sdk.NewIntFromString(msg.Amount) + if !ok { + return sdkerrors.Wrap(sdkerrors.ErrInvalidRequest, "invalid amount integer") + } + if amount.LTE(sdk.ZeroInt()) { + return 
sdkerrors.Wrap(sdkerrors.ErrInvalidRequest, "invalid amount integer") + } + + if len(msg.Rate) != 2 { + return sdkerrors.Wrap(sdkerrors.ErrInvalidRequest, "invalid rate that is not a ratio of two integers") + } + + // Rate[0] needs to fit into uint64 to avoid numerical errors + // Rate[0] will be converted to sdk.Int type in execution + rate0, err := strconv.ParseUint(msg.Rate[0], 10, 64) + if err != nil { + return sdkerrors.Wrap(sdkerrors.ErrInvalidRequest, "invalid rate") + } + + if rate0 == 0 { + return sdkerrors.Wrap(sdkerrors.ErrInvalidRequest, "invalid rate") + } + + // Rate[1] needs to fit into uint64 to avoid numerical errors + // Rate[1] will be converted to sdk.Int type in execution + rate1, err := strconv.ParseUint(msg.Rate[1], 10, 64) + if err != nil { + return sdkerrors.Wrap(sdkerrors.ErrInvalidRequest, "invalid rate") + } + + if rate1 == 0 { + return sdkerrors.Wrap(sdkerrors.ErrInvalidRequest, "invalid rate") + } + + _, err = strconv.ParseUint(msg.Prev, 10, 64) + if err != nil { + return sdkerrors.Wrapf(sdkerrors.ErrInvalidRequest, "prev uid is not an integer") + } + + _, err = strconv.ParseUint(msg.Next, 10, 64) + if err != nil { + return sdkerrors.Wrapf(sdkerrors.ErrInvalidRequest, "next uid is not an integer") + } + + return nil +} diff --git a/x/market/types/message_create_order_test.go b/x/market/types/message_create_order_test.go new file mode 100644 index 00000000..bd54c9b4 --- /dev/null +++ b/x/market/types/message_create_order_test.go @@ -0,0 +1,299 @@ +package types + +import ( + "testing" + + "market/testutil/sample" + + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + "github.com/stretchr/testify/require" +) + +func TestMsgCreateOrder_ValidateBasic(t *testing.T) { + tests := []struct { + name string + msg MsgCreateOrder + err error + }{ + { + name: "invalid address", + msg: MsgCreateOrder{ + Creator: "invalid_address", + }, + err: sdkerrors.ErrInvalidAddress, + }, + { + name: "valid address", + msg: MsgCreateOrder{ + Creator: 
sample.AccAddress(), + DenomAsk: "CoinA", + DenomBid: "CoinB", + Rate: []string{"10", "20"}, + OrderType: "stop", + Amount: "10", + Prev: "0", + Next: "0", + }, + }, + { + name: "limit order", + msg: MsgCreateOrder{ + Creator: sample.AccAddress(), + DenomAsk: "CoinA", + DenomBid: "CoinB", + Rate: []string{"10", "20"}, + OrderType: "limit", + Amount: "10", + Prev: "0", + Next: "0", + }, + }, + { + name: "invalid denom", + msg: MsgCreateOrder{ + Creator: sample.AccAddress(), + DenomAsk: "10CoinA", + DenomBid: "CoinB", + Rate: []string{"10", "20"}, + OrderType: "stop", + Amount: "10", + Prev: "0", + Next: "0", + }, + err: sdkerrors.ErrInvalidRequest, + }, + { + name: "invalid denom", + msg: MsgCreateOrder{ + Creator: sample.AccAddress(), + DenomAsk: "CoinA", + DenomBid: "10CoinB", + Rate: []string{"10", "20"}, + OrderType: "stop", + Amount: "10", + Prev: "0", + Next: "0", + }, + err: sdkerrors.ErrInvalidRequest, + }, + { + name: "invalid type", + msg: MsgCreateOrder{ + Creator: sample.AccAddress(), + DenomAsk: "CoinA", + DenomBid: "CoinB", + Rate: []string{"10", "20"}, + OrderType: "invalid", + Amount: "10", + Prev: "0", + Next: "0", + }, + err: sdkerrors.ErrInvalidRequest, + }, + { + name: "zero amount", + msg: MsgCreateOrder{ + Creator: sample.AccAddress(), + DenomAsk: "CoinA", + DenomBid: "CoinB", + Rate: []string{"10", "20"}, + OrderType: "stop", + Amount: "0", + Prev: "0", + Next: "0", + }, + err: sdkerrors.ErrInvalidRequest, + }, + { + name: "negative amount", + msg: MsgCreateOrder{ + Creator: sample.AccAddress(), + DenomAsk: "CoinA", + DenomBid: "CoinB", + Rate: []string{"10", "20"}, + OrderType: "stop", + Amount: "-1", + Prev: "0", + Next: "0", + }, + err: sdkerrors.ErrInvalidRequest, + }, + { + name: "rate is not a 2-tuple", + msg: MsgCreateOrder{ + Creator: sample.AccAddress(), + DenomAsk: "CoinA", + DenomBid: "CoinB", + Rate: []string{}, + OrderType: "stop", + Amount: "10", + Prev: "0", + Next: "0", + }, + err: sdkerrors.ErrInvalidRequest, + }, + { + name: 
"rate is not a 2-tuple", + msg: MsgCreateOrder{ + Creator: sample.AccAddress(), + DenomAsk: "CoinA", + DenomBid: "CoinB", + Rate: []string{"10"}, + OrderType: "stop", + Amount: "10", + Prev: "0", + Next: "0", + }, + err: sdkerrors.ErrInvalidRequest, + }, + { + name: "rate is not a 2-tuple", + msg: MsgCreateOrder{ + Creator: sample.AccAddress(), + DenomAsk: "CoinA", + DenomBid: "CoinB", + Rate: []string{"10", "20", "30"}, + OrderType: "stop", + Amount: "10", + Prev: "0", + Next: "0", + }, + err: sdkerrors.ErrInvalidRequest, + }, + { + name: "rate component is negative", + msg: MsgCreateOrder{ + Creator: sample.AccAddress(), + DenomAsk: "CoinA", + DenomBid: "CoinB", + Rate: []string{"-10", "20"}, + OrderType: "stop", + Amount: "10", + Prev: "0", + Next: "0", + }, + err: sdkerrors.ErrInvalidRequest, + }, + { + name: "rate component is negative", + msg: MsgCreateOrder{ + Creator: sample.AccAddress(), + DenomAsk: "CoinA", + DenomBid: "CoinB", + Rate: []string{"10", "-20"}, + OrderType: "stop", + Amount: "10", + Prev: "0", + Next: "0", + }, + err: sdkerrors.ErrInvalidRequest, + }, + { + name: "rate component is zero", + msg: MsgCreateOrder{ + Creator: sample.AccAddress(), + DenomAsk: "CoinA", + DenomBid: "CoinB", + Rate: []string{"0", "20"}, + OrderType: "stop", + Amount: "10", + Prev: "0", + Next: "0", + }, + err: sdkerrors.ErrInvalidRequest, + }, + { + name: "rate component is zero", + msg: MsgCreateOrder{ + Creator: sample.AccAddress(), + DenomAsk: "CoinA", + DenomBid: "CoinB", + Rate: []string{"10", "0"}, + OrderType: "stop", + Amount: "10", + Prev: "0", + Next: "0", + }, + err: sdkerrors.ErrInvalidRequest, + }, + { + name: "rate component fits in uint64", + msg: MsgCreateOrder{ + Creator: sample.AccAddress(), + DenomAsk: "CoinA", + DenomBid: "CoinB", + Rate: []string{"18446744073709551615", "18446744073709551615"}, + OrderType: "stop", + Amount: "10", + Prev: "0", + Next: "0", + }, + }, + { + name: "rate component does not fit in uint64", + msg: MsgCreateOrder{ + 
Creator: sample.AccAddress(), + DenomAsk: "CoinA", + DenomBid: "CoinB", + Rate: []string{"18446744073709551616", "18446744073709551615"}, + OrderType: "stop", + Amount: "10", + Prev: "0", + Next: "0", + }, + err: sdkerrors.ErrInvalidRequest, + }, + { + name: "rate component does not fit in uint64", + msg: MsgCreateOrder{ + Creator: sample.AccAddress(), + DenomAsk: "CoinA", + DenomBid: "CoinB", + Rate: []string{"18446744073709551615", "18446744073709551616"}, + OrderType: "stop", + Amount: "10", + Prev: "0", + Next: "0", + }, + err: sdkerrors.ErrInvalidRequest, + }, + { + name: "Prev Uid is invalid", + msg: MsgCreateOrder{ + Creator: sample.AccAddress(), + DenomAsk: "CoinA", + DenomBid: "CoinB", + Rate: []string{"10", "20"}, + OrderType: "stop", + Amount: "10", + Prev: "-1", + Next: "0", + }, + err: sdkerrors.ErrInvalidRequest, + }, + { + name: "Prev Uid is invalid", + msg: MsgCreateOrder{ + Creator: sample.AccAddress(), + DenomAsk: "CoinA", + DenomBid: "CoinB", + Rate: []string{"10", "20"}, + OrderType: "stop", + Amount: "10", + Prev: "0", + Next: "-1", + }, + err: sdkerrors.ErrInvalidRequest, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.msg.ValidateBasic() + if tt.err != nil { + require.ErrorIs(t, err, tt.err) + return + } + require.NoError(t, err) + }) + } +} diff --git a/x/market/types/message_create_pool.go b/x/market/types/message_create_pool.go new file mode 100644 index 00000000..6e488633 --- /dev/null +++ b/x/market/types/message_create_pool.go @@ -0,0 +1,62 @@ +package types + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +const TypeMsgCreatePool = "create_pool" + +var _ sdk.Msg = &MsgCreatePool{} + +func NewMsgCreatePool(creator string, coinA string, coinB string) *MsgCreatePool { + return &MsgCreatePool{ + Creator: creator, + CoinA: coinA, + CoinB: coinB, + } +} + +func (msg *MsgCreatePool) Route() string { + return RouterKey +} + +func (msg 
*MsgCreatePool) Type() string { + return TypeMsgCreatePool +} + +func (msg *MsgCreatePool) GetSigners() []sdk.AccAddress { + creator, err := sdk.AccAddressFromBech32(msg.Creator) + if err != nil { + panic(err) + } + return []sdk.AccAddress{creator} +} + +func (msg *MsgCreatePool) GetSignBytes() []byte { + bz := ModuleCdc.MustMarshalJSON(msg) + return sdk.MustSortJSON(bz) +} + +func (msg *MsgCreatePool) ValidateBasic() error { + _, err := sdk.AccAddressFromBech32(msg.Creator) + if err != nil { + return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "invalid creator address (%s)", err) + } + + coinA, _ := sdk.ParseCoinNormalized(msg.CoinA) + if !coinA.IsValid() { + return sdkerrors.Wrap(sdkerrors.ErrInvalidRequest, "amount is not a valid Coin object") + } + + coinB, _ := sdk.ParseCoinNormalized(msg.CoinB) + if !coinB.IsValid() { + return sdkerrors.Wrap(sdkerrors.ErrInvalidRequest, "amount is not a valid Coin object") + } + + if coinA.Denom == coinB.Denom { + return sdkerrors.Wrap(sdkerrors.ErrInvalidRequest, "CoinA equal to CoinB") + } + + return nil +} diff --git a/x/market/types/message_create_pool_test.go b/x/market/types/message_create_pool_test.go new file mode 100644 index 00000000..8f5546b5 --- /dev/null +++ b/x/market/types/message_create_pool_test.go @@ -0,0 +1,71 @@ +package types + +import ( + "testing" + + "market/testutil/sample" + + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + "github.com/stretchr/testify/require" +) + +func TestMsgCreatePool_ValidateBasic(t *testing.T) { + tests := []struct { + name string + msg MsgCreatePool + err error + }{ + { + name: "invalid address", + msg: MsgCreatePool{ + Creator: "invalid_address", + }, + err: sdkerrors.ErrInvalidAddress, + }, + { + name: "valid address", + msg: MsgCreatePool{ + CoinA: "20CoinA", + CoinB: "20CoinB", + Creator: sample.AccAddress(), + }, + }, + { + name: "equal denoms", + msg: MsgCreatePool{ + CoinA: "20CoinA", + CoinB: "20CoinA", + Creator: sample.AccAddress(), + }, + err: 
sdkerrors.ErrInvalidRequest, + }, + { + name: "invalid coin A", + msg: MsgCreatePool{ + CoinA: "-20CoinA", + CoinB: "20CoinV", + Creator: sample.AccAddress(), + }, + err: sdkerrors.ErrInvalidRequest, + }, + { + name: "invalid coin B", + msg: MsgCreatePool{ + CoinA: "20CoinA", + CoinB: "-20CoinV", + Creator: sample.AccAddress(), + }, + err: sdkerrors.ErrInvalidRequest, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.msg.ValidateBasic() + if tt.err != nil { + require.ErrorIs(t, err, tt.err) + return + } + require.NoError(t, err) + }) + } +} diff --git a/x/market/types/message_market_order.go b/x/market/types/message_market_order.go new file mode 100644 index 00000000..f0d1323c --- /dev/null +++ b/x/market/types/message_market_order.go @@ -0,0 +1,88 @@ +package types + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +const TypeMsgMarketOrder = "market_order" + +var _ sdk.Msg = &MsgMarketOrder{} + +func NewMsgMarketOrder(creator string, denomAsk string, amountAsk string, denomBid string, amountBid string, slippage string) *MsgMarketOrder { + return &MsgMarketOrder{ + Creator: creator, + DenomAsk: denomAsk, + AmountAsk: amountAsk, + DenomBid: denomBid, + AmountBid: amountBid, + Slippage: slippage, + } +} + +func (msg *MsgMarketOrder) Route() string { + return RouterKey +} + +func (msg *MsgMarketOrder) Type() string { + return TypeMsgMarketOrder +} + +func (msg *MsgMarketOrder) GetSigners() []sdk.AccAddress { + creator, err := sdk.AccAddressFromBech32(msg.Creator) + if err != nil { + panic(err) + } + return []sdk.AccAddress{creator} +} + +func (msg *MsgMarketOrder) GetSignBytes() []byte { + bz := ModuleCdc.MustMarshalJSON(msg) + return sdk.MustSortJSON(bz) +} + +func (msg *MsgMarketOrder) ValidateBasic() error { + _, err := sdk.AccAddressFromBech32(msg.Creator) + if err != nil { + return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "invalid creator address (%s)", 
err) + } + + err = sdk.ValidateDenom(msg.DenomAsk) + if err != nil { + return sdkerrors.Wrap(sdkerrors.ErrInvalidRequest, "invalid ask denom") + } + + err = sdk.ValidateDenom(msg.DenomBid) + if err != nil { + return sdkerrors.Wrap(sdkerrors.ErrInvalidRequest, "invalid bid denom") + } + + amount, ok := sdk.NewIntFromString(msg.AmountBid) + if !ok { + return sdkerrors.Wrap(sdkerrors.ErrInvalidRequest, "invalid amount integer") + } + if amount.LTE(sdk.ZeroInt()) { + return sdkerrors.Wrap(sdkerrors.ErrInvalidRequest, "invalid amount integer") + } + + amount, ok = sdk.NewIntFromString(msg.AmountAsk) + if !ok { + return sdkerrors.Wrap(sdkerrors.ErrInvalidRequest, "invalid amount integer") + } + if amount.LTE(sdk.ZeroInt()) { + return sdkerrors.Wrap(sdkerrors.ErrInvalidRequest, "invalid amount integer") + } + + slippage, ok := sdk.NewIntFromString(msg.Slippage) + if !ok { + return sdkerrors.Wrap(sdkerrors.ErrInvalidRequest, "invalid slippage integer") + } + if slippage.IsNegative() { + return sdkerrors.Wrap(sdkerrors.ErrInvalidRequest, "invalid slippage integer") + } + if slippage.GT(sdk.NewInt(9999)) { + return sdkerrors.Wrap(sdkerrors.ErrInvalidRequest, "invalid slippage integer") + } + + return nil +} diff --git a/x/market/types/message_market_order_test.go b/x/market/types/message_market_order_test.go new file mode 100644 index 00000000..e799b85d --- /dev/null +++ b/x/market/types/message_market_order_test.go @@ -0,0 +1,142 @@ +package types + +import ( + "testing" + + "market/testutil/sample" + + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + "github.com/stretchr/testify/require" +) + +func TestMsgMarketOrder_ValidateBasic(t *testing.T) { + tests := []struct { + name string + msg MsgMarketOrder + err error + }{ + { + name: "invalid address", + msg: MsgMarketOrder{ + Creator: "invalid_address", + }, + err: sdkerrors.ErrInvalidAddress, + }, + { + name: "valid address", + msg: MsgMarketOrder{ + Creator: sample.AccAddress(), + DenomAsk: "CoinA", + AmountAsk: 
"40", + DenomBid: "CoinB", + AmountBid: "40", + Slippage: "20", + }, + }, + { + name: "max slippage", + msg: MsgMarketOrder{ + Creator: sample.AccAddress(), + DenomAsk: "CoinA", + AmountAsk: "40", + DenomBid: "CoinB", + AmountBid: "40", + Slippage: "9999", + }, + }, + { + name: "too large slippage", + msg: MsgMarketOrder{ + Creator: sample.AccAddress(), + DenomAsk: "CoinA", + AmountAsk: "40", + DenomBid: "CoinB", + AmountBid: "40", + Slippage: "10000", + }, + err: sdkerrors.ErrInvalidRequest, + }, + { + name: "negative slippage", + msg: MsgMarketOrder{ + Creator: sample.AccAddress(), + DenomAsk: "CoinA", + AmountAsk: "40", + DenomBid: "CoinB", + AmountBid: "40", + Slippage: "-1", + }, + err: sdkerrors.ErrInvalidRequest, + }, + { + name: "negative bid", + msg: MsgMarketOrder{ + Creator: sample.AccAddress(), + DenomAsk: "CoinA", + AmountAsk: "0", + DenomBid: "CoinB", + AmountBid: "-1", + Slippage: "20", + }, + err: sdkerrors.ErrInvalidRequest, + }, + { + name: "zero bid", + msg: MsgMarketOrder{ + Creator: sample.AccAddress(), + DenomAsk: "CoinA", + AmountAsk: "0", + DenomBid: "CoinB", + AmountBid: "0", + Slippage: "20", + }, + err: sdkerrors.ErrInvalidRequest, + }, + { + name: "invalid ask", + msg: MsgMarketOrder{ + Creator: sample.AccAddress(), + DenomAsk: "20CoinA", + AmountAsk: "40", + DenomBid: "CoinB", + AmountBid: "40", + Slippage: "20", + }, + err: sdkerrors.ErrInvalidRequest, + }, + { + name: "invalid bid", + msg: MsgMarketOrder{ + Creator: sample.AccAddress(), + DenomAsk: "CoinA", + AmountAsk: "40", + DenomBid: "20CoinB", + AmountBid: "40", + Slippage: "20", + }, + err: sdkerrors.ErrInvalidRequest, + }, + { + name: "invalid slippage", + msg: MsgMarketOrder{ + Creator: sample.AccAddress(), + DenomAsk: "CoinA", + AmountAsk: "40", + DenomBid: "CoinB", + AmountBid: "40", + Slippage: "0999", + }, + err: sdkerrors.ErrInvalidRequest, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.msg.ValidateBasic() + if tt.err != nil { + 
require.ErrorIs(t, err, tt.err) + return + } + require.NoError(t, err) + }) + } +} diff --git a/x/market/types/message_redeem_drop.go b/x/market/types/message_redeem_drop.go new file mode 100644 index 00000000..93565d12 --- /dev/null +++ b/x/market/types/message_redeem_drop.go @@ -0,0 +1,54 @@ +package types + +import ( + "strconv" + + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +const TypeMsgRedeemDrop = "redeem_drop" + +var _ sdk.Msg = &MsgRedeemDrop{} + +func NewMsgRedeemDrop(creator string, uid string) *MsgRedeemDrop { + return &MsgRedeemDrop{ + Creator: creator, + Uid: uid, + } +} + +func (msg *MsgRedeemDrop) Route() string { + return RouterKey +} + +func (msg *MsgRedeemDrop) Type() string { + return TypeMsgRedeemDrop +} + +func (msg *MsgRedeemDrop) GetSigners() []sdk.AccAddress { + creator, err := sdk.AccAddressFromBech32(msg.Creator) + if err != nil { + panic(err) + } + return []sdk.AccAddress{creator} +} + +func (msg *MsgRedeemDrop) GetSignBytes() []byte { + bz := ModuleCdc.MustMarshalJSON(msg) + return sdk.MustSortJSON(bz) +} + +func (msg *MsgRedeemDrop) ValidateBasic() error { + _, err := sdk.AccAddressFromBech32(msg.Creator) + if err != nil { + return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "invalid creator address (%s)", err) + } + + _, err = strconv.ParseUint(msg.Uid, 10, 64) + if err != nil { + return sdkerrors.Wrapf(sdkerrors.ErrInvalidRequest, "drop uid is not an integer or is negative") + } + + return nil +} diff --git a/x/market/types/message_redeem_drop_test.go b/x/market/types/message_redeem_drop_test.go new file mode 100644 index 00000000..bca16c65 --- /dev/null +++ b/x/market/types/message_redeem_drop_test.go @@ -0,0 +1,51 @@ +package types + +import ( + "testing" + + "market/testutil/sample" + + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + "github.com/stretchr/testify/require" +) + +func TestMsgRedeemDrop_ValidateBasic(t *testing.T) { + tests := []struct { + name string 
+ msg MsgRedeemDrop + err error + }{ + { + name: "invalid address", + msg: MsgRedeemDrop{ + Creator: "invalid_address", + }, + err: sdkerrors.ErrInvalidAddress, + }, + { + name: "valid address", + msg: MsgRedeemDrop{ + Creator: sample.AccAddress(), + Uid: "0", + }, + }, + { + name: "negative uid", + msg: MsgRedeemDrop{ + Creator: sample.AccAddress(), + Uid: "-1", + }, + err: sdkerrors.ErrInvalidRequest, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.msg.ValidateBasic() + if tt.err != nil { + require.ErrorIs(t, err, tt.err) + return + } + require.NoError(t, err) + }) + } +} diff --git a/x/market/types/order.pb.go b/x/market/types/order.pb.go new file mode 100644 index 00000000..242f7ab5 --- /dev/null +++ b/x/market/types/order.pb.go @@ -0,0 +1,1537 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: market/order.proto + +package types + +import ( + fmt "fmt" + github_com_cosmos_cosmos_sdk_types "github.com/cosmos/cosmos-sdk/types" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type Order struct { + Uid uint64 `protobuf:"varint,1,opt,name=uid,proto3" json:"uid,omitempty"` + Owner string `protobuf:"bytes,2,opt,name=owner,proto3" json:"owner,omitempty"` + Status string `protobuf:"bytes,3,opt,name=status,proto3" json:"status,omitempty"` + OrderType string `protobuf:"bytes,4,opt,name=orderType,proto3" json:"orderType,omitempty"` + DenomAsk string `protobuf:"bytes,5,opt,name=denomAsk,proto3" json:"denomAsk,omitempty"` + DenomBid string `protobuf:"bytes,6,opt,name=denomBid,proto3" json:"denomBid,omitempty"` + Amount github_com_cosmos_cosmos_sdk_types.Int `protobuf:"bytes,7,opt,name=amount,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Int" json:"amount"` + Rate []github_com_cosmos_cosmos_sdk_types.Int `protobuf:"bytes,8,rep,name=rate,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Int" json:"rate"` + Prev uint64 `protobuf:"varint,9,opt,name=prev,proto3" json:"prev,omitempty"` + Next uint64 `protobuf:"varint,10,opt,name=next,proto3" json:"next,omitempty"` + BegTime int64 `protobuf:"varint,11,opt,name=beg_time,json=begTime,proto3" json:"beg_time,omitempty"` + UpdTime int64 `protobuf:"varint,12,opt,name=upd_time,json=updTime,proto3" json:"upd_time,omitempty"` +} + +func (m *Order) Reset() { *m = Order{} } +func (m *Order) String() string { return proto.CompactTextString(m) } +func (*Order) ProtoMessage() {} +func (*Order) Descriptor() ([]byte, []int) { + return fileDescriptor_8c6375df0c4a1904, []int{0} +} +func (m *Order) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Order) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Order.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Order) XXX_Merge(src proto.Message) { + xxx_messageInfo_Order.Merge(m, src) +} 
+func (m *Order) XXX_Size() int { + return m.Size() +} +func (m *Order) XXX_DiscardUnknown() { + xxx_messageInfo_Order.DiscardUnknown(m) +} + +var xxx_messageInfo_Order proto.InternalMessageInfo + +type Orders struct { + Uids []uint64 `protobuf:"varint,1,rep,packed,name=uids,proto3" json:"uids,omitempty"` +} + +func (m *Orders) Reset() { *m = Orders{} } +func (m *Orders) String() string { return proto.CompactTextString(m) } +func (*Orders) ProtoMessage() {} +func (*Orders) Descriptor() ([]byte, []int) { + return fileDescriptor_8c6375df0c4a1904, []int{1} +} +func (m *Orders) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Orders) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Orders.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Orders) XXX_Merge(src proto.Message) { + xxx_messageInfo_Orders.Merge(m, src) +} +func (m *Orders) XXX_Size() int { + return m.Size() +} +func (m *Orders) XXX_DiscardUnknown() { + xxx_messageInfo_Orders.DiscardUnknown(m) +} + +var xxx_messageInfo_Orders proto.InternalMessageInfo + +type OrderResponse struct { + Uid uint64 `protobuf:"varint,1,opt,name=uid,proto3" json:"uid,omitempty"` + Owner string `protobuf:"bytes,2,opt,name=owner,proto3" json:"owner,omitempty"` + Status string `protobuf:"bytes,3,opt,name=status,proto3" json:"status,omitempty"` + OrderType string `protobuf:"bytes,4,opt,name=orderType,proto3" json:"orderType,omitempty"` + DenomAsk string `protobuf:"bytes,5,opt,name=denomAsk,proto3" json:"denomAsk,omitempty"` + DenomBid string `protobuf:"bytes,6,opt,name=denomBid,proto3" json:"denomBid,omitempty"` + Amount string `protobuf:"bytes,7,opt,name=amount,proto3" json:"amount,omitempty"` + Rate []string `protobuf:"bytes,8,rep,name=rate,proto3" json:"rate,omitempty"` + Prev uint64 `protobuf:"varint,9,opt,name=prev,proto3" 
json:"prev,omitempty"` + Next uint64 `protobuf:"varint,10,opt,name=next,proto3" json:"next,omitempty"` + BegTime int64 `protobuf:"varint,11,opt,name=beg_time,json=begTime,proto3" json:"beg_time,omitempty"` + UpdTime int64 `protobuf:"varint,12,opt,name=upd_time,json=updTime,proto3" json:"upd_time,omitempty"` +} + +func (m *OrderResponse) Reset() { *m = OrderResponse{} } +func (m *OrderResponse) String() string { return proto.CompactTextString(m) } +func (*OrderResponse) ProtoMessage() {} +func (*OrderResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_8c6375df0c4a1904, []int{2} +} +func (m *OrderResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OrderResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_OrderResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *OrderResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_OrderResponse.Merge(m, src) +} +func (m *OrderResponse) XXX_Size() int { + return m.Size() +} +func (m *OrderResponse) XXX_DiscardUnknown() { + xxx_messageInfo_OrderResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_OrderResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Order)(nil), "pendulumlabs.market.market.Order") + proto.RegisterType((*Orders)(nil), "pendulumlabs.market.market.Orders") + proto.RegisterType((*OrderResponse)(nil), "pendulumlabs.market.market.OrderResponse") +} + +func init() { proto.RegisterFile("market/order.proto", fileDescriptor_8c6375df0c4a1904) } + +var fileDescriptor_8c6375df0c4a1904 = []byte{ + // 405 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x93, 0x31, 0x8f, 0xd3, 0x30, + 0x14, 0xc7, 0xe3, 0x4b, 0x9a, 0x6b, 0x0d, 0x48, 0xc8, 0x3a, 0x4e, 0xa6, 0x3a, 0xf9, 0xa2, 0x0e, + 0xa8, 0x0b, 0xc9, 0xc0, 0xc0, 
0x4c, 0x06, 0x24, 0x26, 0xa4, 0xe8, 0x26, 0x16, 0x94, 0x9c, 0x9f, + 0x42, 0xd4, 0x73, 0x6c, 0xc5, 0x36, 0xb4, 0x9f, 0x80, 0x95, 0x6f, 0xc2, 0xd7, 0xe8, 0xd8, 0xb1, + 0x62, 0xa8, 0x68, 0xfb, 0x45, 0x90, 0x9d, 0xa8, 0x94, 0x95, 0x01, 0x31, 0xf9, 0xff, 0xde, 0xcf, + 0xef, 0xc9, 0xfa, 0xff, 0x65, 0x4c, 0x44, 0xd9, 0x2d, 0xc0, 0x64, 0xb2, 0xe3, 0xd0, 0xa5, 0xaa, + 0x93, 0x46, 0x92, 0xa9, 0x82, 0x96, 0xdb, 0x07, 0x2b, 0x1e, 0xca, 0x4a, 0xa7, 0xfd, 0x85, 0xe1, + 0x98, 0x5e, 0xd5, 0xb2, 0x96, 0xfe, 0x5a, 0xe6, 0x54, 0x3f, 0x31, 0xfb, 0x1a, 0xe2, 0xd1, 0x7b, + 0xb7, 0x81, 0x3c, 0xc5, 0xa1, 0x6d, 0x38, 0x45, 0x09, 0x9a, 0x47, 0x85, 0x93, 0xe4, 0x0a, 0x8f, + 0xe4, 0x97, 0x16, 0x3a, 0x7a, 0x91, 0xa0, 0xf9, 0xa4, 0xe8, 0x0b, 0x72, 0x8d, 0x63, 0x6d, 0x4a, + 0x63, 0x35, 0x0d, 0x7d, 0x7b, 0xa8, 0xc8, 0x0d, 0x9e, 0xf8, 0xa7, 0xdc, 0xad, 0x14, 0xd0, 0xc8, + 0xa3, 0xdf, 0x0d, 0x32, 0xc5, 0x63, 0x0e, 0xad, 0x14, 0x6f, 0xf4, 0x82, 0x8e, 0x3c, 0x3c, 0xd5, + 0x27, 0x96, 0x37, 0x9c, 0xc6, 0x67, 0x2c, 0x6f, 0x38, 0x79, 0x8b, 0xe3, 0x52, 0x48, 0xdb, 0x1a, + 0x7a, 0xe9, 0x48, 0x9e, 0xae, 0x77, 0xb7, 0xc1, 0x8f, 0xdd, 0xed, 0x8b, 0xba, 0x31, 0x9f, 0x6c, + 0x95, 0xde, 0x4b, 0x91, 0xdd, 0x4b, 0x2d, 0xa4, 0x1e, 0x8e, 0x97, 0x9a, 0x2f, 0x32, 0xb3, 0x52, + 0xa0, 0xd3, 0x77, 0xad, 0x29, 0x86, 0x69, 0x92, 0xe3, 0xa8, 0x2b, 0x0d, 0xd0, 0x71, 0x12, 0xfe, + 0xc5, 0x16, 0x3f, 0x4b, 0x08, 0x8e, 0x54, 0x07, 0x9f, 0xe9, 0xc4, 0x5b, 0xe4, 0xb5, 0xeb, 0xb5, + 0xb0, 0x34, 0x14, 0xf7, 0x3d, 0xa7, 0xc9, 0x73, 0x3c, 0xae, 0xa0, 0xfe, 0x68, 0x1a, 0x01, 0xf4, + 0x51, 0x82, 0xe6, 0x61, 0x71, 0x59, 0x41, 0x7d, 0xd7, 0x08, 0x70, 0xc8, 0x2a, 0xde, 0xa3, 0xc7, + 0x3d, 0xb2, 0x8a, 0x3b, 0x34, 0xbb, 0xc1, 0xb1, 0x0f, 0x42, 0xbb, 0x9d, 0xb6, 0xe1, 0x9a, 0xa2, + 0x24, 0x74, 0x3b, 0x9d, 0x9e, 0x7d, 0xbf, 0xc0, 0x4f, 0x3c, 0x2e, 0x40, 0x2b, 0xd9, 0x6a, 0xf8, + 0x4f, 0xf3, 0xba, 0xfe, 0x33, 0xaf, 0x93, 0xff, 0xe4, 0xdc, 0xff, 0x7f, 0xe1, 0x67, 0xfe, 0x7a, + 0xbd, 0x67, 0xc1, 0x76, 0xcf, 0xd0, 0xfa, 0xc0, 0xd0, 0xe6, 0xc0, 
0xd0, 0xcf, 0x03, 0x43, 0xdf, + 0x8e, 0x2c, 0xd8, 0x1c, 0x59, 0xb0, 0x3d, 0xb2, 0xe0, 0xc3, 0xb3, 0xe1, 0x07, 0x2d, 0xb3, 0x41, + 0xf8, 0xc0, 0xab, 0xd8, 0xff, 0x8c, 0x57, 0xbf, 0x02, 0x00, 0x00, 0xff, 0xff, 0xb8, 0x6e, 0x43, + 0x6e, 0x61, 0x03, 0x00, 0x00, +} + +func (m *Order) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Order) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Order) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.UpdTime != 0 { + i = encodeVarintOrder(dAtA, i, uint64(m.UpdTime)) + i-- + dAtA[i] = 0x60 + } + if m.BegTime != 0 { + i = encodeVarintOrder(dAtA, i, uint64(m.BegTime)) + i-- + dAtA[i] = 0x58 + } + if m.Next != 0 { + i = encodeVarintOrder(dAtA, i, uint64(m.Next)) + i-- + dAtA[i] = 0x50 + } + if m.Prev != 0 { + i = encodeVarintOrder(dAtA, i, uint64(m.Prev)) + i-- + dAtA[i] = 0x48 + } + if len(m.Rate) > 0 { + for iNdEx := len(m.Rate) - 1; iNdEx >= 0; iNdEx-- { + { + size := m.Rate[iNdEx].Size() + i -= size + if _, err := m.Rate[iNdEx].MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintOrder(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + } + { + size := m.Amount.Size() + i -= size + if _, err := m.Amount.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintOrder(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + if len(m.DenomBid) > 0 { + i -= len(m.DenomBid) + copy(dAtA[i:], m.DenomBid) + i = encodeVarintOrder(dAtA, i, uint64(len(m.DenomBid))) + i-- + dAtA[i] = 0x32 + } + if len(m.DenomAsk) > 0 { + i -= len(m.DenomAsk) + copy(dAtA[i:], m.DenomAsk) + i = encodeVarintOrder(dAtA, i, uint64(len(m.DenomAsk))) + i-- + dAtA[i] = 0x2a + } + if len(m.OrderType) > 0 { + i -= len(m.OrderType) + copy(dAtA[i:], m.OrderType) 
+ i = encodeVarintOrder(dAtA, i, uint64(len(m.OrderType))) + i-- + dAtA[i] = 0x22 + } + if len(m.Status) > 0 { + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintOrder(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x1a + } + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintOrder(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0x12 + } + if m.Uid != 0 { + i = encodeVarintOrder(dAtA, i, uint64(m.Uid)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Orders) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Orders) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Orders) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Uids) > 0 { + dAtA2 := make([]byte, len(m.Uids)*10) + var j1 int + for _, num := range m.Uids { + for num >= 1<<7 { + dAtA2[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA2[j1] = uint8(num) + j1++ + } + i -= j1 + copy(dAtA[i:], dAtA2[:j1]) + i = encodeVarintOrder(dAtA, i, uint64(j1)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *OrderResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OrderResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OrderResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.UpdTime != 0 { + i = encodeVarintOrder(dAtA, i, uint64(m.UpdTime)) + i-- + dAtA[i] = 0x60 + } + if m.BegTime != 0 { + i = encodeVarintOrder(dAtA, i, uint64(m.BegTime)) 
+ i-- + dAtA[i] = 0x58 + } + if m.Next != 0 { + i = encodeVarintOrder(dAtA, i, uint64(m.Next)) + i-- + dAtA[i] = 0x50 + } + if m.Prev != 0 { + i = encodeVarintOrder(dAtA, i, uint64(m.Prev)) + i-- + dAtA[i] = 0x48 + } + if len(m.Rate) > 0 { + for iNdEx := len(m.Rate) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Rate[iNdEx]) + copy(dAtA[i:], m.Rate[iNdEx]) + i = encodeVarintOrder(dAtA, i, uint64(len(m.Rate[iNdEx]))) + i-- + dAtA[i] = 0x42 + } + } + if len(m.Amount) > 0 { + i -= len(m.Amount) + copy(dAtA[i:], m.Amount) + i = encodeVarintOrder(dAtA, i, uint64(len(m.Amount))) + i-- + dAtA[i] = 0x3a + } + if len(m.DenomBid) > 0 { + i -= len(m.DenomBid) + copy(dAtA[i:], m.DenomBid) + i = encodeVarintOrder(dAtA, i, uint64(len(m.DenomBid))) + i-- + dAtA[i] = 0x32 + } + if len(m.DenomAsk) > 0 { + i -= len(m.DenomAsk) + copy(dAtA[i:], m.DenomAsk) + i = encodeVarintOrder(dAtA, i, uint64(len(m.DenomAsk))) + i-- + dAtA[i] = 0x2a + } + if len(m.OrderType) > 0 { + i -= len(m.OrderType) + copy(dAtA[i:], m.OrderType) + i = encodeVarintOrder(dAtA, i, uint64(len(m.OrderType))) + i-- + dAtA[i] = 0x22 + } + if len(m.Status) > 0 { + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintOrder(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x1a + } + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintOrder(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0x12 + } + if m.Uid != 0 { + i = encodeVarintOrder(dAtA, i, uint64(m.Uid)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintOrder(dAtA []byte, offset int, v uint64) int { + offset -= sovOrder(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Order) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Uid != 0 { + n += 1 + sovOrder(uint64(m.Uid)) + } + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovOrder(uint64(l)) + } + l = len(m.Status) + if l 
> 0 { + n += 1 + l + sovOrder(uint64(l)) + } + l = len(m.OrderType) + if l > 0 { + n += 1 + l + sovOrder(uint64(l)) + } + l = len(m.DenomAsk) + if l > 0 { + n += 1 + l + sovOrder(uint64(l)) + } + l = len(m.DenomBid) + if l > 0 { + n += 1 + l + sovOrder(uint64(l)) + } + l = m.Amount.Size() + n += 1 + l + sovOrder(uint64(l)) + if len(m.Rate) > 0 { + for _, e := range m.Rate { + l = e.Size() + n += 1 + l + sovOrder(uint64(l)) + } + } + if m.Prev != 0 { + n += 1 + sovOrder(uint64(m.Prev)) + } + if m.Next != 0 { + n += 1 + sovOrder(uint64(m.Next)) + } + if m.BegTime != 0 { + n += 1 + sovOrder(uint64(m.BegTime)) + } + if m.UpdTime != 0 { + n += 1 + sovOrder(uint64(m.UpdTime)) + } + return n +} + +func (m *Orders) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Uids) > 0 { + l = 0 + for _, e := range m.Uids { + l += sovOrder(uint64(e)) + } + n += 1 + sovOrder(uint64(l)) + l + } + return n +} + +func (m *OrderResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Uid != 0 { + n += 1 + sovOrder(uint64(m.Uid)) + } + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovOrder(uint64(l)) + } + l = len(m.Status) + if l > 0 { + n += 1 + l + sovOrder(uint64(l)) + } + l = len(m.OrderType) + if l > 0 { + n += 1 + l + sovOrder(uint64(l)) + } + l = len(m.DenomAsk) + if l > 0 { + n += 1 + l + sovOrder(uint64(l)) + } + l = len(m.DenomBid) + if l > 0 { + n += 1 + l + sovOrder(uint64(l)) + } + l = len(m.Amount) + if l > 0 { + n += 1 + l + sovOrder(uint64(l)) + } + if len(m.Rate) > 0 { + for _, s := range m.Rate { + l = len(s) + n += 1 + l + sovOrder(uint64(l)) + } + } + if m.Prev != 0 { + n += 1 + sovOrder(uint64(m.Prev)) + } + if m.Next != 0 { + n += 1 + sovOrder(uint64(m.Next)) + } + if m.BegTime != 0 { + n += 1 + sovOrder(uint64(m.BegTime)) + } + if m.UpdTime != 0 { + n += 1 + sovOrder(uint64(m.UpdTime)) + } + return n +} + +func sovOrder(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozOrder(x uint64) (n 
int) { + return sovOrder(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Order) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOrder + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Order: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Order: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Uid", wireType) + } + m.Uid = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOrder + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Uid |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOrder + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOrder + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOrder + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOrder + } + if iNdEx 
>= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOrder + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOrder + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OrderType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOrder + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOrder + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOrder + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OrderType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DenomAsk", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOrder + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOrder + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOrder + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DenomAsk = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DenomBid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 
{ + return ErrIntOverflowOrder + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOrder + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOrder + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DenomBid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOrder + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOrder + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOrder + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Amount.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rate", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOrder + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOrder + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOrder + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var v github_com_cosmos_cosmos_sdk_types.Int + m.Rate = append(m.Rate, v) + if err := m.Rate[len(m.Rate)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + 
iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Prev", wireType) + } + m.Prev = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOrder + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Prev |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Next", wireType) + } + m.Next = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOrder + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Next |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BegTime", wireType) + } + m.BegTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOrder + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.BegTime |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdTime", wireType) + } + m.UpdTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOrder + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UpdTime |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipOrder(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthOrder + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Orders) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + 
if shift >= 64 { + return ErrIntOverflowOrder + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Orders: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Orders: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOrder + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Uids = append(m.Uids, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOrder + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthOrder + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthOrder + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.Uids) == 0 { + m.Uids = make([]uint64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOrder + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Uids = append(m.Uids, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Uids", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipOrder(dAtA[iNdEx:]) + if err 
!= nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthOrder + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OrderResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOrder + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OrderResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OrderResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Uid", wireType) + } + m.Uid = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOrder + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Uid |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOrder + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOrder + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOrder + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 
2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOrder + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOrder + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOrder + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OrderType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOrder + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOrder + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOrder + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OrderType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DenomAsk", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOrder + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOrder + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOrder + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DenomAsk = string(dAtA[iNdEx:postIndex]) + 
iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DenomBid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOrder + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOrder + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOrder + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DenomBid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOrder + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOrder + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOrder + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Amount = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rate", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOrder + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOrder + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOrder + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rate 
= append(m.Rate, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Prev", wireType) + } + m.Prev = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOrder + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Prev |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Next", wireType) + } + m.Next = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOrder + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Next |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BegTime", wireType) + } + m.BegTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOrder + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.BegTime |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdTime", wireType) + } + m.UpdTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOrder + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UpdTime |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipOrder(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthOrder + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipOrder(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowOrder + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowOrder + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowOrder + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthOrder + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupOrder + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthOrder + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthOrder = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowOrder = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupOrder = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/market/types/params.go b/x/market/types/params.go index 357196ad..648c011f 100644 --- a/x/market/types/params.go +++ b/x/market/types/params.go @@ -1,10 +1,37 @@ package types import ( + "fmt" + "strings" + + sdk "github.com/cosmos/cosmos-sdk/types" paramtypes "github.com/cosmos/cosmos-sdk/x/params/types" "gopkg.in/yaml.v2" ) +var ( + // KeyEarnRates is byte key for EarnRates param. + KeyEarnRates = []byte("EarnRates") //nolint:gochecknoglobals // cosmos-sdk style + // KeyBurnRate is byte key for BurnRate param. 
+ KeyBurnRate = []byte("BurnRate") //nolint:gochecknoglobals // cosmos-sdk style + // KeyBurnCoin is byte key for BurnCoin param. + KeyBurnCoin = []byte("BurnCoin") //nolint:gochecknoglobals // cosmos-sdk style + // KeyMarketFee is byte key for MarketFee param. + KeyMarketFee = []byte("MarketFee") //nolint:gochecknoglobals // cosmos-sdk style +) + +var ( + // DefaultEarnRates is default value for the EarnRates param. + DefaultEarnRates = "0500,0300,0200" //nolint:gomnd,gochecknoglobals // cosmos-sdk style + // DefaultBurnRate is default value for the BurnRate param. + DefaultBurnRate = "1000" //nolint:gomnd,gochecknoglobals // cosmos-sdk style + // DefaultBurnCoin is default value for the BurnCoin param. + DefaultBurnCoin = "stake" //nolint:gomnd,gochecknoglobals // cosmos-sdk style + // DefaultMarketFee is default value for the MarketFee param. + DefaultMarketFee = "0030" //nolint:gomnd,gochecknoglobals // cosmos-sdk style + +) + var _ paramtypes.ParamSet = (*Params)(nil) // ParamKeyTable the param key table for launch module @@ -13,27 +40,138 @@ func ParamKeyTable() paramtypes.KeyTable { } // NewParams creates a new Params instance -func NewParams() Params { - return Params{} +func NewParams( + earnRates string, + burnRate string, + burnCoin string, + marketFee string, +) Params { + return Params{ + EarnRates: earnRates, + BurnRate: burnRate, + BurnCoin: burnCoin, + MarketFee: marketFee, + } } // DefaultParams returns a default set of parameters func DefaultParams() Params { - return NewParams() + return NewParams(DefaultEarnRates, DefaultBurnRate, DefaultBurnCoin, DefaultMarketFee) } // ParamSetPairs get the params.ParamSet func (p *Params) ParamSetPairs() paramtypes.ParamSetPairs { - return paramtypes.ParamSetPairs{} + return paramtypes.ParamSetPairs{ + paramtypes.NewParamSetPair(KeyEarnRates, &p.EarnRates, validateEarnRates), + paramtypes.NewParamSetPair(KeyBurnRate, &p.BurnRate, validateBurnRate), + paramtypes.NewParamSetPair(KeyBurnCoin, 
&p.BurnCoin, validateBurnCoin), + paramtypes.NewParamSetPair(KeyMarketFee, &p.MarketFee, validateMarketFee), + } } // Validate validates the set of params func (p Params) Validate() error { + if err := validateEarnRates(p.EarnRates); err != nil { + return err + } + if err := validateBurnRate(p.BurnRate); err != nil { + return err + } + if err := validateBurnCoin(p.BurnCoin); err != nil { + return err + } + if err := validateMarketFee(p.MarketFee); err != nil { + return err + } + return nil } // String implements the Stringer interface. func (p Params) String() string { - out, _ := yaml.Marshal(p) + out, _ := yaml.Marshal(p) //nolint:errcheck // error is not expected here return string(out) } + +func validateEarnRates(i interface{}) error { + value, ok := i.(string) + if !ok { + return fmt.Errorf("invalid parameter type: %T", i) + } + + earnRatesStringArray := strings.Split(value, ",") + + if len(earnRatesStringArray) > 10 { + return fmt.Errorf("the maximum number of rate values is 10") + } + + var earnRates [10]sdk.Int + for i, v := range earnRatesStringArray { + earnRates[i], ok = sdk.NewIntFromString(v) + if !ok { + return fmt.Errorf("invalid string number format: %q", v) + } + if earnRates[i].LTE(sdk.ZeroInt()) { + return fmt.Errorf("earn rate numerator must be positive and greater than zero: %d", earnRates[i]) + } + if earnRates[i].GTE(sdk.NewInt(10000)) { + return fmt.Errorf("earn rate numerator must be less than 10000: %d", earnRates[i]) + } + if i > 0 { + if earnRates[i].GT(earnRates[i-1]) { + return fmt.Errorf("earn rates must not increase") + } + } + } + + return nil +} + +func validateBurnRate(i interface{}) error { + value, ok := i.(string) + if !ok { + return fmt.Errorf("invalid parameter type: %T", i) + } + + burnRate, ok := sdk.NewIntFromString(value) + if !ok { + return fmt.Errorf("invalid string number format: %q", value) + } + if burnRate.LTE(sdk.ZeroInt()) { + return fmt.Errorf("burn rate numerator must be positive and greater than zero: %d", 
burnRate) + } + if burnRate.GTE(sdk.NewInt(10000)) { + return fmt.Errorf("burn rate numerator must be less than 10000: %d", burnRate) + } + + return nil +} + +func validateBurnCoin(i interface{}) error { + _, ok := i.(string) + if !ok { + return fmt.Errorf("invalid parameter type: %T", i) + } + + return nil +} + +func validateMarketFee(i interface{}) error { + value, ok := i.(string) + if !ok { + return fmt.Errorf("invalid parameter type: %T", i) + } + + marketFee, ok := sdk.NewIntFromString(value) + if !ok { + return fmt.Errorf("invalid string number format: %q", value) + } + if marketFee.LTE(sdk.ZeroInt()) { + return fmt.Errorf("market fee numerator must be positive and greater than zero: %d", marketFee) + } + if marketFee.GTE(sdk.NewInt(10000)) { + return fmt.Errorf("market fee numerator must be less than 10000: %d", marketFee) + } + + return nil +} diff --git a/x/market/types/params.pb.go b/x/market/types/params.pb.go index 177333d8..36e2fd04 100644 --- a/x/market/types/params.pb.go +++ b/x/market/types/params.pb.go @@ -25,6 +25,15 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package // Params defines the parameters for the module. 
type Params struct { + // leader earnings rates + // 1,2,3 Comma separated, no space + EarnRates string `protobuf:"bytes,1,opt,name=earn_rates,json=earnRates,proto3" json:"earn_rates,omitempty"` + // pool burning rate + BurnRate string `protobuf:"bytes,2,opt,name=burn_rate,json=burnRate,proto3" json:"burn_rate,omitempty"` + // burn coin + BurnCoin string `protobuf:"bytes,3,opt,name=burn_coin,json=burnCoin,proto3" json:"burn_coin,omitempty"` + // market_fee (parameter / 10000), 9999 representing as 99.99% + MarketFee string `protobuf:"bytes,4,opt,name=market_fee,json=marketFee,proto3" json:"market_fee,omitempty"` } func (m *Params) Reset() { *m = Params{} } @@ -59,22 +68,56 @@ func (m *Params) XXX_DiscardUnknown() { var xxx_messageInfo_Params proto.InternalMessageInfo +func (m *Params) GetEarnRates() string { + if m != nil { + return m.EarnRates + } + return "" +} + +func (m *Params) GetBurnRate() string { + if m != nil { + return m.BurnRate + } + return "" +} + +func (m *Params) GetBurnCoin() string { + if m != nil { + return m.BurnCoin + } + return "" +} + +func (m *Params) GetMarketFee() string { + if m != nil { + return m.MarketFee + } + return "" +} + func init() { - proto.RegisterType((*Params)(nil), "market.market.Params") + proto.RegisterType((*Params)(nil), "pendulumlabs.market.market.Params") } func init() { proto.RegisterFile("market/params.proto", fileDescriptor_43480567057de0ef) } var fileDescriptor_43480567057de0ef = []byte{ - // 123 bytes of a gzipped FileDescriptorProto + // 211 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xce, 0x4d, 0x2c, 0xca, 0x4e, 0x2d, 0xd1, 0x2f, 0x48, 0x2c, 0x4a, 0xcc, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, - 0xe2, 0x85, 0x08, 0xea, 0x41, 0x28, 0x29, 0x91, 0xf4, 0xfc, 0xf4, 0x7c, 0xb0, 0x8c, 0x3e, 0x88, - 0x05, 0x51, 0xa4, 0xc4, 0xc7, 0xc5, 0x16, 0x00, 0xd6, 0x64, 0xc5, 0x32, 0x63, 0x81, 0x3c, 0x83, - 0x93, 0xfe, 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 
0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x38, 0xe1, - 0xb1, 0x1c, 0xc3, 0x85, 0xc7, 0x72, 0x0c, 0x37, 0x1e, 0xcb, 0x31, 0x44, 0x89, 0x42, 0xed, 0xa8, - 0xd0, 0x87, 0x32, 0x4a, 0x2a, 0x0b, 0x52, 0x8b, 0x93, 0xd8, 0xc0, 0xe6, 0x18, 0x03, 0x02, 0x00, - 0x00, 0xff, 0xff, 0x51, 0x51, 0x92, 0x35, 0x83, 0x00, 0x00, 0x00, + 0x92, 0x2a, 0x48, 0xcd, 0x4b, 0x29, 0xcd, 0x29, 0xcd, 0xcd, 0x49, 0x4c, 0x2a, 0xd6, 0x83, 0xa8, + 0x80, 0x52, 0x52, 0x22, 0xe9, 0xf9, 0xe9, 0xf9, 0x60, 0x65, 0xfa, 0x20, 0x16, 0x44, 0x87, 0x52, + 0x1b, 0x23, 0x17, 0x5b, 0x00, 0xd8, 0x08, 0x21, 0x59, 0x2e, 0xae, 0xd4, 0xc4, 0xa2, 0xbc, 0xf8, + 0xa2, 0xc4, 0x92, 0xd4, 0x62, 0x09, 0x46, 0x05, 0x46, 0x0d, 0xce, 0x20, 0x4e, 0x90, 0x48, 0x10, + 0x48, 0x40, 0x48, 0x9a, 0x8b, 0x33, 0xa9, 0x14, 0x2a, 0x2d, 0xc1, 0x04, 0x96, 0xe5, 0x00, 0x09, + 0x80, 0x64, 0xe1, 0x92, 0xc9, 0xf9, 0x99, 0x79, 0x12, 0xcc, 0x08, 0x49, 0xe7, 0xfc, 0xcc, 0x3c, + 0x90, 0xc1, 0x10, 0x37, 0xc4, 0xa7, 0xa5, 0xa6, 0x4a, 0xb0, 0x40, 0x0c, 0x86, 0x88, 0xb8, 0xa5, + 0xa6, 0x5a, 0xb1, 0xcc, 0x58, 0x20, 0xcf, 0xe0, 0xa4, 0x7f, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, + 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0x4e, 0x78, 0x2c, 0xc7, 0x70, 0xe1, 0xb1, 0x1c, 0xc3, 0x8d, + 0xc7, 0x72, 0x0c, 0x51, 0xa2, 0x50, 0x9f, 0x56, 0xe8, 0x43, 0x19, 0x25, 0x95, 0x05, 0xa9, 0xc5, + 0x49, 0x6c, 0x60, 0x0f, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x61, 0x00, 0xc3, 0xe9, 0x09, + 0x01, 0x00, 0x00, } func (m *Params) Marshal() (dAtA []byte, err error) { @@ -97,6 +140,34 @@ func (m *Params) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.MarketFee) > 0 { + i -= len(m.MarketFee) + copy(dAtA[i:], m.MarketFee) + i = encodeVarintParams(dAtA, i, uint64(len(m.MarketFee))) + i-- + dAtA[i] = 0x22 + } + if len(m.BurnCoin) > 0 { + i -= len(m.BurnCoin) + copy(dAtA[i:], m.BurnCoin) + i = encodeVarintParams(dAtA, i, uint64(len(m.BurnCoin))) + i-- + dAtA[i] = 0x1a + } + if len(m.BurnRate) > 0 { + i -= len(m.BurnRate) + copy(dAtA[i:], m.BurnRate) + i = 
encodeVarintParams(dAtA, i, uint64(len(m.BurnRate))) + i-- + dAtA[i] = 0x12 + } + if len(m.EarnRates) > 0 { + i -= len(m.EarnRates) + copy(dAtA[i:], m.EarnRates) + i = encodeVarintParams(dAtA, i, uint64(len(m.EarnRates))) + i-- + dAtA[i] = 0xa + } return len(dAtA) - i, nil } @@ -117,6 +188,22 @@ func (m *Params) Size() (n int) { } var l int _ = l + l = len(m.EarnRates) + if l > 0 { + n += 1 + l + sovParams(uint64(l)) + } + l = len(m.BurnRate) + if l > 0 { + n += 1 + l + sovParams(uint64(l)) + } + l = len(m.BurnCoin) + if l > 0 { + n += 1 + l + sovParams(uint64(l)) + } + l = len(m.MarketFee) + if l > 0 { + n += 1 + l + sovParams(uint64(l)) + } return n } @@ -155,6 +242,134 @@ func (m *Params) Unmarshal(dAtA []byte) error { return fmt.Errorf("proto: Params: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EarnRates", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthParams + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthParams + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EarnRates = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BurnRate", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthParams + } + 
postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthParams + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BurnRate = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BurnCoin", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthParams + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthParams + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BurnCoin = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MarketFee", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthParams + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthParams + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MarketFee = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipParams(dAtA[iNdEx:]) diff --git a/x/market/types/pool.pb.go b/x/market/types/pool.pb.go new file mode 100644 index 00000000..be579417 --- /dev/null +++ b/x/market/types/pool.pb.go @@ -0,0 +1,1066 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: market/pool.proto + +package types + +import ( + fmt "fmt" + github_com_cosmos_cosmos_sdk_types "github.com/cosmos/cosmos-sdk/types" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type Pool struct { + Pair string `protobuf:"bytes,1,opt,name=pair,proto3" json:"pair,omitempty"` + Denom1 string `protobuf:"bytes,2,opt,name=denom1,proto3" json:"denom1,omitempty"` + Denom2 string `protobuf:"bytes,3,opt,name=denom2,proto3" json:"denom2,omitempty"` + Volume1 *Volume `protobuf:"bytes,4,opt,name=volume1,proto3" json:"volume1,omitempty"` + Volume2 *Volume `protobuf:"bytes,5,opt,name=volume2,proto3" json:"volume2,omitempty"` + Leaders []*Leader `protobuf:"bytes,6,rep,name=leaders,proto3" json:"leaders,omitempty"` + Drops github_com_cosmos_cosmos_sdk_types.Int `protobuf:"bytes,7,opt,name=drops,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Int" json:"drops"` + History uint64 `protobuf:"varint,8,opt,name=history,proto3" json:"history,omitempty"` +} + +func (m *Pool) Reset() { *m = Pool{} } +func (m *Pool) String() string { return proto.CompactTextString(m) } +func (*Pool) ProtoMessage() {} +func (*Pool) Descriptor() ([]byte, []int) { + return fileDescriptor_adb392bd01694df9, []int{0} +} +func (m *Pool) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Pool) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Pool.Marshal(b, m, deterministic) + 
} else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Pool) XXX_Merge(src proto.Message) { + xxx_messageInfo_Pool.Merge(m, src) +} +func (m *Pool) XXX_Size() int { + return m.Size() +} +func (m *Pool) XXX_DiscardUnknown() { + xxx_messageInfo_Pool.DiscardUnknown(m) +} + +var xxx_messageInfo_Pool proto.InternalMessageInfo + +type Leader struct { + Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + Drops github_com_cosmos_cosmos_sdk_types.Int `protobuf:"bytes,2,opt,name=drops,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Int" json:"drops"` +} + +func (m *Leader) Reset() { *m = Leader{} } +func (m *Leader) String() string { return proto.CompactTextString(m) } +func (*Leader) ProtoMessage() {} +func (*Leader) Descriptor() ([]byte, []int) { + return fileDescriptor_adb392bd01694df9, []int{1} +} +func (m *Leader) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Leader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Leader.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Leader) XXX_Merge(src proto.Message) { + xxx_messageInfo_Leader.Merge(m, src) +} +func (m *Leader) XXX_Size() int { + return m.Size() +} +func (m *Leader) XXX_DiscardUnknown() { + xxx_messageInfo_Leader.DiscardUnknown(m) +} + +var xxx_messageInfo_Leader proto.InternalMessageInfo + +type Volume struct { + Denom string `protobuf:"bytes,1,opt,name=denom,proto3" json:"denom,omitempty"` + Amount github_com_cosmos_cosmos_sdk_types.Int `protobuf:"bytes,2,opt,name=amount,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Int" json:"amount"` +} + +func (m *Volume) Reset() { *m = Volume{} } +func (m *Volume) String() string { return proto.CompactTextString(m) } +func (*Volume) 
ProtoMessage() {} +func (*Volume) Descriptor() ([]byte, []int) { + return fileDescriptor_adb392bd01694df9, []int{2} +} +func (m *Volume) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Volume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Volume.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Volume) XXX_Merge(src proto.Message) { + xxx_messageInfo_Volume.Merge(m, src) +} +func (m *Volume) XXX_Size() int { + return m.Size() +} +func (m *Volume) XXX_DiscardUnknown() { + xxx_messageInfo_Volume.DiscardUnknown(m) +} + +var xxx_messageInfo_Volume proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Pool)(nil), "pendulumlabs.market.market.Pool") + proto.RegisterType((*Leader)(nil), "pendulumlabs.market.market.Leader") + proto.RegisterType((*Volume)(nil), "pendulumlabs.market.market.Volume") +} + +func init() { proto.RegisterFile("market/pool.proto", fileDescriptor_adb392bd01694df9) } + +var fileDescriptor_adb392bd01694df9 = []byte{ + // 361 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x92, 0x31, 0x4f, 0xfa, 0x40, + 0x18, 0xc6, 0x7b, 0x50, 0xca, 0xff, 0x7f, 0x4e, 0x5e, 0xd0, 0x5c, 0x18, 0x8e, 0xa6, 0x83, 0x61, + 0xb1, 0x0d, 0x75, 0x70, 0x71, 0x22, 0xc6, 0xc4, 0xc4, 0xc1, 0x74, 0x70, 0x70, 0x2b, 0xf4, 0x04, + 0x42, 0xdb, 0xb7, 0xe9, 0x5d, 0x8d, 0x7c, 0x0b, 0x3f, 0x16, 0x23, 0x23, 0x71, 0x20, 0x02, 0x7e, + 0x10, 0xc3, 0xf5, 0x2a, 0x2e, 0xc6, 0xa8, 0xd3, 0xbd, 0xcf, 0xdb, 0xe7, 0xd7, 0xe7, 0xed, 0xf5, + 0xc5, 0x87, 0x49, 0x98, 0x4f, 0xb9, 0xf4, 0x32, 0x80, 0xd8, 0xcd, 0x72, 0x90, 0x40, 0xda, 0x19, + 0x4f, 0xa3, 0x22, 0x2e, 0x92, 0x38, 0x1c, 0x08, 0xb7, 0x7c, 0xae, 0x8f, 0x76, 0x6b, 0x04, 0x23, + 0x50, 0x36, 0x6f, 0x57, 0x95, 0x84, 0xf3, 0x56, 0xc3, 0xe6, 0x2d, 0x40, 0x4c, 0x08, 0x36, 0xb3, + 0x70, 
0x92, 0x53, 0x64, 0xa3, 0xee, 0xff, 0x40, 0xd5, 0xe4, 0x18, 0x5b, 0x11, 0x4f, 0x21, 0xe9, + 0xd1, 0x9a, 0xea, 0x6a, 0xf5, 0xd1, 0xf7, 0x69, 0xfd, 0x53, 0xdf, 0x27, 0x17, 0xb8, 0xf9, 0x08, + 0x71, 0x91, 0xf0, 0x1e, 0x35, 0x6d, 0xd4, 0x3d, 0xf0, 0x1d, 0xf7, 0xeb, 0x81, 0xdc, 0x3b, 0x65, + 0x0d, 0x2a, 0x64, 0x4f, 0xfb, 0xb4, 0xf1, 0x53, 0x5a, 0x65, 0xc7, 0x3c, 0x8c, 0x78, 0x2e, 0xa8, + 0x65, 0xd7, 0xbf, 0xa3, 0x6f, 0x94, 0x35, 0xa8, 0x10, 0x72, 0x89, 0x1b, 0x51, 0x0e, 0x99, 0xa0, + 0xcd, 0xdd, 0x07, 0xf5, 0xdd, 0xf9, 0xaa, 0x63, 0xbc, 0xac, 0x3a, 0x27, 0xa3, 0x89, 0x1c, 0x17, + 0x03, 0x77, 0x08, 0x89, 0x37, 0x04, 0x91, 0x80, 0xd0, 0xc7, 0xa9, 0x88, 0xa6, 0x9e, 0x9c, 0x65, + 0x5c, 0xb8, 0xd7, 0xa9, 0x0c, 0x4a, 0x98, 0x50, 0xdc, 0x1c, 0x4f, 0x84, 0x84, 0x7c, 0x46, 0xff, + 0xd9, 0xa8, 0x6b, 0x06, 0x95, 0x74, 0xc6, 0xd8, 0x2a, 0x23, 0x77, 0x9e, 0x30, 0x8a, 0x72, 0x2e, + 0x84, 0xbe, 0xea, 0x4a, 0xee, 0x67, 0xa8, 0xfd, 0x61, 0x06, 0xe7, 0x01, 0x5b, 0xe5, 0xd5, 0x90, + 0x16, 0x6e, 0xa8, 0xff, 0xa2, 0x73, 0x4a, 0x41, 0xae, 0xb0, 0x15, 0x26, 0x50, 0xa4, 0xf2, 0x97, + 0x31, 0x9a, 0xee, 0x9f, 0xcf, 0xd7, 0xcc, 0x58, 0xae, 0x19, 0x9a, 0x6f, 0x18, 0x5a, 0x6c, 0x18, + 0x7a, 0xdd, 0x30, 0xf4, 0xbc, 0x65, 0xc6, 0x62, 0xcb, 0x8c, 0xe5, 0x96, 0x19, 0xf7, 0x47, 0x7a, + 0x3f, 0x9f, 0x3c, 0x5d, 0xa8, 0x97, 0x0c, 0x2c, 0xb5, 0x78, 0x67, 0xef, 0x01, 0x00, 0x00, 0xff, + 0xff, 0x3d, 0x4f, 0xf9, 0x00, 0xbf, 0x02, 0x00, 0x00, +} + +func (m *Pool) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Pool) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Pool) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.History != 0 { + i = encodeVarintPool(dAtA, i, uint64(m.History)) + i-- + dAtA[i] = 0x40 + } + { + size := 
m.Drops.Size() + i -= size + if _, err := m.Drops.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintPool(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + if len(m.Leaders) > 0 { + for iNdEx := len(m.Leaders) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Leaders[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPool(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if m.Volume2 != nil { + { + size, err := m.Volume2.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPool(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.Volume1 != nil { + { + size, err := m.Volume1.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPool(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if len(m.Denom2) > 0 { + i -= len(m.Denom2) + copy(dAtA[i:], m.Denom2) + i = encodeVarintPool(dAtA, i, uint64(len(m.Denom2))) + i-- + dAtA[i] = 0x1a + } + if len(m.Denom1) > 0 { + i -= len(m.Denom1) + copy(dAtA[i:], m.Denom1) + i = encodeVarintPool(dAtA, i, uint64(len(m.Denom1))) + i-- + dAtA[i] = 0x12 + } + if len(m.Pair) > 0 { + i -= len(m.Pair) + copy(dAtA[i:], m.Pair) + i = encodeVarintPool(dAtA, i, uint64(len(m.Pair))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Leader) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Leader) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Leader) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size := m.Drops.Size() + i -= size + if _, err := m.Drops.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintPool(dAtA, i, uint64(size)) + } + i-- + 
dAtA[i] = 0x12 + if len(m.Address) > 0 { + i -= len(m.Address) + copy(dAtA[i:], m.Address) + i = encodeVarintPool(dAtA, i, uint64(len(m.Address))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Volume) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Volume) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Volume) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size := m.Amount.Size() + i -= size + if _, err := m.Amount.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintPool(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Denom) > 0 { + i -= len(m.Denom) + copy(dAtA[i:], m.Denom) + i = encodeVarintPool(dAtA, i, uint64(len(m.Denom))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintPool(dAtA []byte, offset int, v uint64) int { + offset -= sovPool(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Pool) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Pair) + if l > 0 { + n += 1 + l + sovPool(uint64(l)) + } + l = len(m.Denom1) + if l > 0 { + n += 1 + l + sovPool(uint64(l)) + } + l = len(m.Denom2) + if l > 0 { + n += 1 + l + sovPool(uint64(l)) + } + if m.Volume1 != nil { + l = m.Volume1.Size() + n += 1 + l + sovPool(uint64(l)) + } + if m.Volume2 != nil { + l = m.Volume2.Size() + n += 1 + l + sovPool(uint64(l)) + } + if len(m.Leaders) > 0 { + for _, e := range m.Leaders { + l = e.Size() + n += 1 + l + sovPool(uint64(l)) + } + } + l = m.Drops.Size() + n += 1 + l + sovPool(uint64(l)) + if m.History != 0 { + n += 1 + sovPool(uint64(m.History)) + } + return n +} + +func (m *Leader) 
Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Address) + if l > 0 { + n += 1 + l + sovPool(uint64(l)) + } + l = m.Drops.Size() + n += 1 + l + sovPool(uint64(l)) + return n +} + +func (m *Volume) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Denom) + if l > 0 { + n += 1 + l + sovPool(uint64(l)) + } + l = m.Amount.Size() + n += 1 + l + sovPool(uint64(l)) + return n +} + +func sovPool(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozPool(x uint64) (n int) { + return sovPool(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Pool) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPool + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Pool: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Pool: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pair", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPool + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPool + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPool + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pair = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: 
wrong wireType = %d for field Denom1", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPool + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPool + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPool + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Denom1 = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Denom2", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPool + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPool + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPool + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Denom2 = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Volume1", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPool + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPool + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPool + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Volume1 == nil { + m.Volume1 = &Volume{} + } + if err := m.Volume1.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = 
postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Volume2", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPool + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPool + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPool + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Volume2 == nil { + m.Volume2 = &Volume{} + } + if err := m.Volume2.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Leaders", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPool + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPool + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPool + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Leaders = append(m.Leaders, &Leader{}) + if err := m.Leaders[len(m.Leaders)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Drops", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPool + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPool + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPool 
+ } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Drops.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field History", wireType) + } + m.History = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPool + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.History |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipPool(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPool + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Leader) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPool + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Leader: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Leader: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPool + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPool + } + 
postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPool + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Address = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Drops", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPool + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPool + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPool + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Drops.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPool(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPool + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Volume) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPool + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Volume: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Volume: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for 
field Denom", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPool + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPool + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPool + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Denom = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPool + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPool + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPool + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Amount.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPool(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPool + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipPool(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPool + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 
{ + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPool + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPool + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthPool + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupPool + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthPool + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthPool = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowPool = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupPool = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/market/types/query.pb.go b/x/market/types/query.pb.go index d0126a39..516c67de 100644 --- a/x/market/types/query.pb.go +++ b/x/market/types/query.pb.go @@ -6,7 +6,7 @@ package types import ( context "context" fmt "fmt" - _ "github.com/cosmos/cosmos-sdk/types/query" + query "github.com/cosmos/cosmos-sdk/types/query" _ "github.com/gogo/protobuf/gogoproto" grpc1 "github.com/gogo/protobuf/grpc" proto "github.com/gogo/protobuf/proto" @@ -113,211 +113,12410 @@ func (m *QueryParamsResponse) GetParams() Params { return Params{} } -func init() { - proto.RegisterType((*QueryParamsRequest)(nil), "market.market.QueryParamsRequest") - proto.RegisterType((*QueryParamsResponse)(nil), "market.market.QueryParamsResponse") +type QueryGetPoolRequest struct { + Pair 
string `protobuf:"bytes,1,opt,name=pair,proto3" json:"pair,omitempty"` } -func init() { proto.RegisterFile("market/query.proto", fileDescriptor_dc4839d34748ebb1) } +func (m *QueryGetPoolRequest) Reset() { *m = QueryGetPoolRequest{} } +func (m *QueryGetPoolRequest) String() string { return proto.CompactTextString(m) } +func (*QueryGetPoolRequest) ProtoMessage() {} +func (*QueryGetPoolRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_dc4839d34748ebb1, []int{2} +} +func (m *QueryGetPoolRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryGetPoolRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryGetPoolRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryGetPoolRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryGetPoolRequest.Merge(m, src) +} +func (m *QueryGetPoolRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryGetPoolRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryGetPoolRequest.DiscardUnknown(m) +} -var fileDescriptor_dc4839d34748ebb1 = []byte{ - // 275 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x90, 0xb1, 0x4e, 0xc3, 0x30, - 0x18, 0x84, 0x13, 0x04, 0x19, 0x8c, 0x58, 0xdc, 0x46, 0xa0, 0x08, 0x0c, 0x64, 0x42, 0x0c, 0xb1, - 0xda, 0xbe, 0x41, 0x47, 0x26, 0xe8, 0xc8, 0xe6, 0xa0, 0x5f, 0x56, 0x04, 0xf1, 0xef, 0xc6, 0x2e, - 0xd0, 0x95, 0x27, 0x40, 0xe2, 0xa5, 0x3a, 0x56, 0x62, 0x61, 0x42, 0x28, 0xe1, 0x41, 0x50, 0x6d, - 0x33, 0x04, 0xa4, 0x4e, 0x89, 0xee, 0xff, 0xee, 0x74, 0x67, 0x42, 0x6b, 0xd1, 0xdc, 0x83, 0xe5, - 0xf3, 0x05, 0x34, 0xcb, 0x42, 0x37, 0x68, 0x91, 0x1e, 0x78, 0xad, 0xf0, 0x9f, 0x6c, 0x28, 0x51, - 0xa2, 0xbb, 0xf0, 0xcd, 0x9f, 0x87, 0xb2, 0x63, 0x89, 0x28, 0x1f, 0x80, 0x0b, 0x5d, 0x71, 0xa1, - 0x14, 0x5a, 0x61, 0x2b, 
0x54, 0x26, 0x5c, 0x2f, 0xef, 0xd0, 0xd4, 0x68, 0x78, 0x29, 0x0c, 0xf8, - 0x6c, 0xfe, 0x38, 0x2a, 0xc1, 0x8a, 0x11, 0xd7, 0x42, 0x56, 0xca, 0xc1, 0x81, 0x1d, 0x84, 0x0a, - 0x5a, 0x34, 0xa2, 0x0e, 0x01, 0xf9, 0x90, 0xd0, 0x9b, 0x8d, 0xed, 0xda, 0x89, 0x33, 0x98, 0x2f, - 0xc0, 0xd8, 0xfc, 0x8a, 0x0c, 0x7a, 0xaa, 0xd1, 0xa8, 0x0c, 0xd0, 0x09, 0x49, 0xbc, 0xf9, 0x28, - 0x3e, 0x8b, 0x2f, 0xf6, 0xc7, 0x69, 0xd1, 0x5b, 0x50, 0x78, 0x7c, 0xba, 0xbb, 0xfa, 0x3c, 0x8d, - 0x66, 0x01, 0x1d, 0x3f, 0x91, 0x3d, 0x97, 0x45, 0x15, 0x49, 0x3c, 0x40, 0xcf, 0xff, 0xf8, 0xfe, - 0x37, 0xc8, 0xf2, 0x6d, 0x88, 0xaf, 0x93, 0x9f, 0xbc, 0xbc, 0x7f, 0xbf, 0xed, 0x1c, 0xd2, 0x94, - 0x87, 0x65, 0xbd, 0x81, 0x53, 0xbe, 0x6a, 0x59, 0xbc, 0x6e, 0x59, 0xfc, 0xd5, 0xb2, 0xf8, 0xb5, - 0x63, 0xd1, 0xba, 0x63, 0xd1, 0x47, 0xc7, 0xa2, 0xdb, 0x34, 0x80, 0xcf, 0xbf, 0x0e, 0xbb, 0xd4, - 0x60, 0xca, 0xc4, 0x3d, 0xc9, 0xe4, 0x27, 0x00, 0x00, 0xff, 0xff, 0xc1, 0xf1, 0x2c, 0xb9, 0xac, - 0x01, 0x00, 0x00, +var xxx_messageInfo_QueryGetPoolRequest proto.InternalMessageInfo + +func (m *QueryGetPoolRequest) GetPair() string { + if m != nil { + return m.Pair + } + return "" } -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn +type QueryGetPoolResponse struct { + Pool Pool `protobuf:"bytes,1,opt,name=pool,proto3" json:"pool"` +} -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. 
-const _ = grpc.SupportPackageIsVersion4 +func (m *QueryGetPoolResponse) Reset() { *m = QueryGetPoolResponse{} } +func (m *QueryGetPoolResponse) String() string { return proto.CompactTextString(m) } +func (*QueryGetPoolResponse) ProtoMessage() {} +func (*QueryGetPoolResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_dc4839d34748ebb1, []int{3} +} +func (m *QueryGetPoolResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryGetPoolResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryGetPoolResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryGetPoolResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryGetPoolResponse.Merge(m, src) +} +func (m *QueryGetPoolResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryGetPoolResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryGetPoolResponse.DiscardUnknown(m) +} -// QueryClient is the client API for Query service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type QueryClient interface { - // Parameters queries the parameters of the module. 
- Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) +var xxx_messageInfo_QueryGetPoolResponse proto.InternalMessageInfo + +func (m *QueryGetPoolResponse) GetPool() Pool { + if m != nil { + return m.Pool + } + return Pool{} } -type queryClient struct { - cc grpc1.ClientConn +type QueryAllPoolRequest struct { + Pagination *query.PageRequest `protobuf:"bytes,1,opt,name=pagination,proto3" json:"pagination,omitempty"` } -func NewQueryClient(cc grpc1.ClientConn) QueryClient { - return &queryClient{cc} +func (m *QueryAllPoolRequest) Reset() { *m = QueryAllPoolRequest{} } +func (m *QueryAllPoolRequest) String() string { return proto.CompactTextString(m) } +func (*QueryAllPoolRequest) ProtoMessage() {} +func (*QueryAllPoolRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_dc4839d34748ebb1, []int{4} +} +func (m *QueryAllPoolRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryAllPoolRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryAllPoolRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryAllPoolRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryAllPoolRequest.Merge(m, src) +} +func (m *QueryAllPoolRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryAllPoolRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryAllPoolRequest.DiscardUnknown(m) } -func (c *queryClient) Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) { - out := new(QueryParamsResponse) - err := c.cc.Invoke(ctx, "/market.market.Query/Params", in, out, opts...) 
- if err != nil { - return nil, err +var xxx_messageInfo_QueryAllPoolRequest proto.InternalMessageInfo + +func (m *QueryAllPoolRequest) GetPagination() *query.PageRequest { + if m != nil { + return m.Pagination } - return out, nil + return nil } -// QueryServer is the server API for Query service. -type QueryServer interface { - // Parameters queries the parameters of the module. - Params(context.Context, *QueryParamsRequest) (*QueryParamsResponse, error) +type QueryAllPoolResponse struct { + Pool []Pool `protobuf:"bytes,1,rep,name=pool,proto3" json:"pool"` + Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` } -// UnimplementedQueryServer can be embedded to have forward compatible implementations. -type UnimplementedQueryServer struct { +func (m *QueryAllPoolResponse) Reset() { *m = QueryAllPoolResponse{} } +func (m *QueryAllPoolResponse) String() string { return proto.CompactTextString(m) } +func (*QueryAllPoolResponse) ProtoMessage() {} +func (*QueryAllPoolResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_dc4839d34748ebb1, []int{5} +} +func (m *QueryAllPoolResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryAllPoolResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryAllPoolResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryAllPoolResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryAllPoolResponse.Merge(m, src) +} +func (m *QueryAllPoolResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryAllPoolResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryAllPoolResponse.DiscardUnknown(m) } -func (*UnimplementedQueryServer) Params(ctx context.Context, req *QueryParamsRequest) (*QueryParamsResponse, error) { - return nil, 
status.Errorf(codes.Unimplemented, "method Params not implemented") +var xxx_messageInfo_QueryAllPoolResponse proto.InternalMessageInfo + +func (m *QueryAllPoolResponse) GetPool() []Pool { + if m != nil { + return m.Pool + } + return nil } -func RegisterQueryServer(s grpc1.Server, srv QueryServer) { - s.RegisterService(&_Query_serviceDesc, srv) +func (m *QueryAllPoolResponse) GetPagination() *query.PageResponse { + if m != nil { + return m.Pagination + } + return nil } -func _Query_Params_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryParamsRequest) - if err := dec(in); err != nil { - return nil, err +type QueryVolumeRequest struct { + Denom string `protobuf:"bytes,1,opt,name=denom,proto3" json:"denom,omitempty"` +} + +func (m *QueryVolumeRequest) Reset() { *m = QueryVolumeRequest{} } +func (m *QueryVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*QueryVolumeRequest) ProtoMessage() {} +func (*QueryVolumeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_dc4839d34748ebb1, []int{6} +} +func (m *QueryVolumeRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryVolumeRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } - if interceptor == nil { - return srv.(QueryServer).Params(ctx, in) +} +func (m *QueryVolumeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryVolumeRequest.Merge(m, src) +} +func (m *QueryVolumeRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryVolumeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryVolumeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryVolumeRequest proto.InternalMessageInfo + +func (m 
*QueryVolumeRequest) GetDenom() string { + if m != nil { + return m.Denom } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/market.market.Query/Params", + return "" +} + +type QueryVolumeResponse struct { + Amount string `protobuf:"bytes,1,opt,name=amount,proto3" json:"amount,omitempty"` +} + +func (m *QueryVolumeResponse) Reset() { *m = QueryVolumeResponse{} } +func (m *QueryVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*QueryVolumeResponse) ProtoMessage() {} +func (*QueryVolumeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_dc4839d34748ebb1, []int{7} +} +func (m *QueryVolumeResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryVolumeResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).Params(ctx, req.(*QueryParamsRequest)) +} +func (m *QueryVolumeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryVolumeResponse.Merge(m, src) +} +func (m *QueryVolumeResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryVolumeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryVolumeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryVolumeResponse proto.InternalMessageInfo + +func (m *QueryVolumeResponse) GetAmount() string { + if m != nil { + return m.Amount } - return interceptor(ctx, in, info, handler) + return "" } -var _Query_serviceDesc = grpc.ServiceDesc{ - ServiceName: "market.market.Query", - HandlerType: (*QueryServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Params", - Handler: _Query_Params_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "market/query.proto", +type 
QueryAllVolumeRequest struct { + Pagination *query.PageRequest `protobuf:"bytes,1,opt,name=pagination,proto3" json:"pagination,omitempty"` } -func (m *QueryParamsRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *QueryAllVolumeRequest) Reset() { *m = QueryAllVolumeRequest{} } +func (m *QueryAllVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*QueryAllVolumeRequest) ProtoMessage() {} +func (*QueryAllVolumeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_dc4839d34748ebb1, []int{8} +} +func (m *QueryAllVolumeRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryAllVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryAllVolumeRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } - return dAtA[:n], nil +} +func (m *QueryAllVolumeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryAllVolumeRequest.Merge(m, src) +} +func (m *QueryAllVolumeRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryAllVolumeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryAllVolumeRequest.DiscardUnknown(m) } -func (m *QueryParamsRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +var xxx_messageInfo_QueryAllVolumeRequest proto.InternalMessageInfo + +func (m *QueryAllVolumeRequest) GetPagination() *query.PageRequest { + if m != nil { + return m.Pagination + } + return nil } -func (m *QueryParamsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil +type QueryAllVolumeResponse struct { + Volumes []Volume `protobuf:"bytes,1,rep,name=volumes,proto3" 
json:"volumes"` + Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` } -func (m *QueryParamsResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *QueryAllVolumeResponse) Reset() { *m = QueryAllVolumeResponse{} } +func (m *QueryAllVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*QueryAllVolumeResponse) ProtoMessage() {} +func (*QueryAllVolumeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_dc4839d34748ebb1, []int{9} +} +func (m *QueryAllVolumeResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryAllVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryAllVolumeResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } - return dAtA[:n], nil +} +func (m *QueryAllVolumeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryAllVolumeResponse.Merge(m, src) +} +func (m *QueryAllVolumeResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryAllVolumeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryAllVolumeResponse.DiscardUnknown(m) } -func (m *QueryParamsResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +var xxx_messageInfo_QueryAllVolumeResponse proto.InternalMessageInfo + +func (m *QueryAllVolumeResponse) GetVolumes() []Volume { + if m != nil { + return m.Volumes + } + return nil } -func (m *QueryParamsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err +func (m *QueryAllVolumeResponse) GetPagination() 
*query.PageResponse { + if m != nil { + return m.Pagination + } + return nil +} + +type QueryBurnedRequest struct { +} + +func (m *QueryBurnedRequest) Reset() { *m = QueryBurnedRequest{} } +func (m *QueryBurnedRequest) String() string { return proto.CompactTextString(m) } +func (*QueryBurnedRequest) ProtoMessage() {} +func (*QueryBurnedRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_dc4839d34748ebb1, []int{10} +} +func (m *QueryBurnedRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryBurnedRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryBurnedRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryBurnedRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryBurnedRequest.Merge(m, src) +} +func (m *QueryBurnedRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryBurnedRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryBurnedRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryBurnedRequest proto.InternalMessageInfo + +type QueryBurnedResponse struct { + Denom string `protobuf:"bytes,1,opt,name=denom,proto3" json:"denom,omitempty"` + Amount string `protobuf:"bytes,2,opt,name=amount,proto3" json:"amount,omitempty"` +} + +func (m *QueryBurnedResponse) Reset() { *m = QueryBurnedResponse{} } +func (m *QueryBurnedResponse) String() string { return proto.CompactTextString(m) } +func (*QueryBurnedResponse) ProtoMessage() {} +func (*QueryBurnedResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_dc4839d34748ebb1, []int{11} +} +func (m *QueryBurnedResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryBurnedResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryBurnedResponse.Marshal(b, m, 
deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryBurnedResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryBurnedResponse.Merge(m, src) +} +func (m *QueryBurnedResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryBurnedResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryBurnedResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryBurnedResponse proto.InternalMessageInfo + +func (m *QueryBurnedResponse) GetDenom() string { + if m != nil { + return m.Denom + } + return "" +} + +func (m *QueryBurnedResponse) GetAmount() string { + if m != nil { + return m.Amount + } + return "" +} + +type QueryDropRequest struct { + Uid uint64 `protobuf:"varint,1,opt,name=uid,proto3" json:"uid,omitempty"` +} + +func (m *QueryDropRequest) Reset() { *m = QueryDropRequest{} } +func (m *QueryDropRequest) String() string { return proto.CompactTextString(m) } +func (*QueryDropRequest) ProtoMessage() {} +func (*QueryDropRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_dc4839d34748ebb1, []int{12} +} +func (m *QueryDropRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryDropRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryDropRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryDropRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryDropRequest.Merge(m, src) +} +func (m *QueryDropRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryDropRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryDropRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryDropRequest proto.InternalMessageInfo + +func (m *QueryDropRequest) GetUid() uint64 { + if m != nil { + return m.Uid + } + return 0 +} + +type 
QueryDropCoinRequest struct { + DenomA string `protobuf:"bytes,1,opt,name=denomA,proto3" json:"denomA,omitempty"` + DenomB string `protobuf:"bytes,2,opt,name=denomB,proto3" json:"denomB,omitempty"` + AmountA string `protobuf:"bytes,3,opt,name=amountA,proto3" json:"amountA,omitempty"` +} + +func (m *QueryDropCoinRequest) Reset() { *m = QueryDropCoinRequest{} } +func (m *QueryDropCoinRequest) String() string { return proto.CompactTextString(m) } +func (*QueryDropCoinRequest) ProtoMessage() {} +func (*QueryDropCoinRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_dc4839d34748ebb1, []int{13} +} +func (m *QueryDropCoinRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryDropCoinRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryDropCoinRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryDropCoinRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryDropCoinRequest.Merge(m, src) +} +func (m *QueryDropCoinRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryDropCoinRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryDropCoinRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryDropCoinRequest proto.InternalMessageInfo + +func (m *QueryDropCoinRequest) GetDenomA() string { + if m != nil { + return m.DenomA + } + return "" +} + +func (m *QueryDropCoinRequest) GetDenomB() string { + if m != nil { + return m.DenomB + } + return "" +} + +func (m *QueryDropCoinRequest) GetAmountA() string { + if m != nil { + return m.AmountA + } + return "" +} + +type QueryDropCoinResponse struct { + Drops string `protobuf:"bytes,1,opt,name=drops,proto3" json:"drops,omitempty"` + AmountB string `protobuf:"bytes,2,opt,name=amountB,proto3" json:"amountB,omitempty"` +} + +func (m *QueryDropCoinResponse) Reset() { *m = 
QueryDropCoinResponse{} } +func (m *QueryDropCoinResponse) String() string { return proto.CompactTextString(m) } +func (*QueryDropCoinResponse) ProtoMessage() {} +func (*QueryDropCoinResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_dc4839d34748ebb1, []int{14} +} +func (m *QueryDropCoinResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryDropCoinResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryDropCoinResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryDropCoinResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryDropCoinResponse.Merge(m, src) +} +func (m *QueryDropCoinResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryDropCoinResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryDropCoinResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryDropCoinResponse proto.InternalMessageInfo + +func (m *QueryDropCoinResponse) GetDrops() string { + if m != nil { + return m.Drops + } + return "" +} + +func (m *QueryDropCoinResponse) GetAmountB() string { + if m != nil { + return m.AmountB + } + return "" +} + +type QueryDropResponse struct { + Drop Drop `protobuf:"bytes,1,opt,name=drop,proto3" json:"drop"` +} + +func (m *QueryDropResponse) Reset() { *m = QueryDropResponse{} } +func (m *QueryDropResponse) String() string { return proto.CompactTextString(m) } +func (*QueryDropResponse) ProtoMessage() {} +func (*QueryDropResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_dc4839d34748ebb1, []int{15} +} +func (m *QueryDropResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryDropResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryDropResponse.Marshal(b, m, deterministic) + } else { + b = 
b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryDropResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryDropResponse.Merge(m, src) +} +func (m *QueryDropResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryDropResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryDropResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryDropResponse proto.InternalMessageInfo + +func (m *QueryDropResponse) GetDrop() Drop { + if m != nil { + return m.Drop + } + return Drop{} +} + +type QueryDropAmountsRequest struct { + Uid uint64 `protobuf:"varint,1,opt,name=uid,proto3" json:"uid,omitempty"` +} + +func (m *QueryDropAmountsRequest) Reset() { *m = QueryDropAmountsRequest{} } +func (m *QueryDropAmountsRequest) String() string { return proto.CompactTextString(m) } +func (*QueryDropAmountsRequest) ProtoMessage() {} +func (*QueryDropAmountsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_dc4839d34748ebb1, []int{16} +} +func (m *QueryDropAmountsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryDropAmountsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryDropAmountsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryDropAmountsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryDropAmountsRequest.Merge(m, src) +} +func (m *QueryDropAmountsRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryDropAmountsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryDropAmountsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryDropAmountsRequest proto.InternalMessageInfo + +func (m *QueryDropAmountsRequest) GetUid() uint64 { + if m != nil { + return m.Uid + } + return 0 +} + +type QueryDropAmountsResponse struct { + 
Denom1 string `protobuf:"bytes,1,opt,name=denom1,proto3" json:"denom1,omitempty"` + Denom2 string `protobuf:"bytes,2,opt,name=denom2,proto3" json:"denom2,omitempty"` + Amount1 string `protobuf:"bytes,3,opt,name=amount1,proto3" json:"amount1,omitempty"` + Amount2 string `protobuf:"bytes,4,opt,name=amount2,proto3" json:"amount2,omitempty"` +} + +func (m *QueryDropAmountsResponse) Reset() { *m = QueryDropAmountsResponse{} } +func (m *QueryDropAmountsResponse) String() string { return proto.CompactTextString(m) } +func (*QueryDropAmountsResponse) ProtoMessage() {} +func (*QueryDropAmountsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_dc4839d34748ebb1, []int{17} +} +func (m *QueryDropAmountsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryDropAmountsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryDropAmountsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryDropAmountsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryDropAmountsResponse.Merge(m, src) +} +func (m *QueryDropAmountsResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryDropAmountsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryDropAmountsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryDropAmountsResponse proto.InternalMessageInfo + +func (m *QueryDropAmountsResponse) GetDenom1() string { + if m != nil { + return m.Denom1 + } + return "" +} + +func (m *QueryDropAmountsResponse) GetDenom2() string { + if m != nil { + return m.Denom2 + } + return "" +} + +func (m *QueryDropAmountsResponse) GetAmount1() string { + if m != nil { + return m.Amount1 + } + return "" +} + +func (m *QueryDropAmountsResponse) GetAmount2() string { + if m != nil { + return m.Amount2 + } + return "" +} + +type QueryDropsToCoinsRequest 
struct { + Pair string `protobuf:"bytes,1,opt,name=pair,proto3" json:"pair,omitempty"` + Drops string `protobuf:"bytes,2,opt,name=drops,proto3" json:"drops,omitempty"` +} + +func (m *QueryDropsToCoinsRequest) Reset() { *m = QueryDropsToCoinsRequest{} } +func (m *QueryDropsToCoinsRequest) String() string { return proto.CompactTextString(m) } +func (*QueryDropsToCoinsRequest) ProtoMessage() {} +func (*QueryDropsToCoinsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_dc4839d34748ebb1, []int{18} +} +func (m *QueryDropsToCoinsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryDropsToCoinsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryDropsToCoinsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryDropsToCoinsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryDropsToCoinsRequest.Merge(m, src) +} +func (m *QueryDropsToCoinsRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryDropsToCoinsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryDropsToCoinsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryDropsToCoinsRequest proto.InternalMessageInfo + +func (m *QueryDropsToCoinsRequest) GetPair() string { + if m != nil { + return m.Pair + } + return "" +} + +func (m *QueryDropsToCoinsRequest) GetDrops() string { + if m != nil { + return m.Drops + } + return "" +} + +type QueryDropPairsRequest struct { + Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` +} + +func (m *QueryDropPairsRequest) Reset() { *m = QueryDropPairsRequest{} } +func (m *QueryDropPairsRequest) String() string { return proto.CompactTextString(m) } +func (*QueryDropPairsRequest) ProtoMessage() {} +func (*QueryDropPairsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_dc4839d34748ebb1, 
[]int{19} +} +func (m *QueryDropPairsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryDropPairsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryDropPairsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryDropPairsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryDropPairsRequest.Merge(m, src) +} +func (m *QueryDropPairsRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryDropPairsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryDropPairsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryDropPairsRequest proto.InternalMessageInfo + +func (m *QueryDropPairsRequest) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +type QueryDropPairsResponse struct { + Pairs []string `protobuf:"bytes,1,rep,name=pairs,proto3" json:"pairs,omitempty"` +} + +func (m *QueryDropPairsResponse) Reset() { *m = QueryDropPairsResponse{} } +func (m *QueryDropPairsResponse) String() string { return proto.CompactTextString(m) } +func (*QueryDropPairsResponse) ProtoMessage() {} +func (*QueryDropPairsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_dc4839d34748ebb1, []int{20} +} +func (m *QueryDropPairsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryDropPairsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryDropPairsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryDropPairsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryDropPairsResponse.Merge(m, src) +} +func (m *QueryDropPairsResponse) XXX_Size() int { + return m.Size() +} +func 
(m *QueryDropPairsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryDropPairsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryDropPairsResponse proto.InternalMessageInfo + +func (m *QueryDropPairsResponse) GetPairs() []string { + if m != nil { + return m.Pairs + } + return nil +} + +type QueryDropOwnerPairRequest struct { + Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + Pair string `protobuf:"bytes,2,opt,name=pair,proto3" json:"pair,omitempty"` + Pagination *query.PageRequest `protobuf:"bytes,3,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryDropOwnerPairRequest) Reset() { *m = QueryDropOwnerPairRequest{} } +func (m *QueryDropOwnerPairRequest) String() string { return proto.CompactTextString(m) } +func (*QueryDropOwnerPairRequest) ProtoMessage() {} +func (*QueryDropOwnerPairRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_dc4839d34748ebb1, []int{21} +} +func (m *QueryDropOwnerPairRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryDropOwnerPairRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryDropOwnerPairRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryDropOwnerPairRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryDropOwnerPairRequest.Merge(m, src) +} +func (m *QueryDropOwnerPairRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryDropOwnerPairRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryDropOwnerPairRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryDropOwnerPairRequest proto.InternalMessageInfo + +func (m *QueryDropOwnerPairRequest) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +func (m *QueryDropOwnerPairRequest) GetPair() string { + if m != nil { + return m.Pair 
+ } + return "" +} + +func (m *QueryDropOwnerPairRequest) GetPagination() *query.PageRequest { + if m != nil { + return m.Pagination + } + return nil +} + +type QueryDropOwnerPairSumRequest struct { + Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + Pair string `protobuf:"bytes,2,opt,name=pair,proto3" json:"pair,omitempty"` +} + +func (m *QueryDropOwnerPairSumRequest) Reset() { *m = QueryDropOwnerPairSumRequest{} } +func (m *QueryDropOwnerPairSumRequest) String() string { return proto.CompactTextString(m) } +func (*QueryDropOwnerPairSumRequest) ProtoMessage() {} +func (*QueryDropOwnerPairSumRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_dc4839d34748ebb1, []int{22} +} +func (m *QueryDropOwnerPairSumRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryDropOwnerPairSumRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryDropOwnerPairSumRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryDropOwnerPairSumRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryDropOwnerPairSumRequest.Merge(m, src) +} +func (m *QueryDropOwnerPairSumRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryDropOwnerPairSumRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryDropOwnerPairSumRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryDropOwnerPairSumRequest proto.InternalMessageInfo + +func (m *QueryDropOwnerPairSumRequest) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +func (m *QueryDropOwnerPairSumRequest) GetPair() string { + if m != nil { + return m.Pair + } + return "" +} + +type QueryDropOwnerPairSumResponse struct { + Sum string `protobuf:"bytes,1,opt,name=sum,proto3" json:"sum,omitempty"` +} + +func (m *QueryDropOwnerPairSumResponse) Reset() { *m 
= QueryDropOwnerPairSumResponse{} } +func (m *QueryDropOwnerPairSumResponse) String() string { return proto.CompactTextString(m) } +func (*QueryDropOwnerPairSumResponse) ProtoMessage() {} +func (*QueryDropOwnerPairSumResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_dc4839d34748ebb1, []int{23} +} +func (m *QueryDropOwnerPairSumResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryDropOwnerPairSumResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryDropOwnerPairSumResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryDropOwnerPairSumResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryDropOwnerPairSumResponse.Merge(m, src) +} +func (m *QueryDropOwnerPairSumResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryDropOwnerPairSumResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryDropOwnerPairSumResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryDropOwnerPairSumResponse proto.InternalMessageInfo + +func (m *QueryDropOwnerPairSumResponse) GetSum() string { + if m != nil { + return m.Sum + } + return "" +} + +type QueryDropOwnerPairUidsRequest struct { + Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + Pair string `protobuf:"bytes,2,opt,name=pair,proto3" json:"pair,omitempty"` + Pagination *query.PageRequest `protobuf:"bytes,3,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryDropOwnerPairUidsRequest) Reset() { *m = QueryDropOwnerPairUidsRequest{} } +func (m *QueryDropOwnerPairUidsRequest) String() string { return proto.CompactTextString(m) } +func (*QueryDropOwnerPairUidsRequest) ProtoMessage() {} +func (*QueryDropOwnerPairUidsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_dc4839d34748ebb1, []int{24} +} +func (m 
*QueryDropOwnerPairUidsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryDropOwnerPairUidsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryDropOwnerPairUidsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryDropOwnerPairUidsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryDropOwnerPairUidsRequest.Merge(m, src) +} +func (m *QueryDropOwnerPairUidsRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryDropOwnerPairUidsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryDropOwnerPairUidsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryDropOwnerPairUidsRequest proto.InternalMessageInfo + +func (m *QueryDropOwnerPairUidsRequest) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +func (m *QueryDropOwnerPairUidsRequest) GetPair() string { + if m != nil { + return m.Pair + } + return "" +} + +func (m *QueryDropOwnerPairUidsRequest) GetPagination() *query.PageRequest { + if m != nil { + return m.Pagination + } + return nil +} + +type QueryUidsResponse struct { + Uids []uint64 `protobuf:"varint,1,rep,packed,name=uids,proto3" json:"uids,omitempty"` + Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryUidsResponse) Reset() { *m = QueryUidsResponse{} } +func (m *QueryUidsResponse) String() string { return proto.CompactTextString(m) } +func (*QueryUidsResponse) ProtoMessage() {} +func (*QueryUidsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_dc4839d34748ebb1, []int{25} +} +func (m *QueryUidsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryUidsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return 
xxx_messageInfo_QueryUidsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryUidsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryUidsResponse.Merge(m, src) +} +func (m *QueryUidsResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryUidsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryUidsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryUidsResponse proto.InternalMessageInfo + +func (m *QueryUidsResponse) GetUids() []uint64 { + if m != nil { + return m.Uids + } + return nil +} + +func (m *QueryUidsResponse) GetPagination() *query.PageResponse { + if m != nil { + return m.Pagination + } + return nil +} + +type QueryDropOwnerPairDetailRequest struct { + Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + Pair string `protobuf:"bytes,2,opt,name=pair,proto3" json:"pair,omitempty"` + Pagination *query.PageRequest `protobuf:"bytes,3,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryDropOwnerPairDetailRequest) Reset() { *m = QueryDropOwnerPairDetailRequest{} } +func (m *QueryDropOwnerPairDetailRequest) String() string { return proto.CompactTextString(m) } +func (*QueryDropOwnerPairDetailRequest) ProtoMessage() {} +func (*QueryDropOwnerPairDetailRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_dc4839d34748ebb1, []int{26} +} +func (m *QueryDropOwnerPairDetailRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryDropOwnerPairDetailRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryDropOwnerPairDetailRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryDropOwnerPairDetailRequest) XXX_Merge(src 
proto.Message) { + xxx_messageInfo_QueryDropOwnerPairDetailRequest.Merge(m, src) +} +func (m *QueryDropOwnerPairDetailRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryDropOwnerPairDetailRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryDropOwnerPairDetailRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryDropOwnerPairDetailRequest proto.InternalMessageInfo + +func (m *QueryDropOwnerPairDetailRequest) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +func (m *QueryDropOwnerPairDetailRequest) GetPair() string { + if m != nil { + return m.Pair + } + return "" +} + +func (m *QueryDropOwnerPairDetailRequest) GetPagination() *query.PageRequest { + if m != nil { + return m.Pagination + } + return nil +} + +type QueryAllDropRequest struct { + Pagination *query.PageRequest `protobuf:"bytes,1,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryAllDropRequest) Reset() { *m = QueryAllDropRequest{} } +func (m *QueryAllDropRequest) String() string { return proto.CompactTextString(m) } +func (*QueryAllDropRequest) ProtoMessage() {} +func (*QueryAllDropRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_dc4839d34748ebb1, []int{27} +} +func (m *QueryAllDropRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryAllDropRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryAllDropRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryAllDropRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryAllDropRequest.Merge(m, src) +} +func (m *QueryAllDropRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryAllDropRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryAllDropRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryAllDropRequest 
proto.InternalMessageInfo + +func (m *QueryAllDropRequest) GetPagination() *query.PageRequest { + if m != nil { + return m.Pagination + } + return nil +} + +type QueryDropsResponse struct { + Drops []Drop `protobuf:"bytes,1,rep,name=drops,proto3" json:"drops"` + Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryDropsResponse) Reset() { *m = QueryDropsResponse{} } +func (m *QueryDropsResponse) String() string { return proto.CompactTextString(m) } +func (*QueryDropsResponse) ProtoMessage() {} +func (*QueryDropsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_dc4839d34748ebb1, []int{28} +} +func (m *QueryDropsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryDropsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryDropsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryDropsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryDropsResponse.Merge(m, src) +} +func (m *QueryDropsResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryDropsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryDropsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryDropsResponse proto.InternalMessageInfo + +func (m *QueryDropsResponse) GetDrops() []Drop { + if m != nil { + return m.Drops + } + return nil +} + +func (m *QueryDropsResponse) GetPagination() *query.PageResponse { + if m != nil { + return m.Pagination + } + return nil +} + +type QueryGetMemberRequest struct { + DenomA string `protobuf:"bytes,2,opt,name=denomA,proto3" json:"denomA,omitempty"` + DenomB string `protobuf:"bytes,3,opt,name=denomB,proto3" json:"denomB,omitempty"` +} + +func (m *QueryGetMemberRequest) Reset() { *m = QueryGetMemberRequest{} } +func (m 
*QueryGetMemberRequest) String() string { return proto.CompactTextString(m) } +func (*QueryGetMemberRequest) ProtoMessage() {} +func (*QueryGetMemberRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_dc4839d34748ebb1, []int{29} +} +func (m *QueryGetMemberRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryGetMemberRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryGetMemberRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryGetMemberRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryGetMemberRequest.Merge(m, src) +} +func (m *QueryGetMemberRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryGetMemberRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryGetMemberRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryGetMemberRequest proto.InternalMessageInfo + +func (m *QueryGetMemberRequest) GetDenomA() string { + if m != nil { + return m.DenomA + } + return "" +} + +func (m *QueryGetMemberRequest) GetDenomB() string { + if m != nil { + return m.DenomB + } + return "" +} + +type QueryGetMemberResponse struct { + Member Member `protobuf:"bytes,1,opt,name=member,proto3" json:"member"` +} + +func (m *QueryGetMemberResponse) Reset() { *m = QueryGetMemberResponse{} } +func (m *QueryGetMemberResponse) String() string { return proto.CompactTextString(m) } +func (*QueryGetMemberResponse) ProtoMessage() {} +func (*QueryGetMemberResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_dc4839d34748ebb1, []int{30} +} +func (m *QueryGetMemberResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryGetMemberResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryGetMemberResponse.Marshal(b, m, deterministic) + } 
else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryGetMemberResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryGetMemberResponse.Merge(m, src) +} +func (m *QueryGetMemberResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryGetMemberResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryGetMemberResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryGetMemberResponse proto.InternalMessageInfo + +func (m *QueryGetMemberResponse) GetMember() Member { + if m != nil { + return m.Member + } + return Member{} +} + +type QueryAllMemberRequest struct { + Pagination *query.PageRequest `protobuf:"bytes,1,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryAllMemberRequest) Reset() { *m = QueryAllMemberRequest{} } +func (m *QueryAllMemberRequest) String() string { return proto.CompactTextString(m) } +func (*QueryAllMemberRequest) ProtoMessage() {} +func (*QueryAllMemberRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_dc4839d34748ebb1, []int{31} +} +func (m *QueryAllMemberRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryAllMemberRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryAllMemberRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryAllMemberRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryAllMemberRequest.Merge(m, src) +} +func (m *QueryAllMemberRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryAllMemberRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryAllMemberRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryAllMemberRequest proto.InternalMessageInfo + +func (m *QueryAllMemberRequest) GetPagination() *query.PageRequest { + if m != nil { + 
return m.Pagination + } + return nil +} + +type QueryAllMemberResponse struct { + Member []Member `protobuf:"bytes,1,rep,name=member,proto3" json:"member"` + Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryAllMemberResponse) Reset() { *m = QueryAllMemberResponse{} } +func (m *QueryAllMemberResponse) String() string { return proto.CompactTextString(m) } +func (*QueryAllMemberResponse) ProtoMessage() {} +func (*QueryAllMemberResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_dc4839d34748ebb1, []int{32} +} +func (m *QueryAllMemberResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryAllMemberResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryAllMemberResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryAllMemberResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryAllMemberResponse.Merge(m, src) +} +func (m *QueryAllMemberResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryAllMemberResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryAllMemberResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryAllMemberResponse proto.InternalMessageInfo + +func (m *QueryAllMemberResponse) GetMember() []Member { + if m != nil { + return m.Member + } + return nil +} + +func (m *QueryAllMemberResponse) GetPagination() *query.PageResponse { + if m != nil { + return m.Pagination + } + return nil +} + +type QueryGetBurningsRequest struct { + Denom string `protobuf:"bytes,1,opt,name=denom,proto3" json:"denom,omitempty"` +} + +func (m *QueryGetBurningsRequest) Reset() { *m = QueryGetBurningsRequest{} } +func (m *QueryGetBurningsRequest) String() string { return proto.CompactTextString(m) } +func (*QueryGetBurningsRequest) ProtoMessage() {} 
+func (*QueryGetBurningsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_dc4839d34748ebb1, []int{33} +} +func (m *QueryGetBurningsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryGetBurningsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryGetBurningsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryGetBurningsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryGetBurningsRequest.Merge(m, src) +} +func (m *QueryGetBurningsRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryGetBurningsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryGetBurningsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryGetBurningsRequest proto.InternalMessageInfo + +func (m *QueryGetBurningsRequest) GetDenom() string { + if m != nil { + return m.Denom + } + return "" +} + +type QueryGetBurningsResponse struct { + Burnings Burnings `protobuf:"bytes,1,opt,name=burnings,proto3" json:"burnings"` +} + +func (m *QueryGetBurningsResponse) Reset() { *m = QueryGetBurningsResponse{} } +func (m *QueryGetBurningsResponse) String() string { return proto.CompactTextString(m) } +func (*QueryGetBurningsResponse) ProtoMessage() {} +func (*QueryGetBurningsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_dc4839d34748ebb1, []int{34} +} +func (m *QueryGetBurningsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryGetBurningsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryGetBurningsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryGetBurningsResponse) XXX_Merge(src 
proto.Message) { + xxx_messageInfo_QueryGetBurningsResponse.Merge(m, src) +} +func (m *QueryGetBurningsResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryGetBurningsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryGetBurningsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryGetBurningsResponse proto.InternalMessageInfo + +func (m *QueryGetBurningsResponse) GetBurnings() Burnings { + if m != nil { + return m.Burnings + } + return Burnings{} +} + +type QueryAllBurningsRequest struct { + Pagination *query.PageRequest `protobuf:"bytes,1,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryAllBurningsRequest) Reset() { *m = QueryAllBurningsRequest{} } +func (m *QueryAllBurningsRequest) String() string { return proto.CompactTextString(m) } +func (*QueryAllBurningsRequest) ProtoMessage() {} +func (*QueryAllBurningsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_dc4839d34748ebb1, []int{35} +} +func (m *QueryAllBurningsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryAllBurningsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryAllBurningsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryAllBurningsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryAllBurningsRequest.Merge(m, src) +} +func (m *QueryAllBurningsRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryAllBurningsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryAllBurningsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryAllBurningsRequest proto.InternalMessageInfo + +func (m *QueryAllBurningsRequest) GetPagination() *query.PageRequest { + if m != nil { + return m.Pagination + } + return nil +} + +type QueryAllBurningsResponse struct { + Burnings []Burnings 
`protobuf:"bytes,1,rep,name=burnings,proto3" json:"burnings"` + Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryAllBurningsResponse) Reset() { *m = QueryAllBurningsResponse{} } +func (m *QueryAllBurningsResponse) String() string { return proto.CompactTextString(m) } +func (*QueryAllBurningsResponse) ProtoMessage() {} +func (*QueryAllBurningsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_dc4839d34748ebb1, []int{36} +} +func (m *QueryAllBurningsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryAllBurningsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryAllBurningsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryAllBurningsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryAllBurningsResponse.Merge(m, src) +} +func (m *QueryAllBurningsResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryAllBurningsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryAllBurningsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryAllBurningsResponse proto.InternalMessageInfo + +func (m *QueryAllBurningsResponse) GetBurnings() []Burnings { + if m != nil { + return m.Burnings + } + return nil +} + +func (m *QueryAllBurningsResponse) GetPagination() *query.PageResponse { + if m != nil { + return m.Pagination + } + return nil +} + +type QueryOrderRequest struct { + Uid uint64 `protobuf:"varint,1,opt,name=uid,proto3" json:"uid,omitempty"` +} + +func (m *QueryOrderRequest) Reset() { *m = QueryOrderRequest{} } +func (m *QueryOrderRequest) String() string { return proto.CompactTextString(m) } +func (*QueryOrderRequest) ProtoMessage() {} +func (*QueryOrderRequest) Descriptor() ([]byte, []int) { + return 
fileDescriptor_dc4839d34748ebb1, []int{37} +} +func (m *QueryOrderRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryOrderRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryOrderRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryOrderRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryOrderRequest.Merge(m, src) +} +func (m *QueryOrderRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryOrderRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryOrderRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryOrderRequest proto.InternalMessageInfo + +func (m *QueryOrderRequest) GetUid() uint64 { + if m != nil { + return m.Uid + } + return 0 +} + +type QueryOrderResponse struct { + Order Order `protobuf:"bytes,1,opt,name=order,proto3" json:"order"` +} + +func (m *QueryOrderResponse) Reset() { *m = QueryOrderResponse{} } +func (m *QueryOrderResponse) String() string { return proto.CompactTextString(m) } +func (*QueryOrderResponse) ProtoMessage() {} +func (*QueryOrderResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_dc4839d34748ebb1, []int{38} +} +func (m *QueryOrderResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryOrderResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryOrderResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryOrderResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryOrderResponse.Merge(m, src) +} +func (m *QueryOrderResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryOrderResponse) XXX_DiscardUnknown() { + 
xxx_messageInfo_QueryOrderResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryOrderResponse proto.InternalMessageInfo + +func (m *QueryOrderResponse) GetOrder() Order { + if m != nil { + return m.Order + } + return Order{} +} + +type QueryOrdersResponse struct { + Orders []Order `protobuf:"bytes,1,rep,name=orders,proto3" json:"orders"` + Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryOrdersResponse) Reset() { *m = QueryOrdersResponse{} } +func (m *QueryOrdersResponse) String() string { return proto.CompactTextString(m) } +func (*QueryOrdersResponse) ProtoMessage() {} +func (*QueryOrdersResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_dc4839d34748ebb1, []int{39} +} +func (m *QueryOrdersResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryOrdersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryOrdersResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryOrdersResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryOrdersResponse.Merge(m, src) +} +func (m *QueryOrdersResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryOrdersResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryOrdersResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryOrdersResponse proto.InternalMessageInfo + +func (m *QueryOrdersResponse) GetOrders() []Order { + if m != nil { + return m.Orders + } + return nil +} + +func (m *QueryOrdersResponse) GetPagination() *query.PageResponse { + if m != nil { + return m.Pagination + } + return nil +} + +type QueryAllOrderRequest struct { + Pagination *query.PageRequest `protobuf:"bytes,1,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryAllOrderRequest) Reset() { *m = 
QueryAllOrderRequest{} } +func (m *QueryAllOrderRequest) String() string { return proto.CompactTextString(m) } +func (*QueryAllOrderRequest) ProtoMessage() {} +func (*QueryAllOrderRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_dc4839d34748ebb1, []int{40} +} +func (m *QueryAllOrderRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryAllOrderRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryAllOrderRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryAllOrderRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryAllOrderRequest.Merge(m, src) +} +func (m *QueryAllOrderRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryAllOrderRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryAllOrderRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryAllOrderRequest proto.InternalMessageInfo + +func (m *QueryAllOrderRequest) GetPagination() *query.PageRequest { + if m != nil { + return m.Pagination + } + return nil +} + +type QueryOrderOwnerRequest struct { + Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + Pagination *query.PageRequest `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryOrderOwnerRequest) Reset() { *m = QueryOrderOwnerRequest{} } +func (m *QueryOrderOwnerRequest) String() string { return proto.CompactTextString(m) } +func (*QueryOrderOwnerRequest) ProtoMessage() {} +func (*QueryOrderOwnerRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_dc4839d34748ebb1, []int{41} +} +func (m *QueryOrderOwnerRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryOrderOwnerRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return 
xxx_messageInfo_QueryOrderOwnerRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryOrderOwnerRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryOrderOwnerRequest.Merge(m, src) +} +func (m *QueryOrderOwnerRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryOrderOwnerRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryOrderOwnerRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryOrderOwnerRequest proto.InternalMessageInfo + +func (m *QueryOrderOwnerRequest) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +func (m *QueryOrderOwnerRequest) GetPagination() *query.PageRequest { + if m != nil { + return m.Pagination + } + return nil +} + +type QueryOrderOwnerUidsResponse struct { + Orders Orders `protobuf:"bytes,1,opt,name=orders,proto3" json:"orders"` + Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryOrderOwnerUidsResponse) Reset() { *m = QueryOrderOwnerUidsResponse{} } +func (m *QueryOrderOwnerUidsResponse) String() string { return proto.CompactTextString(m) } +func (*QueryOrderOwnerUidsResponse) ProtoMessage() {} +func (*QueryOrderOwnerUidsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_dc4839d34748ebb1, []int{42} +} +func (m *QueryOrderOwnerUidsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryOrderOwnerUidsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryOrderOwnerUidsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryOrderOwnerUidsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryOrderOwnerUidsResponse.Merge(m, src) +} +func (m 
*QueryOrderOwnerUidsResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryOrderOwnerUidsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryOrderOwnerUidsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryOrderOwnerUidsResponse proto.InternalMessageInfo + +func (m *QueryOrderOwnerUidsResponse) GetOrders() Orders { + if m != nil { + return m.Orders + } + return Orders{} +} + +func (m *QueryOrderOwnerUidsResponse) GetPagination() *query.PageResponse { + if m != nil { + return m.Pagination + } + return nil +} + +type QueryOrderOwnerPairRequest struct { + Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + Pair string `protobuf:"bytes,2,opt,name=pair,proto3" json:"pair,omitempty"` + Pagination *query.PageRequest `protobuf:"bytes,3,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryOrderOwnerPairRequest) Reset() { *m = QueryOrderOwnerPairRequest{} } +func (m *QueryOrderOwnerPairRequest) String() string { return proto.CompactTextString(m) } +func (*QueryOrderOwnerPairRequest) ProtoMessage() {} +func (*QueryOrderOwnerPairRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_dc4839d34748ebb1, []int{43} +} +func (m *QueryOrderOwnerPairRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryOrderOwnerPairRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryOrderOwnerPairRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryOrderOwnerPairRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryOrderOwnerPairRequest.Merge(m, src) +} +func (m *QueryOrderOwnerPairRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryOrderOwnerPairRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryOrderOwnerPairRequest.DiscardUnknown(m) +} + +var 
xxx_messageInfo_QueryOrderOwnerPairRequest proto.InternalMessageInfo + +func (m *QueryOrderOwnerPairRequest) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +func (m *QueryOrderOwnerPairRequest) GetPair() string { + if m != nil { + return m.Pair + } + return "" +} + +func (m *QueryOrderOwnerPairRequest) GetPagination() *query.PageRequest { + if m != nil { + return m.Pagination + } + return nil +} + +type QueryOrderOwnerPairResponse struct { + Order []Order `protobuf:"bytes,1,rep,name=order,proto3" json:"order"` + Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryOrderOwnerPairResponse) Reset() { *m = QueryOrderOwnerPairResponse{} } +func (m *QueryOrderOwnerPairResponse) String() string { return proto.CompactTextString(m) } +func (*QueryOrderOwnerPairResponse) ProtoMessage() {} +func (*QueryOrderOwnerPairResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_dc4839d34748ebb1, []int{44} +} +func (m *QueryOrderOwnerPairResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryOrderOwnerPairResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryOrderOwnerPairResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryOrderOwnerPairResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryOrderOwnerPairResponse.Merge(m, src) +} +func (m *QueryOrderOwnerPairResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryOrderOwnerPairResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryOrderOwnerPairResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryOrderOwnerPairResponse proto.InternalMessageInfo + +func (m *QueryOrderOwnerPairResponse) GetOrder() []Order { + if m != nil { + return m.Order + } + return nil +} + +func (m 
*QueryOrderOwnerPairResponse) GetPagination() *query.PageResponse { + if m != nil { + return m.Pagination + } + return nil +} + +type QueryBookRequest struct { + DenomA string `protobuf:"bytes,1,opt,name=denomA,proto3" json:"denomA,omitempty"` + DenomB string `protobuf:"bytes,2,opt,name=denomB,proto3" json:"denomB,omitempty"` + OrderType string `protobuf:"bytes,3,opt,name=orderType,proto3" json:"orderType,omitempty"` + Pagination *query.PageRequest `protobuf:"bytes,4,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryBookRequest) Reset() { *m = QueryBookRequest{} } +func (m *QueryBookRequest) String() string { return proto.CompactTextString(m) } +func (*QueryBookRequest) ProtoMessage() {} +func (*QueryBookRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_dc4839d34748ebb1, []int{45} +} +func (m *QueryBookRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryBookRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryBookRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryBookRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryBookRequest.Merge(m, src) +} +func (m *QueryBookRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryBookRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryBookRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryBookRequest proto.InternalMessageInfo + +func (m *QueryBookRequest) GetDenomA() string { + if m != nil { + return m.DenomA + } + return "" +} + +func (m *QueryBookRequest) GetDenomB() string { + if m != nil { + return m.DenomB + } + return "" +} + +func (m *QueryBookRequest) GetOrderType() string { + if m != nil { + return m.OrderType + } + return "" +} + +func (m *QueryBookRequest) GetPagination() *query.PageRequest { + if m != nil { + return 
m.Pagination + } + return nil +} + +type QueryBookResponse struct { + Book []OrderResponse `protobuf:"bytes,1,rep,name=book,proto3" json:"book"` + Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryBookResponse) Reset() { *m = QueryBookResponse{} } +func (m *QueryBookResponse) String() string { return proto.CompactTextString(m) } +func (*QueryBookResponse) ProtoMessage() {} +func (*QueryBookResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_dc4839d34748ebb1, []int{46} +} +func (m *QueryBookResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryBookResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryBookResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryBookResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryBookResponse.Merge(m, src) +} +func (m *QueryBookResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryBookResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryBookResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryBookResponse proto.InternalMessageInfo + +func (m *QueryBookResponse) GetBook() []OrderResponse { + if m != nil { + return m.Book + } + return nil +} + +func (m *QueryBookResponse) GetPagination() *query.PageResponse { + if m != nil { + return m.Pagination + } + return nil +} + +type QueryBookendsRequest struct { + CoinA string `protobuf:"bytes,1,opt,name=coinA,proto3" json:"coinA,omitempty"` + CoinB string `protobuf:"bytes,2,opt,name=coinB,proto3" json:"coinB,omitempty"` + OrderType string `protobuf:"bytes,3,opt,name=orderType,proto3" json:"orderType,omitempty"` + Rate []string `protobuf:"bytes,4,rep,name=rate,proto3" json:"rate,omitempty"` +} + +func (m *QueryBookendsRequest) Reset() { *m = 
QueryBookendsRequest{} } +func (m *QueryBookendsRequest) String() string { return proto.CompactTextString(m) } +func (*QueryBookendsRequest) ProtoMessage() {} +func (*QueryBookendsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_dc4839d34748ebb1, []int{47} +} +func (m *QueryBookendsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryBookendsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryBookendsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryBookendsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryBookendsRequest.Merge(m, src) +} +func (m *QueryBookendsRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryBookendsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryBookendsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryBookendsRequest proto.InternalMessageInfo + +func (m *QueryBookendsRequest) GetCoinA() string { + if m != nil { + return m.CoinA + } + return "" +} + +func (m *QueryBookendsRequest) GetCoinB() string { + if m != nil { + return m.CoinB + } + return "" +} + +func (m *QueryBookendsRequest) GetOrderType() string { + if m != nil { + return m.OrderType + } + return "" +} + +func (m *QueryBookendsRequest) GetRate() []string { + if m != nil { + return m.Rate + } + return nil +} + +type QueryBookendsResponse struct { + CoinA string `protobuf:"bytes,1,opt,name=coinA,proto3" json:"coinA,omitempty"` + CoinB string `protobuf:"bytes,2,opt,name=coinB,proto3" json:"coinB,omitempty"` + OrderType string `protobuf:"bytes,3,opt,name=orderType,proto3" json:"orderType,omitempty"` + Rate []string `protobuf:"bytes,4,rep,name=rate,proto3" json:"rate,omitempty"` + Prev uint64 `protobuf:"varint,5,opt,name=prev,proto3" json:"prev,omitempty"` + Next uint64 
`protobuf:"varint,6,opt,name=next,proto3" json:"next,omitempty"` +} + +func (m *QueryBookendsResponse) Reset() { *m = QueryBookendsResponse{} } +func (m *QueryBookendsResponse) String() string { return proto.CompactTextString(m) } +func (*QueryBookendsResponse) ProtoMessage() {} +func (*QueryBookendsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_dc4839d34748ebb1, []int{48} +} +func (m *QueryBookendsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryBookendsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryBookendsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryBookendsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryBookendsResponse.Merge(m, src) +} +func (m *QueryBookendsResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryBookendsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryBookendsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryBookendsResponse proto.InternalMessageInfo + +func (m *QueryBookendsResponse) GetCoinA() string { + if m != nil { + return m.CoinA + } + return "" +} + +func (m *QueryBookendsResponse) GetCoinB() string { + if m != nil { + return m.CoinB + } + return "" +} + +func (m *QueryBookendsResponse) GetOrderType() string { + if m != nil { + return m.OrderType + } + return "" +} + +func (m *QueryBookendsResponse) GetRate() []string { + if m != nil { + return m.Rate + } + return nil +} + +func (m *QueryBookendsResponse) GetPrev() uint64 { + if m != nil { + return m.Prev + } + return 0 +} + +func (m *QueryBookendsResponse) GetNext() uint64 { + if m != nil { + return m.Next + } + return 0 +} + +type QueryHistoryRequest struct { + Pair string `protobuf:"bytes,1,opt,name=pair,proto3" json:"pair,omitempty"` + Length string 
`protobuf:"bytes,2,opt,name=length,proto3" json:"length,omitempty"` + Pagination *query.PageRequest `protobuf:"bytes,3,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryHistoryRequest) Reset() { *m = QueryHistoryRequest{} } +func (m *QueryHistoryRequest) String() string { return proto.CompactTextString(m) } +func (*QueryHistoryRequest) ProtoMessage() {} +func (*QueryHistoryRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_dc4839d34748ebb1, []int{49} +} +func (m *QueryHistoryRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryHistoryRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryHistoryRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryHistoryRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryHistoryRequest.Merge(m, src) +} +func (m *QueryHistoryRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryHistoryRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryHistoryRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryHistoryRequest proto.InternalMessageInfo + +func (m *QueryHistoryRequest) GetPair() string { + if m != nil { + return m.Pair + } + return "" +} + +func (m *QueryHistoryRequest) GetLength() string { + if m != nil { + return m.Length + } + return "" +} + +func (m *QueryHistoryRequest) GetPagination() *query.PageRequest { + if m != nil { + return m.Pagination + } + return nil +} + +type QueryHistoryResponse struct { + History []OrderResponse `protobuf:"bytes,1,rep,name=history,proto3" json:"history"` + Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryHistoryResponse) Reset() { *m = QueryHistoryResponse{} } +func (m *QueryHistoryResponse) String() string { return 
proto.CompactTextString(m) } +func (*QueryHistoryResponse) ProtoMessage() {} +func (*QueryHistoryResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_dc4839d34748ebb1, []int{50} +} +func (m *QueryHistoryResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryHistoryResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryHistoryResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryHistoryResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryHistoryResponse.Merge(m, src) +} +func (m *QueryHistoryResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryHistoryResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryHistoryResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryHistoryResponse proto.InternalMessageInfo + +func (m *QueryHistoryResponse) GetHistory() []OrderResponse { + if m != nil { + return m.History + } + return nil +} + +func (m *QueryHistoryResponse) GetPagination() *query.PageResponse { + if m != nil { + return m.Pagination + } + return nil +} + +// Market Quote: denom is the denom that is input for quote +type QueryQuoteRequest struct { + DenomAsk string `protobuf:"bytes,1,opt,name=denomAsk,proto3" json:"denomAsk,omitempty"` + DenomBid string `protobuf:"bytes,2,opt,name=denomBid,proto3" json:"denomBid,omitempty"` + DenomAmount string `protobuf:"bytes,3,opt,name=denomAmount,proto3" json:"denomAmount,omitempty"` + Amount string `protobuf:"bytes,4,opt,name=amount,proto3" json:"amount,omitempty"` +} + +func (m *QueryQuoteRequest) Reset() { *m = QueryQuoteRequest{} } +func (m *QueryQuoteRequest) String() string { return proto.CompactTextString(m) } +func (*QueryQuoteRequest) ProtoMessage() {} +func (*QueryQuoteRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_dc4839d34748ebb1, []int{51} +} 
+func (m *QueryQuoteRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryQuoteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryQuoteRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryQuoteRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryQuoteRequest.Merge(m, src) +} +func (m *QueryQuoteRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryQuoteRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryQuoteRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryQuoteRequest proto.InternalMessageInfo + +func (m *QueryQuoteRequest) GetDenomAsk() string { + if m != nil { + return m.DenomAsk + } + return "" +} + +func (m *QueryQuoteRequest) GetDenomBid() string { + if m != nil { + return m.DenomBid + } + return "" +} + +func (m *QueryQuoteRequest) GetDenomAmount() string { + if m != nil { + return m.DenomAmount + } + return "" +} + +func (m *QueryQuoteRequest) GetAmount() string { + if m != nil { + return m.Amount + } + return "" +} + +type QueryQuoteResponse struct { + Denom string `protobuf:"bytes,1,opt,name=denom,proto3" json:"denom,omitempty"` + Amount string `protobuf:"bytes,2,opt,name=amount,proto3" json:"amount,omitempty"` +} + +func (m *QueryQuoteResponse) Reset() { *m = QueryQuoteResponse{} } +func (m *QueryQuoteResponse) String() string { return proto.CompactTextString(m) } +func (*QueryQuoteResponse) ProtoMessage() {} +func (*QueryQuoteResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_dc4839d34748ebb1, []int{52} +} +func (m *QueryQuoteResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryQuoteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryQuoteResponse.Marshal(b, m, deterministic) + } else { 
+ b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryQuoteResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryQuoteResponse.Merge(m, src) +} +func (m *QueryQuoteResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryQuoteResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryQuoteResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryQuoteResponse proto.InternalMessageInfo + +func (m *QueryQuoteResponse) GetDenom() string { + if m != nil { + return m.Denom + } + return "" +} + +func (m *QueryQuoteResponse) GetAmount() string { + if m != nil { + return m.Amount + } + return "" +} + +func init() { + proto.RegisterType((*QueryParamsRequest)(nil), "pendulumlabs.market.market.QueryParamsRequest") + proto.RegisterType((*QueryParamsResponse)(nil), "pendulumlabs.market.market.QueryParamsResponse") + proto.RegisterType((*QueryGetPoolRequest)(nil), "pendulumlabs.market.market.QueryGetPoolRequest") + proto.RegisterType((*QueryGetPoolResponse)(nil), "pendulumlabs.market.market.QueryGetPoolResponse") + proto.RegisterType((*QueryAllPoolRequest)(nil), "pendulumlabs.market.market.QueryAllPoolRequest") + proto.RegisterType((*QueryAllPoolResponse)(nil), "pendulumlabs.market.market.QueryAllPoolResponse") + proto.RegisterType((*QueryVolumeRequest)(nil), "pendulumlabs.market.market.QueryVolumeRequest") + proto.RegisterType((*QueryVolumeResponse)(nil), "pendulumlabs.market.market.QueryVolumeResponse") + proto.RegisterType((*QueryAllVolumeRequest)(nil), "pendulumlabs.market.market.QueryAllVolumeRequest") + proto.RegisterType((*QueryAllVolumeResponse)(nil), "pendulumlabs.market.market.QueryAllVolumeResponse") + proto.RegisterType((*QueryBurnedRequest)(nil), "pendulumlabs.market.market.QueryBurnedRequest") + proto.RegisterType((*QueryBurnedResponse)(nil), "pendulumlabs.market.market.QueryBurnedResponse") + proto.RegisterType((*QueryDropRequest)(nil), 
"pendulumlabs.market.market.QueryDropRequest") + proto.RegisterType((*QueryDropCoinRequest)(nil), "pendulumlabs.market.market.QueryDropCoinRequest") + proto.RegisterType((*QueryDropCoinResponse)(nil), "pendulumlabs.market.market.QueryDropCoinResponse") + proto.RegisterType((*QueryDropResponse)(nil), "pendulumlabs.market.market.QueryDropResponse") + proto.RegisterType((*QueryDropAmountsRequest)(nil), "pendulumlabs.market.market.QueryDropAmountsRequest") + proto.RegisterType((*QueryDropAmountsResponse)(nil), "pendulumlabs.market.market.QueryDropAmountsResponse") + proto.RegisterType((*QueryDropsToCoinsRequest)(nil), "pendulumlabs.market.market.QueryDropsToCoinsRequest") + proto.RegisterType((*QueryDropPairsRequest)(nil), "pendulumlabs.market.market.QueryDropPairsRequest") + proto.RegisterType((*QueryDropPairsResponse)(nil), "pendulumlabs.market.market.QueryDropPairsResponse") + proto.RegisterType((*QueryDropOwnerPairRequest)(nil), "pendulumlabs.market.market.QueryDropOwnerPairRequest") + proto.RegisterType((*QueryDropOwnerPairSumRequest)(nil), "pendulumlabs.market.market.QueryDropOwnerPairSumRequest") + proto.RegisterType((*QueryDropOwnerPairSumResponse)(nil), "pendulumlabs.market.market.QueryDropOwnerPairSumResponse") + proto.RegisterType((*QueryDropOwnerPairUidsRequest)(nil), "pendulumlabs.market.market.QueryDropOwnerPairUidsRequest") + proto.RegisterType((*QueryUidsResponse)(nil), "pendulumlabs.market.market.QueryUidsResponse") + proto.RegisterType((*QueryDropOwnerPairDetailRequest)(nil), "pendulumlabs.market.market.QueryDropOwnerPairDetailRequest") + proto.RegisterType((*QueryAllDropRequest)(nil), "pendulumlabs.market.market.QueryAllDropRequest") + proto.RegisterType((*QueryDropsResponse)(nil), "pendulumlabs.market.market.QueryDropsResponse") + proto.RegisterType((*QueryGetMemberRequest)(nil), "pendulumlabs.market.market.QueryGetMemberRequest") + proto.RegisterType((*QueryGetMemberResponse)(nil), "pendulumlabs.market.market.QueryGetMemberResponse") + 
proto.RegisterType((*QueryAllMemberRequest)(nil), "pendulumlabs.market.market.QueryAllMemberRequest") + proto.RegisterType((*QueryAllMemberResponse)(nil), "pendulumlabs.market.market.QueryAllMemberResponse") + proto.RegisterType((*QueryGetBurningsRequest)(nil), "pendulumlabs.market.market.QueryGetBurningsRequest") + proto.RegisterType((*QueryGetBurningsResponse)(nil), "pendulumlabs.market.market.QueryGetBurningsResponse") + proto.RegisterType((*QueryAllBurningsRequest)(nil), "pendulumlabs.market.market.QueryAllBurningsRequest") + proto.RegisterType((*QueryAllBurningsResponse)(nil), "pendulumlabs.market.market.QueryAllBurningsResponse") + proto.RegisterType((*QueryOrderRequest)(nil), "pendulumlabs.market.market.QueryOrderRequest") + proto.RegisterType((*QueryOrderResponse)(nil), "pendulumlabs.market.market.QueryOrderResponse") + proto.RegisterType((*QueryOrdersResponse)(nil), "pendulumlabs.market.market.QueryOrdersResponse") + proto.RegisterType((*QueryAllOrderRequest)(nil), "pendulumlabs.market.market.QueryAllOrderRequest") + proto.RegisterType((*QueryOrderOwnerRequest)(nil), "pendulumlabs.market.market.QueryOrderOwnerRequest") + proto.RegisterType((*QueryOrderOwnerUidsResponse)(nil), "pendulumlabs.market.market.QueryOrderOwnerUidsResponse") + proto.RegisterType((*QueryOrderOwnerPairRequest)(nil), "pendulumlabs.market.market.QueryOrderOwnerPairRequest") + proto.RegisterType((*QueryOrderOwnerPairResponse)(nil), "pendulumlabs.market.market.QueryOrderOwnerPairResponse") + proto.RegisterType((*QueryBookRequest)(nil), "pendulumlabs.market.market.QueryBookRequest") + proto.RegisterType((*QueryBookResponse)(nil), "pendulumlabs.market.market.QueryBookResponse") + proto.RegisterType((*QueryBookendsRequest)(nil), "pendulumlabs.market.market.QueryBookendsRequest") + proto.RegisterType((*QueryBookendsResponse)(nil), "pendulumlabs.market.market.QueryBookendsResponse") + proto.RegisterType((*QueryHistoryRequest)(nil), "pendulumlabs.market.market.QueryHistoryRequest") + 
proto.RegisterType((*QueryHistoryResponse)(nil), "pendulumlabs.market.market.QueryHistoryResponse") + proto.RegisterType((*QueryQuoteRequest)(nil), "pendulumlabs.market.market.QueryQuoteRequest") + proto.RegisterType((*QueryQuoteResponse)(nil), "pendulumlabs.market.market.QueryQuoteResponse") +} + +func init() { proto.RegisterFile("market/query.proto", fileDescriptor_dc4839d34748ebb1) } + +var fileDescriptor_dc4839d34748ebb1 = []byte{ + // 2073 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x5a, 0xcf, 0x8f, 0x23, 0x47, + 0x15, 0x9e, 0x1a, 0x7b, 0x3c, 0x33, 0x35, 0x80, 0x36, 0x95, 0xc9, 0x60, 0xcc, 0x32, 0xd9, 0xad, + 0xec, 0xb2, 0x99, 0xd9, 0x8c, 0x7b, 0xed, 0xd9, 0x65, 0x21, 0x22, 0x24, 0xf6, 0x8e, 0x76, 0x42, + 0x04, 0x9a, 0x59, 0x27, 0x10, 0x29, 0x12, 0x3f, 0xda, 0xe9, 0x96, 0xd3, 0x9a, 0x76, 0x97, 0xb7, + 0xbb, 0xbd, 0x64, 0xb0, 0xcc, 0x01, 0x89, 0x03, 0x37, 0x7e, 0x85, 0x04, 0x45, 0x10, 0x45, 0x28, + 0x22, 0x82, 0x05, 0x09, 0x4e, 0x08, 0x0e, 0x88, 0xdb, 0x1e, 0x57, 0xe2, 0xc2, 0x09, 0xa1, 0x5d, + 0xfe, 0x10, 0x54, 0x55, 0xaf, 0xba, 0xab, 0xed, 0x19, 0x77, 0xd9, 0xf2, 0x6a, 0x4f, 0xd3, 0x55, + 0xae, 0x57, 0xef, 0xab, 0xef, 0x55, 0xbd, 0x57, 0xf5, 0x69, 0x30, 0xe9, 0xda, 0xe1, 0x91, 0x1b, + 0x5b, 0xb7, 0xfb, 0x6e, 0x78, 0x5c, 0xed, 0x85, 0x2c, 0x66, 0xa4, 0xd2, 0x73, 0x03, 0xa7, 0xef, + 0xf7, 0xbb, 0xbe, 0xdd, 0x8e, 0xaa, 0x72, 0x00, 0xfc, 0xa9, 0xac, 0x77, 0x58, 0x87, 0x89, 0x61, + 0x16, 0xff, 0x92, 0x16, 0x95, 0xb3, 0x1d, 0xc6, 0x3a, 0xbe, 0x6b, 0xd9, 0x3d, 0xcf, 0xb2, 0x83, + 0x80, 0xc5, 0x76, 0xec, 0xb1, 0x20, 0x82, 0x5f, 0xb7, 0xdf, 0x64, 0x51, 0x97, 0x45, 0x56, 0xdb, + 0x8e, 0x5c, 0xe9, 0xc8, 0xba, 0x53, 0x6b, 0xbb, 0xb1, 0x5d, 0xb3, 0x7a, 0x76, 0xc7, 0x0b, 0xc4, + 0x60, 0x18, 0xfb, 0x24, 0xe0, 0xe9, 0xd9, 0xa1, 0xdd, 0x55, 0x13, 0x3c, 0xa1, 0x3a, 0x19, 0xf3, + 0x47, 0xba, 0x9c, 0x90, 0xf5, 0x46, 0x4c, 0xbb, 0x6e, 0xb7, 0xed, 0x86, 0xd0, 0xf9, 0x14, 0x74, + 0xb6, 0xfb, 0x61, 0xe0, 0x05, 0x1d, 
0x35, 0xa3, 0x5a, 0x36, 0x0b, 0x1d, 0x35, 0x94, 0xae, 0x63, + 0x72, 0x8b, 0x83, 0x3b, 0x14, 0xae, 0x5b, 0xee, 0xed, 0xbe, 0x1b, 0xc5, 0xf4, 0x75, 0xfc, 0x64, + 0xa6, 0x37, 0xea, 0xb1, 0x20, 0x72, 0xc9, 0x4b, 0xb8, 0x24, 0x21, 0x96, 0xd1, 0x39, 0xf4, 0xec, + 0x5a, 0x9d, 0x56, 0x4f, 0x27, 0xad, 0x2a, 0x6d, 0x9b, 0xc5, 0x7b, 0xff, 0x79, 0x7a, 0xa1, 0x05, + 0x76, 0x74, 0x0b, 0x26, 0xde, 0x77, 0xe3, 0x43, 0xc6, 0x7c, 0xf0, 0x47, 0x08, 0x2e, 0xf6, 0x6c, + 0x2f, 0x14, 0xd3, 0xae, 0xb6, 0xc4, 0x37, 0x6d, 0xe1, 0xf5, 0xec, 0x50, 0x00, 0xf1, 0x3c, 0x2e, + 0x72, 0x4a, 0x00, 0xc2, 0xb9, 0x89, 0x10, 0x18, 0xf3, 0x01, 0x80, 0xb0, 0xa1, 0xdf, 0x02, 0xf7, + 0x0d, 0xdf, 0xd7, 0xdd, 0xdf, 0xc4, 0x38, 0x8d, 0x09, 0x4c, 0xfc, 0xf9, 0xaa, 0x0c, 0x60, 0x95, + 0x07, 0xb0, 0x2a, 0x77, 0x0a, 0x04, 0xb0, 0x7a, 0x68, 0x77, 0x5c, 0xb0, 0x6d, 0x69, 0x96, 0xf4, + 0x7d, 0x04, 0x98, 0x93, 0xf9, 0xc7, 0x30, 0x17, 0xa6, 0xc5, 0x4c, 0xf6, 0x33, 0xe0, 0x16, 0x05, + 0xb8, 0x4b, 0xb9, 0xe0, 0xa4, 0xe3, 0x0c, 0xba, 0x6d, 0x08, 0xf5, 0x37, 0x99, 0xdf, 0xef, 0x2a, + 0xfc, 0x64, 0x1d, 0x2f, 0x39, 0x6e, 0xc0, 0xba, 0xc0, 0xbd, 0x6c, 0xd0, 0x1d, 0x20, 0x4a, 0x8d, + 0x85, 0x75, 0x6c, 0xe0, 0x92, 0xdd, 0x65, 0xfd, 0x20, 0x86, 0xd1, 0xd0, 0xa2, 0xdf, 0xc1, 0x4f, + 0xa9, 0x75, 0x67, 0x67, 0x9f, 0x17, 0xb3, 0x1f, 0x21, 0xbc, 0x31, 0xea, 0x01, 0x30, 0x35, 0xf1, + 0xf2, 0x1d, 0xd1, 0x13, 0x01, 0xbd, 0x13, 0x77, 0xa5, 0x34, 0x06, 0x82, 0x95, 0xe1, 0xfc, 0x38, + 0x56, 0xc7, 0xa9, 0xd9, 0x0f, 0x03, 0xd7, 0x51, 0xc7, 0xe9, 0x06, 0xb0, 0xa9, 0x7a, 0x01, 0xf9, + 0x89, 0xd4, 0x6b, 0x1c, 0x2f, 0x66, 0x38, 0xbe, 0x80, 0xcf, 0x88, 0x49, 0xf6, 0x42, 0xd6, 0x53, + 0xf4, 0x9e, 0xc1, 0x85, 0xbe, 0xe7, 0x08, 0xfb, 0x62, 0x8b, 0x7f, 0xd2, 0xef, 0xc2, 0x0e, 0xe4, + 0xa3, 0x6e, 0x30, 0x2f, 0x50, 0x23, 0x37, 0x70, 0x49, 0x4c, 0xdf, 0x50, 0x91, 0x93, 0xad, 0xa4, + 0xbf, 0xa9, 0xbc, 0xc9, 0x16, 0x29, 0xe3, 0x65, 0xe9, 0xb7, 0x51, 0x2e, 0x88, 0x1f, 0x54, 0x93, + 0xee, 0x43, 0xac, 0x53, 0x0f, 0xda, 0x72, 0x42, 0xd6, 0x8b, 0x92, 0xe5, 
0xf0, 0x46, 0x3a, 0x91, + 0xf2, 0xa0, 0x9a, 0xf4, 0x00, 0x3f, 0xa1, 0x2d, 0x28, 0x3d, 0x29, 0xdc, 0xce, 0xe4, 0x74, 0x73, + 0x3b, 0x75, 0x52, 0xb8, 0x0d, 0xbd, 0x8c, 0x3f, 0x9d, 0x4c, 0xd8, 0x10, 0x4e, 0xa2, 0xd3, 0x89, + 0xfa, 0x01, 0x2e, 0x8f, 0x0f, 0x4e, 0xb7, 0xb9, 0xa0, 0xa1, 0x96, 0x21, 0xab, 0x96, 0xf4, 0xd7, + 0x33, 0x64, 0xd5, 0xd3, 0x35, 0xd6, 0xb2, 0x64, 0xd5, 0xd2, 0x5f, 0xea, 0xe5, 0xa2, 0xfe, 0x4b, + 0x9d, 0xee, 0x69, 0xfe, 0xa3, 0xd7, 0x18, 0x67, 0x32, 0x9a, 0x90, 0x0e, 0x53, 0x76, 0x17, 0x35, + 0x76, 0x69, 0x4d, 0x0b, 0xc6, 0xa1, 0xed, 0x85, 0xc9, 0x14, 0xdc, 0xb1, 0xe3, 0x84, 0x6e, 0xa4, + 0xc2, 0xa1, 0x9a, 0xb4, 0x0a, 0x27, 0x49, 0x33, 0x49, 0x03, 0xc8, 0x5d, 0xc9, 0x73, 0xb4, 0xda, + 0x92, 0x0d, 0xfa, 0x53, 0x84, 0x3f, 0x93, 0x18, 0x1c, 0x7c, 0x2f, 0x70, 0x43, 0x6e, 0x95, 0xeb, + 0x27, 0x59, 0xc4, 0xa2, 0xb6, 0x88, 0x6c, 0x3a, 0x28, 0xcc, 0x9c, 0x0e, 0xbe, 0x86, 0xcf, 0x8e, + 0x43, 0x7a, 0xb5, 0xdf, 0x9d, 0x09, 0x15, 0xad, 0xe1, 0xcf, 0x9d, 0x32, 0x1b, 0x10, 0x73, 0x06, + 0x17, 0xa2, 0xbe, 0x3a, 0xa6, 0xfc, 0x93, 0xbe, 0x83, 0x4e, 0xb2, 0xf9, 0x86, 0xe7, 0x44, 0x8f, + 0x97, 0x98, 0x1e, 0x9c, 0x29, 0x89, 0x04, 0xe0, 0x13, 0x5c, 0xec, 0x7b, 0x8e, 0x0c, 0x6b, 0xb1, + 0x25, 0xbe, 0xe7, 0x97, 0xf1, 0xde, 0x45, 0xf8, 0xe9, 0x71, 0x26, 0xf6, 0xdc, 0xd8, 0xf6, 0xfc, + 0xc7, 0xcb, 0x85, 0x56, 0xec, 0xf5, 0x9c, 0x39, 0xc7, 0x62, 0x4f, 0xd2, 0x13, 0x9c, 0x90, 0xfd, + 0xe5, 0x34, 0x0b, 0x16, 0xa6, 0xc8, 0x60, 0x90, 0x2d, 0xe7, 0x16, 0x16, 0x95, 0xa5, 0xf7, 0xdd, + 0xf8, 0xeb, 0xe2, 0x6a, 0x38, 0x5e, 0x08, 0x16, 0x4f, 0x29, 0x04, 0x05, 0xbd, 0x10, 0xd0, 0x37, + 0x20, 0x5d, 0x68, 0x13, 0xa5, 0xb7, 0x41, 0x79, 0xeb, 0x34, 0xb9, 0x0d, 0x4a, 0x5b, 0x75, 0x1b, + 0x94, 0x76, 0xfa, 0xb5, 0x21, 0x0b, 0x72, 0x5e, 0x31, 0xfa, 0xad, 0x76, 0x6d, 0x98, 0x80, 0xbe, + 0x30, 0x0b, 0xfa, 0xf9, 0xc5, 0xca, 0x82, 0xba, 0xb5, 0xef, 0xc6, 0x4d, 0xb8, 0xb1, 0x4f, 0xbe, + 0x9d, 0xb5, 0xa1, 0x76, 0x64, 0x0c, 0x60, 0x5d, 0x37, 0xf1, 0x8a, 0xba, 0xf6, 0x03, 0x71, 0x17, + 0x26, 0xad, 
0x4c, 0xd9, 0xc3, 0xda, 0x12, 0x5b, 0x6a, 0x03, 0xa8, 0x86, 0xef, 0x8f, 0x82, 0x9a, + 0x57, 0x74, 0xfe, 0x80, 0x60, 0x1d, 0x19, 0x1f, 0x27, 0xae, 0xa3, 0x30, 0xeb, 0x3a, 0xe6, 0x17, + 0xa5, 0x8b, 0x90, 0x5a, 0x0f, 0xf8, 0xeb, 0xe9, 0xf4, 0x7b, 0xc5, 0xab, 0x90, 0x15, 0x60, 0x18, + 0xac, 0xe6, 0x05, 0xbc, 0x24, 0x5e, 0x5d, 0xc0, 0xd6, 0xf9, 0x49, 0x4b, 0x11, 0x96, 0x2a, 0x2d, + 0x08, 0x2b, 0xfa, 0x01, 0x82, 0x5c, 0x26, 0x7e, 0x4b, 0x49, 0x7a, 0x11, 0x97, 0xc4, 0x00, 0x45, + 0x91, 0xf1, 0xbc, 0x60, 0x36, 0x3f, 0x76, 0xbe, 0x9d, 0xbe, 0x7c, 0x32, 0x04, 0xcd, 0x6b, 0xaf, + 0x7c, 0x1f, 0x0e, 0xb2, 0x98, 0x5c, 0x94, 0x99, 0xfc, 0xe2, 0x72, 0xf3, 0x84, 0xc5, 0xcd, 0xe2, + 0xfb, 0x63, 0x84, 0x3f, 0x3b, 0xe2, 0x3c, 0x53, 0x5f, 0x5f, 0xd2, 0xa2, 0x90, 0x9b, 0x08, 0x65, + 0x04, 0x1f, 0x55, 0x18, 0x7e, 0x86, 0x70, 0x65, 0x04, 0xea, 0xe3, 0xbf, 0xad, 0x7d, 0x34, 0xce, + 0x9f, 0x04, 0x35, 0x7e, 0x38, 0x0a, 0xd3, 0x1f, 0x8e, 0xf9, 0x91, 0xf7, 0x31, 0x82, 0x27, 0x56, + 0x93, 0xb1, 0xa3, 0x59, 0x1f, 0x4e, 0x67, 0xf1, 0xaa, 0x80, 0xf5, 0xda, 0x71, 0xcf, 0x85, 0x52, + 0x9a, 0x76, 0x8c, 0x50, 0x5a, 0x9c, 0x99, 0xd2, 0x0f, 0x11, 0x64, 0x23, 0x09, 0x15, 0x88, 0xbc, + 0x81, 0x8b, 0x6d, 0xc6, 0x8e, 0x80, 0xc7, 0xad, 0x5c, 0x1e, 0x95, 0xa1, 0x7a, 0x45, 0x71, 0xe3, + 0xf9, 0xd1, 0x19, 0x43, 0x4a, 0xe0, 0x10, 0xdd, 0xc0, 0xd1, 0x6b, 0xda, 0x9b, 0xcc, 0x0b, 0x14, + 0xa1, 0xb2, 0xa1, 0x7a, 0x15, 0x9d, 0xb2, 0x91, 0xc3, 0x26, 0xc1, 0xc5, 0xd0, 0x8e, 0xdd, 0x72, + 0x51, 0xbc, 0x57, 0xc4, 0x37, 0xfd, 0x35, 0x82, 0x4b, 0x45, 0xea, 0x36, 0x7d, 0xde, 0x3c, 0x3a, + 0xbf, 0xe2, 0x00, 0x85, 0xee, 0x9d, 0xf2, 0x92, 0x28, 0x05, 0xe2, 0x9b, 0xf7, 0x05, 0xee, 0xdb, + 0x71, 0xb9, 0x24, 0xfb, 0xf8, 0x37, 0xfd, 0xb1, 0x4a, 0xe5, 0x2f, 0x7b, 0x51, 0xcc, 0xc2, 0xe3, + 0x49, 0x6f, 0xbe, 0x0d, 0x5c, 0xf2, 0xdd, 0xa0, 0x13, 0xbf, 0xa5, 0xf6, 0x98, 0x6c, 0xcd, 0xed, + 0x60, 0xfe, 0x5e, 0xe9, 0x55, 0x09, 0x16, 0xa0, 0xea, 0xab, 0x78, 0xf9, 0x2d, 0xd9, 0x35, 0xeb, + 0x5e, 0x52, 0xf6, 0xf3, 0xdb, 0x4e, 0x3f, 0x52, 
0x5b, 0xfe, 0x56, 0x9f, 0xc5, 0x89, 0xc0, 0x54, + 0xc1, 0x2b, 0xf2, 0x40, 0x46, 0x47, 0x40, 0x5d, 0xd2, 0x4e, 0x7e, 0x6b, 0x7a, 0x0e, 0x10, 0x98, + 0xb4, 0xc9, 0x39, 0xbc, 0x26, 0xc7, 0x49, 0xa9, 0x45, 0x86, 0x58, 0xef, 0xd2, 0x74, 0x98, 0x62, + 0x46, 0x87, 0x69, 0x42, 0x81, 0x07, 0x18, 0xb3, 0x68, 0x39, 0xf5, 0xf7, 0xce, 0xe3, 0x25, 0x31, + 0x09, 0xf9, 0x25, 0xc2, 0x25, 0xa9, 0x94, 0x92, 0xea, 0x24, 0x8e, 0xc7, 0x45, 0xda, 0x8a, 0x65, + 0x3c, 0x5e, 0x62, 0xa4, 0x97, 0x7f, 0xf8, 0xaf, 0xff, 0xfd, 0x7c, 0xf1, 0x22, 0x79, 0xc6, 0x52, + 0x86, 0x3b, 0xdc, 0xd2, 0x52, 0x12, 0xb2, 0x2e, 0x42, 0x0b, 0x60, 0x52, 0xaf, 0x32, 0x00, 0x96, + 0x91, 0xbb, 0x0c, 0x80, 0x65, 0x85, 0x30, 0x43, 0x60, 0x6d, 0x89, 0xe6, 0x7d, 0x84, 0x8b, 0x87, + 0x8c, 0xf9, 0x24, 0xdf, 0x4d, 0x56, 0x65, 0xae, 0x5c, 0x31, 0x37, 0x00, 0x60, 0x57, 0x04, 0xb0, + 0x6d, 0xf2, 0xec, 0x64, 0xc6, 0x18, 0xf3, 0xad, 0x01, 0x3f, 0xb1, 0x43, 0xf2, 0x2e, 0xc2, 0xcb, + 0x7c, 0x8a, 0x86, 0x6f, 0x02, 0x30, 0xab, 0x43, 0x1b, 0x00, 0x1c, 0x11, 0x96, 0xe9, 0x96, 0x00, + 0xf8, 0x0c, 0x39, 0x9f, 0x0b, 0x90, 0xfc, 0x06, 0xe1, 0x92, 0x54, 0x3f, 0x0d, 0x02, 0x9a, 0x51, + 0x71, 0x0d, 0x02, 0x9a, 0xd5, 0x64, 0xe9, 0xae, 0x80, 0xb5, 0x43, 0x2e, 0x4f, 0x84, 0x25, 0xd5, + 0x57, 0x6b, 0x20, 0xce, 0xca, 0x90, 0x7c, 0x80, 0xf0, 0xaa, 0x9c, 0x87, 0x93, 0x57, 0x33, 0xe1, + 0x22, 0x0b, 0xb3, 0x3e, 0x8d, 0xc9, 0x54, 0x5b, 0x4f, 0x22, 0x25, 0xbf, 0x40, 0xb8, 0xc8, 0xdf, + 0xec, 0xe4, 0xb9, 0x5c, 0x4f, 0x9a, 0xe2, 0x50, 0xd9, 0x31, 0x1c, 0x0d, 0x90, 0x2c, 0x01, 0x69, + 0x8b, 0x5c, 0x9a, 0x08, 0xc9, 0x09, 0x59, 0xcf, 0x1a, 0xf4, 0x3d, 0x67, 0x48, 0xfe, 0x8c, 0xf0, + 0x9a, 0x26, 0x63, 0x92, 0x5d, 0x23, 0x7f, 0x59, 0x85, 0xb4, 0x72, 0x75, 0x3a, 0x23, 0xc0, 0x7a, + 0x5d, 0x60, 0xad, 0x11, 0x2b, 0x1f, 0xab, 0x4c, 0x7d, 0x11, 0x60, 0xfe, 0x3b, 0xc2, 0x2b, 0x4a, + 0x41, 0x26, 0x57, 0x8c, 0x7c, 0x6b, 0x72, 0x76, 0xa5, 0x36, 0x85, 0x05, 0x40, 0x7d, 0x45, 0x40, + 0xdd, 0x23, 0xcd, 0x7c, 0xa8, 0xfc, 0x0e, 0x00, 0xdb, 0xb2, 0x31, 0x84, 0x8f, 0xe6, 
0xd0, 0x1a, + 0x80, 0x04, 0x3e, 0x24, 0x7f, 0x43, 0xf8, 0x13, 0xba, 0x70, 0x4b, 0xcc, 0xd8, 0x1b, 0xd1, 0x79, + 0x67, 0xe4, 0xfc, 0x2b, 0x62, 0x21, 0x5f, 0x24, 0x5f, 0x30, 0x5b, 0x48, 0x04, 0xa9, 0xc9, 0x1a, + 0x08, 0x89, 0x69, 0x48, 0xfe, 0x88, 0xf0, 0x6a, 0x22, 0xfe, 0x12, 0x33, 0x26, 0x75, 0x6d, 0xd9, + 0xe0, 0x9c, 0x8d, 0x69, 0xcb, 0xf4, 0x4b, 0x02, 0xf4, 0x2e, 0xa9, 0xe5, 0x83, 0x16, 0xb2, 0xb3, + 0x35, 0x80, 0x97, 0xc9, 0x90, 0xfc, 0x05, 0xe1, 0x4f, 0x66, 0xc4, 0x45, 0x72, 0xcd, 0x08, 0xc0, + 0xe8, 0xeb, 0xa7, 0x52, 0x35, 0x0b, 0x52, 0x82, 0xf9, 0x79, 0x81, 0xf9, 0x2a, 0xa9, 0x1b, 0x1c, + 0x44, 0x85, 0x56, 0xd5, 0x81, 0x77, 0x10, 0x5e, 0x16, 0xc1, 0x33, 0xad, 0x03, 0x7a, 0xc2, 0x98, + 0x16, 0xa8, 0x59, 0x15, 0xe0, 0x40, 0xc9, 0x9f, 0x10, 0x2e, 0x49, 0x35, 0xcb, 0x20, 0xf2, 0xa3, + 0xe2, 0xa1, 0x41, 0xe4, 0xc7, 0x64, 0x42, 0xfa, 0x82, 0x00, 0x77, 0x9d, 0x5c, 0x9b, 0x08, 0x4e, + 0x6a, 0x6a, 0xe3, 0x87, 0x4e, 0x54, 0x05, 0x39, 0xa3, 0x71, 0x55, 0x98, 0x16, 0xf3, 0x98, 0x38, + 0x68, 0x58, 0x15, 0x40, 0x07, 0xbc, 0x8b, 0xf0, 0x8a, 0x92, 0x9f, 0x0c, 0x72, 0xef, 0xb8, 0xca, + 0x67, 0x90, 0x07, 0x4e, 0x50, 0xfa, 0xe8, 0x35, 0x01, 0xd2, 0x22, 0x3b, 0xb9, 0xb7, 0x26, 0x6e, + 0x96, 0x94, 0xd9, 0xdf, 0x21, 0xbc, 0xa6, 0xe6, 0xe2, 0x94, 0xee, 0x9a, 0xf0, 0x33, 0x3d, 0xe2, + 0x13, 0x34, 0x3d, 0xba, 0x23, 0x10, 0x5f, 0x22, 0x17, 0x8d, 0x10, 0x93, 0xf7, 0x10, 0x5e, 0x12, + 0x6f, 0x0b, 0x92, 0x5f, 0x41, 0x75, 0xd1, 0xc9, 0xe0, 0xfc, 0x64, 0x9e, 0x2c, 0x86, 0xd7, 0x3c, + 0xf1, 0xfe, 0x83, 0xf2, 0xf5, 0x2b, 0x84, 0x57, 0xc4, 0x1c, 0x9c, 0x41, 0xa3, 0x6b, 0x5b, 0x06, + 0xa0, 0x65, 0x06, 0x30, 0x65, 0x6e, 0x5b, 0x20, 0xbc, 0x40, 0x68, 0x3e, 0x42, 0x1e, 0x60, 0x9c, + 0x2a, 0x2d, 0xa4, 0x6e, 0xe6, 0x4b, 0xd7, 0xd4, 0xa6, 0xc7, 0x77, 0x55, 0xe0, 0xab, 0x92, 0xe7, + 0x4c, 0x18, 0x4c, 0x32, 0xfb, 0x5f, 0x11, 0xfe, 0x54, 0x56, 0x53, 0x9b, 0x09, 0xed, 0xf5, 0x29, + 0x6c, 0x74, 0xe1, 0xce, 0xb0, 0x28, 0x49, 0xd4, 0x7d, 0xcf, 0xd1, 0x8b, 0xd2, 0x5d, 0x84, 0x8b, + 0x4d, 0xc6, 0x8e, 0x0c, 
0xae, 0x82, 0x9a, 0x9a, 0x64, 0x70, 0x15, 0xd4, 0x05, 0x1d, 0xba, 0x2f, + 0x00, 0x36, 0xc8, 0x8b, 0x93, 0x0f, 0x0c, 0x63, 0x47, 0x27, 0x5d, 0x57, 0x12, 0xc1, 0x62, 0x48, + 0xfe, 0xc9, 0x73, 0x14, 0x08, 0x22, 0x06, 0xfb, 0x75, 0x44, 0xb2, 0x31, 0xb8, 0x6e, 0x8d, 0xaa, + 0x2d, 0xf4, 0x50, 0x40, 0x7f, 0x85, 0xbc, 0x9c, 0x0b, 0x9d, 0x9b, 0x59, 0x03, 0xa1, 0xc6, 0x0c, + 0xe5, 0xdf, 0x2c, 0x78, 0x6b, 0x10, 0xda, 0xb1, 0x3b, 0x24, 0x1f, 0x22, 0xbc, 0x0c, 0x42, 0x85, + 0x41, 0x49, 0xcd, 0xca, 0x2b, 0x06, 0x4f, 0xab, 0x11, 0x0d, 0xc4, 0xf0, 0x0d, 0x03, 0x32, 0x87, + 0x2a, 0xfb, 0xff, 0x40, 0xfc, 0x61, 0xcf, 0x62, 0xd7, 0x20, 0x65, 0xe9, 0x3a, 0x86, 0x41, 0xca, + 0xca, 0xe8, 0x0d, 0xf4, 0x75, 0x81, 0xee, 0x16, 0x39, 0x98, 0x88, 0xee, 0x36, 0xb7, 0x51, 0x3b, + 0xc2, 0x73, 0xd4, 0xe6, 0x68, 0x44, 0x47, 0xc9, 0xa7, 0xb8, 0x5c, 0x26, 0x77, 0xdb, 0x61, 0xd3, + 0xba, 0xf7, 0x60, 0x13, 0xdd, 0x7f, 0xb0, 0x89, 0xfe, 0xfb, 0x60, 0x13, 0xfd, 0xe4, 0xe1, 0xe6, + 0xc2, 0xfd, 0x87, 0x9b, 0x0b, 0xff, 0x7e, 0xb8, 0xb9, 0xf0, 0x86, 0xfa, 0xaf, 0xb2, 0xb7, 0xd5, + 0xec, 0xf1, 0x71, 0xcf, 0x8d, 0xda, 0x25, 0xf1, 0x8f, 0x64, 0xbb, 0xff, 0x0f, 0x00, 0x00, 0xff, + 0xff, 0x4c, 0xb3, 0xd8, 0xe3, 0x55, 0x27, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// QueryClient is the client API for Query service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type QueryClient interface { + // Parameters queries the parameters of the module. + Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) + // Queries total burned. 
+ Burned(ctx context.Context, in *QueryBurnedRequest, opts ...grpc.CallOption) (*QueryBurnedResponse, error) + // Queries a Pool by index. + Pool(ctx context.Context, in *QueryGetPoolRequest, opts ...grpc.CallOption) (*QueryGetPoolResponse, error) + // Queries a list of Pool items. + PoolAll(ctx context.Context, in *QueryAllPoolRequest, opts ...grpc.CallOption) (*QueryAllPoolResponse, error) + // Queries a Volume by index. + Volume(ctx context.Context, in *QueryVolumeRequest, opts ...grpc.CallOption) (*QueryVolumeResponse, error) + // Queries all Volumes. + VolumeAll(ctx context.Context, in *QueryAllVolumeRequest, opts ...grpc.CallOption) (*QueryAllVolumeResponse, error) + // Queries a Drop by index. + Drop(ctx context.Context, in *QueryDropRequest, opts ...grpc.CallOption) (*QueryDropResponse, error) + // Queries a Drop by index. + DropAmounts(ctx context.Context, in *QueryDropAmountsRequest, opts ...grpc.CallOption) (*QueryDropAmountsResponse, error) + // Queries a Drop by index. + DropCoin(ctx context.Context, in *QueryDropCoinRequest, opts ...grpc.CallOption) (*QueryDropCoinResponse, error) + // Converts drops to coin amounts + DropsToCoins(ctx context.Context, in *QueryDropsToCoinsRequest, opts ...grpc.CallOption) (*QueryDropAmountsResponse, error) + // Queries a Drop by index. + DropPairs(ctx context.Context, in *QueryDropPairsRequest, opts ...grpc.CallOption) (*QueryDropPairsResponse, error) + // Queries a Drop by index. + DropOwnerPair(ctx context.Context, in *QueryDropOwnerPairRequest, opts ...grpc.CallOption) (*QueryDropsResponse, error) + // Queries a list of Drop items. + DropAll(ctx context.Context, in *QueryAllDropRequest, opts ...grpc.CallOption) (*QueryDropsResponse, error) + // Queries a Member by index. + Member(ctx context.Context, in *QueryGetMemberRequest, opts ...grpc.CallOption) (*QueryGetMemberResponse, error) + // Queries a list of Member items. 
+ MemberAll(ctx context.Context, in *QueryAllMemberRequest, opts ...grpc.CallOption) (*QueryAllMemberResponse, error) + // Queries a Burnings by index. + Burnings(ctx context.Context, in *QueryGetBurningsRequest, opts ...grpc.CallOption) (*QueryGetBurningsResponse, error) + // Queries a list of Burnings items. + BurningsAll(ctx context.Context, in *QueryAllBurningsRequest, opts ...grpc.CallOption) (*QueryAllBurningsResponse, error) + // Queries a Order by index. + Order(ctx context.Context, in *QueryOrderRequest, opts ...grpc.CallOption) (*QueryOrderResponse, error) + // Queries a list of Order items. + OrderAll(ctx context.Context, in *QueryAllOrderRequest, opts ...grpc.CallOption) (*QueryOrdersResponse, error) + // Queries a list of Order items. + OrderOwner(ctx context.Context, in *QueryOrderOwnerRequest, opts ...grpc.CallOption) (*QueryOrdersResponse, error) + // Queries a list of Order items. + OrderOwnerUids(ctx context.Context, in *QueryOrderOwnerRequest, opts ...grpc.CallOption) (*QueryOrderOwnerUidsResponse, error) + // Queries a list of Book items. + Book(ctx context.Context, in *QueryBookRequest, opts ...grpc.CallOption) (*QueryBookResponse, error) + // Queries a list of Bookends items. + Bookends(ctx context.Context, in *QueryBookendsRequest, opts ...grpc.CallOption) (*QueryBookendsResponse, error) + // Queries pool trade history. + History(ctx context.Context, in *QueryHistoryRequest, opts ...grpc.CallOption) (*QueryHistoryResponse, error) + // Queries pool trade history. 
+ Quote(ctx context.Context, in *QueryQuoteRequest, opts ...grpc.CallOption) (*QueryQuoteResponse, error) +} + +type queryClient struct { + cc grpc1.ClientConn +} + +func NewQueryClient(cc grpc1.ClientConn) QueryClient { + return &queryClient{cc} +} + +func (c *queryClient) Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) { + out := new(QueryParamsResponse) + err := c.cc.Invoke(ctx, "/pendulumlabs.market.market.Query/Params", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) Burned(ctx context.Context, in *QueryBurnedRequest, opts ...grpc.CallOption) (*QueryBurnedResponse, error) { + out := new(QueryBurnedResponse) + err := c.cc.Invoke(ctx, "/pendulumlabs.market.market.Query/Burned", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) Pool(ctx context.Context, in *QueryGetPoolRequest, opts ...grpc.CallOption) (*QueryGetPoolResponse, error) { + out := new(QueryGetPoolResponse) + err := c.cc.Invoke(ctx, "/pendulumlabs.market.market.Query/Pool", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) PoolAll(ctx context.Context, in *QueryAllPoolRequest, opts ...grpc.CallOption) (*QueryAllPoolResponse, error) { + out := new(QueryAllPoolResponse) + err := c.cc.Invoke(ctx, "/pendulumlabs.market.market.Query/PoolAll", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) Volume(ctx context.Context, in *QueryVolumeRequest, opts ...grpc.CallOption) (*QueryVolumeResponse, error) { + out := new(QueryVolumeResponse) + err := c.cc.Invoke(ctx, "/pendulumlabs.market.market.Query/Volume", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) VolumeAll(ctx context.Context, in *QueryAllVolumeRequest, opts ...grpc.CallOption) (*QueryAllVolumeResponse, error) { + out := new(QueryAllVolumeResponse) + err := c.cc.Invoke(ctx, "/pendulumlabs.market.market.Query/VolumeAll", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) Drop(ctx context.Context, in *QueryDropRequest, opts ...grpc.CallOption) (*QueryDropResponse, error) { + out := new(QueryDropResponse) + err := c.cc.Invoke(ctx, "/pendulumlabs.market.market.Query/Drop", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) DropAmounts(ctx context.Context, in *QueryDropAmountsRequest, opts ...grpc.CallOption) (*QueryDropAmountsResponse, error) { + out := new(QueryDropAmountsResponse) + err := c.cc.Invoke(ctx, "/pendulumlabs.market.market.Query/DropAmounts", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) DropCoin(ctx context.Context, in *QueryDropCoinRequest, opts ...grpc.CallOption) (*QueryDropCoinResponse, error) { + out := new(QueryDropCoinResponse) + err := c.cc.Invoke(ctx, "/pendulumlabs.market.market.Query/DropCoin", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) DropsToCoins(ctx context.Context, in *QueryDropsToCoinsRequest, opts ...grpc.CallOption) (*QueryDropAmountsResponse, error) { + out := new(QueryDropAmountsResponse) + err := c.cc.Invoke(ctx, "/pendulumlabs.market.market.Query/DropsToCoins", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) DropPairs(ctx context.Context, in *QueryDropPairsRequest, opts ...grpc.CallOption) (*QueryDropPairsResponse, error) { + out := new(QueryDropPairsResponse) + err := c.cc.Invoke(ctx, "/pendulumlabs.market.market.Query/DropPairs", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) DropOwnerPair(ctx context.Context, in *QueryDropOwnerPairRequest, opts ...grpc.CallOption) (*QueryDropsResponse, error) { + out := new(QueryDropsResponse) + err := c.cc.Invoke(ctx, "/pendulumlabs.market.market.Query/DropOwnerPair", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) DropAll(ctx context.Context, in *QueryAllDropRequest, opts ...grpc.CallOption) (*QueryDropsResponse, error) { + out := new(QueryDropsResponse) + err := c.cc.Invoke(ctx, "/pendulumlabs.market.market.Query/DropAll", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) Member(ctx context.Context, in *QueryGetMemberRequest, opts ...grpc.CallOption) (*QueryGetMemberResponse, error) { + out := new(QueryGetMemberResponse) + err := c.cc.Invoke(ctx, "/pendulumlabs.market.market.Query/Member", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) MemberAll(ctx context.Context, in *QueryAllMemberRequest, opts ...grpc.CallOption) (*QueryAllMemberResponse, error) { + out := new(QueryAllMemberResponse) + err := c.cc.Invoke(ctx, "/pendulumlabs.market.market.Query/MemberAll", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) Burnings(ctx context.Context, in *QueryGetBurningsRequest, opts ...grpc.CallOption) (*QueryGetBurningsResponse, error) { + out := new(QueryGetBurningsResponse) + err := c.cc.Invoke(ctx, "/pendulumlabs.market.market.Query/Burnings", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) BurningsAll(ctx context.Context, in *QueryAllBurningsRequest, opts ...grpc.CallOption) (*QueryAllBurningsResponse, error) { + out := new(QueryAllBurningsResponse) + err := c.cc.Invoke(ctx, "/pendulumlabs.market.market.Query/BurningsAll", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) Order(ctx context.Context, in *QueryOrderRequest, opts ...grpc.CallOption) (*QueryOrderResponse, error) { + out := new(QueryOrderResponse) + err := c.cc.Invoke(ctx, "/pendulumlabs.market.market.Query/Order", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) OrderAll(ctx context.Context, in *QueryAllOrderRequest, opts ...grpc.CallOption) (*QueryOrdersResponse, error) { + out := new(QueryOrdersResponse) + err := c.cc.Invoke(ctx, "/pendulumlabs.market.market.Query/OrderAll", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) OrderOwner(ctx context.Context, in *QueryOrderOwnerRequest, opts ...grpc.CallOption) (*QueryOrdersResponse, error) { + out := new(QueryOrdersResponse) + err := c.cc.Invoke(ctx, "/pendulumlabs.market.market.Query/OrderOwner", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) OrderOwnerUids(ctx context.Context, in *QueryOrderOwnerRequest, opts ...grpc.CallOption) (*QueryOrderOwnerUidsResponse, error) { + out := new(QueryOrderOwnerUidsResponse) + err := c.cc.Invoke(ctx, "/pendulumlabs.market.market.Query/OrderOwnerUids", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) Book(ctx context.Context, in *QueryBookRequest, opts ...grpc.CallOption) (*QueryBookResponse, error) { + out := new(QueryBookResponse) + err := c.cc.Invoke(ctx, "/pendulumlabs.market.market.Query/Book", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) Bookends(ctx context.Context, in *QueryBookendsRequest, opts ...grpc.CallOption) (*QueryBookendsResponse, error) { + out := new(QueryBookendsResponse) + err := c.cc.Invoke(ctx, "/pendulumlabs.market.market.Query/Bookends", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) History(ctx context.Context, in *QueryHistoryRequest, opts ...grpc.CallOption) (*QueryHistoryResponse, error) { + out := new(QueryHistoryResponse) + err := c.cc.Invoke(ctx, "/pendulumlabs.market.market.Query/History", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) Quote(ctx context.Context, in *QueryQuoteRequest, opts ...grpc.CallOption) (*QueryQuoteResponse, error) { + out := new(QueryQuoteResponse) + err := c.cc.Invoke(ctx, "/pendulumlabs.market.market.Query/Quote", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// QueryServer is the server API for Query service. +type QueryServer interface { + // Parameters queries the parameters of the module. + Params(context.Context, *QueryParamsRequest) (*QueryParamsResponse, error) + // Queries total burned. + Burned(context.Context, *QueryBurnedRequest) (*QueryBurnedResponse, error) + // Queries a Pool by index. + Pool(context.Context, *QueryGetPoolRequest) (*QueryGetPoolResponse, error) + // Queries a list of Pool items. + PoolAll(context.Context, *QueryAllPoolRequest) (*QueryAllPoolResponse, error) + // Queries a Volume by index. + Volume(context.Context, *QueryVolumeRequest) (*QueryVolumeResponse, error) + // Queries all Volumes. + VolumeAll(context.Context, *QueryAllVolumeRequest) (*QueryAllVolumeResponse, error) + // Queries a Drop by index. + Drop(context.Context, *QueryDropRequest) (*QueryDropResponse, error) + // Queries a Drop by index. + DropAmounts(context.Context, *QueryDropAmountsRequest) (*QueryDropAmountsResponse, error) + // Queries a Drop by index. + DropCoin(context.Context, *QueryDropCoinRequest) (*QueryDropCoinResponse, error) + // Converts drops to coin amounts + DropsToCoins(context.Context, *QueryDropsToCoinsRequest) (*QueryDropAmountsResponse, error) + // Queries a Drop by index. 
+ DropPairs(context.Context, *QueryDropPairsRequest) (*QueryDropPairsResponse, error) + // Queries a Drop by index. + DropOwnerPair(context.Context, *QueryDropOwnerPairRequest) (*QueryDropsResponse, error) + // Queries a list of Drop items. + DropAll(context.Context, *QueryAllDropRequest) (*QueryDropsResponse, error) + // Queries a Member by index. + Member(context.Context, *QueryGetMemberRequest) (*QueryGetMemberResponse, error) + // Queries a list of Member items. + MemberAll(context.Context, *QueryAllMemberRequest) (*QueryAllMemberResponse, error) + // Queries a Burnings by index. + Burnings(context.Context, *QueryGetBurningsRequest) (*QueryGetBurningsResponse, error) + // Queries a list of Burnings items. + BurningsAll(context.Context, *QueryAllBurningsRequest) (*QueryAllBurningsResponse, error) + // Queries a Order by index. + Order(context.Context, *QueryOrderRequest) (*QueryOrderResponse, error) + // Queries a list of Order items. + OrderAll(context.Context, *QueryAllOrderRequest) (*QueryOrdersResponse, error) + // Queries a list of Order items. + OrderOwner(context.Context, *QueryOrderOwnerRequest) (*QueryOrdersResponse, error) + // Queries a list of Order items. + OrderOwnerUids(context.Context, *QueryOrderOwnerRequest) (*QueryOrderOwnerUidsResponse, error) + // Queries a list of Book items. + Book(context.Context, *QueryBookRequest) (*QueryBookResponse, error) + // Queries a list of Bookends items. + Bookends(context.Context, *QueryBookendsRequest) (*QueryBookendsResponse, error) + // Queries pool trade history. + History(context.Context, *QueryHistoryRequest) (*QueryHistoryResponse, error) + // Queries pool trade history. + Quote(context.Context, *QueryQuoteRequest) (*QueryQuoteResponse, error) +} + +// UnimplementedQueryServer can be embedded to have forward compatible implementations. 
+type UnimplementedQueryServer struct { +} + +func (*UnimplementedQueryServer) Params(ctx context.Context, req *QueryParamsRequest) (*QueryParamsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Params not implemented") +} +func (*UnimplementedQueryServer) Burned(ctx context.Context, req *QueryBurnedRequest) (*QueryBurnedResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Burned not implemented") +} +func (*UnimplementedQueryServer) Pool(ctx context.Context, req *QueryGetPoolRequest) (*QueryGetPoolResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Pool not implemented") +} +func (*UnimplementedQueryServer) PoolAll(ctx context.Context, req *QueryAllPoolRequest) (*QueryAllPoolResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method PoolAll not implemented") +} +func (*UnimplementedQueryServer) Volume(ctx context.Context, req *QueryVolumeRequest) (*QueryVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Volume not implemented") +} +func (*UnimplementedQueryServer) VolumeAll(ctx context.Context, req *QueryAllVolumeRequest) (*QueryAllVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VolumeAll not implemented") +} +func (*UnimplementedQueryServer) Drop(ctx context.Context, req *QueryDropRequest) (*QueryDropResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Drop not implemented") +} +func (*UnimplementedQueryServer) DropAmounts(ctx context.Context, req *QueryDropAmountsRequest) (*QueryDropAmountsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DropAmounts not implemented") +} +func (*UnimplementedQueryServer) DropCoin(ctx context.Context, req *QueryDropCoinRequest) (*QueryDropCoinResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DropCoin not implemented") +} +func (*UnimplementedQueryServer) DropsToCoins(ctx context.Context, req 
*QueryDropsToCoinsRequest) (*QueryDropAmountsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DropsToCoins not implemented") +} +func (*UnimplementedQueryServer) DropPairs(ctx context.Context, req *QueryDropPairsRequest) (*QueryDropPairsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DropPairs not implemented") +} +func (*UnimplementedQueryServer) DropOwnerPair(ctx context.Context, req *QueryDropOwnerPairRequest) (*QueryDropsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DropOwnerPair not implemented") +} +func (*UnimplementedQueryServer) DropAll(ctx context.Context, req *QueryAllDropRequest) (*QueryDropsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DropAll not implemented") +} +func (*UnimplementedQueryServer) Member(ctx context.Context, req *QueryGetMemberRequest) (*QueryGetMemberResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Member not implemented") +} +func (*UnimplementedQueryServer) MemberAll(ctx context.Context, req *QueryAllMemberRequest) (*QueryAllMemberResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method MemberAll not implemented") +} +func (*UnimplementedQueryServer) Burnings(ctx context.Context, req *QueryGetBurningsRequest) (*QueryGetBurningsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Burnings not implemented") +} +func (*UnimplementedQueryServer) BurningsAll(ctx context.Context, req *QueryAllBurningsRequest) (*QueryAllBurningsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method BurningsAll not implemented") +} +func (*UnimplementedQueryServer) Order(ctx context.Context, req *QueryOrderRequest) (*QueryOrderResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Order not implemented") +} +func (*UnimplementedQueryServer) OrderAll(ctx context.Context, req *QueryAllOrderRequest) (*QueryOrdersResponse, error) { + 
return nil, status.Errorf(codes.Unimplemented, "method OrderAll not implemented") +} +func (*UnimplementedQueryServer) OrderOwner(ctx context.Context, req *QueryOrderOwnerRequest) (*QueryOrdersResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method OrderOwner not implemented") +} +func (*UnimplementedQueryServer) OrderOwnerUids(ctx context.Context, req *QueryOrderOwnerRequest) (*QueryOrderOwnerUidsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method OrderOwnerUids not implemented") +} +func (*UnimplementedQueryServer) Book(ctx context.Context, req *QueryBookRequest) (*QueryBookResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Book not implemented") +} +func (*UnimplementedQueryServer) Bookends(ctx context.Context, req *QueryBookendsRequest) (*QueryBookendsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Bookends not implemented") +} +func (*UnimplementedQueryServer) History(ctx context.Context, req *QueryHistoryRequest) (*QueryHistoryResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method History not implemented") +} +func (*UnimplementedQueryServer) Quote(ctx context.Context, req *QueryQuoteRequest) (*QueryQuoteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Quote not implemented") +} + +func RegisterQueryServer(s grpc1.Server, srv QueryServer) { + s.RegisterService(&_Query_serviceDesc, srv) +} + +func _Query_Params_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryParamsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Params(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pendulumlabs.market.market.Query/Params", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(QueryServer).Params(ctx, req.(*QueryParamsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_Burned_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryBurnedRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Burned(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pendulumlabs.market.market.Query/Burned", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Burned(ctx, req.(*QueryBurnedRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_Pool_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryGetPoolRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Pool(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pendulumlabs.market.market.Query/Pool", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Pool(ctx, req.(*QueryGetPoolRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_PoolAll_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryAllPoolRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).PoolAll(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pendulumlabs.market.market.Query/PoolAll", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).PoolAll(ctx, req.(*QueryAllPoolRequest)) + } + return interceptor(ctx, in, info, handler) +} + 
+func _Query_Volume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Volume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pendulumlabs.market.market.Query/Volume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Volume(ctx, req.(*QueryVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_VolumeAll_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryAllVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).VolumeAll(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pendulumlabs.market.market.Query/VolumeAll", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).VolumeAll(ctx, req.(*QueryAllVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_Drop_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryDropRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Drop(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pendulumlabs.market.market.Query/Drop", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Drop(ctx, req.(*QueryDropRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_DropAmounts_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, 
interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryDropAmountsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).DropAmounts(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pendulumlabs.market.market.Query/DropAmounts", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).DropAmounts(ctx, req.(*QueryDropAmountsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_DropCoin_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryDropCoinRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).DropCoin(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pendulumlabs.market.market.Query/DropCoin", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).DropCoin(ctx, req.(*QueryDropCoinRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_DropsToCoins_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryDropsToCoinsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).DropsToCoins(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pendulumlabs.market.market.Query/DropsToCoins", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).DropsToCoins(ctx, req.(*QueryDropsToCoinsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_DropPairs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryDropPairsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).DropPairs(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pendulumlabs.market.market.Query/DropPairs", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).DropPairs(ctx, req.(*QueryDropPairsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_DropOwnerPair_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryDropOwnerPairRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).DropOwnerPair(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pendulumlabs.market.market.Query/DropOwnerPair", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).DropOwnerPair(ctx, req.(*QueryDropOwnerPairRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_DropAll_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryAllDropRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).DropAll(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pendulumlabs.market.market.Query/DropAll", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).DropAll(ctx, req.(*QueryAllDropRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_Member_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) 
{ + in := new(QueryGetMemberRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Member(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pendulumlabs.market.market.Query/Member", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Member(ctx, req.(*QueryGetMemberRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_MemberAll_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryAllMemberRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).MemberAll(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pendulumlabs.market.market.Query/MemberAll", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).MemberAll(ctx, req.(*QueryAllMemberRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_Burnings_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryGetBurningsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Burnings(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pendulumlabs.market.market.Query/Burnings", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Burnings(ctx, req.(*QueryGetBurningsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_BurningsAll_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryAllBurningsRequest) + if err := dec(in); err 
!= nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).BurningsAll(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pendulumlabs.market.market.Query/BurningsAll", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).BurningsAll(ctx, req.(*QueryAllBurningsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_Order_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryOrderRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Order(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pendulumlabs.market.market.Query/Order", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Order(ctx, req.(*QueryOrderRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_OrderAll_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryAllOrderRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).OrderAll(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pendulumlabs.market.market.Query/OrderAll", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).OrderAll(ctx, req.(*QueryAllOrderRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_OrderOwner_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryOrderOwnerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(QueryServer).OrderOwner(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pendulumlabs.market.market.Query/OrderOwner", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).OrderOwner(ctx, req.(*QueryOrderOwnerRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_OrderOwnerUids_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryOrderOwnerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).OrderOwnerUids(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pendulumlabs.market.market.Query/OrderOwnerUids", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).OrderOwnerUids(ctx, req.(*QueryOrderOwnerRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_Book_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryBookRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Book(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pendulumlabs.market.market.Query/Book", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Book(ctx, req.(*QueryBookRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_Bookends_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryBookendsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Bookends(ctx, in) + } + info := 
&grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pendulumlabs.market.market.Query/Bookends", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Bookends(ctx, req.(*QueryBookendsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_History_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryHistoryRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).History(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pendulumlabs.market.market.Query/History", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).History(ctx, req.(*QueryHistoryRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_Quote_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryQuoteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Quote(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pendulumlabs.market.market.Query/Quote", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Quote(ctx, req.(*QueryQuoteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Query_serviceDesc = grpc.ServiceDesc{ + ServiceName: "pendulumlabs.market.market.Query", + HandlerType: (*QueryServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Params", + Handler: _Query_Params_Handler, + }, + { + MethodName: "Burned", + Handler: _Query_Burned_Handler, + }, + { + MethodName: "Pool", + Handler: _Query_Pool_Handler, + }, + { + MethodName: "PoolAll", + Handler: _Query_PoolAll_Handler, + }, + { + 
MethodName: "Volume", + Handler: _Query_Volume_Handler, + }, + { + MethodName: "VolumeAll", + Handler: _Query_VolumeAll_Handler, + }, + { + MethodName: "Drop", + Handler: _Query_Drop_Handler, + }, + { + MethodName: "DropAmounts", + Handler: _Query_DropAmounts_Handler, + }, + { + MethodName: "DropCoin", + Handler: _Query_DropCoin_Handler, + }, + { + MethodName: "DropsToCoins", + Handler: _Query_DropsToCoins_Handler, + }, + { + MethodName: "DropPairs", + Handler: _Query_DropPairs_Handler, + }, + { + MethodName: "DropOwnerPair", + Handler: _Query_DropOwnerPair_Handler, + }, + { + MethodName: "DropAll", + Handler: _Query_DropAll_Handler, + }, + { + MethodName: "Member", + Handler: _Query_Member_Handler, + }, + { + MethodName: "MemberAll", + Handler: _Query_MemberAll_Handler, + }, + { + MethodName: "Burnings", + Handler: _Query_Burnings_Handler, + }, + { + MethodName: "BurningsAll", + Handler: _Query_BurningsAll_Handler, + }, + { + MethodName: "Order", + Handler: _Query_Order_Handler, + }, + { + MethodName: "OrderAll", + Handler: _Query_OrderAll_Handler, + }, + { + MethodName: "OrderOwner", + Handler: _Query_OrderOwner_Handler, + }, + { + MethodName: "OrderOwnerUids", + Handler: _Query_OrderOwnerUids_Handler, + }, + { + MethodName: "Book", + Handler: _Query_Book_Handler, + }, + { + MethodName: "Bookends", + Handler: _Query_Bookends_Handler, + }, + { + MethodName: "History", + Handler: _Query_History_Handler, + }, + { + MethodName: "Quote", + Handler: _Query_Quote_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "market/query.proto", +} + +func (m *QueryParamsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryParamsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryParamsRequest) MarshalToSizedBuffer(dAtA 
[]byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *QueryParamsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryParamsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryParamsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *QueryGetPoolRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryGetPoolRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryGetPoolRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Pair) > 0 { + i -= len(m.Pair) + copy(dAtA[i:], m.Pair) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Pair))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryGetPoolResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryGetPoolResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryGetPoolResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := 
m.Pool.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *QueryAllPoolRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryAllPoolRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryAllPoolRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryAllPoolResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryAllPoolResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryAllPoolResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Pool) > 0 { + for iNdEx := len(m.Pool) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Pool[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryVolumeRequest) Marshal() 
(dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryVolumeRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryVolumeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Denom) > 0 { + i -= len(m.Denom) + copy(dAtA[i:], m.Denom) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Denom))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryVolumeResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryVolumeResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryVolumeResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Amount) > 0 { + i -= len(m.Amount) + copy(dAtA[i:], m.Amount) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Amount))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryAllVolumeRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryAllVolumeRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryAllVolumeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, 
uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryAllVolumeResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryAllVolumeResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryAllVolumeResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Volumes) > 0 { + for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Volumes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryBurnedRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryBurnedRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryBurnedRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *QueryBurnedResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryBurnedResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) 
+} + +func (m *QueryBurnedResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Amount) > 0 { + i -= len(m.Amount) + copy(dAtA[i:], m.Amount) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Amount))) + i-- + dAtA[i] = 0x12 + } + if len(m.Denom) > 0 { + i -= len(m.Denom) + copy(dAtA[i:], m.Denom) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Denom))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryDropRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryDropRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryDropRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Uid != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.Uid)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *QueryDropCoinRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryDropCoinRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryDropCoinRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.AmountA) > 0 { + i -= len(m.AmountA) + copy(dAtA[i:], m.AmountA) + i = encodeVarintQuery(dAtA, i, uint64(len(m.AmountA))) + i-- + dAtA[i] = 0x1a + } + if len(m.DenomB) > 0 { + i -= len(m.DenomB) + copy(dAtA[i:], m.DenomB) + i = encodeVarintQuery(dAtA, i, uint64(len(m.DenomB))) + i-- + dAtA[i] = 0x12 + } + if len(m.DenomA) > 0 { + i -= len(m.DenomA) + copy(dAtA[i:], m.DenomA) + i = encodeVarintQuery(dAtA, i, 
uint64(len(m.DenomA))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryDropCoinResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryDropCoinResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryDropCoinResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.AmountB) > 0 { + i -= len(m.AmountB) + copy(dAtA[i:], m.AmountB) + i = encodeVarintQuery(dAtA, i, uint64(len(m.AmountB))) + i-- + dAtA[i] = 0x12 + } + if len(m.Drops) > 0 { + i -= len(m.Drops) + copy(dAtA[i:], m.Drops) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Drops))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryDropResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryDropResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryDropResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Drop.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *QueryDropAmountsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryDropAmountsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) 
+} + +func (m *QueryDropAmountsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Uid != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.Uid)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *QueryDropAmountsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryDropAmountsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryDropAmountsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Amount2) > 0 { + i -= len(m.Amount2) + copy(dAtA[i:], m.Amount2) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Amount2))) + i-- + dAtA[i] = 0x22 + } + if len(m.Amount1) > 0 { + i -= len(m.Amount1) + copy(dAtA[i:], m.Amount1) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Amount1))) + i-- + dAtA[i] = 0x1a + } + if len(m.Denom2) > 0 { + i -= len(m.Denom2) + copy(dAtA[i:], m.Denom2) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Denom2))) + i-- + dAtA[i] = 0x12 + } + if len(m.Denom1) > 0 { + i -= len(m.Denom1) + copy(dAtA[i:], m.Denom1) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Denom1))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryDropsToCoinsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryDropsToCoinsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryDropsToCoinsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Drops) > 0 { + i -= len(m.Drops) + 
copy(dAtA[i:], m.Drops) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Drops))) + i-- + dAtA[i] = 0x12 + } + if len(m.Pair) > 0 { + i -= len(m.Pair) + copy(dAtA[i:], m.Pair) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Pair))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryDropPairsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryDropPairsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryDropPairsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Address) > 0 { + i -= len(m.Address) + copy(dAtA[i:], m.Address) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Address))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryDropPairsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryDropPairsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryDropPairsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Pairs) > 0 { + for iNdEx := len(m.Pairs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Pairs[iNdEx]) + copy(dAtA[i:], m.Pairs[iNdEx]) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Pairs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryDropOwnerPairRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*QueryDropOwnerPairRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryDropOwnerPairRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Pair) > 0 { + i -= len(m.Pair) + copy(dAtA[i:], m.Pair) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Pair))) + i-- + dAtA[i] = 0x12 + } + if len(m.Address) > 0 { + i -= len(m.Address) + copy(dAtA[i:], m.Address) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Address))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryDropOwnerPairSumRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryDropOwnerPairSumRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryDropOwnerPairSumRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Pair) > 0 { + i -= len(m.Pair) + copy(dAtA[i:], m.Pair) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Pair))) + i-- + dAtA[i] = 0x12 + } + if len(m.Address) > 0 { + i -= len(m.Address) + copy(dAtA[i:], m.Address) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Address))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryDropOwnerPairSumResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryDropOwnerPairSumResponse) MarshalTo(dAtA []byte) (int, 
error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryDropOwnerPairSumResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Sum) > 0 { + i -= len(m.Sum) + copy(dAtA[i:], m.Sum) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Sum))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryDropOwnerPairUidsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryDropOwnerPairUidsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryDropOwnerPairUidsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Pair) > 0 { + i -= len(m.Pair) + copy(dAtA[i:], m.Pair) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Pair))) + i-- + dAtA[i] = 0x12 + } + if len(m.Address) > 0 { + i -= len(m.Address) + copy(dAtA[i:], m.Address) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Address))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryUidsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryUidsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryUidsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, 
err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Uids) > 0 { + dAtA12 := make([]byte, len(m.Uids)*10) + var j11 int + for _, num := range m.Uids { + for num >= 1<<7 { + dAtA12[j11] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j11++ + } + dAtA12[j11] = uint8(num) + j11++ + } + i -= j11 + copy(dAtA[i:], dAtA12[:j11]) + i = encodeVarintQuery(dAtA, i, uint64(j11)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryDropOwnerPairDetailRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryDropOwnerPairDetailRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryDropOwnerPairDetailRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Pair) > 0 { + i -= len(m.Pair) + copy(dAtA[i:], m.Pair) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Pair))) + i-- + dAtA[i] = 0x12 + } + if len(m.Address) > 0 { + i -= len(m.Address) + copy(dAtA[i:], m.Address) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Address))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryAllDropRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryAllDropRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryAllDropRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryDropsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryDropsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryDropsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Drops) > 0 { + for iNdEx := len(m.Drops) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Drops[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryGetMemberRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryGetMemberRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryGetMemberRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.DenomB) > 0 { + i -= len(m.DenomB) + copy(dAtA[i:], 
m.DenomB) + i = encodeVarintQuery(dAtA, i, uint64(len(m.DenomB))) + i-- + dAtA[i] = 0x1a + } + if len(m.DenomA) > 0 { + i -= len(m.DenomA) + copy(dAtA[i:], m.DenomA) + i = encodeVarintQuery(dAtA, i, uint64(len(m.DenomA))) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} + +func (m *QueryGetMemberResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryGetMemberResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryGetMemberResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Member.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *QueryAllMemberRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryAllMemberRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryAllMemberRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryAllMemberResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*QueryAllMemberResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryAllMemberResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Member) > 0 { + for iNdEx := len(m.Member) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Member[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryGetBurningsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryGetBurningsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryGetBurningsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Denom) > 0 { + i -= len(m.Denom) + copy(dAtA[i:], m.Denom) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Denom))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryGetBurningsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryGetBurningsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryGetBurningsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, 
err := m.Burnings.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *QueryAllBurningsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryAllBurningsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryAllBurningsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryAllBurningsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryAllBurningsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryAllBurningsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Burnings) > 0 { + for iNdEx := len(m.Burnings) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Burnings[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil 
+} + +func (m *QueryOrderRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryOrderRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryOrderRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Uid != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.Uid)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *QueryOrderResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryOrderResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryOrderResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Order.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *QueryOrdersResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryOrdersResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryOrdersResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, 
uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Orders) > 0 { + for iNdEx := len(m.Orders) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Orders[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryAllOrderRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryAllOrderRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryAllOrderRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryOrderOwnerRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryOrderOwnerRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryOrderOwnerRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Address) > 0 { + i -= len(m.Address) + copy(dAtA[i:], m.Address) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Address))) + i-- + dAtA[i] = 0xa + } + return 
len(dAtA) - i, nil +} + +func (m *QueryOrderOwnerUidsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryOrderOwnerUidsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryOrderOwnerUidsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := m.Orders.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *QueryOrderOwnerPairRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryOrderOwnerPairRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryOrderOwnerPairRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Pair) > 0 { + i -= len(m.Pair) + copy(dAtA[i:], m.Pair) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Pair))) + i-- + dAtA[i] = 0x12 + } + if len(m.Address) > 0 { + i -= len(m.Address) + copy(dAtA[i:], m.Address) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Address))) + i-- + 
dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryOrderOwnerPairResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryOrderOwnerPairResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryOrderOwnerPairResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Order) > 0 { + for iNdEx := len(m.Order) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Order[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryBookRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryBookRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryBookRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if len(m.OrderType) > 0 { + i -= len(m.OrderType) + copy(dAtA[i:], m.OrderType) + i = encodeVarintQuery(dAtA, i, uint64(len(m.OrderType))) + i-- + dAtA[i] = 0x1a + } + if len(m.DenomB) > 0 { + i -= 
len(m.DenomB) + copy(dAtA[i:], m.DenomB) + i = encodeVarintQuery(dAtA, i, uint64(len(m.DenomB))) + i-- + dAtA[i] = 0x12 + } + if len(m.DenomA) > 0 { + i -= len(m.DenomA) + copy(dAtA[i:], m.DenomA) + i = encodeVarintQuery(dAtA, i, uint64(len(m.DenomA))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryBookResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryBookResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryBookResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Book) > 0 { + for iNdEx := len(m.Book) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Book[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryBookendsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryBookendsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryBookendsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Rate) > 0 { + for iNdEx := len(m.Rate) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Rate[iNdEx]) + copy(dAtA[i:], m.Rate[iNdEx]) + i = encodeVarintQuery(dAtA, i, 
uint64(len(m.Rate[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.OrderType) > 0 { + i -= len(m.OrderType) + copy(dAtA[i:], m.OrderType) + i = encodeVarintQuery(dAtA, i, uint64(len(m.OrderType))) + i-- + dAtA[i] = 0x1a + } + if len(m.CoinB) > 0 { + i -= len(m.CoinB) + copy(dAtA[i:], m.CoinB) + i = encodeVarintQuery(dAtA, i, uint64(len(m.CoinB))) + i-- + dAtA[i] = 0x12 + } + if len(m.CoinA) > 0 { + i -= len(m.CoinA) + copy(dAtA[i:], m.CoinA) + i = encodeVarintQuery(dAtA, i, uint64(len(m.CoinA))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryBookendsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryBookendsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryBookendsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Next != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.Next)) + i-- + dAtA[i] = 0x30 + } + if m.Prev != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.Prev)) + i-- + dAtA[i] = 0x28 + } + if len(m.Rate) > 0 { + for iNdEx := len(m.Rate) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Rate[iNdEx]) + copy(dAtA[i:], m.Rate[iNdEx]) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Rate[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.OrderType) > 0 { + i -= len(m.OrderType) + copy(dAtA[i:], m.OrderType) + i = encodeVarintQuery(dAtA, i, uint64(len(m.OrderType))) + i-- + dAtA[i] = 0x1a + } + if len(m.CoinB) > 0 { + i -= len(m.CoinB) + copy(dAtA[i:], m.CoinB) + i = encodeVarintQuery(dAtA, i, uint64(len(m.CoinB))) + i-- + dAtA[i] = 0x12 + } + if len(m.CoinA) > 0 { + i -= len(m.CoinA) + copy(dAtA[i:], m.CoinA) + i = encodeVarintQuery(dAtA, i, uint64(len(m.CoinA))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m 
*QueryHistoryRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryHistoryRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryHistoryRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Length) > 0 { + i -= len(m.Length) + copy(dAtA[i:], m.Length) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Length))) + i-- + dAtA[i] = 0x12 + } + if len(m.Pair) > 0 { + i -= len(m.Pair) + copy(dAtA[i:], m.Pair) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Pair))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryHistoryResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryHistoryResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryHistoryResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.History) > 0 { + for iNdEx := len(m.History) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.History[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, 
uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryQuoteRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryQuoteRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryQuoteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Amount) > 0 { + i -= len(m.Amount) + copy(dAtA[i:], m.Amount) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Amount))) + i-- + dAtA[i] = 0x22 + } + if len(m.DenomAmount) > 0 { + i -= len(m.DenomAmount) + copy(dAtA[i:], m.DenomAmount) + i = encodeVarintQuery(dAtA, i, uint64(len(m.DenomAmount))) + i-- + dAtA[i] = 0x1a + } + if len(m.DenomBid) > 0 { + i -= len(m.DenomBid) + copy(dAtA[i:], m.DenomBid) + i = encodeVarintQuery(dAtA, i, uint64(len(m.DenomBid))) + i-- + dAtA[i] = 0x12 + } + if len(m.DenomAsk) > 0 { + i -= len(m.DenomAsk) + copy(dAtA[i:], m.DenomAsk) + i = encodeVarintQuery(dAtA, i, uint64(len(m.DenomAsk))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryQuoteResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryQuoteResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryQuoteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Amount) > 0 { + i -= len(m.Amount) + copy(dAtA[i:], m.Amount) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Amount))) + i-- + dAtA[i] = 0x12 + } + if len(m.Denom) > 0 { + i -= len(m.Denom) + copy(dAtA[i:], m.Denom) + i = 
encodeVarintQuery(dAtA, i, uint64(len(m.Denom))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { + offset -= sovQuery(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *QueryParamsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *QueryParamsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Params.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func (m *QueryGetPoolRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Pair) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryGetPoolResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Pool.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func (m *QueryAllPoolRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryAllPoolResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Pool) > 0 { + for _, e := range m.Pool { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryVolumeRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Denom) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryVolumeResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Amount) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryAllVolumeRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Pagination != 
nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryAllVolumeResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Volumes) > 0 { + for _, e := range m.Volumes { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryBurnedRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *QueryBurnedResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Denom) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.Amount) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryDropRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Uid != 0 { + n += 1 + sovQuery(uint64(m.Uid)) + } + return n +} + +func (m *QueryDropCoinRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DenomA) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.DenomB) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.AmountA) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryDropCoinResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Drops) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.AmountB) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryDropResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Drop.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func (m *QueryDropAmountsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Uid != 0 { + n += 1 + sovQuery(uint64(m.Uid)) + } + return n +} + +func (m *QueryDropAmountsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l 
= len(m.Denom1) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.Denom2) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.Amount1) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.Amount2) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryDropsToCoinsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Pair) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.Drops) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryDropPairsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Address) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryDropPairsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Pairs) > 0 { + for _, s := range m.Pairs { + l = len(s) + n += 1 + l + sovQuery(uint64(l)) + } + } + return n +} + +func (m *QueryDropOwnerPairRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Address) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.Pair) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryDropOwnerPairSumRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Address) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.Pair) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryDropOwnerPairSumResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Sum) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryDropOwnerPairUidsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Address) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.Pair) + 
if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryUidsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Uids) > 0 { + l = 0 + for _, e := range m.Uids { + l += sovQuery(uint64(e)) + } + n += 1 + sovQuery(uint64(l)) + l + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryDropOwnerPairDetailRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Address) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.Pair) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryAllDropRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryDropsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Drops) > 0 { + for _, e := range m.Drops { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryGetMemberRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DenomA) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.DenomB) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryGetMemberResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Member.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func (m *QueryAllMemberRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + 
sovQuery(uint64(l)) + } + return n +} + +func (m *QueryAllMemberResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Member) > 0 { + for _, e := range m.Member { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryGetBurningsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Denom) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryGetBurningsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Burnings.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func (m *QueryAllBurningsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryAllBurningsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Burnings) > 0 { + for _, e := range m.Burnings { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryOrderRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Uid != 0 { + n += 1 + sovQuery(uint64(m.Uid)) + } + return n +} + +func (m *QueryOrderResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Order.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func (m *QueryOrdersResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Orders) > 0 { + for _, e := range m.Orders { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryAllOrderRequest) Size() (n int) { + if m == nil { 
+ return 0 + } + var l int + _ = l + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryOrderOwnerRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Address) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryOrderOwnerUidsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Orders.Size() + n += 1 + l + sovQuery(uint64(l)) + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryOrderOwnerPairRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Address) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.Pair) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryOrderOwnerPairResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Order) > 0 { + for _, e := range m.Order { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryBookRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DenomA) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.DenomB) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.OrderType) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryBookResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Book) > 0 { + for _, e := range m.Book { + l = e.Size() + n += 1 + l + 
sovQuery(uint64(l)) + } + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryBookendsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.CoinA) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.CoinB) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.OrderType) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if len(m.Rate) > 0 { + for _, s := range m.Rate { + l = len(s) + n += 1 + l + sovQuery(uint64(l)) + } + } + return n +} + +func (m *QueryBookendsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.CoinA) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.CoinB) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.OrderType) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if len(m.Rate) > 0 { + for _, s := range m.Rate { + l = len(s) + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.Prev != 0 { + n += 1 + sovQuery(uint64(m.Prev)) + } + if m.Next != 0 { + n += 1 + sovQuery(uint64(m.Next)) + } + return n +} + +func (m *QueryHistoryRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Pair) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.Length) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryHistoryResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.History) > 0 { + for _, e := range m.History { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryQuoteRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DenomAsk) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = 
len(m.DenomBid) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.DenomAmount) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.Amount) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryQuoteResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Denom) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.Amount) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func sovQuery(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozQuery(x uint64) (n int) { + return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *QueryParamsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryParamsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryParamsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryParamsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryParamsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryParamsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryGetPoolRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryGetPoolRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: QueryGetPoolRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pair", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pair = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryGetPoolResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryGetPoolResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryGetPoolResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pool", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Pool.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryAllPoolRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryAllPoolRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryAllPoolRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + 
return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryAllPoolResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryAllPoolResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryAllPoolResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pool", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pool = append(m.Pool, Pool{}) + if err := m.Pool[len(m.Pool)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx 
= postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageResponse{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryVolumeRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryVolumeRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryVolumeRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Denom", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Denom = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryVolumeResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryVolumeResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryVolumeResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx 
+ intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Amount = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryAllVolumeRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryAllVolumeRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryAllVolumeRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex 
+ skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryAllVolumeResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryAllVolumeResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryAllVolumeResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Volumes = append(m.Volumes, Volume{}) + if err := m.Volumes[len(m.Volumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageResponse{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryBurnedRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryBurnedRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryBurnedRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryBurnedResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var 
wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryBurnedResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryBurnedResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Denom", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Denom = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Amount = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != 
nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryDropRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryDropRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryDropRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Uid", wireType) + } + m.Uid = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Uid |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryDropCoinRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if 
b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryDropCoinRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryDropCoinRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DenomA", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DenomA = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DenomB", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DenomB = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AmountA", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AmountA = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryDropCoinResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryDropCoinResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryDropCoinResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Drops", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > 
l { + return io.ErrUnexpectedEOF + } + m.Drops = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AmountB", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AmountB = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryDropResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryDropResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryDropResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Drop", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Drop.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryDropAmountsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryDropAmountsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryDropAmountsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Uid", wireType) + } + m.Uid = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Uid |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 
0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryDropAmountsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryDropAmountsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryDropAmountsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Denom1", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Denom1 = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Denom2", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen 
:= int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Denom2 = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Amount1", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Amount1 = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Amount2", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Amount2 = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*QueryDropsToCoinsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryDropsToCoinsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryDropsToCoinsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pair", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pair = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Drops", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
m.Drops = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryDropPairsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryDropPairsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryDropPairsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Address = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + 
return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryDropPairsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryDropPairsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryDropPairsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pairs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pairs = append(m.Pairs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryDropOwnerPairRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + 
var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryDropOwnerPairRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryDropOwnerPairRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Address = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pair", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pair = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryDropOwnerPairSumRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryDropOwnerPairSumRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryDropOwnerPairSumRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Address = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pair", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pair = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryDropOwnerPairSumResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryDropOwnerPairSumResponse: wiretype end group for 
non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryDropOwnerPairSumResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sum = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryDropOwnerPairUidsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryDropOwnerPairUidsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryDropOwnerPairUidsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
Address", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Address = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pair", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pair = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = 
postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryUidsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryUidsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryUidsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Uids = append(m.Uids, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if 
elementCount != 0 && len(m.Uids) == 0 { + m.Uids = make([]uint64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Uids = append(m.Uids, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Uids", wireType) + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageResponse{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryDropOwnerPairDetailRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + 
wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryDropOwnerPairDetailRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryDropOwnerPairDetailRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Address = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pair", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pair = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if 
b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryAllDropRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryAllDropRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryAllDropRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if 
m.Pagination == nil { + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryDropsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryDropsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryDropsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Drops", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Drops = append(m.Drops, Drop{}) + if err := m.Drops[len(m.Drops)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d 
for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageResponse{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryGetMemberRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryGetMemberRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryGetMemberRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DenomA", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] 
+ iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DenomA = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DenomB", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DenomB = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryGetMemberResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryGetMemberResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: QueryGetMemberResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Member", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Member.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryAllMemberRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryAllMemberRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryAllMemberRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryAllMemberResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryAllMemberResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryAllMemberResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Member", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Member = append(m.Member, Member{}) + if err := m.Member[len(m.Member)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageResponse{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryGetBurningsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryGetBurningsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: QueryGetBurningsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Denom", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Denom = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryGetBurningsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryGetBurningsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryGetBurningsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Burnings", wireType) + } + var msglen int + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Burnings.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryAllBurningsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryAllBurningsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryAllBurningsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx 
+ msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryAllBurningsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryAllBurningsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryAllBurningsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Burnings", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Burnings = append(m.Burnings, Burnings{}) + if err := 
m.Burnings[len(m.Burnings)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageResponse{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryOrderRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryOrderRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryOrderRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Uid", wireType) + } + 
m.Uid = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Uid |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryOrderResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryOrderResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryOrderResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Order", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Order.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryOrdersResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryOrdersResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryOrdersResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Orders", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Orders = append(m.Orders, Order{}) + if err := m.Orders[len(m.Orders)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageResponse{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryAllOrderRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryAllOrderRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryAllOrderRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryOrderOwnerRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryOrderOwnerRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryOrderOwnerRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Address = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryOrderOwnerUidsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryOrderOwnerUidsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryOrderOwnerUidsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Orders", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery 
+ } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Orders.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageResponse{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryOrderOwnerPairRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { 
+ return fmt.Errorf("proto: QueryOrderOwnerPairRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryOrderOwnerPairRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Address = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pair", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pair = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryOrderOwnerPairResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryOrderOwnerPairResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryOrderOwnerPairResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Order", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Order = append(m.Order, Order{}) + 
if err := m.Order[len(m.Order)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageResponse{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryBookRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryBookRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryBookRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DenomA", wireType) + } + 
var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DenomA = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DenomB", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DenomB = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OrderType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OrderType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType 
= %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryBookResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryBookResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryBookResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Book", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Book = append(m.Book, OrderResponse{}) + if err := m.Book[len(m.Book)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageResponse{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryBookendsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: 
QueryBookendsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryBookendsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CoinA", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CoinA = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CoinB", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CoinB = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OrderType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + 
return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OrderType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rate", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rate = append(m.Rate, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryBookendsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryBookendsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryBookendsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType 
!= 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CoinA", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CoinA = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CoinB", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CoinB = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OrderType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OrderType = string(dAtA[iNdEx:postIndex]) + iNdEx 
= postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rate", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rate = append(m.Rate, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Prev", wireType) + } + m.Prev = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Prev |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Next", wireType) + } + m.Next = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Next |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryHistoryRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery 
+ } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryHistoryRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryHistoryRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pair", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pair = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Length", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Length = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 
64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryHistoryResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryHistoryResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryHistoryResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field History", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.History = append(m.History, OrderResponse{}) + if err := m.History[len(m.History)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageResponse{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { - offset -= sovQuery(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *QueryParamsRequest) Size() (n int) { - if m == nil { - return 0 } - var l int - _ = l - return n -} -func (m *QueryParamsResponse) Size() (n int) { - if m == nil { - return 0 + if iNdEx > l { + return io.ErrUnexpectedEOF } - var l int - _ = l - l = m.Params.Size() - n += 1 + l + 
sovQuery(uint64(l)) - return n -} - -func sovQuery(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozQuery(x uint64) (n int) { - return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) + return nil } -func (m *QueryParamsRequest) Unmarshal(dAtA []byte) error { +func (m *QueryQuoteRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -340,12 +12539,140 @@ func (m *QueryParamsRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: QueryParamsRequest: wiretype end group for non-group") + return fmt.Errorf("proto: QueryQuoteRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: QueryParamsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: QueryQuoteRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DenomAsk", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DenomAsk = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DenomBid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DenomBid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DenomAmount", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DenomAmount = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Amount = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipQuery(dAtA[iNdEx:]) @@ -367,7 +12694,7 @@ func (m *QueryParamsRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *QueryParamsResponse) Unmarshal(dAtA []byte) error { +func (m *QueryQuoteResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for 
iNdEx < l { @@ -390,17 +12717,17 @@ func (m *QueryParamsResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: QueryParamsResponse: wiretype end group for non-group") + return fmt.Errorf("proto: QueryQuoteResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: QueryParamsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: QueryQuoteResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Denom", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowQuery @@ -410,24 +12737,55 @@ func (m *QueryParamsResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthQuery } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthQuery } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.Denom = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + 
intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF } + m.Amount = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex diff --git a/x/market/types/query.pb.gw.go b/x/market/types/query.pb.gw.go index d39391b3..815e74a7 100644 --- a/x/market/types/query.pb.gw.go +++ b/x/market/types/query.pb.gw.go @@ -51,77 +51,2497 @@ func local_request_Query_Params_0(ctx context.Context, marshaler runtime.Marshal } +func request_Query_Burned_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryBurnedRequest + var metadata runtime.ServerMetadata + + msg, err := client.Burned(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_Burned_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryBurnedRequest + var metadata runtime.ServerMetadata + + msg, err := server.Burned(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_Pool_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryGetPoolRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["pair"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "pair") + } + + protoReq.Pair, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "pair", err) + } + + msg, err := client.Pool(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), 
grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_Pool_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryGetPoolRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["pair"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "pair") + } + + protoReq.Pair, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "pair", err) + } + + msg, err := server.Pool(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_Query_PoolAll_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_Query_PoolAll_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryAllPoolRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_PoolAll_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.PoolAll(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_PoolAll_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryAllPoolRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, 
metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_PoolAll_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.PoolAll(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_Volume_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryVolumeRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["denom"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "denom") + } + + protoReq.Denom, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "denom", err) + } + + msg, err := client.Volume(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_Volume_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryVolumeRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["denom"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "denom") + } + + protoReq.Denom, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "denom", err) + } + + msg, err := server.Volume(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_Query_VolumeAll_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: 
[]int(nil)} +) + +func request_Query_VolumeAll_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryAllVolumeRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_VolumeAll_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.VolumeAll(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_VolumeAll_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryAllVolumeRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_VolumeAll_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.VolumeAll(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_Drop_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryDropRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["uid"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "uid") + } + + protoReq.Uid, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, 
"type mismatch, parameter: %s, error: %v", "uid", err) + } + + msg, err := client.Drop(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_Drop_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryDropRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["uid"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "uid") + } + + protoReq.Uid, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "uid", err) + } + + msg, err := server.Drop(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_DropAmounts_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryDropAmountsRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["uid"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "uid") + } + + protoReq.Uid, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "uid", err) + } + + msg, err := client.DropAmounts(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_DropAmounts_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryDropAmountsRequest 
+ var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["uid"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "uid") + } + + protoReq.Uid, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "uid", err) + } + + msg, err := server.DropAmounts(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_DropCoin_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryDropCoinRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["denomA"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "denomA") + } + + protoReq.DenomA, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "denomA", err) + } + + val, ok = pathParams["denomB"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "denomB") + } + + protoReq.DenomB, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "denomB", err) + } + + val, ok = pathParams["amountA"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "amountA") + } + + protoReq.AmountA, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "amountA", err) + } + + msg, err := client.DropCoin(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + 
+func local_request_Query_DropCoin_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryDropCoinRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["denomA"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "denomA") + } + + protoReq.DenomA, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "denomA", err) + } + + val, ok = pathParams["denomB"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "denomB") + } + + protoReq.DenomB, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "denomB", err) + } + + val, ok = pathParams["amountA"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "amountA") + } + + protoReq.AmountA, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "amountA", err) + } + + msg, err := server.DropCoin(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_DropsToCoins_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryDropsToCoinsRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["pair"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "pair") + } + + protoReq.Pair, err = runtime.String(val) + + if err != nil { + return nil, metadata, 
status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "pair", err) + } + + val, ok = pathParams["drops"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "drops") + } + + protoReq.Drops, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "drops", err) + } + + msg, err := client.DropsToCoins(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_DropsToCoins_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryDropsToCoinsRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["pair"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "pair") + } + + protoReq.Pair, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "pair", err) + } + + val, ok = pathParams["drops"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "drops") + } + + protoReq.Drops, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "drops", err) + } + + msg, err := server.DropsToCoins(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_DropPairs_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryDropPairsRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = 
err + ) + + val, ok = pathParams["address"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "address") + } + + protoReq.Address, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "address", err) + } + + msg, err := client.DropPairs(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_DropPairs_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryDropPairsRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["address"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "address") + } + + protoReq.Address, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "address", err) + } + + msg, err := server.DropPairs(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_Query_DropOwnerPair_0 = &utilities.DoubleArray{Encoding: map[string]int{"address": 0, "pair": 1}, Base: []int{1, 1, 2, 0, 0}, Check: []int{0, 1, 1, 2, 3}} +) + +func request_Query_DropOwnerPair_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryDropOwnerPairRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["address"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "address") + } + + protoReq.Address, err = runtime.String(val) + + if err != nil { + 
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "address", err) + } + + val, ok = pathParams["pair"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "pair") + } + + protoReq.Pair, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "pair", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_DropOwnerPair_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.DropOwnerPair(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_DropOwnerPair_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryDropOwnerPairRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["address"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "address") + } + + protoReq.Address, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "address", err) + } + + val, ok = pathParams["pair"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "pair") + } + + protoReq.Pair, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "pair", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, 
status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_DropOwnerPair_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.DropOwnerPair(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_Query_DropAll_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_Query_DropAll_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryAllDropRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_DropAll_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.DropAll(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_DropAll_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryAllDropRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_DropAll_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.DropAll(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_Member_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, 
runtime.ServerMetadata, error) { + var protoReq QueryGetMemberRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["denomA"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "denomA") + } + + protoReq.DenomA, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "denomA", err) + } + + val, ok = pathParams["denomB"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "denomB") + } + + protoReq.DenomB, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "denomB", err) + } + + msg, err := client.Member(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_Member_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryGetMemberRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["denomA"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "denomA") + } + + protoReq.DenomA, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "denomA", err) + } + + val, ok = pathParams["denomB"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "denomB") + } + + protoReq.DenomB, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "denomB", err) + } + + 
msg, err := server.Member(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_Query_MemberAll_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_Query_MemberAll_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryAllMemberRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_MemberAll_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.MemberAll(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_MemberAll_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryAllMemberRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_MemberAll_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.MemberAll(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_Burnings_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryGetBurningsRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["denom"] + if !ok { + return nil, 
metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "denom") + } + + protoReq.Denom, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "denom", err) + } + + msg, err := client.Burnings(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_Burnings_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryGetBurningsRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["denom"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "denom") + } + + protoReq.Denom, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "denom", err) + } + + msg, err := server.Burnings(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_Query_BurningsAll_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_Query_BurningsAll_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryAllBurningsRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_BurningsAll_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.BurningsAll(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), 
grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_BurningsAll_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryAllBurningsRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_BurningsAll_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.BurningsAll(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_Order_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryOrderRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["uid"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "uid") + } + + protoReq.Uid, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "uid", err) + } + + msg, err := client.Order(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_Order_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryOrderRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["uid"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter 
%s", "uid") + } + + protoReq.Uid, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "uid", err) + } + + msg, err := server.Order(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_Query_OrderAll_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_Query_OrderAll_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryAllOrderRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_OrderAll_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.OrderAll(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_OrderAll_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryAllOrderRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_OrderAll_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.OrderAll(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_Query_OrderOwner_0 = &utilities.DoubleArray{Encoding: map[string]int{"address": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} +) + +func request_Query_OrderOwner_0(ctx context.Context, 
marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryOrderOwnerRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["address"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "address") + } + + protoReq.Address, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "address", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_OrderOwner_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.OrderOwner(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_OrderOwner_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryOrderOwnerRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["address"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "address") + } + + protoReq.Address, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "address", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_OrderOwner_0); err != nil { + return nil, 
metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.OrderOwner(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_Query_OrderOwnerUids_0 = &utilities.DoubleArray{Encoding: map[string]int{"address": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} +) + +func request_Query_OrderOwnerUids_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryOrderOwnerRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["address"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "address") + } + + protoReq.Address, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "address", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_OrderOwnerUids_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.OrderOwnerUids(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_OrderOwnerUids_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryOrderOwnerRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["address"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "address") + } + + protoReq.Address, err = runtime.String(val) + + if err != 
nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "address", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_OrderOwnerUids_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.OrderOwnerUids(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_Query_Book_0 = &utilities.DoubleArray{Encoding: map[string]int{"denomA": 0, "denomB": 1, "orderType": 2}, Base: []int{1, 1, 2, 3, 0, 0, 0}, Check: []int{0, 1, 1, 1, 2, 3, 4}} +) + +func request_Query_Book_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryBookRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["denomA"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "denomA") + } + + protoReq.DenomA, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "denomA", err) + } + + val, ok = pathParams["denomB"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "denomB") + } + + protoReq.DenomB, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "denomB", err) + } + + val, ok = pathParams["orderType"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "orderType") + } + + protoReq.OrderType, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, 
"type mismatch, parameter: %s, error: %v", "orderType", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Book_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Book(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_Book_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryBookRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["denomA"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "denomA") + } + + protoReq.DenomA, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "denomA", err) + } + + val, ok = pathParams["denomB"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "denomB") + } + + protoReq.DenomB, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "denomB", err) + } + + val, ok = pathParams["orderType"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "orderType") + } + + protoReq.OrderType, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "orderType", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := 
runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Book_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.Book(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_Bookends_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryBookendsRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["coinA"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "coinA") + } + + protoReq.CoinA, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "coinA", err) + } + + val, ok = pathParams["coinB"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "coinB") + } + + protoReq.CoinB, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "coinB", err) + } + + val, ok = pathParams["orderType"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "orderType") + } + + protoReq.OrderType, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "orderType", err) + } + + val, ok = pathParams["rate"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "rate") + } + + protoReq.Rate, err = runtime.StringSlice(val, ",") + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "rate", err) + } + + msg, err := client.Bookends(ctx, &protoReq, 
grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_Bookends_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryBookendsRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["coinA"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "coinA") + } + + protoReq.CoinA, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "coinA", err) + } + + val, ok = pathParams["coinB"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "coinB") + } + + protoReq.CoinB, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "coinB", err) + } + + val, ok = pathParams["orderType"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "orderType") + } + + protoReq.OrderType, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "orderType", err) + } + + val, ok = pathParams["rate"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "rate") + } + + protoReq.Rate, err = runtime.StringSlice(val, ",") + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "rate", err) + } + + msg, err := server.Bookends(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_Query_History_0 = &utilities.DoubleArray{Encoding: map[string]int{"pair": 0}, Base: []int{1, 1, 0}, Check: 
[]int{0, 1, 2}} +) + +func request_Query_History_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryHistoryRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["pair"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "pair") + } + + protoReq.Pair, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "pair", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_History_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.History(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_History_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryHistoryRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["pair"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "pair") + } + + protoReq.Pair, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "pair", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_History_0); 
err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.History(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_Quote_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryQuoteRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["denomBid"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "denomBid") + } + + protoReq.DenomBid, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "denomBid", err) + } + + val, ok = pathParams["denomAsk"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "denomAsk") + } + + protoReq.DenomAsk, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "denomAsk", err) + } + + val, ok = pathParams["denomAmount"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "denomAmount") + } + + protoReq.DenomAmount, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "denomAmount", err) + } + + val, ok = pathParams["amount"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "amount") + } + + protoReq.Amount, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "amount", err) + } + + msg, err := client.Quote(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, 
metadata, err + +} + +func local_request_Query_Quote_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryQuoteRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["denomBid"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "denomBid") + } + + protoReq.DenomBid, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "denomBid", err) + } + + val, ok = pathParams["denomAsk"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "denomAsk") + } + + protoReq.DenomAsk, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "denomAsk", err) + } + + val, ok = pathParams["denomAmount"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "denomAmount") + } + + protoReq.DenomAmount, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "denomAmount", err) + } + + val, ok = pathParams["amount"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "amount") + } + + protoReq.Amount, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "amount", err) + } + + msg, err := server.Quote(ctx, &protoReq) + return msg, metadata, err + +} + // RegisterQueryHandlerServer registers the http handlers for service Query to "mux". // UnaryRPC :call QueryServer directly. 
// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. // Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { - mux.Handle("GET", pattern_Query_Params_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle("GET", pattern_Query_Params_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Params_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Params_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_Burned_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Burned_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Burned_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_Pool_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Pool_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Pool_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_PoolAll_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_PoolAll_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_PoolAll_0(ctx, mux, outboundMarshaler, w, req, resp, 
mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_Volume_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Volume_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Volume_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_VolumeAll_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_VolumeAll_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_VolumeAll_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_Drop_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Drop_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Drop_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_DropAmounts_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_DropAmounts_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_DropAmounts_0(ctx, mux, outboundMarshaler, w, req, resp, 
mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_DropCoin_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_DropCoin_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_DropCoin_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_DropsToCoins_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_DropsToCoins_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_DropsToCoins_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_DropPairs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_DropPairs_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_DropPairs_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_DropOwnerPair_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_DropOwnerPair_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_DropOwnerPair_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_DropAll_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_DropAll_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_DropAll_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_Member_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Member_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Member_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_MemberAll_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_MemberAll_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_MemberAll_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_Burnings_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Burnings_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Burnings_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_BurningsAll_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_BurningsAll_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_BurningsAll_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_Order_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Order_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Order_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_OrderAll_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_OrderAll_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_OrderAll_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_OrderOwner_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_OrderOwner_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_OrderOwner_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_OrderOwnerUids_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_OrderOwnerUids_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_OrderOwnerUids_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_Book_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Book_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Book_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_Bookends_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Bookends_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Bookends_0(ctx, mux, outboundMarshaler, w, req, resp, 
mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_History_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_History_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_History_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_Quote_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Quote_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Quote_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterQueryHandlerFromEndpoint is same as RegisterQueryHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterQueryHandler(ctx, mux, conn) +} + +// RegisterQueryHandler registers the http handlers for service Query to "mux". +// The handlers forward requests to the grpc endpoint over "conn". 
+func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn)) +} + +// RegisterQueryHandlerClient registers the http handlers for service Query +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "QueryClient" to call the correct interceptors. +func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error { + + mux.Handle("GET", pattern_Query_Params_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_Params_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Params_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_Burned_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_Burned_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Burned_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_Pool_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_Pool_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Pool_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_PoolAll_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_PoolAll_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_PoolAll_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_Volume_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_Volume_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Volume_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_VolumeAll_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_VolumeAll_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_VolumeAll_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_Drop_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_Drop_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Drop_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_DropAmounts_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_DropAmounts_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_DropAmounts_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_DropCoin_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_DropCoin_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_DropCoin_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_DropsToCoins_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_DropsToCoins_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_DropsToCoins_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_DropPairs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_DropPairs_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_DropPairs_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_DropOwnerPair_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_DropOwnerPair_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_DropOwnerPair_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_DropAll_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_DropAll_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_DropAll_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_Member_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_Query_Params_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + resp, md, err := request_Query_Member_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Query_Params_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Query_Member_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) - return nil -} + mux.Handle("GET", pattern_Query_MemberAll_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_MemberAll_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } -// RegisterQueryHandlerFromEndpoint is same as RegisterQueryHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) - if err != nil { - return err - } - defer func() { + forward_Query_MemberAll_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_Burnings_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_Burnings_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - return RegisterQueryHandler(ctx, mux, conn) -} + forward_Query_Burnings_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) -// RegisterQueryHandler registers the http handlers for service Query to "mux". -// The handlers forward requests to the grpc endpoint over "conn". -func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn)) -} + }) -// RegisterQueryHandlerClient registers the http handlers for service Query -// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient". -// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryClient" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "QueryClient" to call the correct interceptors. 
-func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error { + mux.Handle("GET", pattern_Query_BurningsAll_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_BurningsAll_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } - mux.Handle("GET", pattern_Query_Params_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + forward_Query_BurningsAll_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_Order_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) @@ -130,14 +2550,154 @@ func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_Query_Params_0(rctx, inboundMarshaler, client, req, pathParams) + resp, md, err := request_Query_Order_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Query_Params_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Query_Order_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_OrderAll_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_OrderAll_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_OrderAll_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_OrderOwner_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_OrderOwner_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_OrderOwner_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_OrderOwnerUids_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_OrderOwnerUids_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_OrderOwnerUids_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_Book_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_Book_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Book_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_Bookends_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_Bookends_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Bookends_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_History_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_History_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_History_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_Quote_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_Quote_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Quote_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -145,9 +2705,105 @@ func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie } var ( - pattern_Query_Params_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 0, 2, 1}, []string{"market", "params"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_Query_Params_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 1, 2, 2}, []string{"pendulum-labs", "market", "params"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_Burned_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 1, 2, 2}, []string{"pendulum-labs", "market", "burned"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_Pool_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"pendulum-labs", "market", "pool", "pair"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_PoolAll_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 1, 2, 2}, []string{"pendulum-labs", "market", "pool"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_Volume_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"pendulum-labs", "market", "volume", "denom"}, "", 
runtime.AssumeColonVerbOpt(true))) + + pattern_Query_VolumeAll_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 1, 2, 2}, []string{"pendulum-labs", "market", "volume"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_Drop_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"pendulum-labs", "market", "drop", "uid"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_DropAmounts_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"pendulum-labs", "market", "drop", "amounts", "uid"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_DropCoin_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4, 1, 0, 4, 1, 5, 5, 1, 0, 4, 1, 5, 6}, []string{"pendulum-labs", "market", "drop", "coin", "denomA", "denomB", "amountA"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_DropsToCoins_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4, 1, 0, 4, 1, 5, 5}, []string{"pendulum-labs", "market", "drop", "coins", "pair", "drops"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_DropPairs_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"pendulum-labs", "market", "drop", "pairs", "address"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_DropOwnerPair_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 1, 0, 4, 1, 5, 4}, []string{"pendulum-labs", "market", "drop", "address", "pair"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_DropAll_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 1, 2, 2}, []string{"pendulum-labs", "market", "drop"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_Member_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 1, 2, 2, 1, 0, 4, 1, 5, 
3, 1, 0, 4, 1, 5, 4}, []string{"pendulum-labs", "market", "member", "denomA", "denomB"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_MemberAll_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 1, 2, 2}, []string{"pendulum-labs", "market", "member"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_Burnings_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"pendulum-labs", "market", "burnings", "denom"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_BurningsAll_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 1, 2, 2}, []string{"pendulum-labs", "market", "burnings"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_Order_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"pendulum-labs", "market", "order", "uid"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_OrderAll_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 1, 2, 2}, []string{"pendulum-labs", "market", "order"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_OrderOwner_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"pendulum-labs", "market", "order", "address"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_OrderOwnerUids_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"pendulum-labs", "market", "order", "uids", "address"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_Book_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 1, 0, 4, 1, 5, 4, 1, 0, 4, 1, 5, 5}, []string{"pendulum-labs", "market", "book", "denomA", "denomB", "orderType"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_Bookends_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 1, 0, 4, 1, 5, 
4, 1, 0, 4, 1, 5, 5, 1, 0, 4, 1, 5, 6}, []string{"pendulum-labs", "market", "bookends", "coinA", "coinB", "orderType", "rate"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_History_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"pendulum-labs", "market", "history", "pair"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_Quote_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 1, 0, 4, 1, 5, 4, 1, 0, 4, 1, 5, 5, 1, 0, 4, 1, 5, 6}, []string{"pendulum-labs", "market", "quote", "denomBid", "denomAsk", "denomAmount", "amount"}, "", runtime.AssumeColonVerbOpt(true))) ) var ( forward_Query_Params_0 = runtime.ForwardResponseMessage + + forward_Query_Burned_0 = runtime.ForwardResponseMessage + + forward_Query_Pool_0 = runtime.ForwardResponseMessage + + forward_Query_PoolAll_0 = runtime.ForwardResponseMessage + + forward_Query_Volume_0 = runtime.ForwardResponseMessage + + forward_Query_VolumeAll_0 = runtime.ForwardResponseMessage + + forward_Query_Drop_0 = runtime.ForwardResponseMessage + + forward_Query_DropAmounts_0 = runtime.ForwardResponseMessage + + forward_Query_DropCoin_0 = runtime.ForwardResponseMessage + + forward_Query_DropsToCoins_0 = runtime.ForwardResponseMessage + + forward_Query_DropPairs_0 = runtime.ForwardResponseMessage + + forward_Query_DropOwnerPair_0 = runtime.ForwardResponseMessage + + forward_Query_DropAll_0 = runtime.ForwardResponseMessage + + forward_Query_Member_0 = runtime.ForwardResponseMessage + + forward_Query_MemberAll_0 = runtime.ForwardResponseMessage + + forward_Query_Burnings_0 = runtime.ForwardResponseMessage + + forward_Query_BurningsAll_0 = runtime.ForwardResponseMessage + + forward_Query_Order_0 = runtime.ForwardResponseMessage + + forward_Query_OrderAll_0 = runtime.ForwardResponseMessage + + forward_Query_OrderOwner_0 = runtime.ForwardResponseMessage + + forward_Query_OrderOwnerUids_0 = 
runtime.ForwardResponseMessage + + forward_Query_Book_0 = runtime.ForwardResponseMessage + + forward_Query_Bookends_0 = runtime.ForwardResponseMessage + + forward_Query_History_0 = runtime.ForwardResponseMessage + + forward_Query_Quote_0 = runtime.ForwardResponseMessage ) diff --git a/x/market/types/tx.pb.go b/x/market/types/tx.pb.go index 51215933..6c657ea6 100644 --- a/x/market/types/tx.pb.go +++ b/x/market/types/tx.pb.go @@ -9,7 +9,11 @@ import ( grpc1 "github.com/gogo/protobuf/grpc" proto "github.com/gogo/protobuf/proto" grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" math "math" + math_bits "math/bits" ) // Reference imports to suppress errors if they are not otherwise used. @@ -23,17 +27,716 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +type MsgCreatePool struct { + Creator string `protobuf:"bytes,1,opt,name=creator,proto3" json:"creator,omitempty"` + CoinA string `protobuf:"bytes,2,opt,name=coinA,proto3" json:"coinA,omitempty"` + CoinB string `protobuf:"bytes,3,opt,name=coinB,proto3" json:"coinB,omitempty"` +} + +func (m *MsgCreatePool) Reset() { *m = MsgCreatePool{} } +func (m *MsgCreatePool) String() string { return proto.CompactTextString(m) } +func (*MsgCreatePool) ProtoMessage() {} +func (*MsgCreatePool) Descriptor() ([]byte, []int) { + return fileDescriptor_2966ca2342567dca, []int{0} +} +func (m *MsgCreatePool) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCreatePool) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCreatePool.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCreatePool) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCreatePool.Merge(m, src) +} +func (m 
*MsgCreatePool) XXX_Size() int { + return m.Size() +} +func (m *MsgCreatePool) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCreatePool.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCreatePool proto.InternalMessageInfo + +func (m *MsgCreatePool) GetCreator() string { + if m != nil { + return m.Creator + } + return "" +} + +func (m *MsgCreatePool) GetCoinA() string { + if m != nil { + return m.CoinA + } + return "" +} + +func (m *MsgCreatePool) GetCoinB() string { + if m != nil { + return m.CoinB + } + return "" +} + +type MsgCreatePoolResponse struct { +} + +func (m *MsgCreatePoolResponse) Reset() { *m = MsgCreatePoolResponse{} } +func (m *MsgCreatePoolResponse) String() string { return proto.CompactTextString(m) } +func (*MsgCreatePoolResponse) ProtoMessage() {} +func (*MsgCreatePoolResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_2966ca2342567dca, []int{1} +} +func (m *MsgCreatePoolResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCreatePoolResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCreatePoolResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCreatePoolResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCreatePoolResponse.Merge(m, src) +} +func (m *MsgCreatePoolResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgCreatePoolResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCreatePoolResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCreatePoolResponse proto.InternalMessageInfo + +type MsgCreateDrop struct { + Creator string `protobuf:"bytes,1,opt,name=creator,proto3" json:"creator,omitempty"` + Pair string `protobuf:"bytes,2,opt,name=pair,proto3" json:"pair,omitempty"` + Drops string `protobuf:"bytes,3,opt,name=drops,proto3" json:"drops,omitempty"` +} + +func (m *MsgCreateDrop) Reset() 
{ *m = MsgCreateDrop{} } +func (m *MsgCreateDrop) String() string { return proto.CompactTextString(m) } +func (*MsgCreateDrop) ProtoMessage() {} +func (*MsgCreateDrop) Descriptor() ([]byte, []int) { + return fileDescriptor_2966ca2342567dca, []int{2} +} +func (m *MsgCreateDrop) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCreateDrop) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCreateDrop.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCreateDrop) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCreateDrop.Merge(m, src) +} +func (m *MsgCreateDrop) XXX_Size() int { + return m.Size() +} +func (m *MsgCreateDrop) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCreateDrop.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCreateDrop proto.InternalMessageInfo + +func (m *MsgCreateDrop) GetCreator() string { + if m != nil { + return m.Creator + } + return "" +} + +func (m *MsgCreateDrop) GetPair() string { + if m != nil { + return m.Pair + } + return "" +} + +func (m *MsgCreateDrop) GetDrops() string { + if m != nil { + return m.Drops + } + return "" +} + +type MsgCreateDropResponse struct { +} + +func (m *MsgCreateDropResponse) Reset() { *m = MsgCreateDropResponse{} } +func (m *MsgCreateDropResponse) String() string { return proto.CompactTextString(m) } +func (*MsgCreateDropResponse) ProtoMessage() {} +func (*MsgCreateDropResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_2966ca2342567dca, []int{3} +} +func (m *MsgCreateDropResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCreateDropResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCreateDropResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if 
err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCreateDropResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCreateDropResponse.Merge(m, src) +} +func (m *MsgCreateDropResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgCreateDropResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCreateDropResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCreateDropResponse proto.InternalMessageInfo + +type MsgRedeemDrop struct { + Creator string `protobuf:"bytes,1,opt,name=creator,proto3" json:"creator,omitempty"` + Uid string `protobuf:"bytes,2,opt,name=uid,proto3" json:"uid,omitempty"` +} + +func (m *MsgRedeemDrop) Reset() { *m = MsgRedeemDrop{} } +func (m *MsgRedeemDrop) String() string { return proto.CompactTextString(m) } +func (*MsgRedeemDrop) ProtoMessage() {} +func (*MsgRedeemDrop) Descriptor() ([]byte, []int) { + return fileDescriptor_2966ca2342567dca, []int{4} +} +func (m *MsgRedeemDrop) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgRedeemDrop) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgRedeemDrop.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgRedeemDrop) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgRedeemDrop.Merge(m, src) +} +func (m *MsgRedeemDrop) XXX_Size() int { + return m.Size() +} +func (m *MsgRedeemDrop) XXX_DiscardUnknown() { + xxx_messageInfo_MsgRedeemDrop.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgRedeemDrop proto.InternalMessageInfo + +func (m *MsgRedeemDrop) GetCreator() string { + if m != nil { + return m.Creator + } + return "" +} + +func (m *MsgRedeemDrop) GetUid() string { + if m != nil { + return m.Uid + } + return "" +} + +type MsgRedeemDropResponse struct { +} + +func (m *MsgRedeemDropResponse) Reset() { *m = MsgRedeemDropResponse{} } +func (m *MsgRedeemDropResponse) 
String() string { return proto.CompactTextString(m) } +func (*MsgRedeemDropResponse) ProtoMessage() {} +func (*MsgRedeemDropResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_2966ca2342567dca, []int{5} +} +func (m *MsgRedeemDropResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgRedeemDropResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgRedeemDropResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgRedeemDropResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgRedeemDropResponse.Merge(m, src) +} +func (m *MsgRedeemDropResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgRedeemDropResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgRedeemDropResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgRedeemDropResponse proto.InternalMessageInfo + +type MsgCreateOrder struct { + Creator string `protobuf:"bytes,1,opt,name=creator,proto3" json:"creator,omitempty"` + DenomAsk string `protobuf:"bytes,2,opt,name=denomAsk,proto3" json:"denomAsk,omitempty"` + DenomBid string `protobuf:"bytes,3,opt,name=denomBid,proto3" json:"denomBid,omitempty"` + OrderType string `protobuf:"bytes,4,opt,name=orderType,proto3" json:"orderType,omitempty"` + Amount string `protobuf:"bytes,5,opt,name=amount,proto3" json:"amount,omitempty"` + Rate []string `protobuf:"bytes,6,rep,name=rate,proto3" json:"rate,omitempty"` + Prev string `protobuf:"bytes,7,opt,name=prev,proto3" json:"prev,omitempty"` + Next string `protobuf:"bytes,8,opt,name=next,proto3" json:"next,omitempty"` +} + +func (m *MsgCreateOrder) Reset() { *m = MsgCreateOrder{} } +func (m *MsgCreateOrder) String() string { return proto.CompactTextString(m) } +func (*MsgCreateOrder) ProtoMessage() {} +func (*MsgCreateOrder) Descriptor() ([]byte, []int) { + return 
fileDescriptor_2966ca2342567dca, []int{6} +} +func (m *MsgCreateOrder) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCreateOrder) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCreateOrder.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCreateOrder) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCreateOrder.Merge(m, src) +} +func (m *MsgCreateOrder) XXX_Size() int { + return m.Size() +} +func (m *MsgCreateOrder) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCreateOrder.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCreateOrder proto.InternalMessageInfo + +func (m *MsgCreateOrder) GetCreator() string { + if m != nil { + return m.Creator + } + return "" +} + +func (m *MsgCreateOrder) GetDenomAsk() string { + if m != nil { + return m.DenomAsk + } + return "" +} + +func (m *MsgCreateOrder) GetDenomBid() string { + if m != nil { + return m.DenomBid + } + return "" +} + +func (m *MsgCreateOrder) GetOrderType() string { + if m != nil { + return m.OrderType + } + return "" +} + +func (m *MsgCreateOrder) GetAmount() string { + if m != nil { + return m.Amount + } + return "" +} + +func (m *MsgCreateOrder) GetRate() []string { + if m != nil { + return m.Rate + } + return nil +} + +func (m *MsgCreateOrder) GetPrev() string { + if m != nil { + return m.Prev + } + return "" +} + +func (m *MsgCreateOrder) GetNext() string { + if m != nil { + return m.Next + } + return "" +} + +type MsgCreateOrderResponse struct { + Uid uint64 `protobuf:"varint,1,opt,name=uid,proto3" json:"uid,omitempty"` +} + +func (m *MsgCreateOrderResponse) Reset() { *m = MsgCreateOrderResponse{} } +func (m *MsgCreateOrderResponse) String() string { return proto.CompactTextString(m) } +func (*MsgCreateOrderResponse) ProtoMessage() {} +func (*MsgCreateOrderResponse) Descriptor() ([]byte, []int) { + 
return fileDescriptor_2966ca2342567dca, []int{7} +} +func (m *MsgCreateOrderResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCreateOrderResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCreateOrderResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCreateOrderResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCreateOrderResponse.Merge(m, src) +} +func (m *MsgCreateOrderResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgCreateOrderResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCreateOrderResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCreateOrderResponse proto.InternalMessageInfo + +func (m *MsgCreateOrderResponse) GetUid() uint64 { + if m != nil { + return m.Uid + } + return 0 +} + +type MsgCancelOrder struct { + Creator string `protobuf:"bytes,1,opt,name=creator,proto3" json:"creator,omitempty"` + Uid string `protobuf:"bytes,2,opt,name=uid,proto3" json:"uid,omitempty"` +} + +func (m *MsgCancelOrder) Reset() { *m = MsgCancelOrder{} } +func (m *MsgCancelOrder) String() string { return proto.CompactTextString(m) } +func (*MsgCancelOrder) ProtoMessage() {} +func (*MsgCancelOrder) Descriptor() ([]byte, []int) { + return fileDescriptor_2966ca2342567dca, []int{8} +} +func (m *MsgCancelOrder) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCancelOrder) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCancelOrder.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCancelOrder) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCancelOrder.Merge(m, src) +} +func (m *MsgCancelOrder) XXX_Size() int { + 
return m.Size() +} +func (m *MsgCancelOrder) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCancelOrder.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCancelOrder proto.InternalMessageInfo + +func (m *MsgCancelOrder) GetCreator() string { + if m != nil { + return m.Creator + } + return "" +} + +func (m *MsgCancelOrder) GetUid() string { + if m != nil { + return m.Uid + } + return "" +} + +type MsgCancelOrderResponse struct { +} + +func (m *MsgCancelOrderResponse) Reset() { *m = MsgCancelOrderResponse{} } +func (m *MsgCancelOrderResponse) String() string { return proto.CompactTextString(m) } +func (*MsgCancelOrderResponse) ProtoMessage() {} +func (*MsgCancelOrderResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_2966ca2342567dca, []int{9} +} +func (m *MsgCancelOrderResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCancelOrderResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCancelOrderResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCancelOrderResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCancelOrderResponse.Merge(m, src) +} +func (m *MsgCancelOrderResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgCancelOrderResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCancelOrderResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCancelOrderResponse proto.InternalMessageInfo + +type MsgMarketOrder struct { + Creator string `protobuf:"bytes,1,opt,name=creator,proto3" json:"creator,omitempty"` + DenomAsk string `protobuf:"bytes,2,opt,name=denomAsk,proto3" json:"denomAsk,omitempty"` + AmountAsk string `protobuf:"bytes,3,opt,name=amountAsk,proto3" json:"amountAsk,omitempty"` + DenomBid string `protobuf:"bytes,4,opt,name=denomBid,proto3" json:"denomBid,omitempty"` + AmountBid string 
`protobuf:"bytes,5,opt,name=amountBid,proto3" json:"amountBid,omitempty"` + //Slippage is percentage based on (parameter / 10000), 9999 representing as 99.99% + Slippage string `protobuf:"bytes,6,opt,name=slippage,proto3" json:"slippage,omitempty"` +} + +func (m *MsgMarketOrder) Reset() { *m = MsgMarketOrder{} } +func (m *MsgMarketOrder) String() string { return proto.CompactTextString(m) } +func (*MsgMarketOrder) ProtoMessage() {} +func (*MsgMarketOrder) Descriptor() ([]byte, []int) { + return fileDescriptor_2966ca2342567dca, []int{10} +} +func (m *MsgMarketOrder) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgMarketOrder) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgMarketOrder.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgMarketOrder) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgMarketOrder.Merge(m, src) +} +func (m *MsgMarketOrder) XXX_Size() int { + return m.Size() +} +func (m *MsgMarketOrder) XXX_DiscardUnknown() { + xxx_messageInfo_MsgMarketOrder.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgMarketOrder proto.InternalMessageInfo + +func (m *MsgMarketOrder) GetCreator() string { + if m != nil { + return m.Creator + } + return "" +} + +func (m *MsgMarketOrder) GetDenomAsk() string { + if m != nil { + return m.DenomAsk + } + return "" +} + +func (m *MsgMarketOrder) GetAmountAsk() string { + if m != nil { + return m.AmountAsk + } + return "" +} + +func (m *MsgMarketOrder) GetDenomBid() string { + if m != nil { + return m.DenomBid + } + return "" +} + +func (m *MsgMarketOrder) GetAmountBid() string { + if m != nil { + return m.AmountBid + } + return "" +} + +func (m *MsgMarketOrder) GetSlippage() string { + if m != nil { + return m.Slippage + } + return "" +} + +type MsgMarketOrderResponse struct { + AmountBid string 
`protobuf:"bytes,1,opt,name=amountBid,proto3" json:"amountBid,omitempty"` + AmountAsk string `protobuf:"bytes,2,opt,name=amountAsk,proto3" json:"amountAsk,omitempty"` + Slippage string `protobuf:"bytes,3,opt,name=slippage,proto3" json:"slippage,omitempty"` +} + +func (m *MsgMarketOrderResponse) Reset() { *m = MsgMarketOrderResponse{} } +func (m *MsgMarketOrderResponse) String() string { return proto.CompactTextString(m) } +func (*MsgMarketOrderResponse) ProtoMessage() {} +func (*MsgMarketOrderResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_2966ca2342567dca, []int{11} +} +func (m *MsgMarketOrderResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgMarketOrderResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgMarketOrderResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgMarketOrderResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgMarketOrderResponse.Merge(m, src) +} +func (m *MsgMarketOrderResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgMarketOrderResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgMarketOrderResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgMarketOrderResponse proto.InternalMessageInfo + +func (m *MsgMarketOrderResponse) GetAmountBid() string { + if m != nil { + return m.AmountBid + } + return "" +} + +func (m *MsgMarketOrderResponse) GetAmountAsk() string { + if m != nil { + return m.AmountAsk + } + return "" +} + +func (m *MsgMarketOrderResponse) GetSlippage() string { + if m != nil { + return m.Slippage + } + return "" +} + +func init() { + proto.RegisterType((*MsgCreatePool)(nil), "pendulumlabs.market.market.MsgCreatePool") + proto.RegisterType((*MsgCreatePoolResponse)(nil), "pendulumlabs.market.market.MsgCreatePoolResponse") + proto.RegisterType((*MsgCreateDrop)(nil), 
"pendulumlabs.market.market.MsgCreateDrop") + proto.RegisterType((*MsgCreateDropResponse)(nil), "pendulumlabs.market.market.MsgCreateDropResponse") + proto.RegisterType((*MsgRedeemDrop)(nil), "pendulumlabs.market.market.MsgRedeemDrop") + proto.RegisterType((*MsgRedeemDropResponse)(nil), "pendulumlabs.market.market.MsgRedeemDropResponse") + proto.RegisterType((*MsgCreateOrder)(nil), "pendulumlabs.market.market.MsgCreateOrder") + proto.RegisterType((*MsgCreateOrderResponse)(nil), "pendulumlabs.market.market.MsgCreateOrderResponse") + proto.RegisterType((*MsgCancelOrder)(nil), "pendulumlabs.market.market.MsgCancelOrder") + proto.RegisterType((*MsgCancelOrderResponse)(nil), "pendulumlabs.market.market.MsgCancelOrderResponse") + proto.RegisterType((*MsgMarketOrder)(nil), "pendulumlabs.market.market.MsgMarketOrder") + proto.RegisterType((*MsgMarketOrderResponse)(nil), "pendulumlabs.market.market.MsgMarketOrderResponse") +} + func init() { proto.RegisterFile("market/tx.proto", fileDescriptor_2966ca2342567dca) } var fileDescriptor_2966ca2342567dca = []byte{ - // 99 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0xcf, 0x4d, 0x2c, 0xca, - 0x4e, 0x2d, 0xd1, 0x2f, 0xa9, 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x85, 0x08, 0xe8, - 0x41, 0x28, 0x23, 0x56, 0x2e, 0x66, 0xdf, 0xe2, 0x74, 0x27, 0xfd, 0x13, 0x8f, 0xe4, 0x18, 0x2f, - 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0x71, 0xc2, 0x63, 0x39, 0x86, 0x0b, 0x8f, 0xe5, 0x18, - 0x6e, 0x3c, 0x96, 0x63, 0x88, 0x12, 0x85, 0x1a, 0x50, 0xa1, 0x0f, 0x33, 0xa9, 0xb2, 0x20, 0xb5, - 0x38, 0x89, 0x0d, 0x6c, 0x9a, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x57, 0xf0, 0x09, 0x2d, 0x60, - 0x00, 0x00, 0x00, + // 543 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x95, 0xcf, 0x6e, 0xd3, 0x4a, + 0x14, 0xc6, 0xe3, 0x3a, 0x49, 0x93, 0xb9, 0xba, 0x80, 0x2c, 0x5a, 0x46, 0x56, 0x65, 0x55, 0x5e, + 0x41, 0x17, 0x89, 0x28, 0x4b, 
0xd8, 0x34, 0xb0, 0x8d, 0x40, 0x01, 0x36, 0xec, 0xdc, 0xf8, 0x28, + 0x32, 0xb5, 0x3d, 0xa3, 0xb1, 0x53, 0xa5, 0x6f, 0xc1, 0x8a, 0x87, 0xe1, 0x09, 0x58, 0x76, 0xc1, + 0x82, 0x25, 0x4a, 0x5e, 0x04, 0x9d, 0x19, 0x7b, 0x3c, 0x43, 0x4a, 0x6d, 0x89, 0x55, 0xe6, 0xfc, + 0x99, 0xf3, 0x9b, 0xf3, 0x29, 0x9f, 0x4c, 0x1e, 0x66, 0x91, 0xb8, 0x82, 0x72, 0x5a, 0x6e, 0x26, + 0x5c, 0xb0, 0x92, 0x79, 0x3e, 0x87, 0x3c, 0x5e, 0xa7, 0xeb, 0x2c, 0x8d, 0x2e, 0x8b, 0x89, 0xaa, + 0x56, 0x3f, 0xe1, 0x47, 0xf2, 0xff, 0xbc, 0x58, 0xbd, 0x16, 0x10, 0x95, 0xf0, 0x8e, 0xb1, 0xd4, + 0xa3, 0xe4, 0x70, 0x89, 0x11, 0x13, 0xd4, 0x39, 0x75, 0x9e, 0x8e, 0x17, 0x75, 0xe8, 0x3d, 0x26, + 0x83, 0x25, 0x4b, 0xf2, 0x0b, 0x7a, 0x20, 0xf3, 0x2a, 0xa8, 0xb3, 0x33, 0xea, 0x36, 0xd9, 0x59, + 0xf8, 0x84, 0x1c, 0x59, 0x63, 0x17, 0x50, 0x70, 0x96, 0x17, 0x10, 0xbe, 0x37, 0x78, 0x6f, 0x04, + 0xe3, 0xf7, 0xf0, 0x3c, 0xd2, 0xe7, 0x51, 0x22, 0x2a, 0x9c, 0x3c, 0x23, 0x2d, 0x16, 0x8c, 0x17, + 0x35, 0x4d, 0x06, 0x16, 0x0d, 0x87, 0x6a, 0xda, 0x4b, 0x49, 0x5b, 0x40, 0x0c, 0x90, 0xb5, 0xd0, + 0x1e, 0x11, 0x77, 0x9d, 0xc4, 0x15, 0x0c, 0x8f, 0xd5, 0xd4, 0xe6, 0xb2, 0x9e, 0xfa, 0xc3, 0x21, + 0x0f, 0x34, 0xef, 0xad, 0x88, 0x41, 0xdc, 0x33, 0xd7, 0x27, 0xa3, 0x18, 0x72, 0x96, 0x5d, 0x14, + 0x57, 0xd5, 0x70, 0x1d, 0xeb, 0xda, 0x2c, 0x89, 0xab, 0x85, 0x74, 0xec, 0x9d, 0x90, 0x31, 0xc3, + 0xd1, 0x1f, 0x6e, 0x38, 0xd0, 0xbe, 0x2c, 0x36, 0x09, 0xef, 0x98, 0x0c, 0xa3, 0x8c, 0xad, 0xf3, + 0x92, 0x0e, 0x64, 0xa9, 0x8a, 0x50, 0x33, 0x11, 0x95, 0x40, 0x87, 0xa7, 0x2e, 0x6a, 0x86, 0x67, + 0xa9, 0xa3, 0x80, 0x6b, 0x7a, 0x58, 0xe9, 0x28, 0xe0, 0x1a, 0x73, 0x39, 0x6c, 0x4a, 0x3a, 0x52, + 0x39, 0x3c, 0x87, 0x67, 0xe4, 0xd8, 0xde, 0xaa, 0x5e, 0xb8, 0xd6, 0x06, 0x37, 0xeb, 0x2b, 0x6d, + 0x5e, 0x29, 0x05, 0xa2, 0x7c, 0x09, 0x69, 0x9b, 0x02, 0xfb, 0xca, 0x52, 0x45, 0x6a, 0x6e, 0x6b, + 0x69, 0xbf, 0x29, 0x69, 0xe7, 0xf2, 0xcf, 0xf9, 0x2f, 0xd2, 0x9e, 0x90, 0xb1, 0x92, 0x04, 0x8b, + 0x4a, 0xdb, 0x26, 0x61, 0x09, 0xdf, 0xdf, 0x17, 0x5e, 0x35, 0x62, 
0x71, 0x60, 0xde, 0xc4, 0xaa, + 0x4f, 0x46, 0x45, 0x9a, 0x70, 0x1e, 0xad, 0x50, 0x64, 0x79, 0xb3, 0x8e, 0x43, 0x2e, 0xd7, 0x32, + 0xde, 0xae, 0x05, 0xb4, 0x66, 0x3a, 0x7f, 0xce, 0xb4, 0xde, 0x7a, 0x70, 0xc7, 0x5b, 0x35, 0xd1, + 0xb5, 0x89, 0xe7, 0x5f, 0x07, 0xc4, 0x9d, 0x17, 0x2b, 0xef, 0x33, 0x21, 0x86, 0x85, 0x9f, 0x4d, + 0xfe, 0x6e, 0xf8, 0x89, 0x65, 0x4b, 0xff, 0x79, 0xe7, 0x56, 0xbd, 0x8b, 0x66, 0x49, 0x43, 0x75, + 0x63, 0x61, 0x6b, 0x47, 0x96, 0xe9, 0x34, 0x64, 0x19, 0xe6, 0x6d, 0x63, 0x35, 0xad, 0xad, 0xac, + 0x7d, 0x57, 0x7b, 0x19, 0xf9, 0xcf, 0x74, 0xf4, 0x59, 0xa7, 0xd7, 0xca, 0x5e, 0xff, 0xbc, 0x7b, + 0xaf, 0x85, 0x33, 0xec, 0xd3, 0x8a, 0x6b, 0x7a, 0xdb, 0x71, 0xfb, 0xc6, 0x42, 0x9c, 0x69, 0xaa, + 0x36, 0x9c, 0xd1, 0xdb, 0x8a, 0xbb, 0xe3, 0x0f, 0x3f, 0x9b, 0x7e, 0xdf, 0x06, 0xce, 0xed, 0x36, + 0x70, 0x7e, 0x6d, 0x03, 0xe7, 0xcb, 0x2e, 0xe8, 0xdd, 0xee, 0x82, 0xde, 0xcf, 0x5d, 0xd0, 0xfb, + 0x74, 0x54, 0x7d, 0x9d, 0x36, 0xd3, 0xfa, 0x33, 0x75, 0xc3, 0xa1, 0xb8, 0x1c, 0xca, 0x4f, 0xd5, + 0x8b, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xa2, 0xcf, 0x22, 0x67, 0xbd, 0x06, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -48,6 +751,12 @@ const _ = grpc.SupportPackageIsVersion4 // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
type MsgClient interface { + CreatePool(ctx context.Context, in *MsgCreatePool, opts ...grpc.CallOption) (*MsgCreatePoolResponse, error) + CreateDrop(ctx context.Context, in *MsgCreateDrop, opts ...grpc.CallOption) (*MsgCreateDropResponse, error) + RedeemDrop(ctx context.Context, in *MsgRedeemDrop, opts ...grpc.CallOption) (*MsgRedeemDropResponse, error) + CreateOrder(ctx context.Context, in *MsgCreateOrder, opts ...grpc.CallOption) (*MsgCreateOrderResponse, error) + CancelOrder(ctx context.Context, in *MsgCancelOrder, opts ...grpc.CallOption) (*MsgCancelOrderResponse, error) + MarketOrder(ctx context.Context, in *MsgMarketOrder, opts ...grpc.CallOption) (*MsgMarketOrderResponse, error) } type msgClient struct { @@ -58,22 +767,2512 @@ func NewMsgClient(cc grpc1.ClientConn) MsgClient { return &msgClient{cc} } +func (c *msgClient) CreatePool(ctx context.Context, in *MsgCreatePool, opts ...grpc.CallOption) (*MsgCreatePoolResponse, error) { + out := new(MsgCreatePoolResponse) + err := c.cc.Invoke(ctx, "/pendulumlabs.market.market.Msg/CreatePool", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) CreateDrop(ctx context.Context, in *MsgCreateDrop, opts ...grpc.CallOption) (*MsgCreateDropResponse, error) { + out := new(MsgCreateDropResponse) + err := c.cc.Invoke(ctx, "/pendulumlabs.market.market.Msg/CreateDrop", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) RedeemDrop(ctx context.Context, in *MsgRedeemDrop, opts ...grpc.CallOption) (*MsgRedeemDropResponse, error) { + out := new(MsgRedeemDropResponse) + err := c.cc.Invoke(ctx, "/pendulumlabs.market.market.Msg/RedeemDrop", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) CreateOrder(ctx context.Context, in *MsgCreateOrder, opts ...grpc.CallOption) (*MsgCreateOrderResponse, error) { + out := new(MsgCreateOrderResponse) + err := c.cc.Invoke(ctx, "/pendulumlabs.market.market.Msg/CreateOrder", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) CancelOrder(ctx context.Context, in *MsgCancelOrder, opts ...grpc.CallOption) (*MsgCancelOrderResponse, error) { + out := new(MsgCancelOrderResponse) + err := c.cc.Invoke(ctx, "/pendulumlabs.market.market.Msg/CancelOrder", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) MarketOrder(ctx context.Context, in *MsgMarketOrder, opts ...grpc.CallOption) (*MsgMarketOrderResponse, error) { + out := new(MsgMarketOrderResponse) + err := c.cc.Invoke(ctx, "/pendulumlabs.market.market.Msg/MarketOrder", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // MsgServer is the server API for Msg service. type MsgServer interface { + CreatePool(context.Context, *MsgCreatePool) (*MsgCreatePoolResponse, error) + CreateDrop(context.Context, *MsgCreateDrop) (*MsgCreateDropResponse, error) + RedeemDrop(context.Context, *MsgRedeemDrop) (*MsgRedeemDropResponse, error) + CreateOrder(context.Context, *MsgCreateOrder) (*MsgCreateOrderResponse, error) + CancelOrder(context.Context, *MsgCancelOrder) (*MsgCancelOrderResponse, error) + MarketOrder(context.Context, *MsgMarketOrder) (*MsgMarketOrderResponse, error) } // UnimplementedMsgServer can be embedded to have forward compatible implementations. 
type UnimplementedMsgServer struct { } +func (*UnimplementedMsgServer) CreatePool(ctx context.Context, req *MsgCreatePool) (*MsgCreatePoolResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreatePool not implemented") +} +func (*UnimplementedMsgServer) CreateDrop(ctx context.Context, req *MsgCreateDrop) (*MsgCreateDropResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateDrop not implemented") +} +func (*UnimplementedMsgServer) RedeemDrop(ctx context.Context, req *MsgRedeemDrop) (*MsgRedeemDropResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RedeemDrop not implemented") +} +func (*UnimplementedMsgServer) CreateOrder(ctx context.Context, req *MsgCreateOrder) (*MsgCreateOrderResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateOrder not implemented") +} +func (*UnimplementedMsgServer) CancelOrder(ctx context.Context, req *MsgCancelOrder) (*MsgCancelOrderResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CancelOrder not implemented") +} +func (*UnimplementedMsgServer) MarketOrder(ctx context.Context, req *MsgMarketOrder) (*MsgMarketOrderResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method MarketOrder not implemented") +} + func RegisterMsgServer(s grpc1.Server, srv MsgServer) { s.RegisterService(&_Msg_serviceDesc, srv) } +func _Msg_CreatePool_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgCreatePool) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).CreatePool(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pendulumlabs.market.market.Msg/CreatePool", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).CreatePool(ctx, req.(*MsgCreatePool)) + } + return interceptor(ctx, 
in, info, handler) +} + +func _Msg_CreateDrop_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgCreateDrop) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).CreateDrop(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pendulumlabs.market.market.Msg/CreateDrop", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).CreateDrop(ctx, req.(*MsgCreateDrop)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_RedeemDrop_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgRedeemDrop) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).RedeemDrop(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pendulumlabs.market.market.Msg/RedeemDrop", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).RedeemDrop(ctx, req.(*MsgRedeemDrop)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_CreateOrder_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgCreateOrder) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).CreateOrder(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pendulumlabs.market.market.Msg/CreateOrder", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).CreateOrder(ctx, req.(*MsgCreateOrder)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_CancelOrder_Handler(srv interface{}, ctx context.Context, dec 
func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgCancelOrder) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).CancelOrder(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pendulumlabs.market.market.Msg/CancelOrder", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).CancelOrder(ctx, req.(*MsgCancelOrder)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_MarketOrder_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgMarketOrder) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).MarketOrder(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pendulumlabs.market.market.Msg/MarketOrder", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).MarketOrder(ctx, req.(*MsgMarketOrder)) + } + return interceptor(ctx, in, info, handler) +} + var _Msg_serviceDesc = grpc.ServiceDesc{ - ServiceName: "market.market.Msg", + ServiceName: "pendulumlabs.market.market.Msg", HandlerType: (*MsgServer)(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{}, - Metadata: "market/tx.proto", + Methods: []grpc.MethodDesc{ + { + MethodName: "CreatePool", + Handler: _Msg_CreatePool_Handler, + }, + { + MethodName: "CreateDrop", + Handler: _Msg_CreateDrop_Handler, + }, + { + MethodName: "RedeemDrop", + Handler: _Msg_RedeemDrop_Handler, + }, + { + MethodName: "CreateOrder", + Handler: _Msg_CreateOrder_Handler, + }, + { + MethodName: "CancelOrder", + Handler: _Msg_CancelOrder_Handler, + }, + { + MethodName: "MarketOrder", + Handler: _Msg_MarketOrder_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "market/tx.proto", +} + +func 
(m *MsgCreatePool) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCreatePool) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCreatePool) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.CoinB) > 0 { + i -= len(m.CoinB) + copy(dAtA[i:], m.CoinB) + i = encodeVarintTx(dAtA, i, uint64(len(m.CoinB))) + i-- + dAtA[i] = 0x1a + } + if len(m.CoinA) > 0 { + i -= len(m.CoinA) + copy(dAtA[i:], m.CoinA) + i = encodeVarintTx(dAtA, i, uint64(len(m.CoinA))) + i-- + dAtA[i] = 0x12 + } + if len(m.Creator) > 0 { + i -= len(m.Creator) + copy(dAtA[i:], m.Creator) + i = encodeVarintTx(dAtA, i, uint64(len(m.Creator))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgCreatePoolResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCreatePoolResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCreatePoolResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgCreateDrop) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCreateDrop) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCreateDrop) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Drops) > 0 { + i -= 
len(m.Drops) + copy(dAtA[i:], m.Drops) + i = encodeVarintTx(dAtA, i, uint64(len(m.Drops))) + i-- + dAtA[i] = 0x1a + } + if len(m.Pair) > 0 { + i -= len(m.Pair) + copy(dAtA[i:], m.Pair) + i = encodeVarintTx(dAtA, i, uint64(len(m.Pair))) + i-- + dAtA[i] = 0x12 + } + if len(m.Creator) > 0 { + i -= len(m.Creator) + copy(dAtA[i:], m.Creator) + i = encodeVarintTx(dAtA, i, uint64(len(m.Creator))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgCreateDropResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCreateDropResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCreateDropResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgRedeemDrop) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgRedeemDrop) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgRedeemDrop) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Uid) > 0 { + i -= len(m.Uid) + copy(dAtA[i:], m.Uid) + i = encodeVarintTx(dAtA, i, uint64(len(m.Uid))) + i-- + dAtA[i] = 0x12 + } + if len(m.Creator) > 0 { + i -= len(m.Creator) + copy(dAtA[i:], m.Creator) + i = encodeVarintTx(dAtA, i, uint64(len(m.Creator))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgRedeemDropResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + 
return dAtA[:n], nil +} + +func (m *MsgRedeemDropResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgRedeemDropResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgCreateOrder) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCreateOrder) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCreateOrder) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Next) > 0 { + i -= len(m.Next) + copy(dAtA[i:], m.Next) + i = encodeVarintTx(dAtA, i, uint64(len(m.Next))) + i-- + dAtA[i] = 0x42 + } + if len(m.Prev) > 0 { + i -= len(m.Prev) + copy(dAtA[i:], m.Prev) + i = encodeVarintTx(dAtA, i, uint64(len(m.Prev))) + i-- + dAtA[i] = 0x3a + } + if len(m.Rate) > 0 { + for iNdEx := len(m.Rate) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Rate[iNdEx]) + copy(dAtA[i:], m.Rate[iNdEx]) + i = encodeVarintTx(dAtA, i, uint64(len(m.Rate[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if len(m.Amount) > 0 { + i -= len(m.Amount) + copy(dAtA[i:], m.Amount) + i = encodeVarintTx(dAtA, i, uint64(len(m.Amount))) + i-- + dAtA[i] = 0x2a + } + if len(m.OrderType) > 0 { + i -= len(m.OrderType) + copy(dAtA[i:], m.OrderType) + i = encodeVarintTx(dAtA, i, uint64(len(m.OrderType))) + i-- + dAtA[i] = 0x22 + } + if len(m.DenomBid) > 0 { + i -= len(m.DenomBid) + copy(dAtA[i:], m.DenomBid) + i = encodeVarintTx(dAtA, i, uint64(len(m.DenomBid))) + i-- + dAtA[i] = 0x1a + } + if len(m.DenomAsk) > 0 { + i -= len(m.DenomAsk) + copy(dAtA[i:], m.DenomAsk) + i = encodeVarintTx(dAtA, i, uint64(len(m.DenomAsk))) + i-- + dAtA[i] = 0x12 + } + if len(m.Creator) > 0 { + i -= 
len(m.Creator) + copy(dAtA[i:], m.Creator) + i = encodeVarintTx(dAtA, i, uint64(len(m.Creator))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgCreateOrderResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCreateOrderResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCreateOrderResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Uid != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.Uid)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *MsgCancelOrder) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCancelOrder) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCancelOrder) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Uid) > 0 { + i -= len(m.Uid) + copy(dAtA[i:], m.Uid) + i = encodeVarintTx(dAtA, i, uint64(len(m.Uid))) + i-- + dAtA[i] = 0x12 + } + if len(m.Creator) > 0 { + i -= len(m.Creator) + copy(dAtA[i:], m.Creator) + i = encodeVarintTx(dAtA, i, uint64(len(m.Creator))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgCancelOrderResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCancelOrderResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCancelOrderResponse) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgMarketOrder) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgMarketOrder) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgMarketOrder) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Slippage) > 0 { + i -= len(m.Slippage) + copy(dAtA[i:], m.Slippage) + i = encodeVarintTx(dAtA, i, uint64(len(m.Slippage))) + i-- + dAtA[i] = 0x32 + } + if len(m.AmountBid) > 0 { + i -= len(m.AmountBid) + copy(dAtA[i:], m.AmountBid) + i = encodeVarintTx(dAtA, i, uint64(len(m.AmountBid))) + i-- + dAtA[i] = 0x2a + } + if len(m.DenomBid) > 0 { + i -= len(m.DenomBid) + copy(dAtA[i:], m.DenomBid) + i = encodeVarintTx(dAtA, i, uint64(len(m.DenomBid))) + i-- + dAtA[i] = 0x22 + } + if len(m.AmountAsk) > 0 { + i -= len(m.AmountAsk) + copy(dAtA[i:], m.AmountAsk) + i = encodeVarintTx(dAtA, i, uint64(len(m.AmountAsk))) + i-- + dAtA[i] = 0x1a + } + if len(m.DenomAsk) > 0 { + i -= len(m.DenomAsk) + copy(dAtA[i:], m.DenomAsk) + i = encodeVarintTx(dAtA, i, uint64(len(m.DenomAsk))) + i-- + dAtA[i] = 0x12 + } + if len(m.Creator) > 0 { + i -= len(m.Creator) + copy(dAtA[i:], m.Creator) + i = encodeVarintTx(dAtA, i, uint64(len(m.Creator))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgMarketOrderResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgMarketOrderResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m 
*MsgMarketOrderResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Slippage) > 0 { + i -= len(m.Slippage) + copy(dAtA[i:], m.Slippage) + i = encodeVarintTx(dAtA, i, uint64(len(m.Slippage))) + i-- + dAtA[i] = 0x1a + } + if len(m.AmountAsk) > 0 { + i -= len(m.AmountAsk) + copy(dAtA[i:], m.AmountAsk) + i = encodeVarintTx(dAtA, i, uint64(len(m.AmountAsk))) + i-- + dAtA[i] = 0x12 + } + if len(m.AmountBid) > 0 { + i -= len(m.AmountBid) + copy(dAtA[i:], m.AmountBid) + i = encodeVarintTx(dAtA, i, uint64(len(m.AmountBid))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintTx(dAtA []byte, offset int, v uint64) int { + offset -= sovTx(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *MsgCreatePool) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Creator) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.CoinA) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.CoinB) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + return n +} + +func (m *MsgCreatePoolResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgCreateDrop) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Creator) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.Pair) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.Drops) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + return n +} + +func (m *MsgCreateDropResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgRedeemDrop) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Creator) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.Uid) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + return n +} + +func (m 
*MsgRedeemDropResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n } + +func (m *MsgCreateOrder) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Creator) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.DenomAsk) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.DenomBid) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.OrderType) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.Amount) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + if len(m.Rate) > 0 { + for _, s := range m.Rate { + l = len(s) + n += 1 + l + sovTx(uint64(l)) + } + } + l = len(m.Prev) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.Next) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + return n +} + +func (m *MsgCreateOrderResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Uid != 0 { + n += 1 + sovTx(uint64(m.Uid)) + } + return n +} + +func (m *MsgCancelOrder) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Creator) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.Uid) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + return n +} + +func (m *MsgCancelOrderResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgMarketOrder) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Creator) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.DenomAsk) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.AmountAsk) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.DenomBid) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.AmountBid) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.Slippage) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + return n +} + +func (m *MsgMarketOrderResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = 
len(m.AmountBid) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.AmountAsk) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.Slippage) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + return n +} + +func sovTx(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTx(x uint64) (n int) { + return sovTx(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *MsgCreatePool) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCreatePool: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCreatePool: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Creator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Creator = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CoinA", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CoinA = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CoinB", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CoinB = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgCreatePoolResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCreatePoolResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + 
return fmt.Errorf("proto: MsgCreatePoolResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgCreateDrop) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCreateDrop: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCreateDrop: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Creator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Creator = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pair", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pair = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Drops", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Drops = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgCreateDropResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCreateDropResponse: wiretype end group 
for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCreateDropResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgRedeemDrop) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgRedeemDrop: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgRedeemDrop: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Creator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Creator = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Uid", wireType) + } + var stringLen uint64 + for shift := uint(0); 
; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Uid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgRedeemDropResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgRedeemDropResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgRedeemDropResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgCreateOrder) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + 
var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCreateOrder: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCreateOrder: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Creator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Creator = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DenomAsk", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DenomAsk = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DenomBid", 
wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DenomBid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OrderType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OrderType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Amount = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for 
field Rate", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rate = append(m.Rate, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Prev", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Prev = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Next", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Next = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + 
if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgCreateOrderResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCreateOrderResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCreateOrderResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Uid", wireType) + } + m.Uid = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Uid |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgCancelOrder) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + 
if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCancelOrder: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCancelOrder: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Creator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Creator = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Uid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Uid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} 
+func (m *MsgCancelOrderResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCancelOrderResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCancelOrderResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgMarketOrder) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgMarketOrder: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgMarketOrder: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Creator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Creator = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DenomAsk", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DenomAsk = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AmountAsk", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AmountAsk = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DenomBid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DenomBid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AmountBid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AmountBid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Slippage", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Slippage = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgMarketOrderResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgMarketOrderResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgMarketOrderResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AmountBid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AmountBid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AmountAsk", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + 
if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AmountAsk = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Slippage", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Slippage = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTx(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTx + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTx + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTx + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } 
+ b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTx + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTx + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTx + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTx = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTx = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTx = fmt.Errorf("proto: unexpected end of group") +)