From 4419e9236495be3c6284026a5d89f15215cb6c14 Mon Sep 17 00:00:00 2001 From: Devashish Dixit Date: Sat, 17 Sep 2022 09:13:00 +0800 Subject: [PATCH 01/16] Finish implementation of nft-transfer module --- .gitmodules | 4 + app/app.go | 29 + app/docs/swagger-ui/swagger.yaml | 20 + buf.work.yaml | 1 + go.mod | 2 +- proto/nft/v1/nft.proto | 1 + proto/nft/v1/tx.proto | 1 + proto/nft_transfer/v1/genesis.proto | 13 + proto/nft_transfer/v1/packet.proto | 22 + proto/nft_transfer/v1/query.proto | 90 + proto/nft_transfer/v1/trace.proto | 14 + proto/nft_transfer/v1/tx.proto | 44 + third_party/ibc-go | 1 + x/nft-transfer/client/cli/cli.go | 43 + x/nft-transfer/client/cli/query.go | 140 ++ x/nft-transfer/client/cli/tx.go | 121 ++ x/nft-transfer/ibc_module.go | 291 ++++ x/nft-transfer/keeper/genesis.go | 36 + x/nft-transfer/keeper/grpc_query.go | 119 ++ x/nft-transfer/keeper/keeper.go | 99 ++ x/nft-transfer/keeper/msg_server.go | 48 + x/nft-transfer/keeper/packet.go | 198 +++ x/nft-transfer/keeper/relay.go | 154 ++ x/nft-transfer/keeper/trace.go | 102 ++ x/nft-transfer/module.go | 173 ++ x/nft-transfer/simulation/decoder.go | 34 + x/nft-transfer/simulation/genesis.go | 42 + x/nft-transfer/simulation/genesis_test.go | 73 + x/nft-transfer/types/ack.go | 27 + x/nft-transfer/types/codec.go | 36 + x/nft-transfer/types/errors.go | 14 + x/nft-transfer/types/events.go | 17 + x/nft-transfer/types/expected_keepers.go | 51 + x/nft-transfer/types/genesis.go | 30 + x/nft-transfer/types/genesis.pb.go | 384 +++++ x/nft-transfer/types/genesis_test.go | 42 + x/nft-transfer/types/keys.go | 56 + x/nft-transfer/types/msgs.go | 96 ++ x/nft-transfer/types/msgs_test.go | 38 + x/nft-transfer/types/packet.go | 82 + x/nft-transfer/types/packet.pb.go | 594 +++++++ x/nft-transfer/types/packet_test.go | 53 + x/nft-transfer/types/query.pb.go | 1903 +++++++++++++++++++++ x/nft-transfer/types/query.pb.gw.go | 496 ++++++ x/nft-transfer/types/trace.go | 180 ++ x/nft-transfer/types/trace.pb.go | 373 ++++ x/nft-transfer/types/trace_test.go | 78 + x/nft-transfer/types/tx.pb.go | 847 +++++++++ x/nft/client/cli/flags.go | 2 + x/nft/client/cli/tx.go | 6 + x/nft/keeper/keeper.go | 23 +- x/nft/keeper/keeper_test.go | 4 +- x/nft/keeper/msg_server.go | 2 +- x/nft/types/denom.go | 3 +- x/nft/types/msgs.go | 3 +- x/nft/types/nft.pb.go | 109 +- x/nft/types/tx.pb.go | 116 +- 57 files changed, 7507 insertions(+), 73 deletions(-) create mode 100644 proto/nft_transfer/v1/genesis.proto create mode 100644 proto/nft_transfer/v1/packet.proto create mode 100644 proto/nft_transfer/v1/query.proto create mode 100644 proto/nft_transfer/v1/trace.proto create mode 100644 proto/nft_transfer/v1/tx.proto create mode 160000 third_party/ibc-go create mode 100644 x/nft-transfer/client/cli/cli.go create mode 100644 x/nft-transfer/client/cli/query.go create mode 100644 x/nft-transfer/client/cli/tx.go create mode 100644 x/nft-transfer/ibc_module.go create mode 100644 x/nft-transfer/keeper/genesis.go create mode 100644 x/nft-transfer/keeper/grpc_query.go create mode 100644 x/nft-transfer/keeper/keeper.go create mode 100644 x/nft-transfer/keeper/msg_server.go create mode 100644 x/nft-transfer/keeper/packet.go create mode 100644 x/nft-transfer/keeper/relay.go create mode 100644 x/nft-transfer/keeper/trace.go create mode 100644 x/nft-transfer/module.go create mode 100644 x/nft-transfer/simulation/decoder.go create mode 100644 x/nft-transfer/simulation/genesis.go create mode 100644 x/nft-transfer/simulation/genesis_test.go create mode 100644 x/nft-transfer/types/ack.go create 
mode 100644 x/nft-transfer/types/codec.go create mode 100644 x/nft-transfer/types/errors.go create mode 100644 x/nft-transfer/types/events.go create mode 100644 x/nft-transfer/types/expected_keepers.go create mode 100644 x/nft-transfer/types/genesis.go create mode 100644 x/nft-transfer/types/genesis.pb.go create mode 100644 x/nft-transfer/types/genesis_test.go create mode 100644 x/nft-transfer/types/keys.go create mode 100644 x/nft-transfer/types/msgs.go create mode 100644 x/nft-transfer/types/msgs_test.go create mode 100644 x/nft-transfer/types/packet.go create mode 100644 x/nft-transfer/types/packet.pb.go create mode 100644 x/nft-transfer/types/packet_test.go create mode 100644 x/nft-transfer/types/query.pb.go create mode 100644 x/nft-transfer/types/query.pb.gw.go create mode 100644 x/nft-transfer/types/trace.go create mode 100644 x/nft-transfer/types/trace.pb.go create mode 100644 x/nft-transfer/types/trace_test.go create mode 100644 x/nft-transfer/types/tx.pb.go diff --git a/.gitmodules b/.gitmodules index 15c01dad4..864e6d0e4 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,3 +1,7 @@ [submodule "cosmos-sdk"] path = third_party/cosmos-sdk url = https://github.com/cosmos/cosmos-sdk.git + +[submodule "ibc-go"] + path = third_party/ibc-go + url = https://github.com/cosmos/ibc-go.git diff --git a/app/app.go b/app/app.go index fca4163bc..c42483669 100644 --- a/app/app.go +++ b/app/app.go @@ -126,6 +126,9 @@ import ( icaauthmodulekeeper "github.com/crypto-org-chain/chain-main/v4/x/icaauth/keeper" icaauthmoduletypes "github.com/crypto-org-chain/chain-main/v4/x/icaauth/types" "github.com/crypto-org-chain/chain-main/v4/x/nft" + nfttransfer "github.com/crypto-org-chain/chain-main/v4/x/nft-transfer" + nfttransferkeeper "github.com/crypto-org-chain/chain-main/v4/x/nft-transfer/keeper" + nfttransfertypes "github.com/crypto-org-chain/chain-main/v4/x/nft-transfer/types" nftkeeper "github.com/crypto-org-chain/chain-main/v4/x/nft/keeper" nfttypes "github.com/crypto-org-chain/chain-main/v4/x/nft/types" supply "github.com/crypto-org-chain/chain-main/v4/x/supply" @@ -168,6 +171,7 @@ var ( upgrade.AppModuleBasic{}, evidence.AppModuleBasic{}, transfer.AppModuleBasic{}, + nfttransfer.AppModuleBasic{}, authzmodule.AppModuleBasic{}, groupmodule.AppModuleBasic{}, vesting.AppModuleBasic{}, @@ -237,6 +241,7 @@ type ChainApp struct { FeeGrantKeeper feegrantkeeper.Keeper GroupKeeper groupkeeper.Keeper TransferKeeper ibctransferkeeper.Keeper + NFTTransferKeeper nfttransferkeeper.Keeper chainmainKeeper chainmainkeeper.Keeper SupplyKeeper supplykeeper.Keeper NFTKeeper nftkeeper.Keeper @@ -244,6 +249,7 @@ type ChainApp struct { // make scoped keepers public for test purposes ScopedIBCKeeper capabilitykeeper.ScopedKeeper ScopedTransferKeeper capabilitykeeper.ScopedKeeper + ScopedNFTTransferKeeper capabilitykeeper.ScopedKeeper ScopedIBCFeeKeeper capabilitykeeper.ScopedKeeper ScopedICAControllerKeeper capabilitykeeper.ScopedKeeper ScopedICAHostKeeper capabilitykeeper.ScopedKeeper @@ -310,6 +316,7 @@ func New( icahosttypes.StoreKey, capabilitytypes.StoreKey, authzkeeper.StoreKey, + nfttransfertypes.StoreKey, group.StoreKey, ibcfeetypes.StoreKey, icaauthmoduletypes.StoreKey, @@ -346,6 +353,7 @@ func New( app.CapabilityKeeper = capabilitykeeper.NewKeeper(appCodec, keys[capabilitytypes.StoreKey], memKeys[capabilitytypes.MemStoreKey]) scopedIBCKeeper := app.CapabilityKeeper.ScopeToModule(ibchost.ModuleName) scopedTransferKeeper := app.CapabilityKeeper.ScopeToModule(ibctransfertypes.ModuleName) + scopedNFTTransferKeeper := 
app.CapabilityKeeper.ScopeToModule(nfttransfertypes.ModuleName) scopedICAControllerKeeper := app.CapabilityKeeper.ScopeToModule(icacontrollertypes.SubModuleName) scopedICAHostKeeper := app.CapabilityKeeper.ScopeToModule(icahosttypes.SubModuleName) scopedICAAuthKeeper := app.CapabilityKeeper.ScopeToModule(icaauthmoduletypes.ModuleName) @@ -444,6 +452,21 @@ func New( transferStack = transfer.NewIBCModule(app.TransferKeeper) transferStack = ibcfee.NewIBCMiddleware(transferStack, app.IBCFeeKeeper) + app.NFTTransferKeeper = nfttransferkeeper.NewKeeper( + appCodec, + keys[nfttransfertypes.StoreKey], + app.IBCFeeKeeper, + app.IBCKeeper.ChannelKeeper, + &app.IBCKeeper.PortKeeper, + app.NFTKeeper, + app.AccountKeeper, + scopedNFTTransferKeeper, + ) + + var nftTransferStack porttypes.IBCModule + nftTransferStack = nfttransfer.NewIBCModule(app.NFTTransferKeeper) + nftTransferStack = ibcfee.NewIBCMiddleware(nftTransferStack, app.IBCFeeKeeper) + app.ICAControllerKeeper = icacontrollerkeeper.NewKeeper( appCodec, keys[icacontrollertypes.StoreKey], app.GetSubspace(icacontrollertypes.SubModuleName), app.IBCFeeKeeper, // ISC4 Wrapper: fee IBC middleware @@ -477,6 +500,7 @@ func New( ibcRouter.AddRoute(icacontrollertypes.SubModuleName, icaControllerStack) ibcRouter.AddRoute(icahosttypes.SubModuleName, icaHostStack) ibcRouter.AddRoute(ibctransfertypes.ModuleName, transferStack) + ibcRouter.AddRoute(nfttransfertypes.ModuleName, nftTransferStack) ibcRouter.AddRoute(icaauthmoduletypes.ModuleName, icaControllerStack) app.IBCKeeper.SetRouter(ibcRouter) @@ -511,6 +535,7 @@ func New( authzmodule.NewAppModule(appCodec, app.AuthzKeeper, app.AccountKeeper, app.BankKeeper, app.interfaceRegistry), groupmodule.NewAppModule(appCodec, app.GroupKeeper, app.AccountKeeper, app.BankKeeper, app.interfaceRegistry), transferModule, + nfttransfer.NewAppModule(app.NFTTransferKeeper), feeModule, icaModule, icaAuthModule, @@ -547,6 +572,7 @@ func New( ibcfeetypes.ModuleName, chainmaintypes.ModuleName, nfttypes.ModuleName, + nfttransfertypes.ModuleName, supplytypes.ModuleName, ) app.mm.SetOrderEndBlockers( @@ -573,6 +599,7 @@ func New( ibcfeetypes.ModuleName, chainmaintypes.ModuleName, nfttypes.ModuleName, + nfttransfertypes.ModuleName, supplytypes.ModuleName, ) @@ -603,6 +630,7 @@ func New( chainmaintypes.ModuleName, supplytypes.ModuleName, nfttypes.ModuleName, + nfttransfertypes.ModuleName, paramstypes.ModuleName, upgradetypes.ModuleName, vestingtypes.ModuleName, @@ -751,6 +779,7 @@ func New( icahosttypes.StoreKey, icaauthmoduletypes.StoreKey, ibcfeetypes.StoreKey, + nfttransfertypes.StoreKey, }, } diff --git a/app/docs/swagger-ui/swagger.yaml b/app/docs/swagger-ui/swagger.yaml index 1711ce549..a8c89c63f 100644 --- a/app/docs/swagger-ui/swagger.yaml +++ b/app/docs/swagger-ui/swagger.yaml @@ -18153,6 +18153,8 @@ paths: type: string creator: type: string + uri: + type: string title: Denom defines a type of NFT nfts: type: array @@ -18359,6 +18361,8 @@ paths: type: string creator: type: string + uri: + type: string title: Denom defines a type of NFT pagination: type: object @@ -18493,6 +18497,8 @@ paths: type: string creator: type: string + uri: + type: string title: Denom defines a type of NFT title: >- QueryDenomByNameResponse is the response type for the @@ -18547,6 +18553,8 @@ paths: type: string creator: type: string + uri: + type: string title: Denom defines a type of NFT title: >- QueryDenomResponse is the response type for the Query/Denom RPC @@ -34390,6 +34398,8 @@ definitions: type: string creator: type: string + uri: + type: 
string title: Denom defines a type of NFT nfts: type: array @@ -34419,6 +34429,8 @@ definitions: type: string creator: type: string + uri: + type: string title: Denom defines a type of NFT chainmain.nft.v1.IDCollection: type: object @@ -34465,6 +34477,8 @@ definitions: type: string creator: type: string + uri: + type: string title: Denom defines a type of NFT nfts: type: array @@ -34526,6 +34540,8 @@ definitions: type: string creator: type: string + uri: + type: string title: Denom defines a type of NFT title: >- QueryDenomByNameResponse is the response type for the Query/DenomByName @@ -34544,6 +34560,8 @@ definitions: type: string creator: type: string + uri: + type: string title: Denom defines a type of NFT title: QueryDenomResponse is the response type for the Query/Denom RPC method chainmain.nft.v1.QueryDenomsResponse: @@ -34562,6 +34580,8 @@ definitions: type: string creator: type: string + uri: + type: string title: Denom defines a type of NFT pagination: type: object diff --git a/buf.work.yaml b/buf.work.yaml index 1878b341b..05e6da05e 100644 --- a/buf.work.yaml +++ b/buf.work.yaml @@ -1,3 +1,4 @@ version: v1 directories: - proto + - third_party/ibc-go/proto diff --git a/go.mod b/go.mod index c393f6aed..34c360122 100644 --- a/go.mod +++ b/go.mod @@ -4,6 +4,7 @@ go 1.18 require ( cosmossdk.io/math v1.0.0-beta.3 + github.com/armon/go-metrics v0.4.0 github.com/confluentinc/bincover v0.1.0 github.com/cosmos/cosmos-proto v1.0.0-alpha7 github.com/cosmos/cosmos-sdk v0.46.1 @@ -39,7 +40,6 @@ require ( github.com/99designs/keyring v1.2.1 // indirect github.com/ChainSafe/go-schnorrkel v0.0.0-20200405005733-88cbf1b4c40d // indirect github.com/Workiva/go-datastructures v1.0.53 // indirect - github.com/armon/go-metrics v0.4.0 // indirect github.com/aws/aws-sdk-go v1.40.45 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect diff --git a/proto/nft/v1/nft.proto b/proto/nft/v1/nft.proto index ed8366e18..07eefe49e 100644 --- a/proto/nft/v1/nft.proto +++ b/proto/nft/v1/nft.proto @@ -27,6 +27,7 @@ message Denom { string name = 2; string schema = 3; string creator = 4; + string uri = 5; } // IDCollection defines a type of collection with specified ID diff --git a/proto/nft/v1/tx.proto b/proto/nft/v1/tx.proto index 49b774a46..bea8afde6 100644 --- a/proto/nft/v1/tx.proto +++ b/proto/nft/v1/tx.proto @@ -34,6 +34,7 @@ message MsgIssueDenom { string name = 2; string schema = 3; string sender = 4; + string uri = 5; } // MsgIssueDenomResponse defines the Msg/IssueDenom response type. 
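For context on the new uri field added to Denom and MsgIssueDenom above, here is a minimal, hypothetical sketch of constructing the message with the regenerated Go types. The field names assume the default gogoproto mapping of the proto definitions shown above; the id, name, sender, and URI values are placeholders, not values taken from this patch.

package main

import (
	"fmt"

	nfttypes "github.com/crypto-org-chain/chain-main/v4/x/nft/types"
)

func main() {
	// Field names follow the proto messages above; all values are placeholders.
	msg := &nfttypes.MsgIssueDenom{
		Id:     "testdenom",                  // denom identifier (placeholder)
		Name:   "Test Collection",            // human-readable name (placeholder)
		Schema: "{}",                         // metadata schema (placeholder)
		Sender: "cro1exampleaddress",         // bech32 sender address (placeholder)
		Uri:    "ipfs://collection-metadata", // new field introduced by this change
	}
	fmt.Println(msg.String())
}

Signing and broadcasting are omitted here; in practice the message goes through the existing nft CLI or a tx client, as elsewhere in this patch.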
diff --git a/proto/nft_transfer/v1/genesis.proto b/proto/nft_transfer/v1/genesis.proto new file mode 100644 index 000000000..f37abd16b --- /dev/null +++ b/proto/nft_transfer/v1/genesis.proto @@ -0,0 +1,13 @@ +syntax = "proto3"; +package chainmain.nft_transfer.v1; + +option go_package = "github.com/crypto-org-chain/chain-main/x/nft-transfer/types"; + +import "nft_transfer/v1/trace.proto"; +import "gogoproto/gogo.proto"; + +// GenesisState defines the ibc-nft-transfer genesis state +message GenesisState { + string port_id = 1; + repeated ClassTrace traces = 2 [(gogoproto.castrepeated) = "Traces", (gogoproto.nullable) = false]; +} diff --git a/proto/nft_transfer/v1/packet.proto b/proto/nft_transfer/v1/packet.proto new file mode 100644 index 000000000..b880b03ae --- /dev/null +++ b/proto/nft_transfer/v1/packet.proto @@ -0,0 +1,22 @@ +syntax = "proto3"; +package chainmain.nft_transfer.v1; + +option go_package = "github.com/crypto-org-chain/chain-main/x/nft-transfer/types"; + +// NonFungibleTokenPacketData defines a struct for the packet payload +// See NonFungibleTokenPacketData spec: +// https://github.com/cosmos/ibc/tree/master/spec/app/ics-721-nft-transfer#data-structures +message NonFungibleTokenPacketData { + // the class_id of tokens to be transferred + string class_id = 1; + // the class_uri of tokens to be transferred + string class_uri = 2; + // the non fungible tokens to be transferred + repeated string token_ids = 3; + // the non fungible tokens's uri to be transferred + repeated string token_uris = 4; + // the sender address + string sender = 5; + // the recipient address on the destination chain + string receiver = 6; +} diff --git a/proto/nft_transfer/v1/query.proto b/proto/nft_transfer/v1/query.proto new file mode 100644 index 000000000..fedb535a7 --- /dev/null +++ b/proto/nft_transfer/v1/query.proto @@ -0,0 +1,90 @@ +syntax = "proto3"; +package chainmain.nft_transfer.v1; + +option go_package = "github.com/crypto-org-chain/chain-main/x/nft-transfer/types"; + +import "gogoproto/gogo.proto"; +import "cosmos/base/query/v1beta1/pagination.proto"; +import "nft_transfer/v1/trace.proto"; +import "google/api/annotations.proto"; + +// Query provides defines the gRPC querier service. +service Query { + // ClassTrace queries a class trace information. + rpc ClassTrace(QueryClassTraceRequest) returns (QueryClassTraceResponse) { + option (google.api.http).get = "/ibc/apps/nft_transfer/v1/class_traces/{hash}"; + } + + // ClassTraces queries all class traces. + rpc ClassTraces(QueryClassTracesRequest) returns (QueryClassTracesResponse) { + option (google.api.http).get = "/ibc/apps/nft_transfer/v1/class_traces"; + } + + // ClassHash queries a class hash information. + rpc ClassHash(QueryClassHashRequest) returns (QueryClassHashResponse) { + option (google.api.http).get = "/ibc/apps/nft_transfer/v1/class_hashes/{trace}"; + } + + // EscrowAddress returns the escrow address for a particular port and channel id. + rpc EscrowAddress(QueryEscrowAddressRequest) returns (QueryEscrowAddressResponse) { + option (google.api.http).get = "/ibc/apps/nft_transfer/v1/channels/{channel_id}/ports/{port_id}/escrow_address"; + } +} + +// QueryClassTraceRequest is the request type for the Query/ClassDenom RPC +// method +message QueryClassTraceRequest { + // hash (in hex format) or classID (full classID with ibc prefix) of the denomination trace information. + string hash = 1; +} + +// QueryClassTraceResponse is the response type for the Query/ClassDenom RPC +// method. 
+message QueryClassTraceResponse { + // class_trace returns the requested class trace information. + ClassTrace class_trace = 1; +} + +// QueryConnectionsRequest is the request type for the Query/ClassTraces RPC +// method +message QueryClassTracesRequest { + // pagination defines an optional pagination for the request. + cosmos.base.query.v1beta1.PageRequest pagination = 1; +} + +// QueryClassTracesResponse is the response type for the Query/ClassTraces RPC +// method. +message QueryClassTracesResponse { + // class_traces returns all class trace information. + repeated ClassTrace class_traces = 1 [(gogoproto.castrepeated) = "Traces", (gogoproto.nullable) = false]; + // pagination defines the pagination in the response. + cosmos.base.query.v1beta1.PageResponse pagination = 2; +} + +// QueryClassHashRequest is the request type for the Query/ClassHash RPC +// method +message QueryClassHashRequest { + // The class trace ([port_id]/[channel_id])+/[denom] + string trace = 1; +} + +// QueryClassHashResponse is the response type for the Query/ClassHash RPC +// method. +message QueryClassHashResponse { + // hash (in hex format) of the denomination trace information. + string hash = 1; +} + +// QueryEscrowAddressRequest is the request type for the EscrowAddress RPC method. +message QueryEscrowAddressRequest { + // unique port identifier + string port_id = 1; + // unique channel identifier + string channel_id = 2; +} + +// QueryEscrowAddressResponse is the response type of the EscrowAddress RPC method. +message QueryEscrowAddressResponse { + // the escrow account address + string escrow_address = 1; +} diff --git a/proto/nft_transfer/v1/trace.proto b/proto/nft_transfer/v1/trace.proto new file mode 100644 index 000000000..72b70eecc --- /dev/null +++ b/proto/nft_transfer/v1/trace.proto @@ -0,0 +1,14 @@ +syntax = "proto3"; +package chainmain.nft_transfer.v1; + +option go_package = "github.com/crypto-org-chain/chain-main/x/nft-transfer/types"; + +// ClassTrace contains the base classID for ICS721 non-fungible tokens and the +// source tracing information path. +message ClassTrace { + // path defines the chain of port/channel identifiers used for tracing the + // source of the non-fungible token. + string path = 1; + // base classID of the relayed non-fungible token. + string base_class_id = 2; +} diff --git a/proto/nft_transfer/v1/tx.proto b/proto/nft_transfer/v1/tx.proto new file mode 100644 index 000000000..dadc07ed7 --- /dev/null +++ b/proto/nft_transfer/v1/tx.proto @@ -0,0 +1,44 @@ +syntax = "proto3"; +package chainmain.nft_transfer.v1; + +option go_package = "github.com/crypto-org-chain/chain-main/x/nft-transfer/types"; + +import "gogoproto/gogo.proto"; +import "ibc/core/client/v1/client.proto"; + +// Msg defines the ibc/nft-transfer Msg service. +service Msg { + // Transfer defines a rpc handler method for MsgTransfer. + rpc Transfer(MsgTransfer) returns (MsgTransferResponse); +} + +// MsgTransfer defines a msg to transfer non fungible tokens between +// ICS721 enabled chains. 
See ICS Spec here: +// https://github.com/cosmos/ibc/tree/master/spec/app/ics-721-nft-transfer#data-structures +message MsgTransfer { + option (gogoproto.equal) = false; + option (gogoproto.goproto_getters) = false; + + // the port on which the packet will be sent + string source_port = 1 [(gogoproto.moretags) = "yaml:\"source_port\""]; + // the channel by which the packet will be sent + string source_channel = 2 [(gogoproto.moretags) = "yaml:\"source_channel\""]; + // the class_id of tokens to be transferred + string class_id = 3; + // the non fungible tokens to be transferred + repeated string token_ids = 4; + // the sender address + string sender = 5; + // the recipient address on the destination chain + string receiver = 6; + // Timeout height relative to the current block height. + // The timeout is disabled when set to 0. + ibc.core.client.v1.Height timeout_height = 7 + [(gogoproto.moretags) = "yaml:\"timeout_height\"", (gogoproto.nullable) = false]; + // Timeout timestamp in absolute nanoseconds since unix epoch. + // The timeout is disabled when set to 0. + uint64 timeout_timestamp = 8 [(gogoproto.moretags) = "yaml:\"timeout_timestamp\""]; +} + +// MsgTransferResponse defines the Msg/Transfer response type. +message MsgTransferResponse {} diff --git a/third_party/ibc-go b/third_party/ibc-go new file mode 160000 index 000000000..f106b747a --- /dev/null +++ b/third_party/ibc-go @@ -0,0 +1 @@ +Subproject commit f106b747a0f3895e9b468d25057f2d949cfdb9a7 diff --git a/x/nft-transfer/client/cli/cli.go b/x/nft-transfer/client/cli/cli.go new file mode 100644 index 000000000..9a5d2f5f6 --- /dev/null +++ b/x/nft-transfer/client/cli/cli.go @@ -0,0 +1,43 @@ +package cli + +import ( + "github.com/spf13/cobra" + + "github.com/cosmos/cosmos-sdk/client" +) + +// GetQueryCmd returns the query commands for IBC connections +func GetQueryCmd() *cobra.Command { + queryCmd := &cobra.Command{ + Use: "nft-transfer", + Short: "IBC non-fungible token transfer query subcommands", + DisableFlagParsing: true, + SuggestionsMinimumDistance: 2, + } + + queryCmd.AddCommand( + GetCmdQueryClassTrace(), + GetCmdQueryClassTraces(), + GetCmdQueryEscrowAddress(), + GetCmdQueryClassHash(), + ) + + return queryCmd +} + +// NewTxCmd returns the transaction commands for IBC non-fungible token transfer +func NewTxCmd() *cobra.Command { + txCmd := &cobra.Command{ + Use: "nft-transfer", + Short: "IBC non-fungible token transfer transaction subcommands", + DisableFlagParsing: true, + SuggestionsMinimumDistance: 2, + RunE: client.ValidateCmd, + } + + txCmd.AddCommand( + NewTransferTxCmd(), + ) + + return txCmd +} diff --git a/x/nft-transfer/client/cli/query.go b/x/nft-transfer/client/cli/query.go new file mode 100644 index 000000000..a8347c3c7 --- /dev/null +++ b/x/nft-transfer/client/cli/query.go @@ -0,0 +1,140 @@ +package cli + +import ( + "fmt" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/version" + "github.com/spf13/cobra" + + "github.com/crypto-org-chain/chain-main/v4/x/nft-transfer/types" +) + +// GetCmdQueryClassTrace defines the command to query a a class trace from a given trace hash or ibc class. 
+func GetCmdQueryClassTrace() *cobra.Command { + cmd := &cobra.Command{ + Use: "class-trace [hash/class]", + Short: "Query the class trace info from a given trace hash or ibc class", + Long: "Query the class trace info from a given trace hash or ibc class", + Example: fmt.Sprintf("%s query nft-transfer class-trace 27A6394C3F9FF9C9DCF5DFFADF9BB5FE9A37C7E92B006199894CF1824DF9AC7C", version.AppName), + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx, err := client.GetClientQueryContext(cmd) + if err != nil { + return err + } + queryClient := types.NewQueryClient(clientCtx) + + req := &types.QueryClassTraceRequest{ + Hash: args[0], + } + + res, err := queryClient.ClassTrace(cmd.Context(), req) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + return cmd +} + +// GetCmdQueryClassTraces defines the command to query all the class trace infos +// that this chain mantains. +func GetCmdQueryClassTraces() *cobra.Command { + cmd := &cobra.Command{ + Use: "class-traces", + Short: "Query the trace info for all the class", + Long: "Query the trace info for all the class", + Example: fmt.Sprintf("%s query nft-transfer class-traces", version.AppName), + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, _ []string) error { + clientCtx, err := client.GetClientQueryContext(cmd) + if err != nil { + return err + } + queryClient := types.NewQueryClient(clientCtx) + + pageReq, err := client.ReadPageRequest(cmd.Flags()) + if err != nil { + return err + } + + req := &types.QueryClassTracesRequest{ + Pagination: pageReq, + } + + res, err := queryClient.ClassTraces(cmd.Context(), req) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + flags.AddQueryFlagsToCmd(cmd) + flags.AddPaginationFlagsToCmd(cmd, "class trace") + + return cmd +} + +// GetCmdQueryEscrowAddress returns the command handler for nft-transfer escrow-address querying. +func GetCmdQueryEscrowAddress() *cobra.Command { + cmd := &cobra.Command{ + Use: "escrow-address", + Short: "Get the escrow address for a channel", + Long: "Get the escrow address for a channel", + Args: cobra.ExactArgs(2), + Example: fmt.Sprintf("%s query nft-transfer escrow-address [port] [channel-id]", version.AppName), + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx, err := client.GetClientQueryContext(cmd) + if err != nil { + return err + } + port := args[0] + channel := args[1] + addr := types.GetEscrowAddress(port, channel) + return clientCtx.PrintString(fmt.Sprintf("%s\n", addr.String())) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +// GetCmdQueryClassHash defines the command to query a class hash from a given trace. 
+func GetCmdQueryClassHash() *cobra.Command { + cmd := &cobra.Command{ + Use: "class-hash [trace]", + Short: "Query the class hash info from a given class trace", + Long: "Query the class hash info from a given class trace", + Example: fmt.Sprintf("%s query nft-transfer class-hash transfer/channel-0/class-id", version.AppName), + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx, err := client.GetClientQueryContext(cmd) + if err != nil { + return err + } + queryClient := types.NewQueryClient(clientCtx) + + req := &types.QueryClassHashRequest{ + Trace: args[0], + } + + res, err := queryClient.ClassHash(cmd.Context(), req) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + return cmd +} diff --git a/x/nft-transfer/client/cli/tx.go b/x/nft-transfer/client/cli/tx.go new file mode 100644 index 000000000..9cd0f5559 --- /dev/null +++ b/x/nft-transfer/client/cli/tx.go @@ -0,0 +1,121 @@ +package cli + +import ( + "errors" + "fmt" + "strings" + "time" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/client/tx" + "github.com/cosmos/cosmos-sdk/version" + "github.com/spf13/cobra" + + clienttypes "github.com/cosmos/ibc-go/v5/modules/core/02-client/types" + channelutils "github.com/cosmos/ibc-go/v5/modules/core/04-channel/client/utils" + "github.com/crypto-org-chain/chain-main/v4/x/nft-transfer/types" +) + +const ( + flagPacketTimeoutHeight = "packet-timeout-height" + flagPacketTimeoutTimestamp = "packet-timeout-timestamp" + flagAbsoluteTimeouts = "absolute-timeouts" +) + +// NewTransferTxCmd returns the command to create a NewMsgTransfer transaction +func NewTransferTxCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "transfer [src-port] [src-channel] [receiver] [classID] [tokenIDs]", + Short: "Transfer a non-fungible token through IBC", + Long: strings.TrimSpace(`Transfer a non-fungible token through IBC. Timeouts can be specified +as absolute or relative using the "absolute-timeouts" flag. Timeout height can be set by passing in the height string +in the form {revision}-{height} using the "packet-timeout-height" flag. Relative timeout height is added to the block +height queried from the latest consensus state corresponding to the counterparty channel. Relative timeout timestamp +is added to the greater value of the local clock time and the block timestamp queried from the latest consensus state +corresponding to the counterparty channel. 
Any timeout set to 0 is disabled.`), + Example: fmt.Sprintf("%s tx nft-transfer transfer [src-port] [src-channel] [receiver] [classID] [tokenIDs]", version.AppName), + Args: cobra.ExactArgs(5), + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx, err := client.GetClientTxContext(cmd) + if err != nil { + return err + } + sender := clientCtx.GetFromAddress().String() + srcPort := args[0] + srcChannel := args[1] + receiver := args[2] + classID := args[3] + tokenIDs := strings.Split(args[4], ",") + + if len(tokenIDs) == 0 { + return errors.New("tokenIDs cannot be empty") + } + + timeoutHeightStr, err := cmd.Flags().GetString(flagPacketTimeoutHeight) + if err != nil { + return err + } + timeoutHeight, err := clienttypes.ParseHeight(timeoutHeightStr) + if err != nil { + return err + } + + timeoutTimestamp, err := cmd.Flags().GetUint64(flagPacketTimeoutTimestamp) + if err != nil { + return err + } + + absoluteTimeouts, err := cmd.Flags().GetBool(flagAbsoluteTimeouts) + if err != nil { + return err + } + + // if the timeouts are not absolute, retrieve latest block height and block timestamp + // for the consensus state connected to the destination port/channel + if !absoluteTimeouts { + consensusState, height, _, err := channelutils.QueryLatestConsensusState(clientCtx, srcPort, srcChannel) + if err != nil { + return err + } + + if !timeoutHeight.IsZero() { + absoluteHeight := height + absoluteHeight.RevisionNumber += timeoutHeight.RevisionNumber + absoluteHeight.RevisionHeight += timeoutHeight.RevisionHeight + timeoutHeight = absoluteHeight + } + + if timeoutTimestamp != 0 { + // use local clock time as reference time if it is later than the + // consensus state timestamp of the counter party chain, otherwise + // still use consensus state timestamp as reference + now := time.Now().UnixNano() + consensusStateTimestamp := consensusState.GetTimestamp() + if now > 0 { + now := uint64(now) + if now > consensusStateTimestamp { + timeoutTimestamp = now + timeoutTimestamp + } else { + timeoutTimestamp = consensusStateTimestamp + timeoutTimestamp + } + } else { + return errors.New("local clock time is not greater than Jan 1st, 1970 12:00 AM") + } + } + } + + msg := types.NewMsgTransfer( + srcPort, srcChannel, classID, tokenIDs, sender, receiver, timeoutHeight, timeoutTimestamp, + ) + return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), msg) + }, + } + + cmd.Flags().String(flagPacketTimeoutHeight, types.DefaultRelativePacketTimeoutHeight, "Packet timeout block height. The timeout is disabled when set to 0-0.") + cmd.Flags().Uint64(flagPacketTimeoutTimestamp, types.DefaultRelativePacketTimeoutTimestamp, "Packet timeout timestamp in nanoseconds from now. Default is 10 minutes. 
The timeout is disabled when set to 0.") + cmd.Flags().Bool(flagAbsoluteTimeouts, false, "Timeout flags are used as absolute timeouts.") + flags.AddTxFlagsToCmd(cmd) + + return cmd +} diff --git a/x/nft-transfer/ibc_module.go b/x/nft-transfer/ibc_module.go new file mode 100644 index 000000000..b34d0353d --- /dev/null +++ b/x/nft-transfer/ibc_module.go @@ -0,0 +1,291 @@ +package nfttransfer + +import ( + "fmt" + "math" + "strings" + + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types" + + channeltypes "github.com/cosmos/ibc-go/v5/modules/core/04-channel/types" + porttypes "github.com/cosmos/ibc-go/v5/modules/core/05-port/types" + host "github.com/cosmos/ibc-go/v5/modules/core/24-host" + ibcexported "github.com/cosmos/ibc-go/v5/modules/core/exported" + "github.com/crypto-org-chain/chain-main/v4/x/nft-transfer/keeper" + "github.com/crypto-org-chain/chain-main/v4/x/nft-transfer/types" +) + +var _ porttypes.IBCModule = IBCModule{} + +// IBCModule implements the ICS26 interface for transfer given the transfer keeper. +type IBCModule struct { + keeper keeper.Keeper +} + +// NewIBCModule creates a new IBCModule given the keeper +func NewIBCModule(k keeper.Keeper) IBCModule { + return IBCModule{ + keeper: k, + } +} + +// ValidateTransferChannelParams does validation of a newly created nft-transfer channel. A nft-transfer +// channel must be UNORDERED, use the correct port (by default 'nft-transfer'), and use the current +// supported version. Only 2^32 channels are allowed to be created. +func ValidateTransferChannelParams( + ctx sdk.Context, + keeper keeper.Keeper, + order channeltypes.Order, + portID string, + channelID string, +) error { + // NOTE: for escrow address security only 2^32 channels are allowed to be created + // Issue: https://github.com/cosmos/cosmos-sdk/issues/7737 + channelSequence, err := channeltypes.ParseChannelSequence(channelID) + if err != nil { + return err + } + if channelSequence > uint64(math.MaxUint32) { + return sdkerrors.Wrapf(types.ErrMaxTransferChannels, "channel sequence %d is greater than max allowed nft-transfer channels %d", channelSequence, uint64(math.MaxUint32)) + } + if order != channeltypes.UNORDERED { + return sdkerrors.Wrapf(channeltypes.ErrInvalidChannelOrdering, "expected %s channel, got %s ", channeltypes.UNORDERED, order) + } + + // Require portID is the portID transfer module is bound to + boundPort := keeper.GetPort(ctx) + if boundPort != portID { + return sdkerrors.Wrapf(porttypes.ErrInvalidPort, "invalid port: %s, expected %s", portID, boundPort) + } + + return nil +} + +// OnChanOpenInit implements the IBCModule interface +func (im IBCModule) OnChanOpenInit( + ctx sdk.Context, + order channeltypes.Order, + connectionHops []string, + portID string, + channelID string, + chanCap *capabilitytypes.Capability, + counterparty channeltypes.Counterparty, + version string, +) (string, error) { + if err := ValidateTransferChannelParams(ctx, im.keeper, order, portID, channelID); err != nil { + return "", err + } + + if strings.TrimSpace(version) == "" { + version = types.Version + } + + if version != types.Version { + return "", sdkerrors.Wrapf(types.ErrInvalidVersion, "got %s, expected %s", version, types.Version) + } + + // Claim channel capability passed back by IBC module + if err := im.keeper.ClaimCapability(ctx, chanCap, host.ChannelCapabilityPath(portID, channelID)); err != nil { + return "", err + } + + return "", nil +} + +// 
OnChanOpenTry implements the IBCModule interface. +func (im IBCModule) OnChanOpenTry( + ctx sdk.Context, + order channeltypes.Order, + connectionHops []string, + portID, + channelID string, + chanCap *capabilitytypes.Capability, + counterparty channeltypes.Counterparty, + counterpartyVersion string, +) (string, error) { + if err := ValidateTransferChannelParams(ctx, im.keeper, order, portID, channelID); err != nil { + return "", err + } + + if counterpartyVersion != types.Version { + return "", sdkerrors.Wrapf(types.ErrInvalidVersion, "invalid counterparty version: %s, expected %s", counterpartyVersion, types.Version) + } + + // Module may have already claimed capability in OnChanOpenInit in the case of crossing hellos + // (ie chainA and chainB both call ChanOpenInit before one of them calls ChanOpenTry) + // If module can already authenticate the capability then module already owns it so we don't need to claim + // Otherwise, module does not have channel capability and we must claim it from IBC + if !im.keeper.AuthenticateCapability(ctx, chanCap, host.ChannelCapabilityPath(portID, channelID)) { + // Only claim channel capability passed back by IBC module if we do not already own it + if err := im.keeper.ClaimCapability(ctx, chanCap, host.ChannelCapabilityPath(portID, channelID)); err != nil { + return "", err + } + } + + return "", nil +} + +// OnChanOpenAck implements the IBCModule interface +func (im IBCModule) OnChanOpenAck( + ctx sdk.Context, + portID, + channelID string, + _ string, + counterpartyVersion string, +) error { + if counterpartyVersion != types.Version { + return sdkerrors.Wrapf(types.ErrInvalidVersion, "invalid counterparty version: %s, expected %s", counterpartyVersion, types.Version) + } + im.keeper.SetEscrowAddress(ctx, portID, channelID) + return nil +} + +// OnChanOpenConfirm implements the IBCModule interface +func (im IBCModule) OnChanOpenConfirm( + ctx sdk.Context, + portID, + channelID string, +) error { + im.keeper.SetEscrowAddress(ctx, portID, channelID) + return nil +} + +// OnChanCloseInit implements the IBCModule interface +func (im IBCModule) OnChanCloseInit( + ctx sdk.Context, + portID, + channelID string, +) error { + // Disallow user-initiated channel closing for transfer channels + return sdkerrors.Wrap(sdkerrors.ErrInvalidRequest, "user cannot close channel") +} + +// OnChanCloseConfirm implements the IBCModule interface +func (im IBCModule) OnChanCloseConfirm( + ctx sdk.Context, + portID, + channelID string, +) error { + return nil +} + +// OnRecvPacket implements the IBCModule interface. A successful acknowledgement +// is returned if the packet data is successfully decoded and the receive application +// logic returns without error. 
+func (im IBCModule) OnRecvPacket( + ctx sdk.Context, + packet channeltypes.Packet, + relayer sdk.AccAddress, +) ibcexported.Acknowledgement { + ack := channeltypes.NewResultAcknowledgement([]byte{byte(1)}) + + var data types.NonFungibleTokenPacketData + if err := types.ModuleCdc.UnmarshalJSON(packet.GetData(), &data); err != nil { + ack = channeltypes.NewErrorAcknowledgement(fmt.Errorf("cannot unmarshal ICS-721 nft-transfer packet data")) + } + + // only attempt the application logic if the packet data + // was successfully decoded + if ack.Success() { + if err := im.keeper.OnRecvPacket(ctx, packet, data); err != nil { + ack = types.NewErrorAcknowledgement(err) + } + } + + ctx.EventManager().EmitEvent( + sdk.NewEvent( + types.EventTypePacket, + sdk.NewAttribute(sdk.AttributeKeyModule, types.ModuleName), + sdk.NewAttribute(sdk.AttributeKeySender, data.Sender), + sdk.NewAttribute(types.AttributeKeyReceiver, data.Receiver), + sdk.NewAttribute(types.AttributeKeyClassID, data.ClassId), + sdk.NewAttribute(types.AttributeKeyTokenIDs, strings.Join(data.TokenIds, ",")), + sdk.NewAttribute(types.AttributeKeyAckSuccess, fmt.Sprintf("%t", ack.Success())), + ), + ) + + // NOTE: acknowledgement will be written synchronously during IBC handler execution. + return ack +} + +// OnAcknowledgementPacket implements the IBCModule interface +func (im IBCModule) OnAcknowledgementPacket( + ctx sdk.Context, + packet channeltypes.Packet, + acknowledgement []byte, + relayer sdk.AccAddress, +) error { + var ack channeltypes.Acknowledgement + if err := types.ModuleCdc.UnmarshalJSON(acknowledgement, &ack); err != nil { + return sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "cannot unmarshal ICS-721 transfer packet acknowledgement: %v", err) + } + var data types.NonFungibleTokenPacketData + if err := types.ModuleCdc.UnmarshalJSON(packet.GetData(), &data); err != nil { + return sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "cannot unmarshal ICS-721 transfer packet data: %s", err.Error()) + } + + if err := im.keeper.OnAcknowledgementPacket(ctx, packet, data, ack); err != nil { + return err + } + + ctx.EventManager().EmitEvent( + sdk.NewEvent( + types.EventTypePacket, + sdk.NewAttribute(sdk.AttributeKeyModule, types.ModuleName), + sdk.NewAttribute(sdk.AttributeKeySender, data.Sender), + sdk.NewAttribute(types.AttributeKeyReceiver, data.Receiver), + sdk.NewAttribute(types.AttributeKeyClassID, data.ClassId), + sdk.NewAttribute(types.AttributeKeyTokenIDs, strings.Join(data.TokenIds, ",")), + sdk.NewAttribute(types.AttributeKeyAckSuccess, fmt.Sprintf("%t", ack.Success())), + ), + ) + + switch resp := ack.Response.(type) { + case *channeltypes.Acknowledgement_Result: + ctx.EventManager().EmitEvent( + sdk.NewEvent( + types.EventTypePacket, + sdk.NewAttribute(types.AttributeKeyAckSuccess, string(resp.Result)), + ), + ) + case *channeltypes.Acknowledgement_Error: + ctx.EventManager().EmitEvent( + sdk.NewEvent( + types.EventTypePacket, + sdk.NewAttribute(types.AttributeKeyAckError, resp.Error), + ), + ) + } + + return nil +} + +// OnTimeoutPacket implements the IBCModule interface +func (im IBCModule) OnTimeoutPacket( + ctx sdk.Context, + packet channeltypes.Packet, + relayer sdk.AccAddress, +) error { + var data types.NonFungibleTokenPacketData + if err := types.ModuleCdc.UnmarshalJSON(packet.GetData(), &data); err != nil { + return sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "cannot unmarshal ICS-721 transfer packet data: %s", err.Error()) + } + // refund tokens + if err := im.keeper.OnTimeoutPacket(ctx, packet, data); err != nil 
{ + return err + } + + // ctx.EventManager().EmitEvent( + // sdk.NewEvent( + // types.EventTypeTimeout, + // sdk.NewAttribute(sdk.AttributeKeyModule, types.ModuleName), + // sdk.NewAttribute(types.AttributeKeyRefundReceiver, data.Sender), + // sdk.NewAttribute(types.AttributeKeyRefundDenom, data.Denom), + // sdk.NewAttribute(types.AttributeKeyRefundAmount, data.Amount), + // ), + // ) + + return nil +} diff --git a/x/nft-transfer/keeper/genesis.go b/x/nft-transfer/keeper/genesis.go new file mode 100644 index 000000000..85ef635a5 --- /dev/null +++ b/x/nft-transfer/keeper/genesis.go @@ -0,0 +1,36 @@ +package keeper + +import ( + "fmt" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/crypto-org-chain/chain-main/v4/x/nft-transfer/types" +) + +// InitGenesis initializes the ibc nft-transfer state and binds to PortID. +func (k Keeper) InitGenesis(ctx sdk.Context, state types.GenesisState) { + k.SetPort(ctx, state.PortId) + + for _, trace := range state.Traces { + k.SetClassTrace(ctx, trace) + } + + // Only try to bind to port if it is not already bound, since we may already own + // port capability from capability InitGenesis + if !k.IsBound(ctx, state.PortId) { + // nft-transfer module binds to the nft-transfer port on InitChain + // and claims the returned capability + err := k.BindPort(ctx, state.PortId) + if err != nil { + panic(fmt.Sprintf("could not claim port capability: %v", err)) + } + } +} + +// ExportGenesis exports ibc nft-transfer module's portID and class trace info into its genesis state. +func (k Keeper) ExportGenesis(ctx sdk.Context) *types.GenesisState { + return &types.GenesisState{ + PortId: k.GetPort(ctx), + Traces: k.GetAllClassTraces(ctx), + } +} diff --git a/x/nft-transfer/keeper/grpc_query.go b/x/nft-transfer/keeper/grpc_query.go new file mode 100644 index 000000000..79bd5d73c --- /dev/null +++ b/x/nft-transfer/keeper/grpc_query.go @@ -0,0 +1,119 @@ +package keeper + +import ( + "context" + "fmt" + "strings" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/cosmos/cosmos-sdk/store/prefix" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + "github.com/cosmos/cosmos-sdk/types/query" + "github.com/crypto-org-chain/chain-main/v4/x/nft-transfer/types" +) + +var _ types.QueryServer = Keeper{} + +// ClassTrace implements the Query/ClassTrace gRPC method +func (k Keeper) ClassTrace(c context.Context, + req *types.QueryClassTraceRequest, +) (*types.QueryClassTraceResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "empty request") + } + + hash, err := types.ParseHexHash(strings.TrimPrefix(req.Hash, "ibc/")) + if err != nil { + return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("invalid denom trace hash: %s, error: %s", hash.String(), err)) + } + + ctx := sdk.UnwrapSDKContext(c) + classTrace, found := k.GetClassTrace(ctx, hash) + if !found { + return nil, status.Error( + codes.NotFound, + sdkerrors.Wrap(types.ErrTraceNotFound, req.Hash).Error(), + ) + } + + return &types.QueryClassTraceResponse{ + ClassTrace: &classTrace, + }, nil +} + +// ClassTraces implements the Query/ClassTraces gRPC method +func (k Keeper) ClassTraces(c context.Context, + req *types.QueryClassTracesRequest, +) (*types.QueryClassTracesResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "empty request") + } + + ctx := sdk.UnwrapSDKContext(c) + traces := types.Traces{} + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.ClassTraceKey) 
+ pageRes, err := query.Paginate(store, req.Pagination, func(_, value []byte) error { + result, err := k.UnmarshalClassTrace(value) + if err != nil { + return err + } + + traces = append(traces, result) + return nil + }) + if err != nil { + return nil, err + } + + return &types.QueryClassTracesResponse{ + ClassTraces: traces.Sort(), + Pagination: pageRes, + }, nil +} + +// ClassHash implements the Query/ClassHash gRPC method +func (k Keeper) ClassHash(c context.Context, + req *types.QueryClassHashRequest, +) (*types.QueryClassHashResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "empty request") + } + + // Convert given request trace path to ClassTrace struct to confirm the path in a valid class trace format + classTrace := types.ParseClassTrace(req.Trace) + if err := classTrace.Validate(); err != nil { + return nil, status.Error(codes.InvalidArgument, err.Error()) + } + + ctx := sdk.UnwrapSDKContext(c) + classHash := classTrace.Hash() + found := k.HasClassTrace(ctx, classHash) + if !found { + return nil, status.Error( + codes.NotFound, + sdkerrors.Wrap(types.ErrTraceNotFound, req.Trace).Error(), + ) + } + + return &types.QueryClassHashResponse{ + Hash: classHash.String(), + }, nil +} + +// EscrowAddress implements the EscrowAddress gRPC method +func (k Keeper) EscrowAddress(c context.Context, + req *types.QueryEscrowAddressRequest, +) (*types.QueryEscrowAddressResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "empty request") + } + + addr := types.GetEscrowAddress(req.PortId, req.ChannelId) + + return &types.QueryEscrowAddressResponse{ + EscrowAddress: addr.String(), + }, nil +} diff --git a/x/nft-transfer/keeper/keeper.go b/x/nft-transfer/keeper/keeper.go new file mode 100644 index 000000000..630b5e8af --- /dev/null +++ b/x/nft-transfer/keeper/keeper.go @@ -0,0 +1,99 @@ +package keeper + +import ( + "github.com/cosmos/cosmos-sdk/codec" + storetypes "github.com/cosmos/cosmos-sdk/store/types" + sdk "github.com/cosmos/cosmos-sdk/types" + capabilitykeeper "github.com/cosmos/cosmos-sdk/x/capability/keeper" + capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types" + host "github.com/cosmos/ibc-go/v5/modules/core/24-host" + "github.com/crypto-org-chain/chain-main/v4/x/nft-transfer/types" + "github.com/tendermint/tendermint/libs/log" +) + +// Keeper defines the IBC non fungible transfer keeper +type Keeper struct { + storeKey storetypes.StoreKey + cdc codec.BinaryCodec + + ics4Wrapper types.ICS4Wrapper + channelKeeper types.ChannelKeeper + portKeeper types.PortKeeper + nftKeeper types.NFTKeeper + authKeeper types.AccountKeeper + scopedKeeper capabilitykeeper.ScopedKeeper +} + +// NewKeeper creates a new IBC nft-transfer Keeper instance +func NewKeeper( + cdc codec.BinaryCodec, + key storetypes.StoreKey, + ics4Wrapper types.ICS4Wrapper, + channelKeeper types.ChannelKeeper, + portKeeper types.PortKeeper, + nftKeeper types.NFTKeeper, + authKeeper types.AccountKeeper, + scopedKeeper capabilitykeeper.ScopedKeeper, +) Keeper { + return Keeper{ + cdc: cdc, + storeKey: key, + ics4Wrapper: ics4Wrapper, + channelKeeper: channelKeeper, + portKeeper: portKeeper, + nftKeeper: nftKeeper, + authKeeper: authKeeper, + scopedKeeper: scopedKeeper, + } +} + +// Logger returns a module-specific logger. +func (k Keeper) Logger(ctx sdk.Context) log.Logger { + return ctx.Logger().With("module", "x/"+host.ModuleName+"-"+types.ModuleName) +} + +// SetPort sets the portID for the nft-transfer module. 
Used in InitGenesis +func (k Keeper) SetPort(ctx sdk.Context, portID string) { + store := ctx.KVStore(k.storeKey) + store.Set(types.PortKey, []byte(portID)) +} + +// GetPort returns the portID for the nft-transfer module. +func (k Keeper) GetPort(ctx sdk.Context) string { + store := ctx.KVStore(k.storeKey) + return string(store.Get(types.PortKey)) +} + +// IsBound checks if the transfer module is already bound to the desired port +func (k Keeper) IsBound(ctx sdk.Context, portID string) bool { + _, ok := k.scopedKeeper.GetCapability(ctx, host.PortPath(portID)) + return ok +} + +// BindPort defines a wrapper function for the ort Keeper's function in +// order to expose it to module's InitGenesis function +func (k Keeper) BindPort(ctx sdk.Context, portID string) error { + cap := k.portKeeper.BindPort(ctx, portID) + return k.ClaimCapability(ctx, cap, host.PortPath(portID)) +} + +// AuthenticateCapability wraps the scopedKeeper's AuthenticateCapability function +func (k Keeper) AuthenticateCapability(ctx sdk.Context, cap *capabilitytypes.Capability, name string) bool { + return k.scopedKeeper.AuthenticateCapability(ctx, cap, name) +} + +// ClaimCapability allows the nft-transfer module that can claim a capability that IBC module +// passes to it +func (k Keeper) ClaimCapability(ctx sdk.Context, cap *capabilitytypes.Capability, name string) error { + return k.scopedKeeper.ClaimCapability(ctx, cap, name) +} + +// SetEscrowAddress attempts to save a account to auth module +func (k Keeper) SetEscrowAddress(ctx sdk.Context, portID, channelID string) { + // create the escrow address for the tokens + escrowAddress := types.GetEscrowAddress(portID, channelID) + if !k.authKeeper.HasAccount(ctx, escrowAddress) { + acc := k.authKeeper.NewAccountWithAddress(ctx, escrowAddress) + k.authKeeper.SetAccount(ctx, acc) + } +} diff --git a/x/nft-transfer/keeper/msg_server.go b/x/nft-transfer/keeper/msg_server.go new file mode 100644 index 000000000..d20ad7291 --- /dev/null +++ b/x/nft-transfer/keeper/msg_server.go @@ -0,0 +1,48 @@ +package keeper + +import ( + "context" + "strings" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/crypto-org-chain/chain-main/v4/x/nft-transfer/types" +) + +var _ types.MsgServer = Keeper{} + +// Transfer defines a rpc handler method for MsgTransfer. 
+func (k Keeper) Transfer(goCtx context.Context, msg *types.MsgTransfer) (*types.MsgTransferResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + + sender, err := sdk.AccAddressFromBech32(msg.Sender) + if err != nil { + return nil, err + } + if err := k.SendTransfer( + ctx, msg.SourcePort, msg.SourceChannel, msg.ClassId, msg.TokenIds, + sender, msg.Receiver, msg.TimeoutHeight, msg.TimeoutTimestamp, + ); err != nil { + return nil, err + } + + k.Logger(ctx).Info("IBC non-fungible token transfer", + "classID", msg.ClassId, + "tokenIDs", strings.Join(msg.TokenIds, ","), + "sender", msg.Sender, + "receiver", msg.Receiver, + ) + + ctx.EventManager().EmitEvents(sdk.Events{ + sdk.NewEvent( + types.EventTypeTransfer, + sdk.NewAttribute(sdk.AttributeKeySender, msg.Sender), + sdk.NewAttribute(types.AttributeKeyReceiver, msg.Receiver), + ), + sdk.NewEvent( + sdk.EventTypeMessage, + sdk.NewAttribute(sdk.AttributeKeyModule, types.ModuleName), + ), + }) + + return &types.MsgTransferResponse{}, nil +} diff --git a/x/nft-transfer/keeper/packet.go b/x/nft-transfer/keeper/packet.go new file mode 100644 index 000000000..5b397a572 --- /dev/null +++ b/x/nft-transfer/keeper/packet.go @@ -0,0 +1,198 @@ +package keeper + +import ( + "strings" + + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + clienttypes "github.com/cosmos/ibc-go/v5/modules/core/02-client/types" + channeltypes "github.com/cosmos/ibc-go/v5/modules/core/04-channel/types" + "github.com/crypto-org-chain/chain-main/v4/x/nft-transfer/types" +) + +// refundPacketToken will unescrow and send back the tokens back to sender +// if the sending chain was the source chain. Otherwise, the sent tokens +// were burnt in the original send so new tokens are minted and sent to +// the sending address. +func (k Keeper) refundPacketToken(ctx sdk.Context, packet channeltypes.Packet, data types.NonFungibleTokenPacketData) error { + sender, err := sdk.AccAddressFromBech32(data.Sender) + if err != nil { + return err + } + + classTrace := types.ParseClassTrace(data.ClassId) + voucherClassID := classTrace.IBCClassID() + + isAwayFromOrigin := types.IsAwayFromOrigin(packet.GetSourcePort(), + packet.GetSourceChannel(), data.ClassId) + + escrowAddress := types.GetEscrowAddress(packet.GetSourcePort(), packet.GetSourceChannel()) + + if isAwayFromOrigin { + for _, tokenID := range data.TokenIds { + if err := k.nftKeeper.TransferOwner(ctx, voucherClassID, tokenID, escrowAddress, sender); err != nil { + return err + } + } + } else { + for i, tokenID := range data.TokenIds { + if err := k.nftKeeper.MintNFT(ctx, voucherClassID, tokenID, "", data.TokenUris[i], "", escrowAddress, sender); err != nil { + return err + } + } + } + + return nil +} + +// createOutgoingPacket will escrow the tokens to escrow account +// if the token was away from origin chain . 
Otherwise, the sent tokens +// were burnt in the sending chain and will unescrow the token to receiver +// in the destination chain +func (k Keeper) createOutgoingPacket(ctx sdk.Context, + sourcePort, + sourceChannel, + destinationPort, + destinationChannel, + classID string, + tokenIDs []string, + sender sdk.AccAddress, + receiver string, + sequence uint64, + timeoutHeight clienttypes.Height, + timeoutTimestamp uint64, +) (channeltypes.Packet, error) { + denom, err := k.nftKeeper.GetDenom(ctx, classID) + if err != nil { + return channeltypes.Packet{}, err + } + + var ( + // NOTE: class and hex hash correctness checked during msg.ValidateBasic + fullClassPath = classID + tokenURIs = []string{} + ) + + // deconstruct the token denomination into the denomination trace info + // to determine if the sender is the source chain + if strings.HasPrefix(classID, "ibc/") { + fullClassPath, err = k.ClassPathFromHash(ctx, classID) + if err != nil { + return channeltypes.Packet{}, err + } + } + + isAwayFromOrigin := types.IsAwayFromOrigin(sourcePort, + sourceChannel, fullClassPath) + + for _, tokenID := range tokenIDs { + nft, err := k.nftKeeper.GetNFT(ctx, classID, tokenID) + if err != nil { + return channeltypes.Packet{}, err + } + tokenURIs = append(tokenURIs, nft.GetURI()) + + owner := nft.GetOwner() + if !sender.Equals(owner) { + return channeltypes.Packet{}, sdkerrors.Wrap(sdkerrors.ErrUnauthorized, "not token owner") + } + + if isAwayFromOrigin { + // create the escrow address for the tokens + escrowAddress := types.GetEscrowAddress(sourcePort, sourceChannel) + if err := k.nftKeeper.TransferOwner(ctx, classID, tokenID, sender, escrowAddress); err != nil { + return channeltypes.Packet{}, err + } + } else { + if err := k.nftKeeper.BurnNFTUnverified(ctx, classID, tokenID, sender); err != nil { + return channeltypes.Packet{}, err + } + } + } + + packetData := types.NewNonFungibleTokenPacketData( + fullClassPath, denom.Uri, tokenIDs, tokenURIs, sender.String(), receiver, + ) + + return channeltypes.NewPacket( + packetData.GetBytes(), + sequence, + sourcePort, + sourceChannel, + destinationPort, + destinationChannel, + timeoutHeight, + timeoutTimestamp, + ), nil +} + +// processReceivedPacket will mint the tokens to receiver account +// if the token was away from origin chain . 
Otherwise, the sent tokens +// were burnt in the sending chain and will unescrow the token to receiver +// in the destination chain +func (k Keeper) processReceivedPacket(ctx sdk.Context, packet channeltypes.Packet, + data types.NonFungibleTokenPacketData, +) error { + receiver, err := sdk.AccAddressFromBech32(data.Receiver) + if err != nil { + return err + } + + isAwayFromOrigin := types.IsAwayFromOrigin(packet.GetSourcePort(), packet.GetSourceChannel(), data.ClassId) + + // create the escrow address for creating denom and minting nft + escrowAddress := types.GetEscrowAddress(packet.GetDestPort(), packet.GetDestChannel()) + + if isAwayFromOrigin { + // since SendPacket did not prefix the classID, we must prefix classID here + classPrefix := types.GetClassPrefix(packet.GetDestPort(), packet.GetDestChannel()) + // NOTE: sourcePrefix contains the trailing "/" + prefixedClassID := classPrefix + data.ClassId + + // construct the class trace from the full raw classID + classTrace := types.ParseClassTrace(prefixedClassID) + if !k.HasClassTrace(ctx, classTrace.Hash()) { + k.SetClassTrace(ctx, classTrace) + } + + voucherClassID := classTrace.IBCClassID() + + if !k.nftKeeper.HasDenomID(ctx, voucherClassID) { + if err := k.nftKeeper.IssueDenom(ctx, voucherClassID, "", "", data.ClassUri, escrowAddress); err != nil { + return err + } + } + + ctx.EventManager().EmitEvent( + sdk.NewEvent( + types.EventTypeClassTrace, + sdk.NewAttribute(types.AttributeKeyTraceHash, classTrace.Hash().String()), + sdk.NewAttribute(types.AttributeKeyClassID, voucherClassID), + ), + ) + + for i, tokenID := range data.TokenIds { + if err := k.nftKeeper.MintNFT(ctx, voucherClassID, tokenID, "", data.TokenUris[i], "", escrowAddress, receiver); err != nil { + return err + } + } + } else { + // If the token moves in the direction of back to origin, + // we need to unescrow the token and transfer it to the receiver + + // we should remove the prefix. For example: + // p6/c6/p4/c4/p2/c2/nftClass -> p4/c4/p2/c2/nftClass + unprefixedClassID := types.RemoveClassPrefix(packet.GetSourcePort(), + packet.GetSourceChannel(), data.ClassId) + + voucherClassID := types.ParseClassTrace(unprefixedClassID).IBCClassID() + for _, tokenID := range data.TokenIds { + if err := k.nftKeeper.TransferOwner(ctx, + voucherClassID, tokenID, escrowAddress, receiver); err != nil { + return err + } + } + } + + return nil +} diff --git a/x/nft-transfer/keeper/relay.go b/x/nft-transfer/keeper/relay.go new file mode 100644 index 000000000..496d96790 --- /dev/null +++ b/x/nft-transfer/keeper/relay.go @@ -0,0 +1,154 @@ +package keeper + +import ( + "github.com/armon/go-metrics" + "github.com/cosmos/cosmos-sdk/telemetry" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + clienttypes "github.com/cosmos/ibc-go/v5/modules/core/02-client/types" + channeltypes "github.com/cosmos/ibc-go/v5/modules/core/04-channel/types" + host "github.com/cosmos/ibc-go/v5/modules/core/24-host" + coretypes "github.com/cosmos/ibc-go/v5/modules/core/types" + "github.com/crypto-org-chain/chain-main/v4/x/nft-transfer/types" +) + +// SendTransfer handles nft-transfer sending logic. +// A sending chain may be acting as a source or sink zone. +// +// when a chain is sending tokens across a port and channel which are +// not equal to the last prefixed port and channel pair, it is acting as a source zone. 
+// when tokens are sent from a source zone, the destination port and +// channel will be prefixed onto the classId (once the tokens are received) +// adding another hop to the tokens record. +// +// when a chain is sending tokens across a port and channel which are +// equal to the last prefixed port and channel pair, it is acting as a sink zone. +// when tokens are sent from a sink zone, the last prefixed port and channel +// pair on the classId is removed (once the tokens are received), undoing the last hop in the tokens record. +// +// For example, assume these steps of transfer occur: +// A -> B -> C -> A -> C -> B -> A +// +// | sender chain | receiver chain | +// | :-----: | -------------------------: | :------------: | :------------: | -------------------------: | :-----: | +// | chain | classID | (port,channel) | (port,channel) | classID | chain | +// | A | nftClass | (p1,c1) | (p2,c2) | p2/c2/nftClass | B | +// | B | p2/c2/nftClass | (p3,c3) | (p4,c4) | p4/c4/p2/c2/nftClass | C | +// | C | p4/c4/p2/c2/nftClass | (p5,c5) | (p6,c6) | p6/c6/p4/c4/p2/c2/nftClass | A | +// | A | p6/c6/p4/c4/p2/c2/nftClass | (p6,c6) | (p5,c5) | p4/c4/p2/c2/nftClass | C | +// | C | p4/c4/p2/c2/nftClass | (p4,c4) | (p3,c3) | p2/c2/nftClass | B | +// | B | p2/c2/nftClass | (p2,c2) | (p1,c1) | nftClass | A | +// +func (k Keeper) SendTransfer( + ctx sdk.Context, + sourcePort, + sourceChannel, + classID string, + tokenIDs []string, + sender sdk.AccAddress, + receiver string, + timeoutHeight clienttypes.Height, + timeoutTimestamp uint64, +) error { + sourceChannelEnd, found := k.channelKeeper.GetChannel(ctx, sourcePort, sourceChannel) + if !found { + return sdkerrors.Wrapf(channeltypes.ErrChannelNotFound, "port ID (%s) channel ID (%s)", sourcePort, sourceChannel) + } + + destinationPort := sourceChannelEnd.GetCounterparty().GetPortID() + destinationChannel := sourceChannelEnd.GetCounterparty().GetChannelID() + + // get the next sequence + sequence, found := k.channelKeeper.GetNextSequenceSend(ctx, sourcePort, sourceChannel) + if !found { + return sdkerrors.Wrapf( + channeltypes.ErrSequenceSendNotFound, + "source port: %s, source channel: %s", sourcePort, sourceChannel, + ) + } + + channelCap, ok := k.scopedKeeper.GetCapability(ctx, host.ChannelCapabilityPath(sourcePort, sourceChannel)) + if !ok { + return sdkerrors.Wrap(channeltypes.ErrChannelCapabilityNotFound, "module does not own channel capability") + } + + // See spec for this logic: https://github.com/cosmos/ibc/blob/master/spec/app/ics-721-nft-transfer/README.md#packet-relay + packet, err := k.createOutgoingPacket(ctx, + sourcePort, + sourceChannel, + destinationPort, + destinationChannel, + classID, + tokenIDs, + sender, + receiver, + sequence, + timeoutHeight, + timeoutTimestamp, + ) + if err != nil { + return err + } + + if err := k.ics4Wrapper.SendPacket(ctx, channelCap, packet); err != nil { + return err + } + + defer func() { + labels := []metrics.Label{ + telemetry.NewLabel(coretypes.LabelDestinationPort, destinationPort), + telemetry.NewLabel(coretypes.LabelDestinationChannel, destinationChannel), + } + + telemetry.SetGaugeWithLabels( + []string{"tx", "msg", "ibc", "nft-transfer"}, + float32(len(tokenIDs)), + []metrics.Label{telemetry.NewLabel("class_id", classID)}, + ) + + telemetry.IncrCounterWithLabels( + []string{"ibc", types.ModuleName, "send"}, + 1, + labels, + ) + }() + return nil +} + +// OnRecvPacket processes a cross chain fungible token transfer. 
If the +// sender chain is the source of minted tokens then vouchers will be minted +// and sent to the receiving address. Otherwise if the sender chain is sending +// back tokens this chain originally transferred to it, the tokens are +// unescrowed and sent to the receiving address. +func (k Keeper) OnRecvPacket(ctx sdk.Context, packet channeltypes.Packet, + data types.NonFungibleTokenPacketData, +) error { + // validate packet data upon receiving + if err := data.ValidateBasic(); err != nil { + return err + } + + // See spec for this logic: https://github.com/cosmos/ibc/blob/master/spec/app/ics-721-nft-transfer/README.md#packet-relay + return k.processReceivedPacket(ctx, packet, data) +} + +// OnAcknowledgementPacket responds to the the success or failure of a packet +// acknowledgement written on the receiving chain. If the acknowledgement +// was a success then nothing occurs. If the acknowledgement failed, then +// the sender is refunded their tokens using the refundPacketToken function. +func (k Keeper) OnAcknowledgementPacket(ctx sdk.Context, packet channeltypes.Packet, data types.NonFungibleTokenPacketData, ack channeltypes.Acknowledgement) error { + switch ack.Response.(type) { + case *channeltypes.Acknowledgement_Error: + return k.refundPacketToken(ctx, packet, data) + default: + // the acknowledgement succeeded on the receiving chain so nothing + // needs to be executed and no error needs to be returned + return nil + } +} + +// OnTimeoutPacket refunds the sender since the original packet sent was +// never received and has been timed out. +func (k Keeper) OnTimeoutPacket(ctx sdk.Context, packet channeltypes.Packet, data types.NonFungibleTokenPacketData) error { + return k.refundPacketToken(ctx, packet, data) +} diff --git a/x/nft-transfer/keeper/trace.go b/x/nft-transfer/keeper/trace.go new file mode 100644 index 000000000..eeb1d0b07 --- /dev/null +++ b/x/nft-transfer/keeper/trace.go @@ -0,0 +1,102 @@ +package keeper + +import ( + "github.com/cosmos/cosmos-sdk/store/prefix" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + "github.com/crypto-org-chain/chain-main/v4/x/nft-transfer/types" + tmbytes "github.com/tendermint/tendermint/libs/bytes" +) + +// GetClassTrace retrieves the full identifiers trace and base classId from the store. +func (k Keeper) GetClassTrace(ctx sdk.Context, classTraceHash tmbytes.HexBytes) (types.ClassTrace, bool) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.ClassTraceKey) + bz := store.Get(classTraceHash) + if bz == nil { + return types.ClassTrace{}, false + } + + denomTrace := k.MustUnmarshalClassTrace(bz) + return denomTrace, true +} + +// GetAllClassTraces returns the trace information for all the class. +func (k Keeper) GetAllClassTraces(ctx sdk.Context) types.Traces { + traces := types.Traces{} + k.IterateClassTraces(ctx, func(classTrace types.ClassTrace) bool { + traces = append(traces, classTrace) + return false + }) + + return traces.Sort() +} + +// IterateClassTraces iterates over the class traces in the store +// and performs a callback function. 
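+//
+// A minimal illustrative sketch of a caller (not part of this change): the
+// callback returns false to keep iterating and true to stop early.
+//
+//	count := 0
+//	k.IterateClassTraces(ctx, func(trace types.ClassTrace) bool {
+//		count++
+//		return false // visit every stored class trace
+//	})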
+func (k Keeper) IterateClassTraces(ctx sdk.Context, cb func(denomTrace types.ClassTrace) bool) { + store := ctx.KVStore(k.storeKey) + iterator := sdk.KVStorePrefixIterator(store, types.ClassTraceKey) + + defer iterator.Close() + for ; iterator.Valid(); iterator.Next() { + classTrace := k.MustUnmarshalClassTrace(iterator.Value()) + if cb(classTrace) { + break + } + } +} + +// ClassPathFromHash returns the full class path prefix from an ibc classId with a hash +// component. +func (k Keeper) ClassPathFromHash(ctx sdk.Context, classID string) (string, error) { + // trim the class prefix, by default "ibc/" + hexHash := classID[len(types.ClassPrefix+"/"):] + + hash, err := types.ParseHexHash(hexHash) + if err != nil { + return "", sdkerrors.Wrap(types.ErrInvalidClassID, err.Error()) + } + + classTrace, found := k.GetClassTrace(ctx, hash) + if !found { + return "", sdkerrors.Wrap(types.ErrTraceNotFound, hexHash) + } + return classTrace.GetFullClassPath(), nil +} + +// HasClassTrace checks if a the key with the given denomination trace hash exists on the store. +func (k Keeper) HasClassTrace(ctx sdk.Context, denomTraceHash tmbytes.HexBytes) bool { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.ClassTraceKey) + return store.Has(denomTraceHash) +} + +// SetClassTrace sets a new {trace hash -> class trace} pair to the store. +func (k Keeper) SetClassTrace(ctx sdk.Context, denomTrace types.ClassTrace) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.ClassTraceKey) + bz := k.MustMarshalClassTrace(denomTrace) + store.Set(denomTrace.Hash(), bz) +} + +// MustUnmarshalClassTrace attempts to decode and return an ClassTrace object from +// raw encoded bytes. It panics on error. +func (k Keeper) MustUnmarshalClassTrace(bz []byte) types.ClassTrace { + var classTrace types.ClassTrace + k.cdc.MustUnmarshal(bz, &classTrace) + return classTrace +} + +// MustMarshalClassTrace attempts to decode and return an ClassTrace object from +// raw encoded bytes. It panics on error. +func (k Keeper) MustMarshalClassTrace(classTrace types.ClassTrace) []byte { + return k.cdc.MustMarshal(&classTrace) +} + +// UnmarshalClassTrace attempts to decode and return an ClassTrace object from +// raw encoded bytes. 
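+// It is the non-panicking counterpart of MustUnmarshalClassTrace and the safer
+// choice when the input bytes may not come from this module's own store.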
+func (k Keeper) UnmarshalClassTrace(bz []byte) (types.ClassTrace, error) { + var classTrace types.ClassTrace + if err := k.cdc.Unmarshal(bz, &classTrace); err != nil { + return types.ClassTrace{}, err + } + return classTrace, nil +} diff --git a/x/nft-transfer/module.go b/x/nft-transfer/module.go new file mode 100644 index 000000000..7b4b6594a --- /dev/null +++ b/x/nft-transfer/module.go @@ -0,0 +1,173 @@ +package nfttransfer + +import ( + "context" + "encoding/json" + "fmt" + "math/rand" + + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/spf13/cobra" + + abci "github.com/tendermint/tendermint/abci/types" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/codec" + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/module" + simtypes "github.com/cosmos/cosmos-sdk/types/simulation" + + porttypes "github.com/cosmos/ibc-go/v5/modules/core/05-port/types" + "github.com/crypto-org-chain/chain-main/v4/x/nft-transfer/client/cli" + "github.com/crypto-org-chain/chain-main/v4/x/nft-transfer/keeper" + "github.com/crypto-org-chain/chain-main/v4/x/nft-transfer/simulation" + "github.com/crypto-org-chain/chain-main/v4/x/nft-transfer/types" +) + +var ( + _ module.AppModule = AppModule{} + _ module.AppModuleBasic = AppModuleBasic{} + _ porttypes.IBCModule = IBCModule{} +) + +// AppModuleBasic is the IBC nft-transfer AppModuleBasic +type AppModuleBasic struct{} + +// Name implements AppModuleBasic interface +func (AppModuleBasic) Name() string { + return types.ModuleName +} + +// RegisterLegacyAminoCodec implements AppModuleBasic interface +func (AppModuleBasic) RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) { + types.RegisterLegacyAminoCodec(cdc) +} + +// RegisterInterfaces registers module concrete types into protobuf Any. +func (AppModuleBasic) RegisterInterfaces(registry codectypes.InterfaceRegistry) { + types.RegisterInterfaces(registry) +} + +// DefaultGenesis returns default genesis state as raw bytes for the ibc +// nft-transfer module. +func (AppModuleBasic) DefaultGenesis(cdc codec.JSONCodec) json.RawMessage { + return cdc.MustMarshalJSON(types.DefaultGenesisState()) +} + +// ValidateGenesis performs genesis state validation for the ibc nft-transfer module. +func (AppModuleBasic) ValidateGenesis(cdc codec.JSONCodec, config client.TxEncodingConfig, bz json.RawMessage) error { + var gs types.GenesisState + if err := cdc.UnmarshalJSON(bz, &gs); err != nil { + return fmt.Errorf("failed to unmarshal %s genesis state: %w", types.ModuleName, err) + } + + return gs.Validate() +} + +// RegisterGRPCGatewayRoutes registers the gRPC Gateway routes for ics29 fee module. 
+func (AppModuleBasic) RegisterGRPCGatewayRoutes(clientCtx client.Context, mux *runtime.ServeMux) { + types.RegisterQueryHandlerClient(context.Background(), mux, types.NewQueryClient(clientCtx)) +} + +// GetTxCmd implements AppModuleBasic interface +func (AppModuleBasic) GetTxCmd() *cobra.Command { + return cli.NewTxCmd() +} + +// GetQueryCmd implements AppModuleBasic interface +func (AppModuleBasic) GetQueryCmd() *cobra.Command { + return cli.GetQueryCmd() +} + +// AppModule represents the AppModule for this module +type AppModule struct { + AppModuleBasic + keeper keeper.Keeper +} + +// NewAppModule creates a new nft-transfer module +func NewAppModule(k keeper.Keeper) AppModule { + return AppModule{ + keeper: k, + } +} + +// RegisterInvariants implements the AppModule interface +func (AppModule) RegisterInvariants(ir sdk.InvariantRegistry) {} + +// Route implements the AppModule interface +func (am AppModule) Route() sdk.Route { + return sdk.Route{} +} + +// QuerierRoute implements the AppModule interface +func (AppModule) QuerierRoute() string { + return types.QuerierRoute +} + +// LegacyQuerierHandler implements the AppModule interface +func (am AppModule) LegacyQuerierHandler(*codec.LegacyAmino) sdk.Querier { + return nil +} + +// RegisterServices registers module services. +func (am AppModule) RegisterServices(cfg module.Configurator) { + types.RegisterMsgServer(cfg.MsgServer(), am.keeper) + types.RegisterQueryServer(cfg.QueryServer(), am.keeper) +} + +// InitGenesis performs genesis initialization for the ibc nft-transfer module. It returns +// no validator updates. +func (am AppModule) InitGenesis(ctx sdk.Context, cdc codec.JSONCodec, data json.RawMessage) []abci.ValidatorUpdate { + var genesisState types.GenesisState + cdc.MustUnmarshalJSON(data, &genesisState) + am.keeper.InitGenesis(ctx, genesisState) + return []abci.ValidatorUpdate{} +} + +// ExportGenesis returns the exported genesis state as raw bytes for the ibc nft-transfer +// module. +func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec) json.RawMessage { + gs := am.keeper.ExportGenesis(ctx) + return cdc.MustMarshalJSON(gs) +} + +// ConsensusVersion implements AppModule/ConsensusVersion. +func (AppModule) ConsensusVersion() uint64 { return 1 } + +// BeginBlock implements the AppModule interface +func (am AppModule) BeginBlock(ctx sdk.Context, req abci.RequestBeginBlock) { +} + +// EndBlock implements the AppModule interface +func (am AppModule) EndBlock(ctx sdk.Context, req abci.RequestEndBlock) []abci.ValidatorUpdate { + return []abci.ValidatorUpdate{} +} + +// AppModuleSimulation functions + +// GenerateGenesisState creates a randomized GenState of the nft-transfer module. +func (AppModule) GenerateGenesisState(simState *module.SimulationState) { + simulation.RandomizedGenState(simState) +} + +// ProposalContents doesn't return any content functions for governance proposals. +func (AppModule) ProposalContents(_ module.SimulationState) []simtypes.WeightedProposalContent { + return nil +} + +// RandomizedParams creates randomized ibc nft-transfer param changes for the simulator. +func (AppModule) RandomizedParams(r *rand.Rand) []simtypes.ParamChange { + return nil +} + +// RegisterStoreDecoder registers a decoder for nft-transfer module's types +func (am AppModule) RegisterStoreDecoder(sdr sdk.StoreDecoderRegistry) { + sdr[types.StoreKey] = simulation.NewDecodeStore(am.keeper) +} + +// WeightedOperations returns the all the nft-transfer module operations with their respective weights. 
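+// The module registers no simulation operations, so the result is always nil.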
+func (am AppModule) WeightedOperations(_ module.SimulationState) []simtypes.WeightedOperation { + return nil +} diff --git a/x/nft-transfer/simulation/decoder.go b/x/nft-transfer/simulation/decoder.go new file mode 100644 index 000000000..d212f38c3 --- /dev/null +++ b/x/nft-transfer/simulation/decoder.go @@ -0,0 +1,34 @@ +package simulation + +import ( + "bytes" + "fmt" + + "github.com/cosmos/cosmos-sdk/types/kv" + + "github.com/crypto-org-chain/chain-main/v4/x/nft-transfer/types" +) + +// TransferUnmarshaler defines the expected encoding store functions. +type TransferUnmarshaler interface { + MustUnmarshalClassTrace([]byte) types.ClassTrace +} + +// NewDecodeStore returns a decoder function closure that unmarshals the KVPair's +// Value to the corresponding ClassTrace type. +func NewDecodeStore(cdc TransferUnmarshaler) func(kvA, kvB kv.Pair) string { + return func(kvA, kvB kv.Pair) string { + switch { + case bytes.Equal(kvA.Key[:1], types.PortKey): + return fmt.Sprintf("Port A: %s\nPort B: %s", string(kvA.Value), string(kvB.Value)) + + case bytes.Equal(kvA.Key[:1], types.ClassTraceKey): + classTraceA := cdc.MustUnmarshalClassTrace(kvA.Value) + classTraceB := cdc.MustUnmarshalClassTrace(kvB.Value) + return fmt.Sprintf("ClassTrace A: %s\nClassTrace B: %s", classTraceA.IBCClassID(), classTraceB.IBCClassID()) + + default: + panic(fmt.Sprintf("invalid %s key prefix %X", types.ModuleName, kvA.Key[:1])) + } + } +} diff --git a/x/nft-transfer/simulation/genesis.go b/x/nft-transfer/simulation/genesis.go new file mode 100644 index 000000000..ed95cf1fe --- /dev/null +++ b/x/nft-transfer/simulation/genesis.go @@ -0,0 +1,42 @@ +package simulation + +import ( + "encoding/json" + "fmt" + "math/rand" + "strings" + + "github.com/cosmos/cosmos-sdk/types/module" + simtypes "github.com/cosmos/cosmos-sdk/types/simulation" + + "github.com/crypto-org-chain/chain-main/v4/x/nft-transfer/types" +) + +// Simulation parameter constants +const port = "port_id" + +// RadomEnabled randomized send or receive enabled param with 75% prob of being true. +func RadomEnabled(r *rand.Rand) bool { + return r.Int63n(101) <= 75 +} + +// RandomizedGenState generates a random GenesisState for nft-transfer. 
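+// Only the port identifier is randomized; the generated state starts with an
+// empty list of class traces.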
+func RandomizedGenState(simState *module.SimulationState) { + var portID string + simState.AppParams.GetOrGenerate( + simState.Cdc, port, &portID, simState.Rand, + func(r *rand.Rand) { portID = strings.ToLower(simtypes.RandStringOfLength(r, 20)) }, + ) + + transferGenesis := types.GenesisState{ + PortId: portID, + Traces: types.Traces{}, + } + + bz, err := json.MarshalIndent(&transferGenesis, "", " ") + if err != nil { + panic(err) + } + fmt.Printf("Selected randomly generated %s parameters:\n%s\n", types.ModuleName, bz) + simState.GenState[types.ModuleName] = simState.Cdc.MustMarshalJSON(&transferGenesis) +} diff --git a/x/nft-transfer/simulation/genesis_test.go b/x/nft-transfer/simulation/genesis_test.go new file mode 100644 index 000000000..ecd468c97 --- /dev/null +++ b/x/nft-transfer/simulation/genesis_test.go @@ -0,0 +1,73 @@ +package simulation_test + +import ( + "encoding/json" + "math/rand" + "testing" + + sdkmath "cosmossdk.io/math" + "github.com/cosmos/cosmos-sdk/codec" + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + "github.com/cosmos/cosmos-sdk/types/module" + simtypes "github.com/cosmos/cosmos-sdk/types/simulation" + "github.com/crypto-org-chain/chain-main/v4/x/nft-transfer/simulation" + "github.com/crypto-org-chain/chain-main/v4/x/nft-transfer/types" + "github.com/stretchr/testify/require" +) + +// TestRandomizedGenState tests the normal scenario of applying RandomizedGenState. +// Abonormal scenarios are not tested here. +func TestRandomizedGenState(t *testing.T) { + interfaceRegistry := codectypes.NewInterfaceRegistry() + cdc := codec.NewProtoCodec(interfaceRegistry) + + s := rand.NewSource(1) + // nolint: gosec + r := rand.New(s) + + simState := module.SimulationState{ + AppParams: make(simtypes.AppParams), + Cdc: cdc, + Rand: r, + NumBonded: 3, + Accounts: simtypes.RandomAccounts(r, 3), + InitialStake: sdkmath.NewInt(1000), + GenState: make(map[string]json.RawMessage), + } + + simulation.RandomizedGenState(&simState) + + var ibcTransferGenesis types.GenesisState + simState.Cdc.MustUnmarshalJSON(simState.GenState[types.ModuleName], &ibcTransferGenesis) + + require.Equal(t, "euzxpfgkqegqiqwixnku", ibcTransferGenesis.PortId) + require.Len(t, ibcTransferGenesis.Traces, 0) +} + +// TestRandomizedGenState tests abnormal scenarios of applying RandomizedGenState. 
+func TestRandomizedGenState1(t *testing.T) { + interfaceRegistry := codectypes.NewInterfaceRegistry() + cdc := codec.NewProtoCodec(interfaceRegistry) + + s := rand.NewSource(1) + // nolint: gosec + r := rand.New(s) + // all these tests will panic + tests := []struct { + simState module.SimulationState + panicMsg string + }{ + { // panic => reason: incomplete initialization of the simState + module.SimulationState{}, "invalid memory address or nil pointer dereference"}, + { // panic => reason: incomplete initialization of the simState + module.SimulationState{ + AppParams: make(simtypes.AppParams), + Cdc: cdc, + Rand: r, + }, "assignment to entry in nil map"}, + } + + for _, tt := range tests { + require.Panicsf(t, func() { simulation.RandomizedGenState(&tt.simState) }, tt.panicMsg) + } +} diff --git a/x/nft-transfer/types/ack.go b/x/nft-transfer/types/ack.go new file mode 100644 index 000000000..a7ee55691 --- /dev/null +++ b/x/nft-transfer/types/ack.go @@ -0,0 +1,27 @@ +package types + +import ( + "fmt" + + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + + channeltypes "github.com/cosmos/ibc-go/v5/modules/core/04-channel/types" +) + +const ( + // ackErrorString defines a string constant included in error acknowledgements + // NOTE: Changing this const is state machine breaking as acknowledgements are written into state + ackErrorString = "error handling packet on destination chain: see events for details" +) + +// NewErrorAcknowledgement returns a deterministic error string which may be used in +// the packet acknowledgement. +func NewErrorAcknowledgement(err error) channeltypes.Acknowledgement { + // the ABCI code is included in the abcitypes.ResponseDeliverTx hash + // constructed in Tendermint and is therefore deterministic + _, code, _ := sdkerrors.ABCIInfo(err, false) // discard non-deterministic codespace and log values + + errorString := fmt.Errorf("ABCI code: %d: %s", code, ackErrorString) + + return channeltypes.NewErrorAcknowledgement(errorString) +} diff --git a/x/nft-transfer/types/codec.go b/x/nft-transfer/types/codec.go new file mode 100644 index 000000000..1202fbadc --- /dev/null +++ b/x/nft-transfer/types/codec.go @@ -0,0 +1,36 @@ +package types + +import ( + "github.com/cosmos/cosmos-sdk/codec" + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/msgservice" +) + +var ( + amino = codec.NewLegacyAmino() + + // ModuleCdc references the global x/ibc-transfer module codec. Note, the codec + // should ONLY be used in certain instances of tests and for JSON encoding. + // + // The actual codec used for serialization should be provided to x/ibc transfer and + // defined at the application level. + ModuleCdc = codec.NewProtoCodec(codectypes.NewInterfaceRegistry()) + + // AminoCdc is a amino codec created to support amino json compatible msgs. + AminoCdc = codec.NewAminoCodec(amino) +) + +// RegisterLegacyAminoCodec registers the necessary nft-transfer interfaces and concrete types +// on the provided LegacyAmino codec. These types are used for Amino JSON serialization. +func RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) { + cdc.RegisterConcrete(&MsgTransfer{}, "cosmos-sdk/MsgTransferNFT", nil) +} + +// RegisterInterfaces register the ibc nft-transfer module interfaces to protobuf +// Any. 
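+// Registering MsgTransfer lets it be packed into and resolved from protobuf
+// Any values and routed through the module's Msg service.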
+func RegisterInterfaces(registry codectypes.InterfaceRegistry) { + registry.RegisterImplementations((*sdk.Msg)(nil), &MsgTransfer{}) + + msgservice.RegisterMsgServiceDesc(registry, &_Msg_serviceDesc) +} diff --git a/x/nft-transfer/types/errors.go b/x/nft-transfer/types/errors.go new file mode 100644 index 000000000..6633dc7cb --- /dev/null +++ b/x/nft-transfer/types/errors.go @@ -0,0 +1,14 @@ +package types + +import ( + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +var ( + ErrInvalidClassID = sdkerrors.Register(ModuleName, 1501, "invalid class id") + ErrInvalidTokenID = sdkerrors.Register(ModuleName, 1502, "invalid token id") + ErrInvalidPacket = sdkerrors.Register(ModuleName, 1503, "invalid packet") + ErrTraceNotFound = sdkerrors.Register(ModuleName, 1504, "class trace not found") + ErrInvalidVersion = sdkerrors.Register(ModuleName, 1505, "invalid ICS721 version") + ErrMaxTransferChannels = sdkerrors.Register(ModuleName, 1506, "max nft-transfer channels") +) diff --git a/x/nft-transfer/types/events.go b/x/nft-transfer/types/events.go new file mode 100644 index 000000000..6b9bf27df --- /dev/null +++ b/x/nft-transfer/types/events.go @@ -0,0 +1,17 @@ +package types + +// IBC transfer events +const ( + EventTypeTimeout = "timeout" + EventTypePacket = "non_fungible_token_packet" + EventTypeTransfer = "ibc_nft_transfer" + EventTypeChannelClose = "channel_closed" + EventTypeClassTrace = "class_trace" + + AttributeKeyReceiver = "receiver" + AttributeKeyClassID = "classID" + AttributeKeyTokenIDs = "tokenIDs" + AttributeKeyAckSuccess = "success" + AttributeKeyAckError = "error" + AttributeKeyTraceHash = "trace_hash" +) diff --git a/x/nft-transfer/types/expected_keepers.go b/x/nft-transfer/types/expected_keepers.go new file mode 100644 index 000000000..78dddeb4f --- /dev/null +++ b/x/nft-transfer/types/expected_keepers.go @@ -0,0 +1,51 @@ +package types + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/x/auth/types" + capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types" + channeltypes "github.com/cosmos/ibc-go/v5/modules/core/04-channel/types" + ibcexported "github.com/cosmos/ibc-go/v5/modules/core/exported" + nftexported "github.com/crypto-org-chain/chain-main/v4/x/nft/exported" + nfttypes "github.com/crypto-org-chain/chain-main/v4/x/nft/types" +) + +// ICS4Wrapper defines the expected ICS4Wrapper for middleware +type ICS4Wrapper interface { + SendPacket(ctx sdk.Context, channelCap *capabilitytypes.Capability, packet ibcexported.PacketI) error +} + +// ChannelKeeper defines the expected IBC channel keeper +type ChannelKeeper interface { + GetChannel(ctx sdk.Context, srcPort, srcChan string) (channel channeltypes.Channel, found bool) + GetNextSequenceSend(ctx sdk.Context, portID, channelID string) (uint64, bool) +} + +// NFTKeeper defines the expected nft keeper +type NFTKeeper interface { + HasDenomID(ctx sdk.Context, id string) bool + GetDenom(ctx sdk.Context, id string) (denom nfttypes.Denom, err error) + IssueDenom(ctx sdk.Context, id, name, schema, uri string, creator sdk.AccAddress) error + + GetNFT(ctx sdk.Context, denomID, tokenID string) (nft nftexported.NFT, err error) + MintNFT( + ctx sdk.Context, denomID, tokenID, tokenNm, + tokenURI, tokenData string, sender, owner sdk.AccAddress, + ) error + BurnNFTUnverified(ctx sdk.Context, denomID, tokenID string, owner sdk.AccAddress) error + TransferOwner(ctx sdk.Context, denomID, tokenID string, srcOwner, dstOwner sdk.AccAddress) error +} + +// PortKeeper defines the expected IBC 
port keeper +type PortKeeper interface { + BindPort(ctx sdk.Context, portID string) *capabilitytypes.Capability +} + +// AccountKeeper defines the contract required for account APIs. +type AccountKeeper interface { + NewAccountWithAddress(ctx sdk.Context, addr sdk.AccAddress) types.AccountI + // Set an account in the store. + SetAccount(sdk.Context, types.AccountI) + HasAccount(ctx sdk.Context, addr sdk.AccAddress) bool + GetModuleAddress(name string) sdk.AccAddress +} diff --git a/x/nft-transfer/types/genesis.go b/x/nft-transfer/types/genesis.go new file mode 100644 index 000000000..399852c07 --- /dev/null +++ b/x/nft-transfer/types/genesis.go @@ -0,0 +1,30 @@ +package types + +import ( + host "github.com/cosmos/ibc-go/v5/modules/core/24-host" +) + +// NewGenesisState creates a new ibc nft-transfer GenesisState instance. +func NewGenesisState(portID string, traces Traces) *GenesisState { + return &GenesisState{ + PortId: portID, + Traces: traces, + } +} + +// DefaultGenesisState returns a GenesisState with "nft-transfer" as the default PortID. +func DefaultGenesisState() *GenesisState { + return &GenesisState{ + PortId: PortID, + Traces: Traces{}, + } +} + +// Validate performs basic genesis state validation returning an error upon any +// failure. +func (gs GenesisState) Validate() error { + if err := host.PortIdentifierValidator(gs.PortId); err != nil { + return err + } + return gs.Traces.Validate() +} diff --git a/x/nft-transfer/types/genesis.pb.go b/x/nft-transfer/types/genesis.pb.go new file mode 100644 index 000000000..705558269 --- /dev/null +++ b/x/nft-transfer/types/genesis.pb.go @@ -0,0 +1,384 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: nft_transfer/v1/genesis.proto + +package types + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// GenesisState defines the ibc-nft-transfer genesis state +type GenesisState struct { + PortId string `protobuf:"bytes,1,opt,name=port_id,json=portId,proto3" json:"port_id,omitempty"` + Traces Traces `protobuf:"bytes,2,rep,name=traces,proto3,castrepeated=Traces" json:"traces"` +} + +func (m *GenesisState) Reset() { *m = GenesisState{} } +func (m *GenesisState) String() string { return proto.CompactTextString(m) } +func (*GenesisState) ProtoMessage() {} +func (*GenesisState) Descriptor() ([]byte, []int) { + return fileDescriptor_674acb5cce894156, []int{0} +} +func (m *GenesisState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GenesisState) XXX_Merge(src proto.Message) { + xxx_messageInfo_GenesisState.Merge(m, src) +} +func (m *GenesisState) XXX_Size() int { + return m.Size() +} +func (m *GenesisState) XXX_DiscardUnknown() { + xxx_messageInfo_GenesisState.DiscardUnknown(m) +} + +var xxx_messageInfo_GenesisState proto.InternalMessageInfo + +func (m *GenesisState) GetPortId() string { + if m != nil { + return m.PortId + } + return "" +} + +func (m *GenesisState) GetTraces() Traces { + if m != nil { + return m.Traces + } + return nil +} + +func init() { + proto.RegisterType((*GenesisState)(nil), "chainmain.nft_transfer.v1.GenesisState") +} + +func init() { proto.RegisterFile("nft_transfer/v1/genesis.proto", fileDescriptor_674acb5cce894156) } + +var fileDescriptor_674acb5cce894156 = []byte{ + // 253 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xcd, 0x4b, 0x2b, 0x89, + 0x2f, 0x29, 0x4a, 0xcc, 0x2b, 0x4e, 0x4b, 0x2d, 0xd2, 0x2f, 0x33, 0xd4, 0x4f, 0x4f, 0xcd, 0x4b, + 0x2d, 0xce, 0x2c, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x92, 0x4c, 0xce, 0x48, 0xcc, 0xcc, + 0xcb, 0x4d, 0xcc, 0xcc, 0xd3, 0x43, 0x56, 0xa8, 0x57, 0x66, 0x28, 0x25, 0x8d, 0xae, 0xb3, 0xa4, + 0x28, 0x31, 0x39, 0x15, 0xa2, 0x4f, 0x4a, 0x24, 0x3d, 0x3f, 0x3d, 0x1f, 0xcc, 0xd4, 0x07, 0xb1, + 0x20, 0xa2, 0x4a, 0x65, 0x5c, 0x3c, 0xee, 0x10, 0xe3, 0x83, 0x4b, 0x12, 0x4b, 0x52, 0x85, 0xc4, + 0xb9, 0xd8, 0x0b, 0xf2, 0x8b, 0x4a, 0xe2, 0x33, 0x53, 0x24, 0x18, 0x15, 0x18, 0x35, 0x38, 0x83, + 0xd8, 0x40, 0x5c, 0xcf, 0x14, 0x21, 0x5f, 0x2e, 0x36, 0xb0, 0x69, 0xc5, 0x12, 0x4c, 0x0a, 0xcc, + 0x1a, 0xdc, 0x46, 0xaa, 0x7a, 0x38, 0xdd, 0xa1, 0xe7, 0x9c, 0x93, 0x58, 0x5c, 0x1c, 0x02, 0x52, + 0xed, 0xc4, 0x77, 0xe2, 0x9e, 0x3c, 0xc3, 0xaa, 0xfb, 0xf2, 0x6c, 0x60, 0x6e, 0x71, 0x10, 0xd4, + 0x10, 0xa7, 0xd0, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0x71, + 0xc2, 0x63, 0x39, 0x86, 0x0b, 0x8f, 0xe5, 0x18, 0x6e, 0x3c, 0x96, 0x63, 0x88, 0xb2, 0x4e, 0xcf, + 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0x2e, 0xaa, 0x2c, 0x28, 0xc9, 0xd7, + 0xcd, 0x2f, 0x4a, 0xd7, 0x05, 0xdb, 0xa6, 0x0f, 0x26, 0x75, 0x41, 0x96, 0xea, 0x57, 0xe8, 0xe7, + 0xa5, 0x95, 0xe8, 0xc2, 0x7d, 0x5b, 0x52, 0x59, 0x90, 0x5a, 0x9c, 0xc4, 0x06, 0xf6, 0x95, 0x31, + 0x20, 0x00, 0x00, 0xff, 0xff, 0xc1, 0x3d, 0xa9, 0x3e, 0x44, 0x01, 0x00, 0x00, +} + +func (m *GenesisState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Traces) > 0 { + for iNdEx := len(m.Traces) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Traces[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.PortId) > 0 { + i -= len(m.PortId) + copy(dAtA[i:], m.PortId) + i = encodeVarintGenesis(dAtA, i, uint64(len(m.PortId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { + offset -= sovGenesis(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *GenesisState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PortId) + if l > 0 { + n += 1 + l + sovGenesis(uint64(l)) + } + if len(m.Traces) > 0 { + for _, e := range m.Traces { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + return n +} + +func sovGenesis(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenesis(x uint64) (n int) { + return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *GenesisState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GenesisState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PortId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PortId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Traces", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Traces = append(m.Traces, ClassTrace{}) + if err := 
m.Traces[len(m.Traces)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenesis(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenesis + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenesis(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenesis + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenesis + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenesis + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/nft-transfer/types/genesis_test.go b/x/nft-transfer/types/genesis_test.go new file mode 100644 index 000000000..2e921a4ce --- /dev/null +++ b/x/nft-transfer/types/genesis_test.go @@ -0,0 +1,42 @@ +package types_test + +import ( + "testing" + + "github.com/crypto-org-chain/chain-main/v4/x/nft-transfer/types" +) + +func TestGenesisState_Validate(t *testing.T) { + tests := []struct { + name string + genState *types.GenesisState + wantErr bool + }{ + { + name: "default", + genState: types.DefaultGenesisState(), + wantErr: false, + }, + { + "valid genesis", + &types.GenesisState{ + PortId: "portidone", + }, + false, + }, + { + "invalid client", + &types.GenesisState{ + PortId: "(INVALIDPORT)", + }, + true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if err := tt.genState.Validate(); (err != nil) != tt.wantErr { + t.Errorf("GenesisState.Validate() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} diff --git a/x/nft-transfer/types/keys.go b/x/nft-transfer/types/keys.go new file mode 100644 index 000000000..6f770e32d --- /dev/null +++ b/x/nft-transfer/types/keys.go @@ -0,0 +1,56 @@ +package types + +import ( + "crypto/sha256" + fmt "fmt" + + sdk "github.com/cosmos/cosmos-sdk/types" +) + +const ( + // Module name defines IBC nft-transfer moduel name + ModuleName = "nft-transfer" + + // RouterKey is the message route for IBC nft-transfer + RouterKey = ModuleName + + // StoreKey is the store key string for IBC nft-transfer 
+ StoreKey = ModuleName + + // QuerierRoute is the querier route for IBC nft-transfer + QuerierRoute = ModuleName + + // Version defines the current version the IBC nft-transfer + // module supports + Version = "ics721-1" + + // PortID is the default port id that nft-transfer module binds to + PortID = "nft-transfer" + + // ClassPrefix is the prefix used for internal SDK NFT representation. + ClassPrefix = "ibc" +) + +var ( + // PortKey defines the key to store the port ID in store + PortKey = []byte{0x01} + + // ClassTraceKey defines the key to store the class trace info in store + ClassTraceKey = []byte{0x02} +) + +// GetEscrowAddress returns the escrow address for the specified channel. +// The escrow address follows the format as outlined in ADR 028: +// https://github.com/cosmos/cosmos-sdk/blob/master/docs/architecture/adr-028-public-key-addresses.md +func GetEscrowAddress(portID string, channelID string) sdk.AccAddress { + // a slash is used to create domain separation between port and channel identifiers to + // prevent address collisions between escrow addresses created for different channels + contents := fmt.Sprintf("%s/%s", portID, channelID) + + // ADR 028 AddressHash construction + preImage := []byte(Version) + preImage = append(preImage, 0) + preImage = append(preImage, contents...) + hash := sha256.Sum256(preImage) + return hash[:20] +} diff --git a/x/nft-transfer/types/msgs.go b/x/nft-transfer/types/msgs.go new file mode 100644 index 000000000..f562c0cc3 --- /dev/null +++ b/x/nft-transfer/types/msgs.go @@ -0,0 +1,96 @@ +package types + +import ( + "strings" + + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + + clienttypes "github.com/cosmos/ibc-go/v5/modules/core/02-client/types" + host "github.com/cosmos/ibc-go/v5/modules/core/24-host" +) + +// msg types +const ( + TypeMsgTransfer = "nft-transfer" +) + +// NewMsgTransfer creates a new MsgTransfer instance +//nolint:interfacer +func NewMsgTransfer( + sourcePort, sourceChannel string, + classID string, tokenIds []string, sender, receiver string, + timeoutHeight clienttypes.Height, timeoutTimestamp uint64, +) *MsgTransfer { + return &MsgTransfer{ + SourcePort: sourcePort, + SourceChannel: sourceChannel, + ClassId: classID, + TokenIds: tokenIds, + Sender: sender, + Receiver: receiver, + TimeoutHeight: timeoutHeight, + TimeoutTimestamp: timeoutTimestamp, + } +} + +// Route implements sdk.Msg +func (MsgTransfer) Route() string { + return RouterKey +} + +// Type implements sdk.Msg +func (MsgTransfer) Type() string { + return TypeMsgTransfer +} + +// ValidateBasic performs a basic check of the MsgTransfer fields. +// NOTE: timeout height or timestamp values can be 0 to disable the timeout. +// NOTE: The recipient addresses format is not validated as the format defined by +// the chain is not known to IBC. 
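+//
+// Illustrative only (mirrors the message tests; identifiers and addresses are
+// placeholders):
+//
+//	msg := NewMsgTransfer(
+//		"nft-transfer", "channel-1", // source port and channel
+//		"cryptoCat", []string{"kitty"}, // class ID and token IDs
+//		sender, receiver, // bech32 account addresses
+//		clienttypes.NewHeight(1, 1000), 0, // timeout height; timestamp disabled
+//	)
+//	err := msg.ValidateBasic()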
+func (msg MsgTransfer) ValidateBasic() error { + if err := host.PortIdentifierValidator(msg.SourcePort); err != nil { + return sdkerrors.Wrap(err, "invalid source port ID") + } + if err := host.ChannelIdentifierValidator(msg.SourceChannel); err != nil { + return sdkerrors.Wrap(err, "invalid source channel ID") + } + + if strings.TrimSpace(msg.ClassId) == "" { + return sdkerrors.Wrap(ErrInvalidClassID, "classId cannot be blank") + } + + if len(msg.TokenIds) == 0 { + return sdkerrors.Wrap(ErrInvalidTokenID, "tokenId cannot be blank") + } + + for _, tokenID := range msg.TokenIds { + if strings.TrimSpace(tokenID) == "" { + return sdkerrors.Wrap(ErrInvalidTokenID, "tokenId cannot be blank") + } + } + + // NOTE: sender format must be validated as it is required by the GetSigners function. + _, err := sdk.AccAddressFromBech32(msg.Sender) + if err != nil { + return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "string could not be parsed as address: %v", err) + } + if strings.TrimSpace(msg.Receiver) == "" { + return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "missing recipient address") + } + return nil +} + +// GetSignBytes implements sdk.Msg. +func (msg MsgTransfer) GetSignBytes() []byte { + return sdk.MustSortJSON(AminoCdc.MustMarshalJSON(&msg)) +} + +// GetSigners implements sdk.Msg +func (msg MsgTransfer) GetSigners() []sdk.AccAddress { + signer, err := sdk.AccAddressFromBech32(msg.Sender) + if err != nil { + panic(err) + } + return []sdk.AccAddress{signer} +} diff --git a/x/nft-transfer/types/msgs_test.go b/x/nft-transfer/types/msgs_test.go new file mode 100644 index 000000000..09b905032 --- /dev/null +++ b/x/nft-transfer/types/msgs_test.go @@ -0,0 +1,38 @@ +package types_test + +import ( + "testing" + + sdk "github.com/cosmos/cosmos-sdk/types" + clienttypes "github.com/cosmos/ibc-go/v5/modules/core/02-client/types" + "github.com/crypto-org-chain/chain-main/v4/x/nft-transfer/types" + "github.com/tendermint/tendermint/crypto/secp256k1" +) + +var ( + sender = sdk.AccAddress(secp256k1.GenPrivKey().PubKey().Address()).String() + receiver = sdk.AccAddress(secp256k1.GenPrivKey().PubKey().Address()).String() +) + +func TestMsgTransfer_ValidateBasic(t *testing.T) { + tests := []struct { + name string + msg *types.MsgTransfer + wantErr bool + }{ + {"valid msg", types.NewMsgTransfer("nft-transfer", "channel-1", "cryptoCat", []string{"kitty"}, sender, receiver, clienttypes.NewHeight(1, 1), 1), false}, + {"invalid msg with port", types.NewMsgTransfer("@nft-transfer", "channel-1", "cryptoCat", []string{"kitty"}, sender, receiver, clienttypes.NewHeight(1, 1), 1), true}, + {"invalid msg with channel", types.NewMsgTransfer("nft-transfer", "@channel-1", "cryptoCat", []string{"kitty"}, sender, receiver, clienttypes.NewHeight(1, 1), 1), true}, + {"invalid msg with class", types.NewMsgTransfer("nft-transfer", "channel-1", "", []string{"kitty"}, sender, receiver, clienttypes.NewHeight(1, 1), 1), true}, + {"invalid msg with token_id", types.NewMsgTransfer("nft-transfer", "channel-1", "cryptoCat", []string{""}, sender, receiver, clienttypes.NewHeight(1, 1), 1), true}, + {"invalid msg with sender", types.NewMsgTransfer("nft-transfer", "channel-1", "cryptoCat", []string{"kitty"}, "", receiver, clienttypes.NewHeight(1, 1), 1), true}, + {"invalid msg with receiver", types.NewMsgTransfer("nft-transfer", "channel-1", "cryptoCat", []string{"kitty"}, sender, "", clienttypes.NewHeight(1, 1), 1), true}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if err := tt.msg.ValidateBasic(); (err != nil) 
!= tt.wantErr { + t.Errorf("MsgTransfer.ValidateBasic() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} diff --git a/x/nft-transfer/types/packet.go b/x/nft-transfer/types/packet.go new file mode 100644 index 000000000..e463d3761 --- /dev/null +++ b/x/nft-transfer/types/packet.go @@ -0,0 +1,82 @@ +package types + +import ( + "strings" + "time" + + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +var ( + // DefaultRelativePacketTimeoutHeight is the default packet timeout height (in blocks) relative + // to the current block height of the counterparty chain provided by the client state. The + // timeout is disabled when set to 0. + DefaultRelativePacketTimeoutHeight = "0-1000" + + // DefaultRelativePacketTimeoutTimestamp is the default packet timeout timestamp (in nanoseconds) + // relative to the current block timestamp of the counterparty chain provided by the client + // state. The timeout is disabled when set to 0. The default is currently set to a 10 minute + // timeout. + DefaultRelativePacketTimeoutTimestamp = uint64((time.Duration(10) * time.Minute).Nanoseconds()) +) + +// NewNonFungibleTokenPacketData constructs a new NonFungibleTokenPacketData instance +func NewNonFungibleTokenPacketData( + classID string, + classURI string, + tokenIDs []string, + tokenURI []string, + sender string, + receiver string, +) NonFungibleTokenPacketData { + return NonFungibleTokenPacketData{ + ClassId: classID, + ClassUri: classURI, + TokenIds: tokenIDs, + TokenUris: tokenURI, + Sender: sender, + Receiver: receiver, + } +} + +// ValidateBasic is used for validating the nft transfer. +// NOTE: The addresses formats are not validated as the sender and recipient can have different +// formats defined by their corresponding chains that are not known to IBC. +func (nftpd NonFungibleTokenPacketData) ValidateBasic() error { + if strings.TrimSpace(nftpd.ClassId) == "" { + return sdkerrors.Wrap(ErrInvalidClassID, "classId cannot be blank") + } + + if len(nftpd.TokenIds) == 0 { + return sdkerrors.Wrap(ErrInvalidTokenID, "tokenId cannot be blank") + } + + if len(nftpd.TokenIds) != len(nftpd.TokenUris) { + return sdkerrors.Wrap(ErrInvalidPacket, "tokenIds and tokenUris lengths do not match") + } + + if strings.TrimSpace(nftpd.Sender) == "" { + return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "sender address cannot be blank") + } + + // decode the sender address + if _, err := sdk.AccAddressFromBech32(nftpd.Sender); err != nil { + return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "invalid sender address") + } + + if strings.TrimSpace(nftpd.Receiver) == "" { + return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "receiver address cannot be blank") + } + + // decode the receiver address + if _, err := sdk.AccAddressFromBech32(nftpd.Receiver); err != nil { + return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "invalid receiver address") + } + return nil +} + +// GetBytes is a helper for serializing +func (nftpd NonFungibleTokenPacketData) GetBytes() []byte { + return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&nftpd)) +} diff --git a/x/nft-transfer/types/packet.pb.go b/x/nft-transfer/types/packet.pb.go new file mode 100644 index 000000000..4ff14ac0f --- /dev/null +++ b/x/nft-transfer/types/packet.pb.go @@ -0,0 +1,594 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: nft_transfer/v1/packet.proto + +package types + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// NonFungibleTokenPacketData defines a struct for the packet payload +// See NonFungibleTokenPacketData spec: +// https://github.com/cosmos/ibc/tree/master/spec/app/ics-721-nft-transfer#data-structures +type NonFungibleTokenPacketData struct { + // the class_id of tokens to be transferred + ClassId string `protobuf:"bytes,1,opt,name=class_id,json=classId,proto3" json:"class_id,omitempty"` + // the class_uri of tokens to be transferred + ClassUri string `protobuf:"bytes,2,opt,name=class_uri,json=classUri,proto3" json:"class_uri,omitempty"` + // the non fungible tokens to be transferred + TokenIds []string `protobuf:"bytes,3,rep,name=token_ids,json=tokenIds,proto3" json:"token_ids,omitempty"` + // the non fungible tokens's uri to be transferred + TokenUris []string `protobuf:"bytes,4,rep,name=token_uris,json=tokenUris,proto3" json:"token_uris,omitempty"` + // the sender address + Sender string `protobuf:"bytes,5,opt,name=sender,proto3" json:"sender,omitempty"` + // the recipient address on the destination chain + Receiver string `protobuf:"bytes,6,opt,name=receiver,proto3" json:"receiver,omitempty"` +} + +func (m *NonFungibleTokenPacketData) Reset() { *m = NonFungibleTokenPacketData{} } +func (m *NonFungibleTokenPacketData) String() string { return proto.CompactTextString(m) } +func (*NonFungibleTokenPacketData) ProtoMessage() {} +func (*NonFungibleTokenPacketData) Descriptor() ([]byte, []int) { + return fileDescriptor_14c037a407c65908, []int{0} +} +func (m *NonFungibleTokenPacketData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NonFungibleTokenPacketData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_NonFungibleTokenPacketData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *NonFungibleTokenPacketData) XXX_Merge(src proto.Message) { + xxx_messageInfo_NonFungibleTokenPacketData.Merge(m, src) +} +func (m *NonFungibleTokenPacketData) XXX_Size() int { + return m.Size() +} +func (m *NonFungibleTokenPacketData) XXX_DiscardUnknown() { + xxx_messageInfo_NonFungibleTokenPacketData.DiscardUnknown(m) +} + +var xxx_messageInfo_NonFungibleTokenPacketData proto.InternalMessageInfo + +func (m *NonFungibleTokenPacketData) GetClassId() string { + if m != nil { + return m.ClassId + } + return "" +} + +func (m *NonFungibleTokenPacketData) GetClassUri() string { + if m != nil { + return m.ClassUri + } + return "" +} + +func (m *NonFungibleTokenPacketData) GetTokenIds() []string { + if m != nil { + return m.TokenIds + } + return nil +} + +func (m *NonFungibleTokenPacketData) GetTokenUris() []string { + if m != nil { + return m.TokenUris + } + return nil +} + +func (m *NonFungibleTokenPacketData) GetSender() string { + if m != nil { + return m.Sender + } + return "" +} + 
+func (m *NonFungibleTokenPacketData) GetReceiver() string { + if m != nil { + return m.Receiver + } + return "" +} + +func init() { + proto.RegisterType((*NonFungibleTokenPacketData)(nil), "chainmain.nft_transfer.v1.NonFungibleTokenPacketData") +} + +func init() { proto.RegisterFile("nft_transfer/v1/packet.proto", fileDescriptor_14c037a407c65908) } + +var fileDescriptor_14c037a407c65908 = []byte{ + // 285 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x90, 0xb1, 0x4e, 0xeb, 0x30, + 0x18, 0x85, 0xeb, 0xdb, 0x4b, 0x69, 0x3d, 0x7a, 0x40, 0x6e, 0x01, 0xab, 0x62, 0x62, 0x49, 0xac, + 0x8a, 0x91, 0x0d, 0x21, 0xa4, 0x2e, 0x08, 0x21, 0xba, 0xb0, 0x44, 0x8e, 0xe3, 0xa4, 0x56, 0x5b, + 0x3b, 0xfa, 0xed, 0x44, 0xf4, 0x2d, 0x78, 0x28, 0x06, 0xc6, 0x8e, 0x8c, 0x28, 0x79, 0x11, 0x14, + 0x07, 0x10, 0x8b, 0xa5, 0xe3, 0xef, 0xf8, 0x58, 0xfa, 0xf0, 0x99, 0xc9, 0x7d, 0xe2, 0x41, 0x18, + 0x97, 0x2b, 0xe0, 0xf5, 0x82, 0x97, 0x42, 0x6e, 0x94, 0x8f, 0x4b, 0xb0, 0xde, 0x92, 0xa9, 0x5c, + 0x0b, 0x6d, 0x76, 0x42, 0x9b, 0xf8, 0x6f, 0x2f, 0xae, 0x17, 0x17, 0x6f, 0x08, 0xcf, 0xee, 0xad, + 0xb9, 0xab, 0x4c, 0xa1, 0xd3, 0xad, 0x7a, 0xb2, 0x1b, 0x65, 0x1e, 0xc2, 0xdb, 0x5b, 0xe1, 0x05, + 0x99, 0xe2, 0xb1, 0xdc, 0x0a, 0xe7, 0x12, 0x9d, 0x51, 0x34, 0x47, 0x97, 0x93, 0xc7, 0xe3, 0x90, + 0x97, 0x19, 0x39, 0xc5, 0x93, 0x1e, 0x55, 0xa0, 0xe9, 0xbf, 0xc0, 0xfa, 0xee, 0x0a, 0x74, 0x07, + 0x7d, 0x37, 0x95, 0xe8, 0xcc, 0xd1, 0xe1, 0x7c, 0xd8, 0xc1, 0x70, 0xb1, 0xcc, 0x1c, 0x39, 0xc7, + 0xb8, 0x87, 0x15, 0x68, 0x47, 0xff, 0x07, 0xda, 0xd7, 0x57, 0xa0, 0x1d, 0x39, 0xc1, 0x23, 0xa7, + 0x4c, 0xa6, 0x80, 0x1e, 0x85, 0xd5, 0xef, 0x44, 0x66, 0x78, 0x0c, 0x4a, 0x2a, 0x5d, 0x2b, 0xa0, + 0xa3, 0xfe, 0xbf, 0x9f, 0x7c, 0xb3, 0x7a, 0x6f, 0x18, 0x3a, 0x34, 0x0c, 0x7d, 0x36, 0x0c, 0xbd, + 0xb6, 0x6c, 0x70, 0x68, 0xd9, 0xe0, 0xa3, 0x65, 0x83, 0xe7, 0xeb, 0x42, 0xfb, 0x75, 0x95, 0xc6, + 0xd2, 0xee, 0xb8, 0x84, 0x7d, 0xe9, 0x6d, 0x64, 0xa1, 0x88, 0x82, 0x11, 0x1e, 0xce, 0xa8, 0x13, + 0xc3, 0x5f, 0xb8, 0xc9, 0x7d, 0xf4, 0xab, 0xd0, 0xef, 0x4b, 0xe5, 0xd2, 0x51, 0xf0, 0x77, 0xf5, + 0x15, 0x00, 0x00, 0xff, 0xff, 0x43, 0xf2, 0xe2, 0xbe, 0x5f, 0x01, 0x00, 0x00, +} + +func (m *NonFungibleTokenPacketData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NonFungibleTokenPacketData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NonFungibleTokenPacketData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Receiver) > 0 { + i -= len(m.Receiver) + copy(dAtA[i:], m.Receiver) + i = encodeVarintPacket(dAtA, i, uint64(len(m.Receiver))) + i-- + dAtA[i] = 0x32 + } + if len(m.Sender) > 0 { + i -= len(m.Sender) + copy(dAtA[i:], m.Sender) + i = encodeVarintPacket(dAtA, i, uint64(len(m.Sender))) + i-- + dAtA[i] = 0x2a + } + if len(m.TokenUris) > 0 { + for iNdEx := len(m.TokenUris) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.TokenUris[iNdEx]) + copy(dAtA[i:], m.TokenUris[iNdEx]) + i = encodeVarintPacket(dAtA, i, uint64(len(m.TokenUris[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.TokenIds) > 0 { + for iNdEx := len(m.TokenIds) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.TokenIds[iNdEx]) + copy(dAtA[i:], m.TokenIds[iNdEx]) + i = encodeVarintPacket(dAtA, i, uint64(len(m.TokenIds[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if 
len(m.ClassUri) > 0 { + i -= len(m.ClassUri) + copy(dAtA[i:], m.ClassUri) + i = encodeVarintPacket(dAtA, i, uint64(len(m.ClassUri))) + i-- + dAtA[i] = 0x12 + } + if len(m.ClassId) > 0 { + i -= len(m.ClassId) + copy(dAtA[i:], m.ClassId) + i = encodeVarintPacket(dAtA, i, uint64(len(m.ClassId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintPacket(dAtA []byte, offset int, v uint64) int { + offset -= sovPacket(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *NonFungibleTokenPacketData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ClassId) + if l > 0 { + n += 1 + l + sovPacket(uint64(l)) + } + l = len(m.ClassUri) + if l > 0 { + n += 1 + l + sovPacket(uint64(l)) + } + if len(m.TokenIds) > 0 { + for _, s := range m.TokenIds { + l = len(s) + n += 1 + l + sovPacket(uint64(l)) + } + } + if len(m.TokenUris) > 0 { + for _, s := range m.TokenUris { + l = len(s) + n += 1 + l + sovPacket(uint64(l)) + } + } + l = len(m.Sender) + if l > 0 { + n += 1 + l + sovPacket(uint64(l)) + } + l = len(m.Receiver) + if l > 0 { + n += 1 + l + sovPacket(uint64(l)) + } + return n +} + +func sovPacket(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozPacket(x uint64) (n int) { + return sovPacket(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *NonFungibleTokenPacketData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPacket + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NonFungibleTokenPacketData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NonFungibleTokenPacketData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClassId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPacket + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPacket + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPacket + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClassId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClassUri", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPacket + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPacket + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPacket + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClassUri = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field TokenIds", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPacket + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPacket + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPacket + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TokenIds = append(m.TokenIds, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TokenUris", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPacket + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPacket + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPacket + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TokenUris = append(m.TokenUris, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sender", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPacket + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPacket + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPacket + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sender = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Receiver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPacket + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPacket + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPacket + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Receiver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPacket(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPacket + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipPacket(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPacket + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 
{ + if shift >= 64 { + return 0, ErrIntOverflowPacket + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPacket + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthPacket + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupPacket + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthPacket + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthPacket = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowPacket = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupPacket = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/nft-transfer/types/packet_test.go b/x/nft-transfer/types/packet_test.go new file mode 100644 index 000000000..c10531247 --- /dev/null +++ b/x/nft-transfer/types/packet_test.go @@ -0,0 +1,53 @@ +package types_test + +import ( + "testing" + + "github.com/crypto-org-chain/chain-main/v4/x/nft-transfer/types" +) + +func TestNonFungibleTokenPacketData_ValidateBasic(t *testing.T) { + tests := []struct { + name string + packet types.NonFungibleTokenPacketData + wantErr bool + }{ + { + name: "valid packet", + packet: types.NonFungibleTokenPacketData{"cryptoCat", "uri", []string{"kitty"}, []string{"kitty_uri"}, sender, receiver}, + wantErr: false, + }, + { + name: "invalid packet with empty classID", + packet: types.NonFungibleTokenPacketData{"", "uri", []string{"kitty"}, []string{"kitty_uri"}, sender, receiver}, + wantErr: true, + }, + { + name: "invalid packet with empty tokenIds", + packet: types.NonFungibleTokenPacketData{"cryptoCat", "uri", []string{}, []string{"kitty_uri"}, sender, receiver}, + wantErr: true, + }, + { + name: "invalid packet with empty tokenUris", + packet: types.NonFungibleTokenPacketData{"cryptoCat", "uri", []string{"kitty"}, []string{}, sender, receiver}, + wantErr: true, + }, + { + name: "invalid packet with empty sender", + packet: types.NonFungibleTokenPacketData{"cryptoCat", "uri", []string{"kitty"}, []string{"kitty_uri"}, "", receiver}, + wantErr: true, + }, + { + name: "invalid packet with empty receiver", + packet: types.NonFungibleTokenPacketData{"cryptoCat", "uri", []string{"kitty"}, []string{"kitty_uri"}, sender, ""}, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if err := tt.packet.ValidateBasic(); (err != nil) != tt.wantErr { + t.Errorf("NonFungibleTokenPacketData.ValidateBasic() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} diff --git a/x/nft-transfer/types/query.pb.go b/x/nft-transfer/types/query.pb.go new file mode 100644 index 000000000..925bea70a --- /dev/null +++ b/x/nft-transfer/types/query.pb.go @@ -0,0 +1,1903 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: nft_transfer/v1/query.proto + +package types + +import ( + context "context" + fmt "fmt" + query "github.com/cosmos/cosmos-sdk/types/query" + _ "github.com/gogo/protobuf/gogoproto" + grpc1 "github.com/gogo/protobuf/grpc" + proto "github.com/gogo/protobuf/proto" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// QueryClassTraceRequest is the request type for the Query/ClassDenom RPC +// method +type QueryClassTraceRequest struct { + // hash (in hex format) or classID (full classID with ibc prefix) of the denomination trace information. + Hash string `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` +} + +func (m *QueryClassTraceRequest) Reset() { *m = QueryClassTraceRequest{} } +func (m *QueryClassTraceRequest) String() string { return proto.CompactTextString(m) } +func (*QueryClassTraceRequest) ProtoMessage() {} +func (*QueryClassTraceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_6f722060ae5d435b, []int{0} +} +func (m *QueryClassTraceRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryClassTraceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryClassTraceRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryClassTraceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryClassTraceRequest.Merge(m, src) +} +func (m *QueryClassTraceRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryClassTraceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryClassTraceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryClassTraceRequest proto.InternalMessageInfo + +func (m *QueryClassTraceRequest) GetHash() string { + if m != nil { + return m.Hash + } + return "" +} + +// QueryClassTraceResponse is the response type for the Query/ClassDenom RPC +// method. +type QueryClassTraceResponse struct { + // class_trace returns the requested class trace information. 
+ ClassTrace *ClassTrace `protobuf:"bytes,1,opt,name=class_trace,json=classTrace,proto3" json:"class_trace,omitempty"` +} + +func (m *QueryClassTraceResponse) Reset() { *m = QueryClassTraceResponse{} } +func (m *QueryClassTraceResponse) String() string { return proto.CompactTextString(m) } +func (*QueryClassTraceResponse) ProtoMessage() {} +func (*QueryClassTraceResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_6f722060ae5d435b, []int{1} +} +func (m *QueryClassTraceResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryClassTraceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryClassTraceResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryClassTraceResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryClassTraceResponse.Merge(m, src) +} +func (m *QueryClassTraceResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryClassTraceResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryClassTraceResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryClassTraceResponse proto.InternalMessageInfo + +func (m *QueryClassTraceResponse) GetClassTrace() *ClassTrace { + if m != nil { + return m.ClassTrace + } + return nil +} + +// QueryConnectionsRequest is the request type for the Query/ClassTraces RPC +// method +type QueryClassTracesRequest struct { + // pagination defines an optional pagination for the request. + Pagination *query.PageRequest `protobuf:"bytes,1,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryClassTracesRequest) Reset() { *m = QueryClassTracesRequest{} } +func (m *QueryClassTracesRequest) String() string { return proto.CompactTextString(m) } +func (*QueryClassTracesRequest) ProtoMessage() {} +func (*QueryClassTracesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_6f722060ae5d435b, []int{2} +} +func (m *QueryClassTracesRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryClassTracesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryClassTracesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryClassTracesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryClassTracesRequest.Merge(m, src) +} +func (m *QueryClassTracesRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryClassTracesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryClassTracesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryClassTracesRequest proto.InternalMessageInfo + +func (m *QueryClassTracesRequest) GetPagination() *query.PageRequest { + if m != nil { + return m.Pagination + } + return nil +} + +// QueryClassTracesResponse is the response type for the Query/ClassTraces RPC +// method. +type QueryClassTracesResponse struct { + // class_traces returns all class trace information. + ClassTraces Traces `protobuf:"bytes,1,rep,name=class_traces,json=classTraces,proto3,castrepeated=Traces" json:"class_traces"` + // pagination defines the pagination in the response. 
+ Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryClassTracesResponse) Reset() { *m = QueryClassTracesResponse{} } +func (m *QueryClassTracesResponse) String() string { return proto.CompactTextString(m) } +func (*QueryClassTracesResponse) ProtoMessage() {} +func (*QueryClassTracesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_6f722060ae5d435b, []int{3} +} +func (m *QueryClassTracesResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryClassTracesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryClassTracesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryClassTracesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryClassTracesResponse.Merge(m, src) +} +func (m *QueryClassTracesResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryClassTracesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryClassTracesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryClassTracesResponse proto.InternalMessageInfo + +func (m *QueryClassTracesResponse) GetClassTraces() Traces { + if m != nil { + return m.ClassTraces + } + return nil +} + +func (m *QueryClassTracesResponse) GetPagination() *query.PageResponse { + if m != nil { + return m.Pagination + } + return nil +} + +// QueryClassHashRequest is the request type for the Query/ClassHash RPC +// method +type QueryClassHashRequest struct { + // The class trace ([port_id]/[channel_id])+/[denom] + Trace string `protobuf:"bytes,1,opt,name=trace,proto3" json:"trace,omitempty"` +} + +func (m *QueryClassHashRequest) Reset() { *m = QueryClassHashRequest{} } +func (m *QueryClassHashRequest) String() string { return proto.CompactTextString(m) } +func (*QueryClassHashRequest) ProtoMessage() {} +func (*QueryClassHashRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_6f722060ae5d435b, []int{4} +} +func (m *QueryClassHashRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryClassHashRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryClassHashRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryClassHashRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryClassHashRequest.Merge(m, src) +} +func (m *QueryClassHashRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryClassHashRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryClassHashRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryClassHashRequest proto.InternalMessageInfo + +func (m *QueryClassHashRequest) GetTrace() string { + if m != nil { + return m.Trace + } + return "" +} + +// QueryClassHashResponse is the response type for the Query/ClassHash RPC +// method. +type QueryClassHashResponse struct { + // hash (in hex format) of the denomination trace information. 
+ Hash string `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` +} + +func (m *QueryClassHashResponse) Reset() { *m = QueryClassHashResponse{} } +func (m *QueryClassHashResponse) String() string { return proto.CompactTextString(m) } +func (*QueryClassHashResponse) ProtoMessage() {} +func (*QueryClassHashResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_6f722060ae5d435b, []int{5} +} +func (m *QueryClassHashResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryClassHashResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryClassHashResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryClassHashResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryClassHashResponse.Merge(m, src) +} +func (m *QueryClassHashResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryClassHashResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryClassHashResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryClassHashResponse proto.InternalMessageInfo + +func (m *QueryClassHashResponse) GetHash() string { + if m != nil { + return m.Hash + } + return "" +} + +// QueryEscrowAddressRequest is the request type for the EscrowAddress RPC method. +type QueryEscrowAddressRequest struct { + // unique port identifier + PortId string `protobuf:"bytes,1,opt,name=port_id,json=portId,proto3" json:"port_id,omitempty"` + // unique channel identifier + ChannelId string `protobuf:"bytes,2,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"` +} + +func (m *QueryEscrowAddressRequest) Reset() { *m = QueryEscrowAddressRequest{} } +func (m *QueryEscrowAddressRequest) String() string { return proto.CompactTextString(m) } +func (*QueryEscrowAddressRequest) ProtoMessage() {} +func (*QueryEscrowAddressRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_6f722060ae5d435b, []int{6} +} +func (m *QueryEscrowAddressRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryEscrowAddressRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryEscrowAddressRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryEscrowAddressRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryEscrowAddressRequest.Merge(m, src) +} +func (m *QueryEscrowAddressRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryEscrowAddressRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryEscrowAddressRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryEscrowAddressRequest proto.InternalMessageInfo + +func (m *QueryEscrowAddressRequest) GetPortId() string { + if m != nil { + return m.PortId + } + return "" +} + +func (m *QueryEscrowAddressRequest) GetChannelId() string { + if m != nil { + return m.ChannelId + } + return "" +} + +// QueryEscrowAddressResponse is the response type of the EscrowAddress RPC method. 
+type QueryEscrowAddressResponse struct { + // the escrow account address + EscrowAddress string `protobuf:"bytes,1,opt,name=escrow_address,json=escrowAddress,proto3" json:"escrow_address,omitempty"` +} + +func (m *QueryEscrowAddressResponse) Reset() { *m = QueryEscrowAddressResponse{} } +func (m *QueryEscrowAddressResponse) String() string { return proto.CompactTextString(m) } +func (*QueryEscrowAddressResponse) ProtoMessage() {} +func (*QueryEscrowAddressResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_6f722060ae5d435b, []int{7} +} +func (m *QueryEscrowAddressResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryEscrowAddressResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryEscrowAddressResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryEscrowAddressResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryEscrowAddressResponse.Merge(m, src) +} +func (m *QueryEscrowAddressResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryEscrowAddressResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryEscrowAddressResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryEscrowAddressResponse proto.InternalMessageInfo + +func (m *QueryEscrowAddressResponse) GetEscrowAddress() string { + if m != nil { + return m.EscrowAddress + } + return "" +} + +func init() { + proto.RegisterType((*QueryClassTraceRequest)(nil), "chainmain.nft_transfer.v1.QueryClassTraceRequest") + proto.RegisterType((*QueryClassTraceResponse)(nil), "chainmain.nft_transfer.v1.QueryClassTraceResponse") + proto.RegisterType((*QueryClassTracesRequest)(nil), "chainmain.nft_transfer.v1.QueryClassTracesRequest") + proto.RegisterType((*QueryClassTracesResponse)(nil), "chainmain.nft_transfer.v1.QueryClassTracesResponse") + proto.RegisterType((*QueryClassHashRequest)(nil), "chainmain.nft_transfer.v1.QueryClassHashRequest") + proto.RegisterType((*QueryClassHashResponse)(nil), "chainmain.nft_transfer.v1.QueryClassHashResponse") + proto.RegisterType((*QueryEscrowAddressRequest)(nil), "chainmain.nft_transfer.v1.QueryEscrowAddressRequest") + proto.RegisterType((*QueryEscrowAddressResponse)(nil), "chainmain.nft_transfer.v1.QueryEscrowAddressResponse") +} + +func init() { proto.RegisterFile("nft_transfer/v1/query.proto", fileDescriptor_6f722060ae5d435b) } + +var fileDescriptor_6f722060ae5d435b = []byte{ + // 652 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x94, 0xcf, 0x4f, 0xd4, 0x40, + 0x14, 0xc7, 0xb7, 0x28, 0x98, 0x7d, 0x2b, 0x1c, 0x26, 0x28, 0x50, 0xb5, 0x90, 0x4d, 0x40, 0x62, + 0xec, 0x0c, 0x05, 0xf1, 0xe2, 0x49, 0x88, 0x28, 0x17, 0x83, 0xeb, 0x8f, 0x18, 0x2f, 0x9b, 0xd9, + 0xee, 0xd0, 0x36, 0x81, 0x4e, 0xe9, 0x0c, 0x28, 0x21, 0x7b, 0xf1, 0x2f, 0x30, 0xf1, 0x7f, 0x30, + 0x86, 0xff, 0xc1, 0x3b, 0x47, 0x12, 0x3d, 0x78, 0x12, 0x03, 0xfe, 0x21, 0x66, 0xa6, 0xb3, 0xdb, + 0xae, 0xac, 0xb0, 0x7b, 0xd9, 0xb4, 0x7d, 0xef, 0xfb, 0xde, 0xe7, 0x7d, 0xe7, 0xcd, 0xc2, 0xad, + 0x78, 0x53, 0xd6, 0x65, 0x4a, 0x63, 0xb1, 0xc9, 0x52, 0xb2, 0xe7, 0x91, 0x9d, 0x5d, 0x96, 0xee, + 0xe3, 0x24, 0xe5, 0x92, 0xa3, 0x29, 0x3f, 0xa4, 0x51, 0xbc, 0x4d, 0xa3, 0x18, 0x17, 0xd3, 0xf0, + 0x9e, 0x67, 0x8f, 0x07, 0x3c, 0xe0, 0x3a, 0x8b, 0xa8, 0xa7, 0x4c, 0x60, 0xdf, 0xf3, 0xb9, 0xd8, + 0xe6, 0x82, 0x34, 0xa8, 0x60, 0x59, 0x25, 0xb2, 0xe7, 0x35, 0x98, 
0xa4, 0x1e, 0x49, 0x68, 0x10, + 0xc5, 0x54, 0x46, 0x3c, 0x36, 0xb9, 0xe7, 0x3a, 0xcb, 0x94, 0xfa, 0xcc, 0x04, 0x6f, 0x07, 0x9c, + 0x07, 0x5b, 0x8c, 0xd0, 0x24, 0x22, 0x34, 0x8e, 0xb9, 0xd4, 0x4a, 0x91, 0x45, 0xab, 0xf7, 0xe1, + 0xe6, 0x0b, 0x55, 0x7c, 0x75, 0x8b, 0x0a, 0xf1, 0x4a, 0xc9, 0x6a, 0x6c, 0x67, 0x97, 0x09, 0x89, + 0x10, 0x5c, 0x0d, 0xa9, 0x08, 0x27, 0xad, 0x19, 0x6b, 0xbe, 0x5c, 0xd3, 0xcf, 0x55, 0x0a, 0x13, + 0xe7, 0xb2, 0x45, 0xc2, 0x63, 0xc1, 0xd0, 0x1a, 0x54, 0x7c, 0xf5, 0xb5, 0xae, 0x7b, 0x6b, 0x55, + 0x65, 0x71, 0x16, 0xff, 0x77, 0x6c, 0x5c, 0xa8, 0x01, 0x7e, 0xe7, 0xb9, 0x47, 0x0b, 0xd1, 0x26, + 0x5a, 0x03, 0xc8, 0x47, 0x37, 0x1d, 0xe6, 0x70, 0xe6, 0x13, 0x56, 0x3e, 0xe1, 0xcc, 0x71, 0xe3, + 0x13, 0xde, 0xa0, 0x41, 0x7b, 0x9a, 0x5a, 0x41, 0x59, 0xfd, 0x66, 0xc1, 0xe4, 0xf9, 0x1e, 0x66, + 0x8e, 0xb7, 0x70, 0xbd, 0x30, 0x87, 0x98, 0xb4, 0x66, 0xae, 0xf4, 0x3d, 0xc8, 0xca, 0xd8, 0xd1, + 0xaf, 0xe9, 0xd2, 0xe1, 0xc9, 0xf4, 0x88, 0x29, 0x5a, 0xc9, 0x07, 0x13, 0xe8, 0x69, 0x17, 0xfe, + 0x90, 0xc6, 0xbf, 0x7b, 0x29, 0x7e, 0x86, 0xd5, 0xc5, 0xef, 0xc2, 0x8d, 0x1c, 0xff, 0x19, 0x15, + 0x61, 0xdb, 0xa0, 0x71, 0x18, 0xce, 0xdd, 0x2f, 0xd7, 0xb2, 0x97, 0xee, 0x23, 0xce, 0xd2, 0xcd, + 0xac, 0xbd, 0x8e, 0xf8, 0x25, 0x4c, 0xe9, 0xec, 0x27, 0xc2, 0x4f, 0xf9, 0xfb, 0xc7, 0xcd, 0x66, + 0xca, 0x44, 0xe7, 0x04, 0x26, 0xe0, 0x5a, 0xc2, 0x53, 0x59, 0x8f, 0x9a, 0x46, 0x33, 0xa2, 0x5e, + 0xd7, 0x9b, 0xe8, 0x0e, 0x80, 0x1f, 0xd2, 0x38, 0x66, 0x5b, 0x2a, 0x36, 0xa4, 0x63, 0x65, 0xf3, + 0x65, 0xbd, 0x59, 0x5d, 0x05, 0xbb, 0x57, 0x51, 0x83, 0x31, 0x0b, 0x63, 0x4c, 0x07, 0xea, 0x34, + 0x8b, 0x98, 0xe2, 0xa3, 0xac, 0x98, 0xbe, 0x78, 0x32, 0x0c, 0xc3, 0xba, 0x0a, 0x3a, 0xb4, 0x00, + 0x72, 0xd7, 0x91, 0x77, 0xc1, 0xe1, 0xf4, 0x5e, 0x6e, 0x7b, 0x71, 0x10, 0x49, 0x86, 0x59, 0x5d, + 0xfe, 0xf8, 0xfd, 0xcf, 0xe7, 0x21, 0x82, 0x5c, 0x12, 0x35, 0x7c, 0x42, 0x93, 0x44, 0x90, 0x7f, + 0xef, 0x5d, 0x71, 0x73, 0xc8, 0x81, 0xf2, 0xb3, 0x85, 0xbe, 0x58, 0x50, 0x29, 0x2c, 0x1a, 0x1a, + 0xa0, 0x75, 0xdb, 0x77, 0x7b, 0x69, 0x20, 0x8d, 0xe1, 0xc5, 0x9a, 0x77, 0x1e, 0xcd, 0xf5, 0xc7, + 0x8b, 0xbe, 0x5a, 0x50, 0xee, 0xec, 0x08, 0x5a, 0xe8, 0xab, 0x65, 0x61, 0xfb, 0x6c, 0x6f, 0x00, + 0x85, 0x41, 0x7c, 0xa8, 0x11, 0x17, 0x10, 0xbe, 0x0c, 0x51, 0x59, 0xa9, 0x2c, 0xd5, 0xa8, 0x2d, + 0xf4, 0xc3, 0x82, 0xd1, 0xae, 0x5d, 0x42, 0x0f, 0x2e, 0x6b, 0xde, 0x6b, 0x9f, 0xed, 0xe5, 0x01, + 0x55, 0x06, 0xfb, 0x8d, 0xc6, 0xde, 0x40, 0xcf, 0x2f, 0xc0, 0xce, 0x76, 0x5f, 0x90, 0x83, 0xfc, + 0x5e, 0xb4, 0x88, 0xba, 0x2d, 0x82, 0x1c, 0x98, 0x3b, 0xd4, 0x22, 0xdd, 0x6b, 0xbf, 0xf2, 0xfa, + 0xe8, 0xd4, 0xb1, 0x8e, 0x4f, 0x1d, 0xeb, 0xf7, 0xa9, 0x63, 0x7d, 0x3a, 0x73, 0x4a, 0xc7, 0x67, + 0x4e, 0xe9, 0xe7, 0x99, 0x53, 0x7a, 0xf7, 0x28, 0x88, 0x64, 0xb8, 0xdb, 0xc0, 0x3e, 0xdf, 0x26, + 0x7e, 0xba, 0x9f, 0x48, 0xee, 0xf2, 0x34, 0x70, 0x35, 0x3d, 0xd1, 0xbf, 0xae, 0x1a, 0x82, 0x7c, + 0x50, 0x20, 0x6e, 0x07, 0x44, 0xee, 0x27, 0x4c, 0x34, 0x46, 0xf4, 0x5f, 0xfd, 0xd2, 0xdf, 0x00, + 0x00, 0x00, 0xff, 0xff, 0xf6, 0xfd, 0x0f, 0x90, 0xa1, 0x06, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// QueryClient is the client API for Query service. 
+// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type QueryClient interface { + // ClassTrace queries a class trace information. + ClassTrace(ctx context.Context, in *QueryClassTraceRequest, opts ...grpc.CallOption) (*QueryClassTraceResponse, error) + // ClassTraces queries all class traces. + ClassTraces(ctx context.Context, in *QueryClassTracesRequest, opts ...grpc.CallOption) (*QueryClassTracesResponse, error) + // ClassHash queries a class hash information. + ClassHash(ctx context.Context, in *QueryClassHashRequest, opts ...grpc.CallOption) (*QueryClassHashResponse, error) + // EscrowAddress returns the escrow address for a particular port and channel id. + EscrowAddress(ctx context.Context, in *QueryEscrowAddressRequest, opts ...grpc.CallOption) (*QueryEscrowAddressResponse, error) +} + +type queryClient struct { + cc grpc1.ClientConn +} + +func NewQueryClient(cc grpc1.ClientConn) QueryClient { + return &queryClient{cc} +} + +func (c *queryClient) ClassTrace(ctx context.Context, in *QueryClassTraceRequest, opts ...grpc.CallOption) (*QueryClassTraceResponse, error) { + out := new(QueryClassTraceResponse) + err := c.cc.Invoke(ctx, "/chainmain.nft_transfer.v1.Query/ClassTrace", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) ClassTraces(ctx context.Context, in *QueryClassTracesRequest, opts ...grpc.CallOption) (*QueryClassTracesResponse, error) { + out := new(QueryClassTracesResponse) + err := c.cc.Invoke(ctx, "/chainmain.nft_transfer.v1.Query/ClassTraces", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) ClassHash(ctx context.Context, in *QueryClassHashRequest, opts ...grpc.CallOption) (*QueryClassHashResponse, error) { + out := new(QueryClassHashResponse) + err := c.cc.Invoke(ctx, "/chainmain.nft_transfer.v1.Query/ClassHash", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) EscrowAddress(ctx context.Context, in *QueryEscrowAddressRequest, opts ...grpc.CallOption) (*QueryEscrowAddressResponse, error) { + out := new(QueryEscrowAddressResponse) + err := c.cc.Invoke(ctx, "/chainmain.nft_transfer.v1.Query/EscrowAddress", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// QueryServer is the server API for Query service. +type QueryServer interface { + // ClassTrace queries a class trace information. + ClassTrace(context.Context, *QueryClassTraceRequest) (*QueryClassTraceResponse, error) + // ClassTraces queries all class traces. + ClassTraces(context.Context, *QueryClassTracesRequest) (*QueryClassTracesResponse, error) + // ClassHash queries a class hash information. + ClassHash(context.Context, *QueryClassHashRequest) (*QueryClassHashResponse, error) + // EscrowAddress returns the escrow address for a particular port and channel id. + EscrowAddress(context.Context, *QueryEscrowAddressRequest) (*QueryEscrowAddressResponse, error) +} + +// UnimplementedQueryServer can be embedded to have forward compatible implementations. 
+type UnimplementedQueryServer struct { +} + +func (*UnimplementedQueryServer) ClassTrace(ctx context.Context, req *QueryClassTraceRequest) (*QueryClassTraceResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ClassTrace not implemented") +} +func (*UnimplementedQueryServer) ClassTraces(ctx context.Context, req *QueryClassTracesRequest) (*QueryClassTracesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ClassTraces not implemented") +} +func (*UnimplementedQueryServer) ClassHash(ctx context.Context, req *QueryClassHashRequest) (*QueryClassHashResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ClassHash not implemented") +} +func (*UnimplementedQueryServer) EscrowAddress(ctx context.Context, req *QueryEscrowAddressRequest) (*QueryEscrowAddressResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method EscrowAddress not implemented") +} + +func RegisterQueryServer(s grpc1.Server, srv QueryServer) { + s.RegisterService(&_Query_serviceDesc, srv) +} + +func _Query_ClassTrace_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryClassTraceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).ClassTrace(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/chainmain.nft_transfer.v1.Query/ClassTrace", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).ClassTrace(ctx, req.(*QueryClassTraceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_ClassTraces_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryClassTracesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).ClassTraces(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/chainmain.nft_transfer.v1.Query/ClassTraces", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).ClassTraces(ctx, req.(*QueryClassTracesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_ClassHash_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryClassHashRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).ClassHash(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/chainmain.nft_transfer.v1.Query/ClassHash", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).ClassHash(ctx, req.(*QueryClassHashRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_EscrowAddress_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryEscrowAddressRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).EscrowAddress(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/chainmain.nft_transfer.v1.Query/EscrowAddress", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(QueryServer).EscrowAddress(ctx, req.(*QueryEscrowAddressRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Query_serviceDesc = grpc.ServiceDesc{ + ServiceName: "chainmain.nft_transfer.v1.Query", + HandlerType: (*QueryServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ClassTrace", + Handler: _Query_ClassTrace_Handler, + }, + { + MethodName: "ClassTraces", + Handler: _Query_ClassTraces_Handler, + }, + { + MethodName: "ClassHash", + Handler: _Query_ClassHash_Handler, + }, + { + MethodName: "EscrowAddress", + Handler: _Query_EscrowAddress_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "nft_transfer/v1/query.proto", +} + +func (m *QueryClassTraceRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryClassTraceRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryClassTraceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Hash) > 0 { + i -= len(m.Hash) + copy(dAtA[i:], m.Hash) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Hash))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryClassTraceResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryClassTraceResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryClassTraceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ClassTrace != nil { + { + size, err := m.ClassTrace.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryClassTracesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryClassTracesRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryClassTracesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryClassTracesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryClassTracesResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryClassTracesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, 
uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.ClassTraces) > 0 { + for iNdEx := len(m.ClassTraces) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ClassTraces[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryClassHashRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryClassHashRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryClassHashRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Trace) > 0 { + i -= len(m.Trace) + copy(dAtA[i:], m.Trace) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Trace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryClassHashResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryClassHashResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryClassHashResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Hash) > 0 { + i -= len(m.Hash) + copy(dAtA[i:], m.Hash) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Hash))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryEscrowAddressRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryEscrowAddressRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryEscrowAddressRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ChannelId) > 0 { + i -= len(m.ChannelId) + copy(dAtA[i:], m.ChannelId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.ChannelId))) + i-- + dAtA[i] = 0x12 + } + if len(m.PortId) > 0 { + i -= len(m.PortId) + copy(dAtA[i:], m.PortId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.PortId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryEscrowAddressResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryEscrowAddressResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryEscrowAddressResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.EscrowAddress) > 0 { + i -= len(m.EscrowAddress) + copy(dAtA[i:], m.EscrowAddress) + i = encodeVarintQuery(dAtA, i, uint64(len(m.EscrowAddress))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { + offset -= sovQuery(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + 
return base +} +func (m *QueryClassTraceRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Hash) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryClassTraceResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ClassTrace != nil { + l = m.ClassTrace.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryClassTracesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryClassTracesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ClassTraces) > 0 { + for _, e := range m.ClassTraces { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryClassHashRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Trace) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryClassHashResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Hash) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryEscrowAddressRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PortId) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.ChannelId) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryEscrowAddressResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.EscrowAddress) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func sovQuery(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozQuery(x uint64) (n int) { + return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *QueryClassTraceRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryClassTraceRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryClassTraceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hash = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if 
(iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryClassTraceResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryClassTraceResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryClassTraceResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClassTrace", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ClassTrace == nil { + m.ClassTrace = &ClassTrace{} + } + if err := m.ClassTrace.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryClassTracesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryClassTracesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryClassTracesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return 
ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryClassTracesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryClassTracesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryClassTracesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClassTraces", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClassTraces = append(m.ClassTraces, ClassTrace{}) + if err := m.ClassTraces[len(m.ClassTraces)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageResponse{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryClassHashRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryClassHashRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryClassHashRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Trace", wireType) + } + var stringLen 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Trace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryClassHashResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryClassHashResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryClassHashResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hash = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryEscrowAddressRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryEscrowAddressRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryEscrowAddressRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PortId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PortId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChannelId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryEscrowAddressResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryEscrowAddressResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryEscrowAddressResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EscrowAddress", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EscrowAddress = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipQuery(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << 
shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthQuery + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupQuery + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthQuery + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/nft-transfer/types/query.pb.gw.go b/x/nft-transfer/types/query.pb.gw.go new file mode 100644 index 000000000..9087ffffd --- /dev/null +++ b/x/nft-transfer/types/query.pb.gw.go @@ -0,0 +1,496 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: nft_transfer/v1/query.proto + +/* +Package types is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. +*/ +package types + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/descriptor" + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = descriptor.ForMessage +var _ = metadata.Join + +func request_Query_ClassTrace_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryClassTraceRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["hash"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "hash") + } + + protoReq.Hash, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "hash", err) + } + + msg, err := client.ClassTrace(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_ClassTrace_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryClassTraceRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["hash"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing 
parameter %s", "hash") + } + + protoReq.Hash, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "hash", err) + } + + msg, err := server.ClassTrace(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_Query_ClassTraces_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_Query_ClassTraces_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryClassTracesRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_ClassTraces_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.ClassTraces(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_ClassTraces_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryClassTracesRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_ClassTraces_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.ClassTraces(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_ClassHash_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryClassHashRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["trace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "trace") + } + + protoReq.Trace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "trace", err) + } + + msg, err := client.ClassHash(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_ClassHash_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryClassHashRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["trace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "trace") + } + + protoReq.Trace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "trace", err) + } + + msg, err := server.ClassHash(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_EscrowAddress_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams 
map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryEscrowAddressRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["channel_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_id") + } + + protoReq.ChannelId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_id", err) + } + + val, ok = pathParams["port_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "port_id") + } + + protoReq.PortId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "port_id", err) + } + + msg, err := client.EscrowAddress(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_EscrowAddress_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryEscrowAddressRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["channel_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_id") + } + + protoReq.ChannelId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_id", err) + } + + val, ok = pathParams["port_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "port_id") + } + + protoReq.PortId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "port_id", err) + } + + msg, err := server.EscrowAddress(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterQueryHandlerServer registers the http handlers for service Query to "mux". +// UnaryRPC :call QueryServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. 
+func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { + + mux.Handle("GET", pattern_Query_ClassTrace_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_ClassTrace_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_ClassTrace_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_ClassTraces_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_ClassTraces_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_ClassTraces_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_ClassHash_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_ClassHash_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_ClassHash_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_EscrowAddress_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_EscrowAddress_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_EscrowAddress_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterQueryHandlerFromEndpoint is same as RegisterQueryHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterQueryHandler(ctx, mux, conn) +} + +// RegisterQueryHandler registers the http handlers for service Query to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn)) +} + +// RegisterQueryHandlerClient registers the http handlers for service Query +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "QueryClient" to call the correct interceptors. +func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error { + + mux.Handle("GET", pattern_Query_ClassTrace_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_ClassTrace_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_ClassTrace_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_ClassTraces_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_ClassTraces_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_ClassTraces_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_ClassHash_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_ClassHash_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_ClassHash_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_EscrowAddress_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_EscrowAddress_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_EscrowAddress_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + return nil +} + +var ( + pattern_Query_ClassTrace_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5}, []string{"ibc", "apps", "nft_transfer", "v1", "class_traces", "hash"}, "", runtime.AssumeColonVerbOpt(false))) + + pattern_Query_ClassTraces_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"ibc", "apps", "nft_transfer", "v1", "class_traces"}, "", runtime.AssumeColonVerbOpt(false))) + + pattern_Query_ClassHash_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5}, []string{"ibc", "apps", "nft_transfer", "v1", "class_hashes", "trace"}, "", runtime.AssumeColonVerbOpt(false))) + + pattern_Query_EscrowAddress_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6, 1, 0, 4, 1, 5, 7, 2, 8}, []string{"ibc", "apps", "nft_transfer", "v1", "channels", "channel_id", "ports", "port_id", "escrow_address"}, "", runtime.AssumeColonVerbOpt(false))) +) + +var ( + forward_Query_ClassTrace_0 = runtime.ForwardResponseMessage + + forward_Query_ClassTraces_0 = runtime.ForwardResponseMessage + + forward_Query_ClassHash_0 = runtime.ForwardResponseMessage + + forward_Query_EscrowAddress_0 = runtime.ForwardResponseMessage +) diff --git a/x/nft-transfer/types/trace.go b/x/nft-transfer/types/trace.go new file mode 100644 index 000000000..09bcfa037 --- /dev/null +++ b/x/nft-transfer/types/trace.go @@ -0,0 +1,180 @@ +package types + +import ( + "crypto/sha256" + "encoding/hex" + "fmt" + "sort" + "strings" + + tmbytes "github.com/tendermint/tendermint/libs/bytes" + tmtypes "github.com/tendermint/tendermint/types" + + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + host "github.com/cosmos/ibc-go/v5/modules/core/24-host" +) + +// ParseHexHash parses a hex hash in string format to bytes and validates its correctness. +func ParseHexHash(hexHash string) (tmbytes.HexBytes, error) { + hash, err := hex.DecodeString(hexHash) + if err != nil { + return nil, err + } + + if err := tmtypes.ValidateHash(hash); err != nil { + return nil, err + } + + return hash, nil +} + +// GetClassPrefix returns the receiving class prefix +func GetClassPrefix(portID, channelID string) string { + return fmt.Sprintf("%s/%s/", portID, channelID) +} + +// RemoveClassPrefix returns the unprefixed classID. +// When the receiving chain handles the packet and isAwayFromOrigin=false, the nft is moving +// back toward its origin chain, so the portID/channelID prefix added by the sending chain +// must be removed from trace.path +func RemoveClassPrefix(portID, channelID, classID string) string { + classPrefix := GetClassPrefix(portID, channelID) + return classID[len(classPrefix):] +} + +// IsAwayFromOrigin determines whether a non-fungible token is moving away from +// the origin chain (the chain that issued the native nft). +// Note that fullClassPath refers to the full path of the unencoded classID. +// The longer the fullClassPath, the farther the token is from the origin chain +func IsAwayFromOrigin(sourcePort, sourceChannel, fullClassPath string) bool { + prefixClassID := GetClassPrefix(sourcePort, sourceChannel) + if !strings.HasPrefix(fullClassPath, prefixClassID) { + return true + } + return fullClassPath[:len(prefixClassID)] != prefixClassID +} + +// ParseClassTrace parses a string with the ibc prefix (class trace) and the base classID +// into a ClassTrace type.
+// +// Examples: +// +// - "port-1/channel-1/class-1" => ClassTrace{Path: "port-1/channel-1", BaseClassId: "class-1"} +// - "class-1" => ClassTrace{Path: "", BaseClassId: "class-1"} +func ParseClassTrace(rawClassID string) ClassTrace { + classSplit := strings.Split(rawClassID, "/") + + if classSplit[0] == rawClassID { + return ClassTrace{ + Path: "", + BaseClassId: rawClassID, + } + } + + return ClassTrace{ + Path: strings.Join(classSplit[:len(classSplit)-1], "/"), + BaseClassId: classSplit[len(classSplit)-1], + } +} + +// GetFullClassPath returns the full classId according to the ICS721 specification: +// tracePath + "/" + BaseClassId +// If there is no trace, the BaseClassId is returned. +func (ct ClassTrace) GetFullClassPath() string { + if ct.Path == "" { + return ct.BaseClassId + } + return ct.GetPrefix() + ct.BaseClassId +} + +// GetPrefix returns the receiving classId prefix composed by the trace info and a separator. +func (ct ClassTrace) GetPrefix() string { + return ct.Path + "/" +} + +// Hash returns the hex bytes of the SHA256 hash of the ClassTrace fields using the following formula: +// +// hash = sha256(tracePath + "/" + baseClassId) +func (ct ClassTrace) Hash() tmbytes.HexBytes { + hash := sha256.Sum256([]byte(ct.GetFullClassPath())) + return hash[:] +} + +// IBCClassID returns a classID for an ICS721 non-fungible token in the format +// 'ibc/{hash(tracePath + BaseClassId)}'. If the trace is empty, it will return the base classID. +func (ct ClassTrace) IBCClassID() string { + if ct.Path != "" { + return fmt.Sprintf("%s/%s", ClassPrefix, ct.Hash()) + } + return ct.BaseClassId +} + +// Validate performs a basic validation of the ClassTrace fields. +func (ct ClassTrace) Validate() error { + // empty trace is accepted when token lives on the original chain + switch { + case ct.Path == "" && ct.BaseClassId != "": + return nil + case strings.TrimSpace(ct.BaseClassId) == "": + return fmt.Errorf("base class_id cannot be blank") + } + + // NOTE: no base class validation + + identifiers := strings.Split(ct.Path, "/") + return validateTraceIdentifiers(identifiers) +} + +func validateTraceIdentifiers(identifiers []string) error { + if len(identifiers) == 0 || len(identifiers)%2 != 0 { + return fmt.Errorf("trace info must come in pairs of port and channel identifiers '{portID}/{channelID}', got the identifiers: %s", identifiers) + } + + // validate correctness of port and channel identifiers + for i := 0; i < len(identifiers); i += 2 { + if err := host.PortIdentifierValidator(identifiers[i]); err != nil { + return sdkerrors.Wrapf(err, "invalid port ID at position %d", i) + } + if err := host.ChannelIdentifierValidator(identifiers[i+1]); err != nil { + return sdkerrors.Wrapf(err, "invalid channel ID at position %d", i) + } + } + return nil +} + +// Traces defines a wrapper type for a slice of ClassTrace. +type Traces []ClassTrace + +// Validate performs a basic validation of each class trace info.
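+// Duplicate entries are detected by comparing the SHA256 hash of each full class path.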
+func (t Traces) Validate() error { + seenTraces := make(map[string]bool) + for i, trace := range t { + hash := trace.Hash().String() + if seenTraces[hash] { + return fmt.Errorf("duplicated class trace with hash %s", trace.Hash()) + } + + if err := trace.Validate(); err != nil { + return sdkerrors.Wrapf(err, "failed class trace %d validation", i) + } + seenTraces[hash] = true + } + return nil +} + +var _ sort.Interface = Traces{} + +// Len implements sort.Interface for Traces +func (t Traces) Len() int { return len(t) } + +// Less implements sort.Interface for Traces +func (t Traces) Less(i, j int) bool { return t[i].GetFullClassPath() < t[j].GetFullClassPath() } + +// Swap implements sort.Interface for Traces +func (t Traces) Swap(i, j int) { t[i], t[j] = t[j], t[i] } + +// Sort is a helper function to sort the set of class traces in-place +func (t Traces) Sort() Traces { + sort.Sort(t) + return t +} diff --git a/x/nft-transfer/types/trace.pb.go b/x/nft-transfer/types/trace.pb.go new file mode 100644 index 000000000..45b4c2b7e --- /dev/null +++ b/x/nft-transfer/types/trace.pb.go @@ -0,0 +1,373 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: nft_transfer/v1/trace.proto + +package types + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// ClassTrace contains the base classID for ICS721 non-fungible tokens and the +// source tracing information path. +type ClassTrace struct { + // path defines the chain of port/channel identifiers used for tracing the + // source of the non-fungible token. + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + // base classID of the relayed non-fungible token.
+ BaseClassId string `protobuf:"bytes,2,opt,name=base_class_id,json=baseClassId,proto3" json:"base_class_id,omitempty"` +} + +func (m *ClassTrace) Reset() { *m = ClassTrace{} } +func (m *ClassTrace) String() string { return proto.CompactTextString(m) } +func (*ClassTrace) ProtoMessage() {} +func (*ClassTrace) Descriptor() ([]byte, []int) { + return fileDescriptor_f4e6ac472424735f, []int{0} +} +func (m *ClassTrace) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClassTrace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ClassTrace.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ClassTrace) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClassTrace.Merge(m, src) +} +func (m *ClassTrace) XXX_Size() int { + return m.Size() +} +func (m *ClassTrace) XXX_DiscardUnknown() { + xxx_messageInfo_ClassTrace.DiscardUnknown(m) +} + +var xxx_messageInfo_ClassTrace proto.InternalMessageInfo + +func (m *ClassTrace) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +func (m *ClassTrace) GetBaseClassId() string { + if m != nil { + return m.BaseClassId + } + return "" +} + +func init() { + proto.RegisterType((*ClassTrace)(nil), "chainmain.nft_transfer.v1.ClassTrace") +} + +func init() { proto.RegisterFile("nft_transfer/v1/trace.proto", fileDescriptor_f4e6ac472424735f) } + +var fileDescriptor_f4e6ac472424735f = []byte{ + // 204 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xce, 0x4b, 0x2b, 0x89, + 0x2f, 0x29, 0x4a, 0xcc, 0x2b, 0x4e, 0x4b, 0x2d, 0xd2, 0x2f, 0x33, 0xd4, 0x2f, 0x29, 0x4a, 0x4c, + 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x92, 0x4c, 0xce, 0x48, 0xcc, 0xcc, 0xcb, 0x4d, + 0xcc, 0xcc, 0xd3, 0x43, 0x56, 0xa6, 0x57, 0x66, 0xa8, 0xe4, 0xc2, 0xc5, 0xe5, 0x9c, 0x93, 0x58, + 0x5c, 0x1c, 0x02, 0x52, 0x2e, 0x24, 0xc4, 0xc5, 0x52, 0x90, 0x58, 0x92, 0x21, 0xc1, 0xa8, 0xc0, + 0xa8, 0xc1, 0x19, 0x04, 0x66, 0x0b, 0x29, 0x71, 0xf1, 0x26, 0x25, 0x16, 0xa7, 0xc6, 0x27, 0x83, + 0x94, 0xc5, 0x67, 0xa6, 0x48, 0x30, 0x81, 0x25, 0xb9, 0x41, 0x82, 0x60, 0xad, 0x9e, 0x29, 0x4e, + 0xa1, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x84, 0xc7, + 0x72, 0x0c, 0x17, 0x1e, 0xcb, 0x31, 0xdc, 0x78, 0x2c, 0xc7, 0x10, 0x65, 0x9d, 0x9e, 0x59, 0x92, + 0x51, 0x9a, 0xa4, 0x97, 0x9c, 0x9f, 0xab, 0x9f, 0x5c, 0x54, 0x59, 0x50, 0x92, 0xaf, 0x9b, 0x5f, + 0x94, 0xae, 0x0b, 0x76, 0x90, 0x3e, 0x98, 0xd4, 0x05, 0xb9, 0x4b, 0xbf, 0x42, 0x3f, 0x2f, 0xad, + 0x44, 0x17, 0xee, 0x81, 0x92, 0xca, 0x82, 0xd4, 0xe2, 0x24, 0x36, 0xb0, 0xf3, 0x8d, 0x01, 0x01, + 0x00, 0x00, 0xff, 0xff, 0x8d, 0xf5, 0xbd, 0xd5, 0xdd, 0x00, 0x00, 0x00, +} + +func (m *ClassTrace) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClassTrace) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClassTrace) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.BaseClassId) > 0 { + i -= len(m.BaseClassId) + copy(dAtA[i:], m.BaseClassId) + i = encodeVarintTrace(dAtA, i, uint64(len(m.BaseClassId))) + i-- + dAtA[i] = 0x12 + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], 
m.Path) + i = encodeVarintTrace(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintTrace(dAtA []byte, offset int, v uint64) int { + offset -= sovTrace(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ClassTrace) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + sovTrace(uint64(l)) + } + l = len(m.BaseClassId) + if l > 0 { + n += 1 + l + sovTrace(uint64(l)) + } + return n +} + +func sovTrace(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTrace(x uint64) (n int) { + return sovTrace(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ClassTrace) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClassTrace: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClassTrace: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTrace + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTrace + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BaseClassId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTrace + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTrace + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BaseClassId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTrace(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTrace + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTrace(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTrace + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 
0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTrace + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTrace + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTrace + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTrace + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTrace + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTrace = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTrace = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTrace = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/nft-transfer/types/trace_test.go b/x/nft-transfer/types/trace_test.go new file mode 100644 index 000000000..d973dc38c --- /dev/null +++ b/x/nft-transfer/types/trace_test.go @@ -0,0 +1,78 @@ +package types_test + +import ( + "reflect" + "testing" + + "github.com/crypto-org-chain/chain-main/v4/x/nft-transfer/types" +) + +func TestIsAwayFromOrigin(t *testing.T) { + type args struct { + sourcePort string + sourceChannel string + fullClassPath string + } + tests := []struct { + name string + args args + want bool + }{ + {"transfer forward by origin chain", args{"p1", "c1", "kitty"}, true}, + {"transfer forward by relay chain", args{"p3", "c3", "p2/c2/kitty"}, true}, + {"transfer forward by relay chain", args{"p5", "c5", "p4/c4/p2/c2/kitty"}, true}, + {"transfer back by relay chain", args{"p6", "c6", "p6/c6/p4/c4/p2/c2/kitty"}, false}, + {"transfer back by relay chain", args{"p4", "c4", "p4/c4/p2/c2/kitty"}, false}, + {"transfer back by relay chain", args{"p2", "c2", "p2/c2/kitty"}, false}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := types.IsAwayFromOrigin(tt.args.sourcePort, tt.args.sourceChannel, tt.args.fullClassPath); got != tt.want { + t.Errorf("IsAwayFromOrigin() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestParseClassTrace(t *testing.T) { + type args struct { + rawClassID string + } + tests := []struct { + name string + args args + want types.ClassTrace + }{ + {"native class", args{"kitty"}, types.ClassTrace{Path: "", BaseClassId: "kitty"}}, + {"transfer to (p2,c2)", args{"p2/c2/kitty"}, types.ClassTrace{Path: "p2/c2", BaseClassId: "kitty"}}, + {"transfer to (p4,c4)", args{"p4/c4/p2/c2/kitty"}, types.ClassTrace{Path: "p4/c4/p2/c2", BaseClassId: "kitty"}}, + {"transfer to (p6,c6)", args{"p6/c6/p4/c4/p2/c2/kitty"}, types.ClassTrace{Path: "p6/c6/p4/c4/p2/c2", BaseClassId: "kitty"}}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := types.ParseClassTrace(tt.args.rawClassID); !reflect.DeepEqual(got, tt.want) { + t.Errorf("ParseClassTrace() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestClassTrace_GetFullClassPath(t *testing.T) { + tests := []struct { + name string + ct types.ClassTrace + want string + }{ + {"native class", types.ClassTrace{Path: "", BaseClassId: "kitty"}, "kitty"}, + {"first tranfer", 
types.ClassTrace{Path: "p2/c2", BaseClassId: "kitty"}, "p2/c2/kitty"}, + {"second tranfer", types.ClassTrace{Path: "p4/c4/p2/c2", BaseClassId: "kitty"}, "p4/c4/p2/c2/kitty"}, + {"third tranfer", types.ClassTrace{Path: "p6/c6/p4/c4/p2/c2", BaseClassId: "kitty"}, "p6/c6/p4/c4/p2/c2/kitty"}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := tt.ct.GetFullClassPath(); got != tt.want { + t.Errorf("ClassTrace.GetFullClassPath() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/x/nft-transfer/types/tx.pb.go b/x/nft-transfer/types/tx.pb.go new file mode 100644 index 000000000..9b053b91c --- /dev/null +++ b/x/nft-transfer/types/tx.pb.go @@ -0,0 +1,847 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: nft_transfer/v1/tx.proto + +package types + +import ( + context "context" + fmt "fmt" + types "github.com/cosmos/ibc-go/v5/modules/core/02-client/types" + _ "github.com/gogo/protobuf/gogoproto" + grpc1 "github.com/gogo/protobuf/grpc" + proto "github.com/gogo/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// MsgTransfer defines a msg to transfer non fungible tokens between +// ICS721 enabled chains. See ICS Spec here: +// https://github.com/cosmos/ibc/tree/master/spec/app/ics-721-nft-transfer#data-structures +type MsgTransfer struct { + // the port on which the packet will be sent + SourcePort string `protobuf:"bytes,1,opt,name=source_port,json=sourcePort,proto3" json:"source_port,omitempty" yaml:"source_port"` + // the channel by which the packet will be sent + SourceChannel string `protobuf:"bytes,2,opt,name=source_channel,json=sourceChannel,proto3" json:"source_channel,omitempty" yaml:"source_channel"` + // the class_id of tokens to be transferred + ClassId string `protobuf:"bytes,3,opt,name=class_id,json=classId,proto3" json:"class_id,omitempty"` + // the non fungible tokens to be transferred + TokenIds []string `protobuf:"bytes,4,rep,name=token_ids,json=tokenIds,proto3" json:"token_ids,omitempty"` + // the sender address + Sender string `protobuf:"bytes,5,opt,name=sender,proto3" json:"sender,omitempty"` + // the recipient address on the destination chain + Receiver string `protobuf:"bytes,6,opt,name=receiver,proto3" json:"receiver,omitempty"` + // Timeout height relative to the current block height. + // The timeout is disabled when set to 0. + TimeoutHeight types.Height `protobuf:"bytes,7,opt,name=timeout_height,json=timeoutHeight,proto3" json:"timeout_height" yaml:"timeout_height"` + // Timeout timestamp in absolute nanoseconds since unix epoch. + // The timeout is disabled when set to 0. 
+ TimeoutTimestamp uint64 `protobuf:"varint,8,opt,name=timeout_timestamp,json=timeoutTimestamp,proto3" json:"timeout_timestamp,omitempty" yaml:"timeout_timestamp"` +} + +func (m *MsgTransfer) Reset() { *m = MsgTransfer{} } +func (m *MsgTransfer) String() string { return proto.CompactTextString(m) } +func (*MsgTransfer) ProtoMessage() {} +func (*MsgTransfer) Descriptor() ([]byte, []int) { + return fileDescriptor_fa0f686d4218285d, []int{0} +} +func (m *MsgTransfer) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgTransfer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgTransfer.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgTransfer) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgTransfer.Merge(m, src) +} +func (m *MsgTransfer) XXX_Size() int { + return m.Size() +} +func (m *MsgTransfer) XXX_DiscardUnknown() { + xxx_messageInfo_MsgTransfer.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgTransfer proto.InternalMessageInfo + +// MsgTransferResponse defines the Msg/Transfer response type. +type MsgTransferResponse struct { +} + +func (m *MsgTransferResponse) Reset() { *m = MsgTransferResponse{} } +func (m *MsgTransferResponse) String() string { return proto.CompactTextString(m) } +func (*MsgTransferResponse) ProtoMessage() {} +func (*MsgTransferResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_fa0f686d4218285d, []int{1} +} +func (m *MsgTransferResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgTransferResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgTransferResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgTransferResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgTransferResponse.Merge(m, src) +} +func (m *MsgTransferResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgTransferResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgTransferResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgTransferResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*MsgTransfer)(nil), "chainmain.nft_transfer.v1.MsgTransfer") + proto.RegisterType((*MsgTransferResponse)(nil), "chainmain.nft_transfer.v1.MsgTransferResponse") +} + +func init() { proto.RegisterFile("nft_transfer/v1/tx.proto", fileDescriptor_fa0f686d4218285d) } + +var fileDescriptor_fa0f686d4218285d = []byte{ + // 476 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x52, 0xbd, 0x8e, 0xd3, 0x30, + 0x1c, 0x4f, 0xe8, 0xd1, 0x4b, 0x5d, 0xdd, 0x09, 0x0c, 0x77, 0x72, 0x0b, 0x24, 0x55, 0x06, 0xd4, + 0xa5, 0x8e, 0x7a, 0x0c, 0x48, 0xc7, 0x82, 0xca, 0x42, 0x87, 0x93, 0x50, 0x74, 0x2c, 0x2c, 0x25, + 0x75, 0xdc, 0xc4, 0xa2, 0xb1, 0x23, 0xdb, 0xad, 0xae, 0x6f, 0xc0, 0xc8, 0x23, 0xdc, 0xce, 0x8b, + 0xdc, 0x78, 0x23, 0x53, 0x85, 0xda, 0x85, 0xb9, 0x4f, 0x80, 0xe2, 0xa4, 0xa5, 0x1d, 0x90, 0x6e, + 0x49, 0xfc, 0xfb, 0xb4, 0x6c, 0xff, 0x01, 0xe2, 0x13, 0x3d, 0xd2, 0x32, 0xe2, 0x6a, 0x42, 0x65, + 0x30, 0xef, 0x07, 0xfa, 0x06, 0xe7, 0x52, 0x68, 0x01, 0x5b, 0x24, 0x8d, 0x18, 0xcf, 0x22, 0xc6, + 0xf1, 0xbe, 0x07, 0xcf, 0xfb, 0xed, 0xe7, 0x89, 0x48, 0x84, 0x71, 0x05, 0xc5, 0xaa, 0x0c, 0xb4, + 0x3d, 0x36, 0x26, 
0x01, 0x11, 0x92, 0x06, 0x64, 0xca, 0x28, 0xd7, 0x45, 0x5b, 0xb9, 0x2a, 0x0d, + 0xfe, 0xcf, 0x1a, 0x68, 0x5e, 0xa9, 0xe4, 0xba, 0x6a, 0x82, 0x6f, 0x41, 0x53, 0x89, 0x99, 0x24, + 0x74, 0x94, 0x0b, 0xa9, 0x91, 0xdd, 0xb1, 0xbb, 0x8d, 0xc1, 0xf9, 0x66, 0xe9, 0xc1, 0x45, 0x94, + 0x4d, 0x2f, 0xfd, 0x3d, 0xd1, 0x0f, 0x41, 0x89, 0x3e, 0x09, 0xa9, 0xe1, 0x7b, 0x70, 0x5a, 0x69, + 0x24, 0x8d, 0x38, 0xa7, 0x53, 0xf4, 0xc8, 0x64, 0x5b, 0x9b, 0xa5, 0x77, 0x76, 0x90, 0xad, 0x74, + 0x3f, 0x3c, 0x29, 0x89, 0x0f, 0x25, 0x86, 0x2d, 0xe0, 0x90, 0x69, 0xa4, 0xd4, 0x88, 0xc5, 0xa8, + 0x56, 0x64, 0xc3, 0x63, 0x83, 0x87, 0x31, 0x7c, 0x01, 0x1a, 0x5a, 0x7c, 0xa3, 0x7c, 0xc4, 0x62, + 0x85, 0x8e, 0x3a, 0xb5, 0x6e, 0x23, 0x74, 0x0c, 0x31, 0x8c, 0x15, 0x3c, 0x07, 0x75, 0x45, 0x79, + 0x4c, 0x25, 0x7a, 0x6c, 0x52, 0x15, 0x82, 0x6d, 0xe0, 0x48, 0x4a, 0x28, 0x9b, 0x53, 0x89, 0xea, + 0x46, 0xd9, 0x61, 0xf8, 0x15, 0x9c, 0x6a, 0x96, 0x51, 0x31, 0xd3, 0xa3, 0x94, 0xb2, 0x24, 0xd5, + 0xe8, 0xb8, 0x63, 0x77, 0x9b, 0x17, 0x6d, 0xcc, 0xc6, 0x04, 0x17, 0x17, 0x86, 0xab, 0x6b, 0x9a, + 0xf7, 0xf1, 0x47, 0xe3, 0x18, 0xbc, 0xba, 0x5b, 0x7a, 0xd6, 0xbf, 0xd3, 0x1c, 0xe6, 0xfd, 0xf0, + 0xa4, 0x22, 0x4a, 0x37, 0x1c, 0x82, 0xa7, 0x5b, 0x47, 0xf1, 0x57, 0x3a, 0xca, 0x72, 0xe4, 0x74, + 0xec, 0xee, 0xd1, 0xe0, 0xe5, 0x66, 0xe9, 0xa1, 0xc3, 0x92, 0x9d, 0xc5, 0x0f, 0x9f, 0x54, 0xdc, + 0xf5, 0x96, 0xba, 0x74, 0xbe, 0xdf, 0x7a, 0xd6, 0x9f, 0x5b, 0xcf, 0xf2, 0xcf, 0xc0, 0xb3, 0xbd, + 0xc7, 0x0a, 0xa9, 0xca, 0x05, 0x57, 0xf4, 0x82, 0x81, 0xda, 0x95, 0x4a, 0xe0, 0x18, 0x38, 0xbb, + 0x77, 0x7c, 0x8d, 0xff, 0x3b, 0x2a, 0x78, 0xaf, 0xa2, 0x8d, 0x1f, 0xe6, 0xdb, 0x6e, 0x35, 0xf8, + 0x7c, 0xb7, 0x72, 0xed, 0xfb, 0x95, 0x6b, 0xff, 0x5e, 0xb9, 0xf6, 0x8f, 0xb5, 0x6b, 0xdd, 0xaf, + 0x5d, 0xeb, 0xd7, 0xda, 0xb5, 0xbe, 0xbc, 0x4b, 0x98, 0x4e, 0x67, 0x63, 0x4c, 0x44, 0x16, 0x10, + 0xb9, 0xc8, 0xb5, 0xe8, 0x09, 0x99, 0xf4, 0x4c, 0x7d, 0x60, 0xbe, 0xbd, 0x62, 0x97, 0xe0, 0x26, + 0xe0, 0x13, 0xdd, 0xdb, 0x8d, 0xb7, 0x5e, 0xe4, 0x54, 0x8d, 0xeb, 0x66, 0x1a, 0xdf, 0xfc, 0x0d, + 0x00, 0x00, 0xff, 0xff, 0x12, 0x0d, 0xd1, 0x9e, 0xfb, 0x02, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// MsgClient is the client API for Msg service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type MsgClient interface { + // Transfer defines a rpc handler method for MsgTransfer. + Transfer(ctx context.Context, in *MsgTransfer, opts ...grpc.CallOption) (*MsgTransferResponse, error) +} + +type msgClient struct { + cc grpc1.ClientConn +} + +func NewMsgClient(cc grpc1.ClientConn) MsgClient { + return &msgClient{cc} +} + +func (c *msgClient) Transfer(ctx context.Context, in *MsgTransfer, opts ...grpc.CallOption) (*MsgTransferResponse, error) { + out := new(MsgTransferResponse) + err := c.cc.Invoke(ctx, "/chainmain.nft_transfer.v1.Msg/Transfer", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// MsgServer is the server API for Msg service. +type MsgServer interface { + // Transfer defines a rpc handler method for MsgTransfer. 
+ Transfer(context.Context, *MsgTransfer) (*MsgTransferResponse, error) +} + +// UnimplementedMsgServer can be embedded to have forward compatible implementations. +type UnimplementedMsgServer struct { +} + +func (*UnimplementedMsgServer) Transfer(ctx context.Context, req *MsgTransfer) (*MsgTransferResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Transfer not implemented") +} + +func RegisterMsgServer(s grpc1.Server, srv MsgServer) { + s.RegisterService(&_Msg_serviceDesc, srv) +} + +func _Msg_Transfer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgTransfer) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).Transfer(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/chainmain.nft_transfer.v1.Msg/Transfer", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).Transfer(ctx, req.(*MsgTransfer)) + } + return interceptor(ctx, in, info, handler) +} + +var _Msg_serviceDesc = grpc.ServiceDesc{ + ServiceName: "chainmain.nft_transfer.v1.Msg", + HandlerType: (*MsgServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Transfer", + Handler: _Msg_Transfer_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "nft_transfer/v1/tx.proto", +} + +func (m *MsgTransfer) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgTransfer) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgTransfer) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.TimeoutTimestamp != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.TimeoutTimestamp)) + i-- + dAtA[i] = 0x40 + } + { + size, err := m.TimeoutHeight.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + if len(m.Receiver) > 0 { + i -= len(m.Receiver) + copy(dAtA[i:], m.Receiver) + i = encodeVarintTx(dAtA, i, uint64(len(m.Receiver))) + i-- + dAtA[i] = 0x32 + } + if len(m.Sender) > 0 { + i -= len(m.Sender) + copy(dAtA[i:], m.Sender) + i = encodeVarintTx(dAtA, i, uint64(len(m.Sender))) + i-- + dAtA[i] = 0x2a + } + if len(m.TokenIds) > 0 { + for iNdEx := len(m.TokenIds) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.TokenIds[iNdEx]) + copy(dAtA[i:], m.TokenIds[iNdEx]) + i = encodeVarintTx(dAtA, i, uint64(len(m.TokenIds[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.ClassId) > 0 { + i -= len(m.ClassId) + copy(dAtA[i:], m.ClassId) + i = encodeVarintTx(dAtA, i, uint64(len(m.ClassId))) + i-- + dAtA[i] = 0x1a + } + if len(m.SourceChannel) > 0 { + i -= len(m.SourceChannel) + copy(dAtA[i:], m.SourceChannel) + i = encodeVarintTx(dAtA, i, uint64(len(m.SourceChannel))) + i-- + dAtA[i] = 0x12 + } + if len(m.SourcePort) > 0 { + i -= len(m.SourcePort) + copy(dAtA[i:], m.SourcePort) + i = encodeVarintTx(dAtA, i, uint64(len(m.SourcePort))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgTransferResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*MsgTransferResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgTransferResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func encodeVarintTx(dAtA []byte, offset int, v uint64) int { + offset -= sovTx(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *MsgTransfer) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SourcePort) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.SourceChannel) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.ClassId) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + if len(m.TokenIds) > 0 { + for _, s := range m.TokenIds { + l = len(s) + n += 1 + l + sovTx(uint64(l)) + } + } + l = len(m.Sender) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.Receiver) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = m.TimeoutHeight.Size() + n += 1 + l + sovTx(uint64(l)) + if m.TimeoutTimestamp != 0 { + n += 1 + sovTx(uint64(m.TimeoutTimestamp)) + } + return n +} + +func (m *MsgTransferResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func sovTx(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTx(x uint64) (n int) { + return sovTx(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *MsgTransfer) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgTransfer: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgTransfer: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourcePort", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SourcePort = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceChannel", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SourceChannel = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 
3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClassId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClassId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TokenIds", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TokenIds = append(m.TokenIds, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sender", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sender = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Receiver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Receiver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutHeight", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TimeoutHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutTimestamp", wireType) + } + m.TimeoutTimestamp = 0 + for shift := uint(0); ; shift += 7 { + 
if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TimeoutTimestamp |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgTransferResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgTransferResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgTransferResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTx(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTx + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTx + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTx + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTx + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTx + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTx + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTx = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTx = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTx = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/nft/client/cli/flags.go b/x/nft/client/cli/flags.go index 335d4c492..c841694fc 100644 --- a/x/nft/client/cli/flags.go +++ b/x/nft/client/cli/flags.go @@ -16,6 +16,7 @@ const ( FlagDenomName = "name" FlagDenomID = "denom-id" FlagSchema = "schema" + FlagDenomURI = "uri" ) var ( @@ -30,6 +31,7 @@ var ( func init() { FsIssueDenom.String(FlagSchema, "", "Denom data structure definition") 
FsIssueDenom.String(FlagDenomName, "", "The name of the denom") + FsIssueDenom.String(FlagDenomURI, "", "URI of the denom") FsMintNFT.String(FlagTokenURI, "", "URI for supplemental off-chain tokenData (should return a JSON object)") FsMintNFT.String(FlagRecipient, "", "Receiver of the nft, if not filled, the default is the sender of the transaction") diff --git a/x/nft/client/cli/tx.go b/x/nft/client/cli/tx.go index 10fd5d000..6ac40dc5c 100644 --- a/x/nft/client/cli/tx.go +++ b/x/nft/client/cli/tx.go @@ -49,6 +49,7 @@ func GetCmdIssueDenom() *cobra.Command { "--from= "+ "--name= "+ "--schema= "+ + "--uri= "+ "--chain-id= "+ "--fees=", version.AppName, @@ -68,6 +69,10 @@ func GetCmdIssueDenom() *cobra.Command { if err != nil { return err } + uri, err := cmd.Flags().GetString(FlagDenomURI) + if err != nil { + return err + } optionsContent, err := ioutil.ReadFile(schema) if err == nil { schema = string(optionsContent) @@ -77,6 +82,7 @@ func GetCmdIssueDenom() *cobra.Command { args[0], denomName, schema, + uri, clientCtx.GetFromAddress().String(), ) if err := msg.ValidateBasic(); err != nil { diff --git a/x/nft/keeper/keeper.go b/x/nft/keeper/keeper.go index dfd84dcd5..d5f628c64 100644 --- a/x/nft/keeper/keeper.go +++ b/x/nft/keeper/keeper.go @@ -36,9 +36,9 @@ func (k Keeper) Logger(ctx sdk.Context) log.Logger { // IssueDenom issues a denom according to the given params func (k Keeper) IssueDenom(ctx sdk.Context, - id, name, schema string, + id, name, schema, uri string, creator sdk.AccAddress) error { - return k.SetDenom(ctx, types.NewDenom(id, name, schema, creator)) + return k.SetDenom(ctx, types.NewDenom(id, name, schema, uri, creator)) } // MintNFTUnverified mints an NFT without verifying if the owner is the creator of denom @@ -159,3 +159,22 @@ func (k Keeper) BurnNFT(ctx sdk.Context, denomID, tokenID string, owner sdk.AccA return nil } + +// BurnNFTUnverified deletes a specified NFT without verifying if the owner is the creator of denom +// Needed for IBC transfer of NFT +func (k Keeper) BurnNFTUnverified(ctx sdk.Context, denomID, tokenID string, owner sdk.AccAddress) error { + if !k.HasDenomID(ctx, denomID) { + return sdkerrors.Wrapf(types.ErrInvalidDenom, "denom ID %s not exists", denomID) + } + + nft, err := k.IsOwner(ctx, denomID, tokenID, owner) + if err != nil { + return err + } + + k.deleteNFT(ctx, denomID, nft) + k.deleteOwner(ctx, denomID, tokenID, owner) + k.decreaseSupply(ctx, denomID) + + return nil +} diff --git a/x/nft/keeper/keeper_test.go b/x/nft/keeper/keeper_test.go index ef21bd1da..712de4c6c 100644 --- a/x/nft/keeper/keeper_test.go +++ b/x/nft/keeper/keeper_test.go @@ -73,11 +73,11 @@ func (suite *KeeperSuite) SetupTest() { types.RegisterQueryServer(queryHelper, app.NFTKeeper) suite.queryClient = types.NewQueryClient(queryHelper) - err := suite.keeper.IssueDenom(suite.ctx, denomID, denomNm, schema, address) + err := suite.keeper.IssueDenom(suite.ctx, denomID, denomNm, schema, "", address) suite.NoError(err) // MintNFT shouldn't fail when collection does not exist - err = suite.keeper.IssueDenom(suite.ctx, denomID2, denomNm2, schema, address) + err = suite.keeper.IssueDenom(suite.ctx, denomID2, denomNm2, schema, "", address) suite.NoError(err) // collections should equal 1 diff --git a/x/nft/keeper/msg_server.go b/x/nft/keeper/msg_server.go index a13bc6a96..613b39fba 100644 --- a/x/nft/keeper/msg_server.go +++ b/x/nft/keeper/msg_server.go @@ -30,7 +30,7 @@ func (m msgServer) IssueDenom(goCtx context.Context, msg *types.MsgIssueDenom) ( } ctx := 
sdk.UnwrapSDKContext(goCtx) - if err := m.Keeper.IssueDenom(ctx, msg.Id, msg.Name, msg.Schema, sender); err != nil { + if err := m.Keeper.IssueDenom(ctx, msg.Id, msg.Name, msg.Schema, "", sender); err != nil { return nil, err } diff --git a/x/nft/types/denom.go b/x/nft/types/denom.go index 09b30587b..764391309 100644 --- a/x/nft/types/denom.go +++ b/x/nft/types/denom.go @@ -7,11 +7,12 @@ import ( ) // NewDenom return a new denom -func NewDenom(id, name, schema string, creator sdk.AccAddress) Denom { +func NewDenom(id, name, schema string, uri string, creator sdk.AccAddress) Denom { return Denom{ Id: id, Name: name, Schema: schema, Creator: creator.String(), + Uri: uri, } } diff --git a/x/nft/types/msgs.go b/x/nft/types/msgs.go index 0d7e8c392..fd776ed15 100644 --- a/x/nft/types/msgs.go +++ b/x/nft/types/msgs.go @@ -25,12 +25,13 @@ var ( ) // NewMsgIssueDenom is a constructor function for MsgSetName -func NewMsgIssueDenom(denomID, denomName, schema, sender string) *MsgIssueDenom { +func NewMsgIssueDenom(denomID, denomName, schema, uri, sender string) *MsgIssueDenom { return &MsgIssueDenom{ Sender: sender, Id: denomID, Name: denomName, Schema: schema, + Uri: uri, } } diff --git a/x/nft/types/nft.pb.go b/x/nft/types/nft.pb.go index e846540ce..ea1251935 100644 --- a/x/nft/types/nft.pb.go +++ b/x/nft/types/nft.pb.go @@ -71,6 +71,7 @@ type Denom struct { Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` Schema string `protobuf:"bytes,3,opt,name=schema,proto3" json:"schema,omitempty"` Creator string `protobuf:"bytes,4,opt,name=creator,proto3" json:"creator,omitempty"` + Uri string `protobuf:"bytes,5,opt,name=uri,proto3" json:"uri,omitempty"` } func (m *Denom) Reset() { *m = Denom{} } @@ -234,38 +235,38 @@ func init() { func init() { proto.RegisterFile("nft/v1/nft.proto", fileDescriptor_f935ea002f215618) } var fileDescriptor_f935ea002f215618 = []byte{ - // 485 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xbf, 0x6e, 0xdb, 0x30, - 0x10, 0xc6, 0x25, 0x5b, 0x8a, 0x63, 0x3a, 0x49, 0x0d, 0xd6, 0x68, 0x95, 0x0e, 0x52, 0x20, 0x74, + // 492 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x31, 0x6e, 0xdb, 0x30, + 0x14, 0x86, 0x2d, 0x5b, 0x8e, 0x63, 0x3a, 0x49, 0x0d, 0xd6, 0x68, 0x95, 0x0e, 0x52, 0x20, 0x74, 0xc8, 0x62, 0x09, 0x76, 0xb6, 0x74, 0x53, 0x83, 0x00, 0x5e, 0xd2, 0x42, 0x48, 0x97, 0x2e, 0x06, - 0x23, 0xd2, 0x36, 0x11, 0x8b, 0x34, 0x44, 0xe6, 0x8f, 0xf7, 0x3e, 0x40, 0x87, 0x3e, 0x40, 0x1f, - 0xc7, 0x63, 0xc6, 0x4e, 0x42, 0x2b, 0x2f, 0x9d, 0xfd, 0x04, 0x05, 0x29, 0x29, 0x30, 0xea, 0x25, - 0x8b, 0x70, 0xf7, 0xdd, 0x1d, 0x7e, 0xdf, 0x89, 0x07, 0xba, 0x6c, 0x22, 0xc3, 0xfb, 0x41, 0xc8, - 0x26, 0x32, 0x58, 0x64, 0x5c, 0x72, 0xd8, 0x4d, 0x66, 0x88, 0xb2, 0x14, 0x51, 0x16, 0x28, 0xf1, - 0x7e, 0xf0, 0xae, 0x37, 0xe5, 0x53, 0xae, 0x8b, 0xa1, 0x8a, 0xca, 0x3e, 0xff, 0x11, 0xb4, 0x22, - 0x24, 0xc8, 0xd5, 0xe5, 0x35, 0x3c, 0x02, 0x0d, 0x8a, 0x1d, 0xf3, 0xc4, 0x3c, 0x6d, 0xc7, 0x0d, - 0x8a, 0x21, 0x04, 0x16, 0x43, 0x29, 0x71, 0x1a, 0x5a, 0xd1, 0x31, 0x3c, 0x06, 0xcd, 0xbb, 0x8c, - 0x3a, 0x4d, 0x25, 0x45, 0xad, 0x22, 0xf7, 0x9a, 0x5f, 0xe2, 0x51, 0xac, 0x34, 0xd5, 0x8e, 0x91, - 0x44, 0x8e, 0x55, 0xb6, 0xab, 0x18, 0xf6, 0x80, 0xcd, 0x1f, 0x18, 0xc9, 0x1c, 0x5b, 0x8b, 0x65, - 0x72, 0x6e, 0xfd, 0xfd, 0xe9, 0x99, 0x7e, 0x02, 0xec, 0x0b, 0xc2, 0x78, 0xfa, 0x22, 0xee, 0x1b, - 0xb0, 0x27, 0x92, 0x19, 0x49, 0x51, 0x89, 0x8e, 0xab, 0x0c, 0x3a, 0xa0, 0x95, 0x64, 
0x04, 0x49, - 0x9e, 0x55, 0xdc, 0x3a, 0xad, 0x20, 0x0f, 0xe0, 0x60, 0x74, 0xf1, 0x91, 0xcf, 0xe7, 0x24, 0x91, - 0x94, 0x33, 0x18, 0x80, 0x7d, 0xac, 0xa0, 0xe3, 0x9a, 0x18, 0xbd, 0xde, 0xe4, 0xde, 0xab, 0x25, - 0x4a, 0xe7, 0xe7, 0x7e, 0x5d, 0xf1, 0xe3, 0x96, 0x0e, 0x47, 0x18, 0x0e, 0x40, 0x5b, 0xf2, 0x5b, - 0xc2, 0xc6, 0x14, 0x0b, 0xa7, 0x71, 0xd2, 0x3c, 0x6d, 0x47, 0xbd, 0x4d, 0xee, 0x75, 0xcb, 0x81, - 0xe7, 0x92, 0x1f, 0xef, 0xeb, 0x78, 0x84, 0x45, 0x05, 0xfe, 0x61, 0x02, 0xfb, 0x93, 0xda, 0x56, - 0x59, 0x44, 0x18, 0x67, 0x44, 0x88, 0x6a, 0xc7, 0x3a, 0x85, 0xb7, 0xe0, 0x88, 0xe2, 0x71, 0xf2, - 0xec, 0xae, 0x24, 0x74, 0x86, 0x6e, 0xf0, 0xff, 0xe3, 0x05, 0xdb, 0x4b, 0x44, 0xef, 0x57, 0xb9, - 0x67, 0x14, 0xb9, 0x77, 0xb8, 0xad, 0x8a, 0x4d, 0xee, 0x75, 0x4a, 0x5b, 0x14, 0x27, 0xc2, 0x8f, - 0x0f, 0x29, 0xde, 0xaa, 0x56, 0xb6, 0xbe, 0x99, 0x00, 0x6c, 0xfd, 0x8e, 0x33, 0x60, 0xeb, 0x4d, - 0xb5, 0xb3, 0xce, 0xf0, 0xed, 0x2e, 0x58, 0x3f, 0x51, 0x64, 0x29, 0x62, 0x5c, 0xf6, 0xc2, 0x0f, - 0xc0, 0x62, 0x13, 0x59, 0x9b, 0x3d, 0xde, 0x9d, 0xa9, 0x0e, 0x2a, 0x3a, 0xa8, 0x7c, 0x5a, 0x57, - 0x97, 0xd7, 0x22, 0xd6, 0x43, 0xa5, 0x8d, 0xe8, 0xf3, 0xea, 0x8f, 0x6b, 0xac, 0x0a, 0xd7, 0x7c, - 0x2a, 0x5c, 0xf3, 0x77, 0xe1, 0x9a, 0xdf, 0xd7, 0xae, 0xf1, 0xb4, 0x76, 0x8d, 0x5f, 0x6b, 0xd7, - 0xf8, 0x3a, 0x9c, 0x52, 0x39, 0xbb, 0xbb, 0x09, 0x12, 0x9e, 0x86, 0x49, 0xb6, 0x5c, 0x48, 0xde, - 0xe7, 0xd9, 0xb4, 0xaf, 0x39, 0xa1, 0xfe, 0xf6, 0x15, 0x2e, 0x7c, 0x54, 0xf7, 0x1e, 0xca, 0xe5, - 0x82, 0x88, 0x9b, 0x3d, 0x7d, 0xce, 0x67, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x36, 0xb0, 0x07, - 0xc9, 0x0a, 0x03, 0x00, 0x00, + 0x23, 0xd2, 0x36, 0x11, 0x8b, 0x34, 0x44, 0x3a, 0x89, 0xf7, 0x1e, 0xa0, 0x43, 0x0f, 0xd0, 0xe3, + 0x78, 0xcc, 0xd8, 0x49, 0x68, 0xe5, 0xa5, 0xb3, 0x4f, 0x50, 0x90, 0x94, 0x02, 0xa3, 0x59, 0xb2, + 0x18, 0xff, 0xfb, 0xdf, 0x23, 0xfe, 0xef, 0x99, 0x14, 0xe8, 0xb2, 0x89, 0x0c, 0xef, 0x06, 0x21, + 0x9b, 0xc8, 0x60, 0x91, 0x71, 0xc9, 0x61, 0x37, 0x99, 0x21, 0xca, 0x52, 0x44, 0x59, 0xa0, 0xcc, + 0xbb, 0xc1, 0xbb, 0xde, 0x94, 0x4f, 0xb9, 0x6e, 0x86, 0x4a, 0x99, 0x39, 0xff, 0x01, 0xb4, 0x22, + 0x24, 0xc8, 0xd5, 0xe5, 0x35, 0x3c, 0x02, 0x75, 0x8a, 0x1d, 0xeb, 0xc4, 0x3a, 0x6d, 0xc7, 0x75, + 0x8a, 0x21, 0x04, 0x36, 0x43, 0x29, 0x71, 0xea, 0xda, 0xd1, 0x1a, 0x1e, 0x83, 0xc6, 0x32, 0xa3, + 0x4e, 0x43, 0x59, 0x51, 0xab, 0xc8, 0xbd, 0xc6, 0x97, 0x78, 0x14, 0x2b, 0x4f, 0x8d, 0x63, 0x24, + 0x91, 0x63, 0x9b, 0x71, 0xa5, 0x61, 0x0f, 0x34, 0xf9, 0x3d, 0x23, 0x99, 0xd3, 0xd4, 0xa6, 0x29, + 0xce, 0xed, 0xbf, 0x3f, 0x3d, 0xcb, 0x5f, 0x82, 0xe6, 0x05, 0x61, 0x3c, 0x7d, 0x51, 0xee, 0x1b, + 0xb0, 0x27, 0x92, 0x19, 0x49, 0x91, 0x89, 0x8e, 0xcb, 0x0a, 0x3a, 0xa0, 0x95, 0x64, 0x04, 0x49, + 0x9e, 0x95, 0xb9, 0x55, 0x09, 0xbb, 0x86, 0xd4, 0x04, 0x2b, 0x59, 0xc6, 0xde, 0x83, 0x83, 0xd1, + 0xc5, 0x47, 0x3e, 0x9f, 0x93, 0x44, 0x52, 0xce, 0x60, 0x00, 0xf6, 0xb1, 0xc2, 0x18, 0x57, 0x0c, + 0xd1, 0xeb, 0x6d, 0xee, 0xbd, 0x5a, 0xa1, 0x74, 0x7e, 0xee, 0x57, 0x1d, 0x3f, 0x6e, 0x69, 0x39, + 0xc2, 0x70, 0x00, 0xda, 0x92, 0xdf, 0x12, 0x36, 0xa6, 0x58, 0x38, 0xf5, 0x93, 0xc6, 0x69, 0x3b, + 0xea, 0x6d, 0x73, 0xaf, 0x6b, 0x0e, 0x3c, 0xb5, 0xfc, 0x78, 0x5f, 0xeb, 0x11, 0x16, 0x65, 0xf0, + 0x0f, 0x0b, 0x34, 0x3f, 0xa9, 0xfd, 0x15, 0x34, 0xc2, 0x38, 0x23, 0x42, 0x94, 0x5b, 0x57, 0x25, + 0xbc, 0x05, 0x47, 0x14, 0x8f, 0x93, 0x27, 0x3a, 0x93, 0xd0, 0x19, 0xba, 0xc1, 0xff, 0xd7, 0x19, + 0xec, 0x2e, 0x11, 0xbd, 0x5f, 0xe7, 0x5e, 0xad, 0xc8, 0xbd, 0xc3, 0x5d, 0x57, 0x6c, 0x73, 0xaf, + 0x63, 0xb0, 0x28, 0x4e, 0x84, 0x1f, 0x1f, 0x52, 0xbc, 0xd3, 0x2d, 0xb1, 0xbe, 
0x59, 0x00, 0xec, + 0xfc, 0x1d, 0x67, 0xa0, 0xa9, 0x37, 0xd5, 0x64, 0x9d, 0xe1, 0xdb, 0xe7, 0xc1, 0xfa, 0xd2, 0x22, + 0x5b, 0x25, 0xc6, 0x66, 0x16, 0x7e, 0x00, 0x36, 0x9b, 0xc8, 0x0a, 0xf6, 0xf8, 0xf9, 0x99, 0xf2, + 0x89, 0x45, 0x07, 0x25, 0xa7, 0x7d, 0x75, 0x79, 0x2d, 0x62, 0x7d, 0xc8, 0x60, 0x44, 0x9f, 0xd7, + 0x7f, 0xdc, 0xda, 0xba, 0x70, 0xad, 0xc7, 0xc2, 0xb5, 0x7e, 0x17, 0xae, 0xf5, 0x7d, 0xe3, 0xd6, + 0x1e, 0x37, 0x6e, 0xed, 0xd7, 0xc6, 0xad, 0x7d, 0x1d, 0x4e, 0xa9, 0x9c, 0x2d, 0x6f, 0x82, 0x84, + 0xa7, 0x61, 0x92, 0xad, 0x16, 0x92, 0xf7, 0x79, 0x36, 0xed, 0xeb, 0x9c, 0x50, 0xff, 0xf6, 0x55, + 0x5c, 0xf8, 0xa0, 0xbe, 0x80, 0x50, 0xae, 0x16, 0x44, 0xdc, 0xec, 0xe9, 0x07, 0x7e, 0xf6, 0x2f, + 0x00, 0x00, 0xff, 0xff, 0xab, 0x43, 0xf5, 0x6e, 0x1c, 0x03, 0x00, 0x00, } func (this *BaseNFT) Equal(that interface{}) bool { @@ -335,6 +336,9 @@ func (this *Denom) Equal(that interface{}) bool { if this.Creator != that1.Creator { return false } + if this.Uri != that1.Uri { + return false + } return true } func (this *IDCollection) Equal(that interface{}) bool { @@ -511,6 +515,13 @@ func (m *Denom) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.Uri) > 0 { + i -= len(m.Uri) + copy(dAtA[i:], m.Uri) + i = encodeVarintNft(dAtA, i, uint64(len(m.Uri))) + i-- + dAtA[i] = 0x2a + } if len(m.Creator) > 0 { i -= len(m.Creator) copy(dAtA[i:], m.Creator) @@ -734,6 +745,10 @@ func (m *Denom) Size() (n int) { if l > 0 { n += 1 + l + sovNft(uint64(l)) } + l = len(m.Uri) + if l > 0 { + n += 1 + l + sovNft(uint64(l)) + } return n } @@ -1165,6 +1180,38 @@ func (m *Denom) Unmarshal(dAtA []byte) error { } m.Creator = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Uri", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthNft + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthNft + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Uri = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipNft(dAtA[iNdEx:]) diff --git a/x/nft/types/tx.pb.go b/x/nft/types/tx.pb.go index 36be053c9..61d4d9c5e 100644 --- a/x/nft/types/tx.pb.go +++ b/x/nft/types/tx.pb.go @@ -34,6 +34,7 @@ type MsgIssueDenom struct { Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` Schema string `protobuf:"bytes,3,opt,name=schema,proto3" json:"schema,omitempty"` Sender string `protobuf:"bytes,4,opt,name=sender,proto3" json:"sender,omitempty"` + Uri string `protobuf:"bytes,5,opt,name=uri,proto3" json:"uri,omitempty"` } func (m *MsgIssueDenom) Reset() { *m = MsgIssueDenom{} } @@ -438,40 +439,41 @@ func init() { func init() { proto.RegisterFile("nft/v1/tx.proto", fileDescriptor_714f8ececec76715) } var fileDescriptor_714f8ececec76715 = []byte{ - // 520 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x94, 0x31, 0x6f, 0xd3, 0x40, - 0x14, 0xc7, 0xe3, 0x38, 0x24, 0xf4, 0x21, 0x5a, 0x74, 0x94, 0x62, 0xa2, 0xca, 0xa9, 0x22, 0x24, - 0xba, 0xc4, 0x56, 0xcb, 0xd6, 0x31, 0x02, 0xa4, 0x0c, 0x46, 0xc8, 0x2a, 0x48, 0xb0, 0x20, 0xd7, - 0xbe, 0x38, 0x87, 0xf0, 0xd9, 0xba, 0xbb, 0x54, 
0xcd, 0x77, 0x60, 0xe0, 0x23, 0xb0, 0x30, 0xf2, - 0x3d, 0x2a, 0xb1, 0x74, 0x64, 0xaa, 0x20, 0x59, 0x98, 0xf9, 0x04, 0xc8, 0xe7, 0x73, 0x7a, 0x0e, - 0x2e, 0x5d, 0x40, 0xea, 0x62, 0x3d, 0xbf, 0xf7, 0xee, 0xdd, 0xef, 0x7f, 0xef, 0xde, 0xc1, 0x06, - 0x1d, 0x0b, 0xf7, 0x78, 0xcf, 0x15, 0x27, 0x4e, 0xc6, 0x52, 0x91, 0xa2, 0x3b, 0xe1, 0x24, 0x20, - 0x34, 0x09, 0x08, 0x75, 0xe8, 0x58, 0x38, 0xc7, 0x7b, 0xdd, 0xcd, 0x38, 0x8d, 0x53, 0x19, 0x74, - 0x73, 0xab, 0xc8, 0xeb, 0x13, 0xb8, 0xed, 0xf1, 0x78, 0xc4, 0xf9, 0x14, 0x3f, 0xc1, 0x34, 0x4d, - 0xd0, 0x3a, 0x34, 0x49, 0x64, 0x19, 0x3b, 0xc6, 0xee, 0x9a, 0xdf, 0x24, 0x11, 0x42, 0xd0, 0xa2, - 0x41, 0x82, 0xad, 0xa6, 0xf4, 0x48, 0x1b, 0x6d, 0x41, 0x9b, 0x87, 0x13, 0x9c, 0x04, 0x96, 0x29, - 0xbd, 0xea, 0x4f, 0xfa, 0x31, 0x8d, 0x30, 0xb3, 0x5a, 0xca, 0x2f, 0xff, 0x0e, 0x5a, 0x3f, 0x3f, - 0xf5, 0x8c, 0xfe, 0x7d, 0xb8, 0x57, 0xd9, 0xca, 0xc7, 0x3c, 0x4b, 0x29, 0xc7, 0xfd, 0x0f, 0x06, - 0xac, 0x7b, 0x3c, 0x3e, 0x64, 0x01, 0xe5, 0x63, 0xcc, 0x9e, 0x3f, 0x3b, 0xfc, 0x83, 0xc2, 0x81, - 0x9b, 0x51, 0xbe, 0xe6, 0x2d, 0x89, 0x0a, 0x92, 0xe1, 0xdd, 0x5f, 0xe7, 0xbd, 0x8d, 0x59, 0x90, - 0xbc, 0x3f, 0xe8, 0x97, 0x91, 0xbe, 0xdf, 0x91, 0xe6, 0x28, 0xd2, 0x48, 0x4c, 0x9d, 0x04, 0x6d, - 0xc3, 0x1a, 0xc3, 0x21, 0xc9, 0x08, 0xa6, 0x42, 0x41, 0x5e, 0x38, 0x14, 0xa7, 0x05, 0x5b, 0x55, - 0x9a, 0x25, 0xe8, 0x17, 0x03, 0xc0, 0xe3, 0xf1, 0xd3, 0x88, 0x88, 0x7f, 0x01, 0x59, 0x1e, 0xad, - 0xa9, 0x1d, 0xed, 0x03, 0x30, 0xa7, 0x8c, 0x14, 0x68, 0xc3, 0xce, 0xfc, 0xbc, 0x67, 0xbe, 0xf4, - 0x47, 0x7e, 0xee, 0xcb, 0xd3, 0xa3, 0x40, 0x04, 0xd6, 0x8d, 0x22, 0x3d, 0xb7, 0x35, 0x9d, 0xed, - 0x9a, 0x13, 0xdf, 0x04, 0x74, 0x81, 0xbb, 0x54, 0xf1, 0xb5, 0x50, 0xe1, 0x11, 0x7a, 0xcd, 0x55, - 0x54, 0xbb, 0xd5, 0xa9, 0xef, 0x56, 0xa1, 0x51, 0x89, 0x59, 0x6a, 0x7c, 0x27, 0x25, 0x0e, 0xa7, - 0x8c, 0xfe, 0xc7, 0xdb, 0x54, 0x21, 0x50, 0x7b, 0x95, 0x04, 0xfb, 0x9f, 0x4d, 0x30, 0x3d, 0x1e, - 0xa3, 0x57, 0x00, 0xda, 0x74, 0xf5, 0x9c, 0xd5, 0xb9, 0x74, 0x2a, 0x33, 0xd1, 0x7d, 0x74, 0x45, - 0x42, 0x59, 0x1f, 0x79, 0xd0, 0x29, 0x3b, 0xb8, 0x5d, 0xbb, 0x46, 0x45, 0xbb, 0x0f, 0xff, 0x16, - 0xd5, 0xcb, 0x95, 0xd7, 0xba, 0xbe, 0x9c, 0x8a, 0x5e, 0x52, 0x6e, 0xe5, 0x8e, 0xa1, 0xd7, 0x70, - 0x4b, 0x1f, 0xe7, 0x9d, 0xda, 0x45, 0x5a, 0x46, 0x77, 0xf7, 0xaa, 0x0c, 0x9d, 0xb4, 0xec, 0x6b, - 0x3d, 0xa9, 0x8a, 0x5e, 0x42, 0xba, 0xd2, 0xa7, 0xe1, 0x8b, 0xd3, 0x1f, 0x76, 0xe3, 0x74, 0x6e, - 0x1b, 0x67, 0x73, 0xdb, 0xf8, 0x3e, 0xb7, 0x8d, 0x8f, 0x0b, 0xbb, 0x71, 0xb6, 0xb0, 0x1b, 0xdf, - 0x16, 0x76, 0xe3, 0xcd, 0x7e, 0x4c, 0xc4, 0x64, 0x7a, 0xe4, 0x84, 0x69, 0xe2, 0x86, 0x6c, 0x96, - 0x89, 0x74, 0x90, 0xb2, 0x78, 0x20, 0x0b, 0xbb, 0xf2, 0x3b, 0xc8, 0xeb, 0xbb, 0x27, 0x6e, 0xfe, - 0x00, 0x8b, 0x59, 0x86, 0xf9, 0x51, 0x5b, 0xbe, 0xac, 0x8f, 0x7f, 0x07, 0x00, 0x00, 0xff, 0xff, - 0x09, 0x8b, 0x9b, 0x6a, 0x94, 0x05, 0x00, 0x00, + // 530 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x94, 0xc1, 0x6e, 0xd3, 0x4c, + 0x10, 0xc7, 0xe3, 0x38, 0x5f, 0xf2, 0x75, 0x10, 0x6d, 0xb5, 0x94, 0x62, 0xa2, 0xca, 0xa9, 0x2c, + 0x24, 0x7a, 0x89, 0xad, 0x96, 0x5b, 0x8f, 0x11, 0x20, 0xe5, 0x60, 0x84, 0xa2, 0x82, 0x04, 0x17, + 0xe4, 0xda, 0x1b, 0x67, 0x11, 0x5e, 0x5b, 0xbb, 0x9b, 0xaa, 0x11, 0xaf, 0xc0, 0x81, 0x47, 0xe0, + 0xc2, 0x91, 0xf7, 0xa8, 0xc4, 0xa5, 0x47, 0x4e, 0x15, 0x24, 0x17, 0xce, 0x3c, 0x01, 0xf2, 0x7a, + 0x9d, 0xae, 0x83, 0x4b, 0x2f, 0x20, 0x71, 0xb1, 0xc6, 0x33, 0xb3, 0xe3, 0xdf, 0x7f, 0x66, 0xc7, + 0xb0, 0x41, 0xc7, 0xc2, 0x3b, 0xd9, 0xf7, 0xc4, 0xa9, 0x9b, 0xb1, 0x54, 
0xa4, 0x68, 0x33, 0x9c, + 0x04, 0x84, 0x26, 0x01, 0xa1, 0x2e, 0x1d, 0x0b, 0xf7, 0x64, 0xbf, 0xbb, 0x15, 0xa7, 0x71, 0x2a, + 0x83, 0x5e, 0x6e, 0x15, 0x79, 0xce, 0x5b, 0xb8, 0xe9, 0xf3, 0x78, 0xc8, 0xf9, 0x14, 0x3f, 0xc4, + 0x34, 0x4d, 0xd0, 0x3a, 0x34, 0x49, 0x64, 0x19, 0xbb, 0xc6, 0xde, 0xda, 0xa8, 0x49, 0x22, 0x84, + 0xa0, 0x45, 0x83, 0x04, 0x5b, 0x4d, 0xe9, 0x91, 0x36, 0xda, 0x86, 0x36, 0x0f, 0x27, 0x38, 0x09, + 0x2c, 0x53, 0x7a, 0xd5, 0x9b, 0xf4, 0x63, 0x1a, 0x61, 0x66, 0xb5, 0x94, 0x5f, 0xbe, 0xa1, 0x4d, + 0x30, 0xa7, 0x8c, 0x58, 0xff, 0x49, 0x67, 0x6e, 0x1e, 0xb6, 0xbe, 0x7f, 0xe8, 0x19, 0xce, 0x1d, + 0xb8, 0x5d, 0xf9, 0xf8, 0x08, 0xf3, 0x2c, 0xa5, 0x1c, 0x3b, 0xef, 0x0c, 0x58, 0xf7, 0x79, 0x7c, + 0xc4, 0x02, 0xca, 0xc7, 0x98, 0x3d, 0x79, 0x7c, 0xf4, 0x0b, 0x97, 0x0b, 0xff, 0x47, 0xf9, 0x99, + 0x57, 0x24, 0x2a, 0xd8, 0x06, 0xb7, 0x7e, 0x5c, 0xf4, 0x36, 0x66, 0x41, 0xf2, 0xe6, 0xd0, 0x29, + 0x23, 0xce, 0xa8, 0x23, 0xcd, 0x61, 0xa4, 0xb1, 0x99, 0x15, 0xb6, 0x1d, 0x58, 0x63, 0x38, 0x24, + 0x19, 0xc1, 0x54, 0x28, 0xec, 0x4b, 0x87, 0xe2, 0xb4, 0x60, 0xbb, 0x4a, 0xb3, 0x04, 0xfd, 0x64, + 0x00, 0xf8, 0x3c, 0x7e, 0x14, 0x11, 0xf1, 0x27, 0x20, 0xcb, 0x66, 0x9b, 0x5a, 0xb3, 0xef, 0x16, + 0xcd, 0x93, 0x68, 0x83, 0xce, 0xfc, 0xa2, 0x67, 0x3e, 0x1b, 0x0d, 0x65, 0x17, 0xf3, 0xf4, 0x28, + 0x10, 0x81, 0x6a, 0xac, 0xb4, 0x35, 0x9d, 0x6d, 0x5d, 0xa7, 0x52, 0xb2, 0x05, 0xe8, 0x12, 0x77, + 0xa9, 0xe2, 0x73, 0xa1, 0xc2, 0x27, 0xf4, 0x1f, 0x57, 0x51, 0x9d, 0x56, 0xa7, 0x7e, 0x5a, 0x85, + 0x46, 0x25, 0x66, 0xa9, 0xf1, 0xb5, 0x94, 0x38, 0x98, 0x32, 0xfa, 0x17, 0x6f, 0x53, 0x85, 0x40, + 0x7d, 0xab, 0x24, 0x38, 0xf8, 0x68, 0x82, 0xe9, 0xf3, 0x18, 0x3d, 0x07, 0xd0, 0xf6, 0xad, 0xe7, + 0xae, 0x6e, 0xaa, 0x5b, 0xd9, 0x89, 0xee, 0xfd, 0x6b, 0x12, 0xca, 0xfa, 0xc8, 0x87, 0x4e, 0x39, + 0xc1, 0x9d, 0xda, 0x33, 0x2a, 0xda, 0xbd, 0xf7, 0xbb, 0xa8, 0x5e, 0xae, 0xbc, 0xd6, 0xf5, 0xe5, + 0x54, 0xf4, 0x8a, 0x72, 0x2b, 0x77, 0x0c, 0xbd, 0x80, 0x1b, 0xfa, 0x3a, 0xef, 0xd6, 0x1e, 0xd2, + 0x32, 0xba, 0x7b, 0xd7, 0x65, 0xe8, 0xa4, 0xe5, 0x5c, 0xeb, 0x49, 0x55, 0xf4, 0x0a, 0xd2, 0x95, + 0x39, 0x0d, 0x9e, 0x9e, 0x7d, 0xb3, 0x1b, 0x67, 0x73, 0xdb, 0x38, 0x9f, 0xdb, 0xc6, 0xd7, 0xb9, + 0x6d, 0xbc, 0x5f, 0xd8, 0x8d, 0xf3, 0x85, 0xdd, 0xf8, 0xb2, 0xb0, 0x1b, 0x2f, 0x0f, 0x62, 0x22, + 0x26, 0xd3, 0x63, 0x37, 0x4c, 0x13, 0x2f, 0x64, 0xb3, 0x4c, 0xa4, 0xfd, 0x94, 0xc5, 0x7d, 0x59, + 0xd8, 0x93, 0xcf, 0x7e, 0x5e, 0xdf, 0x3b, 0xf5, 0xf2, 0x5f, 0xb2, 0x98, 0x65, 0x98, 0x1f, 0xb7, + 0xe5, 0xbf, 0xf6, 0xc1, 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xaf, 0xbe, 0xa9, 0xfa, 0xa6, 0x05, + 0x00, 0x00, } func (this *MsgIssueDenom) Equal(that interface{}) bool { @@ -505,6 +507,9 @@ func (this *MsgIssueDenom) Equal(that interface{}) bool { if this.Sender != that1.Sender { return false } + if this.Uri != that1.Uri { + return false + } return true } func (this *MsgTransferNFT) Equal(that interface{}) bool { @@ -906,6 +911,13 @@ func (m *MsgIssueDenom) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.Uri) > 0 { + i -= len(m.Uri) + copy(dAtA[i:], m.Uri) + i = encodeVarintTx(dAtA, i, uint64(len(m.Uri))) + i-- + dAtA[i] = 0x2a + } if len(m.Sender) > 0 { i -= len(m.Sender) copy(dAtA[i:], m.Sender) @@ -1317,6 +1329,10 @@ func (m *MsgIssueDenom) Size() (n int) { if l > 0 { n += 1 + l + sovTx(uint64(l)) } + l = len(m.Uri) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } return n } @@ -1644,6 +1660,38 @@ func (m *MsgIssueDenom) Unmarshal(dAtA []byte) error { } m.Sender = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 5: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Uri", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Uri = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTx(dAtA[iNdEx:]) From 0418e5733997957a516ad006eae2af75c6323740 Mon Sep 17 00:00:00 2001 From: Devashish Dixit Date: Sat, 17 Sep 2022 09:22:17 +0800 Subject: [PATCH 02/16] Change module name for potential key collision --- x/nft-transfer/types/keys.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/x/nft-transfer/types/keys.go b/x/nft-transfer/types/keys.go index 6f770e32d..4621dd746 100644 --- a/x/nft-transfer/types/keys.go +++ b/x/nft-transfer/types/keys.go @@ -9,14 +9,14 @@ import ( const ( // Module name defines IBC nft-transfer moduel name - ModuleName = "nft-transfer" - - // RouterKey is the message route for IBC nft-transfer - RouterKey = ModuleName + ModuleName = "nonfungibletokentransfer" // StoreKey is the store key string for IBC nft-transfer StoreKey = ModuleName + // RouterKey is the message route for IBC nft-transfer + RouterKey = ModuleName + // QuerierRoute is the querier route for IBC nft-transfer QuerierRoute = ModuleName From 833fee56c609c84aa1d32eed44c8faeba01c5a6a Mon Sep 17 00:00:00 2001 From: Devashish Dixit Date: Sat, 17 Sep 2022 11:04:54 +0800 Subject: [PATCH 03/16] test nft issue denom after upgrade --- integration_tests/test_upgrade.py | 40 ++++++++++++++++++++++++++++++- 1 file changed, 39 insertions(+), 1 deletion(-) diff --git a/integration_tests/test_upgrade.py b/integration_tests/test_upgrade.py index 9189234e5..abea71e46 100644 --- a/integration_tests/test_upgrade.py +++ b/integration_tests/test_upgrade.py @@ -348,11 +348,49 @@ def assert_commission(adr, expected): assert_commission(validator1_operator_address, "0.000000000000000000") assert_commission(validator2_operator_address, default_rate) + # create denom before upgrade + cli = cluster.cosmos_cli() + rsp = json.loads( + cli.raw( + "tx", + "nft", + "issue", + "testdenomid", + "-y", + name="testdenomname", + home=cli.data_dir, + node=cli.node_rpc, + output="json", + _from="community", + keyring_backend="test", + chain_id=cli.chain_id, + ) + ) + raw_log = json.loads(rsp["raw_log"]) + assert raw_log[0]["events"][0]["type"] == "issue_denom" + target_height = cluster.block_height() + 30 upgrade(cluster, "v4.0.0", target_height, cosmos_sdk_46=False) - # check icaauth params cli = cluster.cosmos_cli() + + # check denom after upgrade + rsp = json.loads( + cli.raw( + "query", + "nft", + "denom", + "testdenomid", + home=cli.data_dir, + node=cli.node_rpc, + output="json", + ) + ) + + assert rsp["name"] == "testdenomname", rsp + assert rsp["uri"] == "", rsp + + # check icaauth params rsp = json.loads( cli.raw( "query", From 587bcd98f811ea02c109da3468bd70e7c3f3731f Mon Sep 17 00:00:00 2001 From: Devashish Dixit Date: Sat, 17 Sep 2022 14:00:29 +0800 Subject: [PATCH 04/16] Add integration tests for nft transfer --- .../configs/nft_transfer.jsonnet | 36 +++ integration_tests/ibc_utils.py | 39 +++- 
integration_tests/test_nft_transfer.py | 210 ++++++++++++++++++ integration_tests/test_upgrade.py | 2 +- x/nft-transfer/ibc_module.go | 4 +- x/nft/client/cli/query.go | 24 +- x/nft/keeper/msg_server.go | 2 +- 7 files changed, 298 insertions(+), 19 deletions(-) create mode 100644 integration_tests/configs/nft_transfer.jsonnet create mode 100644 integration_tests/test_nft_transfer.py diff --git a/integration_tests/configs/nft_transfer.jsonnet b/integration_tests/configs/nft_transfer.jsonnet new file mode 100644 index 000000000..6429aa9dd --- /dev/null +++ b/integration_tests/configs/nft_transfer.jsonnet @@ -0,0 +1,36 @@ +local default = { + accounts: [ + { + name: 'relayer', + coins: '100cro', + }, + { + name: 'signer', + coins: '200cro', + }, + ], + genesis: { + app_state: { + transfer: { + params: { + receive_enabled: true, + send_enabled: true, + }, + }, + }, + }, +}; +local validator = { + coins: '10cro', + staked: '10cro', +}; + +{ + 'ibc-0': default { + validators: [validator { base_port: 26650 }, validator], + }, + 'ibc-1': default { + validators: [validator { base_port: port } for port in [26750, 26760]], + }, + relayer: {}, +} diff --git a/integration_tests/ibc_utils.py b/integration_tests/ibc_utils.py index 261bee0ef..977dce165 100644 --- a/integration_tests/ibc_utils.py +++ b/integration_tests/ibc_utils.py @@ -29,7 +29,7 @@ def search_target(query, key, chains): return results -def start_and_wait_relayer(cluster, init_relayer=True): +def start_and_wait_relayer(cluster, port="transfer", init_relayer=True): relayer = wait_relayer_ready(cluster) chains = ["ibc-0", "ibc-1"] if init_relayer: @@ -40,9 +40,9 @@ def start_and_wait_relayer(cluster, init_relayer=True): "create", "channel", "--a-port", - "transfer", + port, "--b-port", - "transfer", + port, "--a-chain", chains[0], "--b-chain", @@ -58,3 +58,36 @@ def start_and_wait_relayer(cluster, init_relayer=True): query = relayer + ["query", "channels", "--chain"] return search_target(query, "channel", chains) + + +# def start_and_wait_relayer_nft_transfer(cluster, init_relayer=True): +# relayer = wait_relayer_ready(cluster) +# chains = ["ibc-0", "ibc-1"] +# if init_relayer: +# # create connection and channel +# subprocess.run( +# relayer +# + [ +# "create", +# "channel", +# "--a-port", +# "nft-transfer", +# "--b-port", +# "nft-transfer", +# "--a-chain", +# chains[0], +# "--b-chain", +# chains[1], +# "--new-client-connection", +# "--channel-version", +# "ics721-1", +# "--yes", +# ], +# check=True, +# ) + +# # start relaying +# cluster[chains[0]].supervisor.startProcess("relayer-demo") + +# query = relayer + ["query", "channels", "--chain"] +# return search_target(query, "channel", chains) diff --git a/integration_tests/test_nft_transfer.py b/integration_tests/test_nft_transfer.py new file mode 100644 index 000000000..f056d45c6 --- /dev/null +++ b/integration_tests/test_nft_transfer.py @@ -0,0 +1,210 @@ +import json +from pathlib import Path +import time +import pytest + +from .ibc_utils import start_and_wait_relayer +from .utils import cluster_fixture + +pytestmark = pytest.mark.ibc + + +@pytest.fixture(scope="module") +def cluster(worker_index, pytestconfig, tmp_path_factory): + "override cluster fixture for this test module" + yield from cluster_fixture( + Path(__file__).parent / "configs/nft_transfer.jsonnet", + worker_index, + tmp_path_factory.mktemp("data"), + ) + + +def test_nft_transfer(cluster): + src_channel, dst_channel = start_and_wait_relayer(cluster, "nft-transfer") + + cli_src = cluster["ibc-0"].cosmos_cli() + cli_dst = 
cluster["ibc-1"].cosmos_cli() + + addr_src = cluster["ibc-0"].address("relayer") + addr_dst = cluster["ibc-1"].address("relayer") + + denomid = "testdenomid" + denomname = "testdenomname" + denomuri = "testdenomuri" + + tokenid = "testtokenid" + tokenuri = "testtokenuri" + + # mint nft on source chain + rsp = json.loads( + cli_src.raw( + "tx", + "nft", + "issue", + denomid, + "-y", + name=denomname, + uri=denomuri, + home=cli_src.data_dir, + from_=addr_src, + keyring_backend="test", + chain_id=cli_src.chain_id, + node=cli_src.node_rpc, + ) + ) + + raw_log = json.loads(rsp["raw_log"]) + assert raw_log[0]["events"][0]["type"] == "issue_denom" + + rsp = json.loads( + cli_src.raw( + "tx", + "nft", + "mint", + denomid, + tokenid, + "-y", + uri=tokenuri, + recipient=addr_src, + home=cli_src.data_dir, + from_=addr_src, + keyring_backend="test", + chain_id=cli_src.chain_id, + node=cli_src.node_rpc, + ) + ) + + raw_log = json.loads(rsp["raw_log"]) + assert ( + raw_log[0]["events"][0]["attributes"][0]["value"] + == "/chainmain.nft.v1.MsgMintNFT" + ) + + # transfer nft on destination chain + rsp = json.loads( + cli_src.raw( + "tx", + "nft-transfer", + "transfer", + "nft-transfer", + src_channel, + addr_dst, + denomid, + tokenid, + "-y", + home=cli_src.data_dir, + from_=addr_src, + keyring_backend="test", + chain_id=cli_src.chain_id, + node=cli_src.node_rpc, + ) + ) + + assert rsp["code"] == 0, rsp["raw_log"] + + # FIXME more stable way to wait for relaying + time.sleep(20) + + # get class hash on destination chain + class_hash = json.loads( + cli_dst.raw( + "query", + "nft-transfer", + "class-hash", + "nft-transfer/" + dst_channel + "/" + denomid, + home=cli_dst.data_dir, + node=cli_dst.node_rpc, + output="json", + ) + )["hash"] + + dst_denom_id = "ibc/" + class_hash + + # query denom on destination chain + rsp = json.loads( + cli_dst.raw( + "query", + "nft", + "denom", + dst_denom_id, + home=cli_dst.data_dir, + node=cli_dst.node_rpc, + output="json", + ) + ) + + assert rsp["uri"] == denomuri, rsp["uri"] + + # query nft on destination chain + rsp = json.loads( + cli_dst.raw( + "query", + "nft", + "token", + "ibc/" + class_hash, + tokenid, + home=cli_dst.data_dir, + node=cli_dst.node_rpc, + output="json", + ) + ) + + assert rsp["uri"] == tokenuri, rsp + assert rsp["owner"] == addr_dst, rsp + + # transfer nft back to source chain + rsp = json.loads( + cli_dst.raw( + "tx", + "nft-transfer", + "transfer", + "nft-transfer", + dst_channel, + addr_src, + dst_denom_id, + tokenid, + "-y", + home=cli_dst.data_dir, + from_=addr_dst, + keyring_backend="test", + chain_id=cli_dst.chain_id, + node=cli_dst.node_rpc, + ) + ) + + assert rsp["code"] == 0, rsp["raw_log"] + + # FIXME more stable way to wait for relaying + time.sleep(20) + + # nft should be burnt on destination chain + rsp = json.loads( + cli_dst.raw( + "query", + "nft", + "collection", + dst_denom_id, + home=cli_dst.data_dir, + node=cli_dst.node_rpc, + output="json", + ) + )["collection"] + + assert len(rsp["nfts"]) == 0, rsp + + # query nft on source chain + rsp = json.loads( + cli_src.raw( + "query", + "nft", + "token", + denomid, + tokenid, + home=cli_src.data_dir, + node=cli_src.node_rpc, + output="json", + ) + ) + + assert rsp["uri"] == tokenuri, rsp + assert rsp["owner"] == addr_src, rsp diff --git a/integration_tests/test_upgrade.py b/integration_tests/test_upgrade.py index abea71e46..aeacb7f48 100644 --- a/integration_tests/test_upgrade.py +++ b/integration_tests/test_upgrade.py @@ -361,7 +361,7 @@ def assert_commission(adr, expected): 
home=cli.data_dir, node=cli.node_rpc, output="json", - _from="community", + from_="community", keyring_backend="test", chain_id=cli.chain_id, ) diff --git a/x/nft-transfer/ibc_module.go b/x/nft-transfer/ibc_module.go index b34d0353d..318ad0346 100644 --- a/x/nft-transfer/ibc_module.go +++ b/x/nft-transfer/ibc_module.go @@ -91,7 +91,7 @@ func (im IBCModule) OnChanOpenInit( return "", err } - return "", nil + return version, nil } // OnChanOpenTry implements the IBCModule interface. @@ -124,7 +124,7 @@ func (im IBCModule) OnChanOpenTry( } } - return "", nil + return types.Version, nil } // OnChanOpenAck implements the IBCModule interface diff --git a/x/nft/client/cli/query.go b/x/nft/client/cli/query.go index f8dd4570e..6bfdd84e4 100644 --- a/x/nft/client/cli/query.go +++ b/x/nft/client/cli/query.go @@ -64,9 +64,9 @@ func GetCmdQuerySupply() *cobra.Command { } // nolint: govet - if err := types.ValidateDenomID(args[0]); err != nil { - return err - } + // if err := types.ValidateDenomID(args[0]); err != nil { + // return err + // } queryClient := types.NewQueryClient(clientCtx) resp, err := queryClient.Supply(context.Background(), &types.QuerySupplyRequest{ @@ -143,9 +143,9 @@ func GetCmdQueryCollection() *cobra.Command { } // nolint: govet - if err := types.ValidateDenomID(args[0]); err != nil { - return err - } + // if err := types.ValidateDenomID(args[0]); err != nil { + // return err + // } pageReq, err := client.ReadPageRequest(cmd.Flags()) if err != nil { return err @@ -213,9 +213,9 @@ func GetCmdQueryDenom() *cobra.Command { } // nolint: govet - if err := types.ValidateDenomID(args[0]); err != nil { - return err - } + // if err := types.ValidateDenomID(args[0]); err != nil { + // return err + // } queryClient := types.NewQueryClient(clientCtx) resp, err := queryClient.Denom( @@ -281,9 +281,9 @@ func GetCmdQueryNFT() *cobra.Command { } // nolint: govet - if err := types.ValidateDenomID(args[0]); err != nil { - return err - } + // if err := types.ValidateDenomID(args[0]); err != nil { + // return err + // } // nolint: govet if err := types.ValidateTokenID(args[1]); err != nil { diff --git a/x/nft/keeper/msg_server.go b/x/nft/keeper/msg_server.go index 613b39fba..4934fdf73 100644 --- a/x/nft/keeper/msg_server.go +++ b/x/nft/keeper/msg_server.go @@ -30,7 +30,7 @@ func (m msgServer) IssueDenom(goCtx context.Context, msg *types.MsgIssueDenom) ( } ctx := sdk.UnwrapSDKContext(goCtx) - if err := m.Keeper.IssueDenom(ctx, msg.Id, msg.Name, msg.Schema, "", sender); err != nil { + if err := m.Keeper.IssueDenom(ctx, msg.Id, msg.Name, msg.Schema, msg.Uri, sender); err != nil { return nil, err } From d80905282b84bc099f3ba32d457d9025fa34dff2 Mon Sep 17 00:00:00 2001 From: Devashish Dixit Date: Mon, 19 Sep 2022 08:44:26 +0800 Subject: [PATCH 05/16] Fix lint errors --- integration_tests/test_nft_transfer.py | 1 + x/nft-transfer/module.go | 1 + 2 files changed, 2 insertions(+) diff --git a/integration_tests/test_nft_transfer.py b/integration_tests/test_nft_transfer.py index f056d45c6..1751606d1 100644 --- a/integration_tests/test_nft_transfer.py +++ b/integration_tests/test_nft_transfer.py @@ -1,6 +1,7 @@ import json from pathlib import Path import time + import pytest from .ibc_utils import start_and_wait_relayer diff --git a/x/nft-transfer/module.go b/x/nft-transfer/module.go index 7b4b6594a..d8918d1b9 100644 --- a/x/nft-transfer/module.go +++ b/x/nft-transfer/module.go @@ -67,6 +67,7 @@ func (AppModuleBasic) ValidateGenesis(cdc codec.JSONCodec, config client.TxEncod // RegisterGRPCGatewayRoutes 
registers the gRPC Gateway routes for ics29 fee module. func (AppModuleBasic) RegisterGRPCGatewayRoutes(clientCtx client.Context, mux *runtime.ServeMux) { + // nolint: errcheck types.RegisterQueryHandlerClient(context.Background(), mux, types.NewQueryClient(clientCtx)) } From 1b6a2ef79a8b5e5f06a94efc97a5652a13d95584 Mon Sep 17 00:00:00 2001 From: Devashish Dixit Date: Mon, 19 Sep 2022 08:47:02 +0800 Subject: [PATCH 06/16] Fix lint errors --- integration_tests/test_nft_transfer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integration_tests/test_nft_transfer.py b/integration_tests/test_nft_transfer.py index 1751606d1..636b12bb6 100644 --- a/integration_tests/test_nft_transfer.py +++ b/integration_tests/test_nft_transfer.py @@ -1,6 +1,6 @@ import json -from pathlib import Path import time +from pathlib import Path import pytest From b94beae9e4c0dc318f6614465a6aceb1f73e2814 Mon Sep 17 00:00:00 2001 From: Devashish Dixit Date: Mon, 19 Sep 2022 09:29:01 +0800 Subject: [PATCH 07/16] Exclude ibc-go from python linter --- .flake8 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.flake8 b/.flake8 index 7fcd9ea22..b1132de04 100644 --- a/.flake8 +++ b/.flake8 @@ -1,4 +1,4 @@ [flake8] max-line-length = 88 extend-ignore = E203 -exclude = .git,__pycache__,./pystarport/pystarport/tendermint,./pystarport/pystarport/proto_python,./third_party/cosmos-sdk +exclude = .git,__pycache__,./pystarport/pystarport/tendermint,./pystarport/pystarport/proto_python,./third_party/cosmos-sdk,.third_party/ibc-go From 79e13c7df08494a835b1295d4292cbce80a732a7 Mon Sep 17 00:00:00 2001 From: Devashish Dixit Date: Mon, 19 Sep 2022 09:29:20 +0800 Subject: [PATCH 08/16] Exclude ibc-go from python linter --- .flake8 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.flake8 b/.flake8 index b1132de04..8dca2acd2 100644 --- a/.flake8 +++ b/.flake8 @@ -1,4 +1,4 @@ [flake8] max-line-length = 88 extend-ignore = E203 -exclude = .git,__pycache__,./pystarport/pystarport/tendermint,./pystarport/pystarport/proto_python,./third_party/cosmos-sdk,.third_party/ibc-go +exclude = .git,__pycache__,./pystarport/pystarport/tendermint,./pystarport/pystarport/proto_python,./third_party/cosmos-sdk,./third_party/ibc-go From f598631b4b71636e4fb354a8a49683570b4d165b Mon Sep 17 00:00:00 2001 From: Devashish Dixit Date: Mon, 19 Sep 2022 16:34:30 +0800 Subject: [PATCH 09/16] PR comments --- .../configs/nft_transfer.jsonnet | 11 +------ integration_tests/ibc_utils.py | 33 ------------------- integration_tests/test_nft_transfer.py | 8 ++--- x/nft-transfer/keeper/packet.go | 3 ++ x/nft-transfer/types/keys.go | 2 +- x/nft/client/cli/query.go | 24 +++++++------- x/nft/types/validation.go | 14 ++++++++ 7 files changed, 35 insertions(+), 60 deletions(-) diff --git a/integration_tests/configs/nft_transfer.jsonnet b/integration_tests/configs/nft_transfer.jsonnet index 6429aa9dd..ee5643992 100644 --- a/integration_tests/configs/nft_transfer.jsonnet +++ b/integration_tests/configs/nft_transfer.jsonnet @@ -9,16 +9,7 @@ local default = { coins: '200cro', }, ], - genesis: { - app_state: { - transfer: { - params: { - receive_enabled: true, - send_enabled: true, - }, - }, - }, - }, + genesis: {}, }; local validator = { coins: '10cro', diff --git a/integration_tests/ibc_utils.py b/integration_tests/ibc_utils.py index 977dce165..e15aca70d 100644 --- a/integration_tests/ibc_utils.py +++ b/integration_tests/ibc_utils.py @@ -58,36 +58,3 @@ def start_and_wait_relayer(cluster, port="transfer", init_relayer=True): query = 
relayer + ["query", "channels", "--chain"] return search_target(query, "channel", chains) - - -# def start_and_wait_relayer_nft_transfer(cluster, init_relayer=True): -# relayer = wait_relayer_ready(cluster) -# chains = ["ibc-0", "ibc-1"] -# if init_relayer: -# # create connection and channel -# subprocess.run( -# relayer -# + [ -# "create", -# "channel", -# "--a-port", -# "nft-transfer", -# "--b-port", -# "nft-transfer", -# "--a-chain", -# chains[0], -# "--b-chain", -# chains[1], -# "--new-client-connection", -# "--channel-version", -# "ics721-1", -# "--yes", -# ], -# check=True, -# ) - -# # start relaying -# cluster[chains[0]].supervisor.startProcess("relayer-demo") - -# query = relayer + ["query", "channels", "--chain"] -# return search_target(query, "channel", chains) diff --git a/integration_tests/test_nft_transfer.py b/integration_tests/test_nft_transfer.py index 636b12bb6..1a4e785b1 100644 --- a/integration_tests/test_nft_transfer.py +++ b/integration_tests/test_nft_transfer.py @@ -21,7 +21,7 @@ def cluster(worker_index, pytestconfig, tmp_path_factory): def test_nft_transfer(cluster): - src_channel, dst_channel = start_and_wait_relayer(cluster, "nft-transfer") + src_channel, dst_channel = start_and_wait_relayer(cluster, "nft") cli_src = cluster["ibc-0"].cosmos_cli() cli_dst = cluster["ibc-1"].cosmos_cli() @@ -87,7 +87,7 @@ def test_nft_transfer(cluster): "tx", "nft-transfer", "transfer", - "nft-transfer", + "nft", src_channel, addr_dst, denomid, @@ -112,7 +112,7 @@ def test_nft_transfer(cluster): "query", "nft-transfer", "class-hash", - "nft-transfer/" + dst_channel + "/" + denomid, + "nft/" + dst_channel + "/" + denomid, home=cli_dst.data_dir, node=cli_dst.node_rpc, output="json", @@ -159,7 +159,7 @@ def test_nft_transfer(cluster): "tx", "nft-transfer", "transfer", - "nft-transfer", + "nft", dst_channel, addr_src, dst_denom_id, diff --git a/x/nft-transfer/keeper/packet.go b/x/nft-transfer/keeper/packet.go index 5b397a572..169aec7f0 100644 --- a/x/nft-transfer/keeper/packet.go +++ b/x/nft-transfer/keeper/packet.go @@ -29,12 +29,14 @@ func (k Keeper) refundPacketToken(ctx sdk.Context, packet channeltypes.Packet, d escrowAddress := types.GetEscrowAddress(packet.GetSourcePort(), packet.GetSourceChannel()) if isAwayFromOrigin { + // unescrow tokens back to the sender for _, tokenID := range data.TokenIds { if err := k.nftKeeper.TransferOwner(ctx, voucherClassID, tokenID, escrowAddress, sender); err != nil { return err } } } else { + // we are sink chain, mint voucher back to sender for i, tokenID := range data.TokenIds { if err := k.nftKeeper.MintNFT(ctx, voucherClassID, tokenID, "", data.TokenUris[i], "", escrowAddress, sender); err != nil { return err @@ -104,6 +106,7 @@ func (k Keeper) createOutgoingPacket(ctx sdk.Context, return channeltypes.Packet{}, err } } else { + // we are sink chain, burn the voucher if err := k.nftKeeper.BurnNFTUnverified(ctx, classID, tokenID, sender); err != nil { return channeltypes.Packet{}, err } diff --git a/x/nft-transfer/types/keys.go b/x/nft-transfer/types/keys.go index 4621dd746..c4395f577 100644 --- a/x/nft-transfer/types/keys.go +++ b/x/nft-transfer/types/keys.go @@ -25,7 +25,7 @@ const ( Version = "ics721-1" // PortID is the default port id that nft-transfer module binds to - PortID = "nft-transfer" + PortID = "nft" // ClassPrefix is the prefix used for internal SDK NFT representation. 
ClassPrefix = "ibc" diff --git a/x/nft/client/cli/query.go b/x/nft/client/cli/query.go index 6bfdd84e4..1da7a9344 100644 --- a/x/nft/client/cli/query.go +++ b/x/nft/client/cli/query.go @@ -64,9 +64,9 @@ func GetCmdQuerySupply() *cobra.Command { } // nolint: govet - // if err := types.ValidateDenomID(args[0]); err != nil { - // return err - // } + if err := types.ValidateDenomIDWithIBC(args[0]); err != nil { + return err + } queryClient := types.NewQueryClient(clientCtx) resp, err := queryClient.Supply(context.Background(), &types.QuerySupplyRequest{ @@ -143,9 +143,9 @@ func GetCmdQueryCollection() *cobra.Command { } // nolint: govet - // if err := types.ValidateDenomID(args[0]); err != nil { - // return err - // } + if err := types.ValidateDenomIDWithIBC(args[0]); err != nil { + return err + } pageReq, err := client.ReadPageRequest(cmd.Flags()) if err != nil { return err @@ -213,9 +213,9 @@ func GetCmdQueryDenom() *cobra.Command { } // nolint: govet - // if err := types.ValidateDenomID(args[0]); err != nil { - // return err - // } + if err := types.ValidateDenomIDWithIBC(args[0]); err != nil { + return err + } queryClient := types.NewQueryClient(clientCtx) resp, err := queryClient.Denom( @@ -281,9 +281,9 @@ func GetCmdQueryNFT() *cobra.Command { } // nolint: govet - // if err := types.ValidateDenomID(args[0]); err != nil { - // return err - // } + if err := types.ValidateDenomIDWithIBC(args[0]); err != nil { + return err + } // nolint: govet if err := types.ValidateTokenID(args[1]); err != nil { diff --git a/x/nft/types/validation.go b/x/nft/types/validation.go index d9558c8f7..16c101d94 100644 --- a/x/nft/types/validation.go +++ b/x/nft/types/validation.go @@ -13,6 +13,7 @@ const ( DoNotModify = "[do-not-modify]" MinDenomLen = 3 MaxDenomLen = 64 + IBCDenomLen = 68 MaxTokenURILen = 256 ) @@ -35,6 +36,19 @@ func ValidateDenomID(denomID string) error { return nil } +// ValidateDenomIDWithIBC verifies whether the parameters are legal and considers IBC denom IDs when checking +func ValidateDenomIDWithIBC(denomID string) error { + if strings.HasPrefix(denomID, "ibc/") { + if len(denomID) != IBCDenomLen { + return sdkerrors.Wrapf(ErrInvalidDenom, "the length of ibc denom(%s) only accepts value [%d]", denomID, IBCDenomLen) + } + + return nil + } + + return ValidateDenomID(denomID) +} + // ValidateDenomName verifies whether the parameters are legal func ValidateDenomName(denomName string) error { denomName = strings.TrimSpace(denomName) From 3afb68410587a6e937109af44e6fbf5b2adcc88c Mon Sep 17 00:00:00 2001 From: Devashish Dixit Date: Wed, 21 Sep 2022 14:26:07 +0800 Subject: [PATCH 10/16] add integration test for testing multi hop nft transfer --- .../configs/nft_transfer.jsonnet | 3 + integration_tests/ibc_utils.py | 12 +- integration_tests/test_nft_transfer.py | 264 +++++++++++++++++- 3 files changed, 264 insertions(+), 15 deletions(-) diff --git a/integration_tests/configs/nft_transfer.jsonnet b/integration_tests/configs/nft_transfer.jsonnet index ee5643992..ca40b8287 100644 --- a/integration_tests/configs/nft_transfer.jsonnet +++ b/integration_tests/configs/nft_transfer.jsonnet @@ -23,5 +23,8 @@ local validator = { 'ibc-1': default { validators: [validator { base_port: port } for port in [26750, 26760]], }, + 'ibc-2': default { + validators: [validator { base_port: port } for port in [26850, 26860]], + }, relayer: {}, } diff --git a/integration_tests/ibc_utils.py b/integration_tests/ibc_utils.py index e15aca70d..d9c6b48a9 100644 --- a/integration_tests/ibc_utils.py +++ 
b/integration_tests/ibc_utils.py @@ -29,9 +29,14 @@ def search_target(query, key, chains): return results -def start_and_wait_relayer(cluster, port="transfer", init_relayer=True): +def start_and_wait_relayer( + cluster, + port="transfer", + chains=["ibc-0", "ibc-1"], + start_relaying=True, + init_relayer=True, +): relayer = wait_relayer_ready(cluster) - chains = ["ibc-0", "ibc-1"] if init_relayer: # create connection and channel subprocess.run( @@ -54,7 +59,8 @@ def start_and_wait_relayer(cluster, port="transfer", init_relayer=True): ) # start relaying - cluster[chains[0]].supervisor.startProcess("relayer-demo") + if start_relaying: + cluster[chains[0]].supervisor.startProcess("relayer-demo") query = relayer + ["query", "channels", "--chain"] return search_target(query, "channel", chains) diff --git a/integration_tests/test_nft_transfer.py b/integration_tests/test_nft_transfer.py index 1a4e785b1..3b670a351 100644 --- a/integration_tests/test_nft_transfer.py +++ b/integration_tests/test_nft_transfer.py @@ -1,4 +1,5 @@ import json +from re import A import time from pathlib import Path @@ -20,14 +21,28 @@ def cluster(worker_index, pytestconfig, tmp_path_factory): ) +# This function tests nft transfer from source chain -> mid chain -> destination chain and all the way back to source +# chain following the same path def test_nft_transfer(cluster): - src_channel, dst_channel = start_and_wait_relayer(cluster, "nft") + src_channel, mid_src_channel = start_and_wait_relayer(cluster, "nft") + mid_dst_channel, dst_channel = start_and_wait_relayer( + cluster, "nft", ["ibc-1", "ibc-2"], False + ) + + assert src_channel == "channel-0", src_channel + assert mid_src_channel == "channel-0", mid_src_channel + # assert mid_dst_channel == "channel-1", mid_dst_channel + assert dst_channel == "channel-0", dst_channel + + mid_dst_channel = "channel-1" cli_src = cluster["ibc-0"].cosmos_cli() - cli_dst = cluster["ibc-1"].cosmos_cli() + cli_mid = cluster["ibc-1"].cosmos_cli() + cli_dst = cluster["ibc-2"].cosmos_cli() addr_src = cluster["ibc-0"].address("relayer") - addr_dst = cluster["ibc-1"].address("relayer") + addr_mid = cluster["ibc-1"].address("relayer") + addr_dst = cluster["ibc-2"].address("relayer") denomid = "testdenomid" denomname = "testdenomname" @@ -81,7 +96,7 @@ def test_nft_transfer(cluster): == "/chainmain.nft.v1.MsgMintNFT" ) - # transfer nft on destination chain + # transfer nft on mid-destination chain rsp = json.loads( cli_src.raw( "tx", @@ -89,7 +104,7 @@ def test_nft_transfer(cluster): "transfer", "nft", src_channel, - addr_dst, + addr_mid, denomid, tokenid, "-y", @@ -106,20 +121,157 @@ def test_nft_transfer(cluster): # FIXME more stable way to wait for relaying time.sleep(20) + # get class hash on mid chain + mid_class_hash = json.loads( + cli_mid.raw( + "query", + "nft-transfer", + "class-hash", + "nft/" + mid_src_channel + "/" + denomid, + home=cli_mid.data_dir, + node=cli_mid.node_rpc, + output="json", + ) + )["hash"] + + # get class trace on mid chain + mid_class_trace = json.loads( + cli_mid.raw( + "query", + "nft-transfer", + "class-trace", + mid_class_hash, + home=cli_mid.data_dir, + node=cli_mid.node_rpc, + output="json", + ) + )["class_trace"] + + assert mid_class_trace["base_class_id"] == denomid, mid_class_trace + assert mid_class_trace["path"] == "nft/" + mid_src_channel, mid_class_trace + + mid_denom_id = "ibc/" + mid_class_hash + + # query denom on mid chain + rsp = json.loads( + cli_mid.raw( + "query", + "nft", + "denom", + mid_denom_id, + home=cli_mid.data_dir, + 
node=cli_mid.node_rpc, + output="json", + ) + ) + + assert rsp["uri"] == denomuri, rsp["uri"] + + # query nft on mid chain + rsp = json.loads( + cli_mid.raw( + "query", + "nft", + "token", + mid_denom_id, + tokenid, + home=cli_mid.data_dir, + node=cli_mid.node_rpc, + output="json", + ) + ) + + assert rsp["uri"] == tokenuri, rsp + assert rsp["owner"] == addr_mid, rsp + + # query nft on source chain's escrow address + src_escrow_address = str( + cli_src.raw( + "query", + "nft-transfer", + "escrow-address", + "nft", + src_channel, + home=cli_src.data_dir, + node=cli_src.node_rpc, + output="json", + ), + "UTF-8", + ).strip() + + rsp = json.loads( + cli_src.raw( + "query", + "nft", + "token", + denomid, + tokenid, + home=cli_src.data_dir, + node=cli_src.node_rpc, + output="json", + ) + ) + + assert rsp["uri"] == tokenuri, rsp + assert rsp["owner"] == src_escrow_address, rsp + + # transfer nft to destination chain + rsp = json.loads( + cli_mid.raw( + "tx", + "nft-transfer", + "transfer", + "nft", + mid_dst_channel, + addr_dst, + mid_denom_id, + tokenid, + "-y", + home=cli_mid.data_dir, + from_=addr_mid, + keyring_backend="test", + chain_id=cli_mid.chain_id, + node=cli_mid.node_rpc, + ) + ) + + assert rsp["code"] == 0, rsp["raw_log"] + + # FIXME more stable way to wait for relaying + time.sleep(20) + # get class hash on destination chain - class_hash = json.loads( + dst_class_hash = json.loads( cli_dst.raw( "query", "nft-transfer", "class-hash", - "nft/" + dst_channel + "/" + denomid, + "nft/" + dst_channel + "/nft/" + mid_src_channel + "/" + denomid, home=cli_dst.data_dir, node=cli_dst.node_rpc, output="json", ) )["hash"] - dst_denom_id = "ibc/" + class_hash + # get class trace on destination chain + dst_class_trace = json.loads( + cli_dst.raw( + "query", + "nft-transfer", + "class-trace", + dst_class_hash, + home=cli_dst.data_dir, + node=cli_dst.node_rpc, + output="json", + ) + )["class_trace"] + + assert dst_class_trace["base_class_id"] == denomid, dst_class_trace + assert ( + dst_class_trace["path"] == "nft/" + dst_channel + "/nft/" + mid_src_channel + ), dst_class_trace + + dst_denom_id = "ibc/" + dst_class_hash # query denom on destination chain rsp = json.loads( @@ -142,7 +294,7 @@ def test_nft_transfer(cluster): "query", "nft", "token", - "ibc/" + class_hash, + dst_denom_id, tokenid, home=cli_dst.data_dir, node=cli_dst.node_rpc, @@ -153,7 +305,38 @@ def test_nft_transfer(cluster): assert rsp["uri"] == tokenuri, rsp assert rsp["owner"] == addr_dst, rsp - # transfer nft back to source chain + # quert nft on mid chain's escrow address + mid_escrow_address = str( + cli_mid.raw( + "query", + "nft-transfer", + "escrow-address", + "nft", + mid_dst_channel, + home=cli_mid.data_dir, + node=cli_mid.node_rpc, + output="json", + ), + "UTF-8", + ).strip() + + rsp = json.loads( + cli_mid.raw( + "query", + "nft", + "token", + mid_denom_id, + tokenid, + home=cli_mid.data_dir, + node=cli_mid.node_rpc, + output="json", + ) + ) + + assert rsp["uri"] == tokenuri, rsp + assert rsp["owner"] == mid_escrow_address, rsp + + # transfer nft back to mid chain rsp = json.loads( cli_dst.raw( "tx", @@ -161,7 +344,7 @@ def test_nft_transfer(cluster): "transfer", "nft", dst_channel, - addr_src, + addr_mid, dst_denom_id, tokenid, "-y", @@ -178,7 +361,7 @@ def test_nft_transfer(cluster): # FIXME more stable way to wait for relaying time.sleep(20) - # nft should be burnt on destination chain + # TODO: nft should be burnt on destination chain rsp = json.loads( cli_dst.raw( "query", @@ -193,6 +376,63 @@ def 
test_nft_transfer(cluster): assert len(rsp["nfts"]) == 0, rsp + # TODO: query nft on mid chain + rsp = json.loads( + cli_mid.raw( + "query", + "nft", + "token", + mid_denom_id, + tokenid, + home=cli_mid.data_dir, + node=cli_mid.node_rpc, + output="json", + ) + ) + + assert rsp["uri"] == tokenuri, rsp + assert rsp["owner"] == addr_mid, rsp + + # transfer nft back to source chain + rsp = json.loads( + cli_mid.raw( + "tx", + "nft-transfer", + "transfer", + "nft", + mid_src_channel, + addr_src, + mid_denom_id, + tokenid, + "-y", + home=cli_mid.data_dir, + from_=addr_mid, + keyring_backend="test", + chain_id=cli_mid.chain_id, + node=cli_mid.node_rpc, + ) + ) + + assert rsp["code"] == 0, rsp["raw_log"] + + # FIXME more stable way to wait for relaying + time.sleep(20) + + # nft should be burnt on mid chain + rsp = json.loads( + cli_mid.raw( + "query", + "nft", + "collection", + mid_denom_id, + home=cli_mid.data_dir, + node=cli_mid.node_rpc, + output="json", + ) + )["collection"] + + assert len(rsp["nfts"]) == 0, rsp + # query nft on source chain rsp = json.loads( cli_src.raw( From b02f1a0d7135b519977be4596031d8b998d88a7c Mon Sep 17 00:00:00 2001 From: Devashish Dixit Date: Thu, 22 Sep 2022 12:36:10 +0800 Subject: [PATCH 11/16] PR comments --- proto/nft/v1/nft.proto | 3 ++- proto/nft_transfer/v1/packet.proto | 4 ++-- x/nft-transfer/client/cli/tx.go | 1 + x/nft-transfer/types/packet.go | 1 + x/nft-transfer/types/packet.pb.go | 4 ++-- 5 files changed, 8 insertions(+), 5 deletions(-) diff --git a/proto/nft/v1/nft.proto b/proto/nft/v1/nft.proto index 07eefe49e..40cd27eba 100644 --- a/proto/nft/v1/nft.proto +++ b/proto/nft/v1/nft.proto @@ -27,7 +27,8 @@ message Denom { string name = 2; string schema = 3; string creator = 4; - string uri = 5; + string uri = 5; // This was added because Cosmos SDK's native NFT module has uri as a parameter for class which is + // needed for nft transfers } // IDCollection defines a type of collection with specified ID diff --git a/proto/nft_transfer/v1/packet.proto b/proto/nft_transfer/v1/packet.proto index b880b03ae..a85fd3702 100644 --- a/proto/nft_transfer/v1/packet.proto +++ b/proto/nft_transfer/v1/packet.proto @@ -11,9 +11,9 @@ message NonFungibleTokenPacketData { string class_id = 1; // the class_uri of tokens to be transferred string class_uri = 2; - // the non fungible tokens to be transferred + // the non fungible tokens to be transferred (count should be equal to token_uris) repeated string token_ids = 3; - // the non fungible tokens's uri to be transferred + // the non fungible tokens's uri to be transferred (count should be equal to token ids) repeated string token_uris = 4; // the sender address string sender = 5; diff --git a/x/nft-transfer/client/cli/tx.go b/x/nft-transfer/client/cli/tx.go index 9cd0f5559..c55228811 100644 --- a/x/nft-transfer/client/cli/tx.go +++ b/x/nft-transfer/client/cli/tx.go @@ -93,6 +93,7 @@ corresponding to the counterparty channel. Any timeout set to 0 is disabled.`), now := time.Now().UnixNano() consensusStateTimestamp := consensusState.GetTimestamp() if now > 0 { + // nolint: gosec now := uint64(now) if now > consensusStateTimestamp { timeoutTimestamp = now + timeoutTimestamp diff --git a/x/nft-transfer/types/packet.go b/x/nft-transfer/types/packet.go index e463d3761..440d95d7a 100644 --- a/x/nft-transfer/types/packet.go +++ b/x/nft-transfer/types/packet.go @@ -18,6 +18,7 @@ var ( // relative to the current block timestamp of the counterparty chain provided by the client // state. The timeout is disabled when set to 0. 
The default is currently set to a 10 minute // timeout. + // nolint: gosec DefaultRelativePacketTimeoutTimestamp = uint64((time.Duration(10) * time.Minute).Nanoseconds()) ) diff --git a/x/nft-transfer/types/packet.pb.go b/x/nft-transfer/types/packet.pb.go index 4ff14ac0f..fcc603b35 100644 --- a/x/nft-transfer/types/packet.pb.go +++ b/x/nft-transfer/types/packet.pb.go @@ -30,9 +30,9 @@ type NonFungibleTokenPacketData struct { ClassId string `protobuf:"bytes,1,opt,name=class_id,json=classId,proto3" json:"class_id,omitempty"` // the class_uri of tokens to be transferred ClassUri string `protobuf:"bytes,2,opt,name=class_uri,json=classUri,proto3" json:"class_uri,omitempty"` - // the non fungible tokens to be transferred + // the non fungible tokens to be transferred (count should be equal to token_uris) TokenIds []string `protobuf:"bytes,3,rep,name=token_ids,json=tokenIds,proto3" json:"token_ids,omitempty"` - // the non fungible tokens's uri to be transferred + // the non fungible tokens's uri to be transferred (count should be equal to token ids) TokenUris []string `protobuf:"bytes,4,rep,name=token_uris,json=tokenUris,proto3" json:"token_uris,omitempty"` // the sender address Sender string `protobuf:"bytes,5,opt,name=sender,proto3" json:"sender,omitempty"` From 62ea7fdaf665eec62ee963d56107793851ee609b Mon Sep 17 00:00:00 2001 From: Devashish Dixit Date: Thu, 22 Sep 2022 13:05:49 +0800 Subject: [PATCH 12/16] Add nft-transfer packet timeout test --- integration_tests/test_nft_transfer.py | 60 ++++++++++++++++++++++++++ 1 file changed, 60 insertions(+) diff --git a/integration_tests/test_nft_transfer.py b/integration_tests/test_nft_transfer.py index 3b670a351..16daa0504 100644 --- a/integration_tests/test_nft_transfer.py +++ b/integration_tests/test_nft_transfer.py @@ -449,3 +449,63 @@ def test_nft_transfer(cluster): assert rsp["uri"] == tokenuri, rsp assert rsp["owner"] == addr_src, rsp + + # Test packet timeout + + # transfer nft on mid chain (with very less timeout so that the packet times out) + rsp = json.loads( + cli_src.raw( + "tx", + "nft-transfer", + "transfer", + "nft", + src_channel, + addr_mid, + denomid, + tokenid, + "-y", + packet_timeout_height="0-1", + home=cli_src.data_dir, + from_=addr_src, + keyring_backend="test", + chain_id=cli_src.chain_id, + node=cli_src.node_rpc, + ) + ) + + assert rsp["code"] == 0, rsp["raw_log"] + + # FIXME more stable way to wait for relaying + time.sleep(20) + + # nft should be not be present on mid chain + rsp = json.loads( + cli_mid.raw( + "query", + "nft", + "collection", + mid_denom_id, + home=cli_mid.data_dir, + node=cli_mid.node_rpc, + output="json", + ) + )["collection"] + + assert len(rsp["nfts"]) == 0, rsp + + # query nft on source chain (as the transfer should time out) + rsp = json.loads( + cli_src.raw( + "query", + "nft", + "token", + denomid, + tokenid, + home=cli_src.data_dir, + node=cli_src.node_rpc, + output="json", + ) + ) + + assert rsp["uri"] == tokenuri, rsp + assert rsp["owner"] == addr_src, rsp From a501a1a40b358286bf36da72e36fb0283679ddd6 Mon Sep 17 00:00:00 2001 From: Devashish Dixit Date: Thu, 22 Sep 2022 13:08:53 +0800 Subject: [PATCH 13/16] Fix lint errors --- integration_tests/test_nft_transfer.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/integration_tests/test_nft_transfer.py b/integration_tests/test_nft_transfer.py index 16daa0504..0f4756f4c 100644 --- a/integration_tests/test_nft_transfer.py +++ b/integration_tests/test_nft_transfer.py @@ -1,5 +1,4 @@ import json -from re import A import time 
from pathlib import Path @@ -21,7 +20,8 @@ def cluster(worker_index, pytestconfig, tmp_path_factory): ) -# This function tests nft transfer from source chain -> mid chain -> destination chain and all the way back to source +# This function tests nft transfer from source chain -> mid chain -> destination chain +# and all the way back to source # chain following the same path def test_nft_transfer(cluster): src_channel, mid_src_channel = start_and_wait_relayer(cluster, "nft") From 4d3dcf03e59aa98b950743c3f60ecfb9c1bfa215 Mon Sep 17 00:00:00 2001 From: Devashish Dixit Date: Thu, 22 Sep 2022 13:15:04 +0800 Subject: [PATCH 14/16] Emit event on timeout --- x/nft-transfer/ibc_module.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/x/nft-transfer/ibc_module.go b/x/nft-transfer/ibc_module.go index 318ad0346..53364a092 100644 --- a/x/nft-transfer/ibc_module.go +++ b/x/nft-transfer/ibc_module.go @@ -277,15 +277,15 @@ func (im IBCModule) OnTimeoutPacket( return err } - // ctx.EventManager().EmitEvent( - // sdk.NewEvent( - // types.EventTypeTimeout, - // sdk.NewAttribute(sdk.AttributeKeyModule, types.ModuleName), - // sdk.NewAttribute(types.AttributeKeyRefundReceiver, data.Sender), - // sdk.NewAttribute(types.AttributeKeyRefundDenom, data.Denom), - // sdk.NewAttribute(types.AttributeKeyRefundAmount, data.Amount), - // ), - // ) + ctx.EventManager().EmitEvent( + sdk.NewEvent( + types.EventTypeTimeout, + sdk.NewAttribute(sdk.AttributeKeyModule, types.ModuleName), + sdk.NewAttribute(types.AttributeKeyReceiver, data.Sender), + sdk.NewAttribute(types.AttributeKeyClassID, data.ClassId), + sdk.NewAttribute(types.AttributeKeyTokenIDs, strings.Join(data.TokenIds, ",")), + ), + ) return nil } From 94702e8e421d02973048a9c2b0f169cf1b99bf30 Mon Sep 17 00:00:00 2001 From: Devashish Dixit Date: Thu, 22 Sep 2022 15:14:37 +0800 Subject: [PATCH 15/16] PR comments --- integration_tests/test_nft_transfer.py | 4 ++-- x/nft-transfer/ibc_module.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/integration_tests/test_nft_transfer.py b/integration_tests/test_nft_transfer.py index 0f4756f4c..2408b9b9b 100644 --- a/integration_tests/test_nft_transfer.py +++ b/integration_tests/test_nft_transfer.py @@ -361,7 +361,7 @@ def test_nft_transfer(cluster): # FIXME more stable way to wait for relaying time.sleep(20) - # TODO: nft should be burnt on destination chain + # nft should be burnt on destination chain rsp = json.loads( cli_dst.raw( "query", @@ -376,7 +376,7 @@ def test_nft_transfer(cluster): assert len(rsp["nfts"]) == 0, rsp - # TODO: query nft on mid chain + # query nft on mid chain rsp = json.loads( cli_mid.raw( "query", diff --git a/x/nft-transfer/ibc_module.go b/x/nft-transfer/ibc_module.go index 53364a092..5b277993e 100644 --- a/x/nft-transfer/ibc_module.go +++ b/x/nft-transfer/ibc_module.go @@ -32,7 +32,7 @@ func NewIBCModule(k keeper.Keeper) IBCModule { } // ValidateTransferChannelParams does validation of a newly created nft-transfer channel. A nft-transfer -// channel must be UNORDERED, use the correct port (by default 'nft-transfer'), and use the current +// channel must be UNORDERED, use the correct port (by default 'nft'), and use the current // supported version. Only 2^32 channels are allowed to be created. 
 func ValidateTransferChannelParams(
 	ctx sdk.Context,

From bb0e6e5a5a2b9ae7860428ce0f9435cfb33790fd Mon Sep 17 00:00:00 2001
From: Devashish Dixit
Date: Thu, 22 Sep 2022 19:48:02 +0800
Subject: [PATCH 16/16] PR comments

---
 x/nft-transfer/types/trace.go | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/x/nft-transfer/types/trace.go b/x/nft-transfer/types/trace.go
index 09bcfa037..0606c59d8 100644
--- a/x/nft-transfer/types/trace.go
+++ b/x/nft-transfer/types/trace.go
@@ -48,10 +48,7 @@ func RemoveClassPrefix(portID, channelID, classID string) string {
 // The longer the fullClassPath, the farther it is from the origin chain
 func IsAwayFromOrigin(sourcePort, sourceChannel, fullClassPath string) bool {
 	prefixClassID := GetClassPrefix(sourcePort, sourceChannel)
-	if !strings.HasPrefix(fullClassPath, prefixClassID) {
-		return true
-	}
-	return fullClassPath[:len(prefixClassID)] != prefixClassID
+	return !strings.HasPrefix(fullClassPath, prefixClassID)
 }
 
 // ParseClassTrace parses a string with the ibc prefix (class trace) and the base classID
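Editor's note: the simplification in PATCH 16/16 above hinges on the class-trace prefix convention, in which every outward hop prepends "{port}/{channel}/" to the class id, so a transfer moves away from the origin chain exactly when the outgoing channel's prefix is absent from the class path. The standalone Go sketch below restates that check; getClassPrefix is an illustrative stand-in whose format is inferred from the "nft/<channel>/..." paths the integration test queries, not the module's actual GetClassPrefix helper, and the class ids in main are placeholders.

```go
package main

import (
	"fmt"
	"strings"
)

// getClassPrefix builds the "{port}/{channel}/" prefix that each outward hop
// prepends to a class id (assumed format, mirroring the paths queried in the
// integration test above).
func getClassPrefix(portID, channelID string) string {
	return fmt.Sprintf("%s/%s/", portID, channelID)
}

// isAwayFromOrigin restates the simplified check from PATCH 16/16: the transfer
// moves the token farther from its origin chain unless the class path already
// starts with the prefix of the channel it is about to be sent over, i.e. the
// token is being returned the way it came.
func isAwayFromOrigin(sourcePort, sourceChannel, fullClassPath string) bool {
	return !strings.HasPrefix(fullClassPath, getClassPrefix(sourcePort, sourceChannel))
}

func main() {
	// First hop: the class carries no trace yet, so the sender escrows the NFT
	// and the counterparty mints an "ibc/{hash}" voucher class.
	fmt.Println(isAwayFromOrigin("nft", "channel-0", "testdenomid")) // true

	// Return hop: the class path starts with the outgoing channel's prefix,
	// so the voucher is burnt and the original NFT is released from escrow.
	fmt.Println(isAwayFromOrigin("nft", "channel-0", "nft/channel-0/testdenomid")) // false
}
```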
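Editor's note: the integration test's class-hash and class-trace queries rely on the voucher class id on a receiving chain being "ibc/" plus a hash of the full trace path. The sketch below shows one way that derivation could look; the SHA-256-over-full-path scheme and uppercase hex encoding are assumptions borrowed from ibc-go's ICS-20 denom traces rather than confirmed details of this module, and the channel ids and "testdenomid" value are placeholders, not values taken from the test.

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"strings"
)

// classTrace mirrors the shape returned by `query nft-transfer class-trace` in
// the test above: a path of "{port}/{channel}" hops plus the original class id.
type classTrace struct {
	Path        string // e.g. "nft/channel-0/nft/channel-1"
	BaseClassID string // e.g. "testdenomid"
}

// fullPath joins the hop path and the base class id into the full trace path.
func (t classTrace) fullPath() string {
	if t.Path == "" {
		return t.BaseClassID
	}
	return t.Path + "/" + t.BaseClassID
}

// ibcClassID derives a local "ibc/{hash}" class id from the full trace path.
// The hashing scheme here is an assumption carried over from ICS-20 denom
// traces; the test itself only depends on the CLI returning a hash and on
// "ibc/" + hash being accepted by `query nft token`.
func (t classTrace) ibcClassID() string {
	hash := sha256.Sum256([]byte(t.fullPath()))
	return "ibc/" + strings.ToUpper(fmt.Sprintf("%x", hash[:]))
}

func main() {
	// A class received after two hops, analogous to source -> mid -> dst in the test.
	trace := classTrace{Path: "nft/channel-0/nft/channel-1", BaseClassID: "testdenomid"}
	fmt.Println(trace.fullPath())   // nft/channel-0/nft/channel-1/testdenomid
	fmt.Println(trace.ibcClassID()) // ibc/<64 uppercase hex characters>
}
```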