From 73b14c137376384e4c3a1307dd8c9a9a7408a91f Mon Sep 17 00:00:00 2001
From: larry-aptos <112209412+larry-aptos@users.noreply.github.com>
Date: Fri, 25 Aug 2023 11:38:16 -0700
Subject: [PATCH] The processors are under aptos-indexer-processors now! (#9657)

Co-authored-by: Daniel Porteous
---
 .gitmodules | 3 +
 Cargo.lock | 33 -
 Cargo.toml | 2 -
 .../indexer-grpc-parser/Cargo.toml | 42 -
 .../indexer-grpc-parser/Dockerfile | 6 -
 .../indexer-grpc-parser/README.md | 4 +-
 .../aptos-indexer-processors | 1 +
 .../indexer-grpc-parser/diesel.toml | 5 -
 .../indexer-grpc-parser/migrations/.keep | 0
 .../down.sql | 6 -
 .../up.sql | 36 -
 .../2022-08-08-043603_core_tables/down.sql | 11 -
 .../2022-08-08-043603_core_tables/up.sql | 305 ----
 .../2022-09-04-194128_add_token_data/down.sql | 5 -
 .../2022-09-04-194128_add_token_data/up.sql | 93 -
 .../down.sql | 4 -
 .../up.sql | 67 -
 .../2022-09-22-185845_token_offers/down.sql | 10 -
 .../2022-09-22-185845_token_offers/up.sql | 80 -
 .../down.sql | 5 -
 .../up.sql | 6 -
 .../down.sql | 2 -
 .../2022-10-02-011020_ans_lookup_table/up.sql | 29 -
 .../down.sql | 24 -
 .../2022-10-04-073529_add_coin_tables/up.sql | 120 --
 .../down.sql | 5 -
 .../up.sql | 48 -
 .../down.sql | 12 -
 .../2022-10-07-231825_add_coin_supply/up.sql | 33 -
 .../down.sql | 2 -
 .../up.sql | 7 -
 .../2022-10-21-055518_stake_to_voter/down.sql | 2 -
 .../2022-10-21-055518_stake_to_voter/up.sql | 11 -
 .../2022-10-30-053525_add_vote_data/down.sql | 8 -
 .../2022-10-30-053525_add_vote_data/up.sql | 22 -
 .../down.sql | 7 -
 .../2022-12-29-222902_curr_table_items/up.sql | 35 -
 .../down.sql | 6 -
 .../2023-02-15-070116_stake_delegation/up.sql | 33 -
 .../2023-03-08-205402_nft_points/down.sql | 5 -
 .../2023-03-08-205402_nft_points/up.sql | 13 -
 .../down.sql | 6 -
 .../2023-04-02-032121_delegator_pools/up.sql | 9 -
 .../down.sql | 6 -
 .../2023-04-14-033932_optimize_queries/up.sql | 38 -
 .../down.sql | 14 -
 .../up.sql | 29 -
 .../down.sql | 36 -
 .../2023-04-28-053048_object_token_v2/up.sql | 170 --
 .../2023-05-17-010107_activities_v2/down.sql | 8 -
 .../2023-05-17-010107_activities_v2/up.sql | 25 -
 .../down.sql | 24 -
 .../up.sql | 46 -
 .../down.sql | 7 -
 .../up.sql | 58 -
 .../down.sql | 2 -
 .../up.sql | 29 -
 .../down.sql | 10 -
 .../up.sql | 20 -
 .../indexer-grpc-parser/parser.yaml | 7 -
 .../indexer-grpc-parser/src/lib.rs | 18 -
 .../indexer-grpc-parser/src/main.rs | 64 -
 .../coin_models/account_transactions.rs | 144 --
 .../src/models/coin_models/coin_activities.rs | 293 ---
 .../src/models/coin_models/coin_balances.rs | 96 -
 .../src/models/coin_models/coin_infos.rs | 95 -
 .../src/models/coin_models/coin_supply.rs | 99 --
 .../src/models/coin_models/coin_utils.rs | 287 ---
 .../src/models/coin_models/mod.rs | 10 -
 .../coin_models/v2_fungible_asset_utils.rs | 282 ---
 .../block_metadata_transactions.rs | 83 -
 .../src/models/default_models/events.rs | 85 -
 .../src/models/default_models/mod.rs | 13 -
 .../src/models/default_models/move_modules.rs | 120 --
 .../models/default_models/move_resources.rs | 134 --
 .../src/models/default_models/move_tables.rs | 140 --
 .../src/models/default_models/signatures.rs | 276 ---
 .../src/models/default_models/transactions.rs | 444 -----
 .../default_models/user_transactions.rs | 131 --
 .../src/models/default_models/v2_objects.rs | 205 ---
 .../default_models/write_set_changes.rs | 227 ---
 .../src/models/ledger_info.rs | 22 -
 .../indexer-grpc-parser/src/models/mod.rs | 10 -
 .../src/models/processor_status.rs | 35 -
 .../src/models/property_map.rs | 104 --
 .../stake_models/delegator_activities.rs | 95 -
.../models/stake_models/delegator_balances.rs | 417 ----- .../models/stake_models/delegator_pools.rs | 225 --- .../src/models/stake_models/mod.rs | 9 - .../src/models/stake_models/proposal_votes.rs | 61 - .../src/models/stake_models/stake_utils.rs | 223 --- .../models/stake_models/staking_pool_voter.rs | 51 - .../src/models/token_models/ans_lookup.rs | 180 -- .../models/token_models/collection_datas.rs | 194 -- .../src/models/token_models/mod.rs | 18 - .../src/models/token_models/nft_points.rs | 90 - .../models/token_models/token_activities.rs | 197 --- .../src/models/token_models/token_claims.rs | 172 -- .../src/models/token_models/token_datas.rs | 167 -- .../models/token_models/token_ownerships.rs | 142 -- .../src/models/token_models/token_utils.rs | 481 ----- .../src/models/token_models/tokens.rs | 433 ----- .../src/models/token_models/v2_collections.rs | 310 ---- .../token_models/v2_token_activities.rs | 307 ---- .../src/models/token_models/v2_token_datas.rs | 264 --- .../models/token_models/v2_token_metadata.rs | 79 - .../token_models/v2_token_ownerships.rs | 630 ------- .../src/models/token_models/v2_token_utils.rs | 513 ------ .../src/processors/coin_processor.rs | 360 ---- .../src/processors/default_processor.rs | 618 ------- .../indexer-grpc-parser/src/processors/mod.rs | 32 - .../src/processors/processor_trait.rs | 85 - .../src/processors/stake_processor.rs | 418 ----- .../src/processors/token_processor.rs | 1565 ----------------- .../indexer-grpc-parser/src/schema.rs | 976 ---------- .../indexer-grpc-parser/src/utils/counters.rs | 106 -- .../indexer-grpc-parser/src/utils/database.rs | 138 -- .../indexer-grpc-parser/src/utils/mod.rs | 6 - .../indexer-grpc-parser/src/utils/util.rs | 580 ------ .../indexer-grpc-parser/src/worker.rs | 482 ----- 120 files changed, 7 insertions(+), 15726 deletions(-) create mode 100644 .gitmodules delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/Cargo.toml delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/Dockerfile create mode 160000 ecosystem/indexer-grpc/indexer-grpc-parser/aptos-indexer-processors delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/diesel.toml delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/migrations/.keep delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/migrations/00000000000000_diesel_initial_setup/down.sql delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/migrations/00000000000000_diesel_initial_setup/up.sql delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-08-08-043603_core_tables/down.sql delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-08-08-043603_core_tables/up.sql delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-09-04-194128_add_token_data/down.sql delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-09-04-194128_add_token_data/up.sql delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-09-20-055651_add_current_token_data/down.sql delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-09-20-055651_add_current_token_data/up.sql delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-09-22-185845_token_offers/down.sql delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-09-22-185845_token_offers/up.sql delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-02-011015_add_table_handle_to_collection/down.sql delete mode 
100644 ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-02-011015_add_table_handle_to_collection/up.sql delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-02-011020_ans_lookup_table/down.sql delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-02-011020_ans_lookup_table/up.sql delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-04-073529_add_coin_tables/down.sql delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-04-073529_add_coin_tables/up.sql delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-06-193846_add_indexer_status/down.sql delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-06-193846_add_indexer_status/up.sql delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-07-231825_add_coin_supply/down.sql delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-07-231825_add_coin_supply/up.sql delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-15-185912_improve_processor_recovery/down.sql delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-15-185912_improve_processor_recovery/up.sql delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-21-055518_stake_to_voter/down.sql delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-21-055518_stake_to_voter/up.sql delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-30-053525_add_vote_data/down.sql delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-30-053525_add_vote_data/up.sql delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-12-29-222902_curr_table_items/down.sql delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-12-29-222902_curr_table_items/up.sql delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-02-15-070116_stake_delegation/down.sql delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-02-15-070116_stake_delegation/up.sql delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-03-08-205402_nft_points/down.sql delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-03-08-205402_nft_points/up.sql delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-04-02-032121_delegator_pools/down.sql delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-04-02-032121_delegator_pools/up.sql delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-04-14-033932_optimize_queries/down.sql delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-04-14-033932_optimize_queries/up.sql delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-04-27-233343_delegation_pool_balances/down.sql delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-04-27-233343_delegation_pool_balances/up.sql delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-04-28-053048_object_token_v2/down.sql delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-04-28-053048_object_token_v2/up.sql delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-05-17-010107_activities_v2/down.sql delete mode 100644 
ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-05-17-010107_activities_v2/up.sql delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-05-22-234344_delegated_staking_improvements/down.sql delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-05-22-234344_delegated_staking_improvements/up.sql delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-05-24-052435_token_properties_v2/down.sql delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-05-24-052435_token_properties_v2/up.sql delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-07-06-042159_minor_optimizations/down.sql delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-07-06-042159_minor_optimizations/up.sql delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-07-13-060328_transactions_by_address/down.sql delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-07-13-060328_transactions_by_address/up.sql delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/parser.yaml delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/src/lib.rs delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/src/main.rs delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/src/models/coin_models/account_transactions.rs delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/src/models/coin_models/coin_activities.rs delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/src/models/coin_models/coin_balances.rs delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/src/models/coin_models/coin_infos.rs delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/src/models/coin_models/coin_supply.rs delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/src/models/coin_models/coin_utils.rs delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/src/models/coin_models/mod.rs delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/src/models/coin_models/v2_fungible_asset_utils.rs delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/src/models/default_models/block_metadata_transactions.rs delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/src/models/default_models/events.rs delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/src/models/default_models/mod.rs delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/src/models/default_models/move_modules.rs delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/src/models/default_models/move_resources.rs delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/src/models/default_models/move_tables.rs delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/src/models/default_models/signatures.rs delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/src/models/default_models/transactions.rs delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/src/models/default_models/user_transactions.rs delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/src/models/default_models/v2_objects.rs delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/src/models/default_models/write_set_changes.rs delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/src/models/ledger_info.rs delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/src/models/mod.rs delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/src/models/processor_status.rs delete mode 100644 
ecosystem/indexer-grpc/indexer-grpc-parser/src/models/property_map.rs delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/src/models/stake_models/delegator_activities.rs delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/src/models/stake_models/delegator_balances.rs delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/src/models/stake_models/delegator_pools.rs delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/src/models/stake_models/mod.rs delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/src/models/stake_models/proposal_votes.rs delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/src/models/stake_models/stake_utils.rs delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/src/models/stake_models/staking_pool_voter.rs delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/ans_lookup.rs delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/collection_datas.rs delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/mod.rs delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/nft_points.rs delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/token_activities.rs delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/token_claims.rs delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/token_datas.rs delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/token_ownerships.rs delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/token_utils.rs delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/tokens.rs delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/v2_collections.rs delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/v2_token_activities.rs delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/v2_token_datas.rs delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/v2_token_metadata.rs delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/v2_token_ownerships.rs delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/v2_token_utils.rs delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/src/processors/coin_processor.rs delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/src/processors/default_processor.rs delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/src/processors/mod.rs delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/src/processors/processor_trait.rs delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/src/processors/stake_processor.rs delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/src/processors/token_processor.rs delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/src/schema.rs delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/src/utils/counters.rs delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/src/utils/database.rs delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/src/utils/mod.rs delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/src/utils/util.rs delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-parser/src/worker.rs diff --git a/.gitmodules b/.gitmodules new file mode 100644 
index 0000000000000..5c6a1bb7fa284 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "ecosystem/indexer-grpc/indexer-grpc-parser/aptos-indexer-processors"] + path = ecosystem/indexer-grpc/indexer-grpc-parser/aptos-indexer-processors + url = https://github.com/aptos-labs/aptos-indexer-processors.git diff --git a/Cargo.lock b/Cargo.lock index 5a99bfe715f6e..78eb1c2e1ed1b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2034,39 +2034,6 @@ dependencies = [ "warp", ] -[[package]] -name = "aptos-indexer-grpc-parser" -version = "1.0.0" -dependencies = [ - "anyhow", - "aptos-indexer-grpc-server-framework", - "aptos-indexer-grpc-utils", - "aptos-metrics-core", - "aptos-moving-average", - "aptos-protos", - "aptos-runtimes", - "async-trait", - "base64 0.13.0", - "bcs 0.1.4", - "bigdecimal", - "chrono", - "clap 4.3.21", - "diesel", - "diesel_migrations", - "field_count", - "futures", - "hex", - "once_cell", - "prost", - "regex", - "serde 1.0.149", - "serde_json", - "sha2 0.9.9", - "tokio", - "tonic 0.8.3", - "tracing", -] - [[package]] name = "aptos-indexer-grpc-post-processor" version = "1.0.0" diff --git a/Cargo.toml b/Cargo.toml index 2433bc0acf50b..e6786363930f9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -106,7 +106,6 @@ members = [ "ecosystem/indexer-grpc/indexer-grpc-file-store", "ecosystem/indexer-grpc/indexer-grpc-fullnode", "ecosystem/indexer-grpc/indexer-grpc-integration-tests", - "ecosystem/indexer-grpc/indexer-grpc-parser", "ecosystem/indexer-grpc/indexer-grpc-post-processor", "ecosystem/indexer-grpc/indexer-grpc-server-framework", "ecosystem/indexer-grpc/indexer-grpc-utils", @@ -331,7 +330,6 @@ aptos-indexer-grpc-file-store = { path = "ecosystem/indexer-grpc/indexer-grpc-fi aptos-indexer-grpc-post-processor = { path = "ecosystem/indexer-grpc/indexer-grpc-post-processor" } aptos-indexer-grpc-fullnode = { path = "ecosystem/indexer-grpc/indexer-grpc-fullnode" } aptos-indexer-grpc-utils = { path = "ecosystem/indexer-grpc/indexer-grpc-utils" } -aptos-indexer-grpc-parser = { path = "ecosystem/indexer-grpc/indexer-grpc-parser" } aptos-indexer-grpc-server-framework = { path = "ecosystem/indexer-grpc/indexer-grpc-server-framework" } aptos-infallible = { path = "crates/aptos-infallible" } aptos-inspection-service = { path = "crates/aptos-inspection-service" } diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/Cargo.toml b/ecosystem/indexer-grpc/indexer-grpc-parser/Cargo.toml deleted file mode 100644 index 0193ba0b6e57b..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/Cargo.toml +++ /dev/null @@ -1,42 +0,0 @@ -[package] -name = "aptos-indexer-grpc-parser" -description = "Indexer GRPC Parser, or processor, in Rust." 
-version = "1.0.0" - -# Workspace inherited keys -authors = { workspace = true } -edition = { workspace = true } -homepage = { workspace = true } -license = { workspace = true } -publish = { workspace = true } -repository = { workspace = true } -rust-version = { workspace = true } - -[dependencies] -anyhow = { workspace = true } -aptos-indexer-grpc-server-framework = { workspace = true } -aptos-indexer-grpc-utils = { workspace = true } -aptos-metrics-core = { workspace = true } -aptos-moving-average = { workspace = true } -aptos-protos = { workspace = true } -aptos-runtimes = { workspace = true } -async-trait = { workspace = true } -base64 = { workspace = true } -bcs = { workspace = true } -bigdecimal = { workspace = true } -chrono = { workspace = true } -clap = { workspace = true } -diesel = { workspace = true } -diesel_migrations = { workspace = true } -field_count = { workspace = true } -futures = { workspace = true } -hex = { workspace = true } -once_cell = { workspace = true } -prost = { workspace = true } -regex = { workspace = true } -serde = { workspace = true } -serde_json = { workspace = true } -sha2 = { workspace = true } -tokio = { workspace = true } -tonic = { workspace = true } -tracing = { workspace = true } diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/Dockerfile b/ecosystem/indexer-grpc/indexer-grpc-parser/Dockerfile deleted file mode 100644 index 3fa9fb928ce3b..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/Dockerfile +++ /dev/null @@ -1,6 +0,0 @@ -FROM aptoslabs/indexer-grpc:indexer-grpc-preview -ADD parser.yaml /configs/parser.yaml - -WORKDIR /usr/local/bin - -CMD ["aptos-indexer-grpc-parser", "-c", "/configs/parser.yaml"] \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/README.md b/ecosystem/indexer-grpc/indexer-grpc-parser/README.md index b3a06b8c13592..ba3aabc57b5a3 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/README.md +++ b/ecosystem/indexer-grpc/indexer-grpc-parser/README.md @@ -1,8 +1,10 @@ +# All processors are migrated to [processor repo](https://github.com/aptos-labs/aptos-indexer-processors) + # Indexer GRPC Parser Indexer GRPC parser is to indexer data processor that leverages the indexer grpc data. 
-* __Note: We'll launch an official endpoint soon; stay tuned!__
+
 ## Tutorial
 ### Prerequisite
diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/aptos-indexer-processors b/ecosystem/indexer-grpc/indexer-grpc-parser/aptos-indexer-processors
new file mode 160000
index 0000000000000..6fdc6f31fcc49
--- /dev/null
+++ b/ecosystem/indexer-grpc/indexer-grpc-parser/aptos-indexer-processors
@@ -0,0 +1 @@
+Subproject commit 6fdc6f31fcc494d4ba77ca3bdc8c9eb6b3fc1acb
diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/diesel.toml b/ecosystem/indexer-grpc/indexer-grpc-parser/diesel.toml
deleted file mode 100644
index 92267c829f202..0000000000000
--- a/ecosystem/indexer-grpc/indexer-grpc-parser/diesel.toml
+++ /dev/null
@@ -1,5 +0,0 @@
-# For documentation on how to configure this file,
-# see diesel.rs/guides/configuring-diesel-cli
-
-[print_schema]
-file = "src/schema.rs"
diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/.keep b/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/.keep
deleted file mode 100644
index e69de29bb2d1d..0000000000000
diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/00000000000000_diesel_initial_setup/down.sql b/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/00000000000000_diesel_initial_setup/down.sql
deleted file mode 100644
index a9f526091194b..0000000000000
--- a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/00000000000000_diesel_initial_setup/down.sql
+++ /dev/null
@@ -1,6 +0,0 @@
--- This file was automatically created by Diesel to setup helper functions
--- and other internal bookkeeping. This file is safe to edit, any future
--- changes will be added to existing projects as new migrations.
-
-DROP FUNCTION IF EXISTS diesel_manage_updated_at(_tbl regclass);
-DROP FUNCTION IF EXISTS diesel_set_updated_at();
diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/00000000000000_diesel_initial_setup/up.sql b/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/00000000000000_diesel_initial_setup/up.sql
deleted file mode 100644
index d68895b1a7b7d..0000000000000
--- a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/00000000000000_diesel_initial_setup/up.sql
+++ /dev/null
@@ -1,36 +0,0 @@
--- This file was automatically created by Diesel to setup helper functions
--- and other internal bookkeeping. This file is safe to edit, any future
--- changes will be added to existing projects as new migrations.
- - - - --- Sets up a trigger for the given table to automatically set a column called --- `updated_at` whenever the row is modified (unless `updated_at` was included --- in the modified columns) --- --- # Example --- --- ```sql --- CREATE TABLE users (id SERIAL PRIMARY KEY, updated_at TIMESTAMP NOT NULL DEFAULT NOW()); --- --- SELECT diesel_manage_updated_at('users'); --- ``` -CREATE OR REPLACE FUNCTION diesel_manage_updated_at(_tbl regclass) RETURNS VOID AS $$ -BEGIN - EXECUTE format('CREATE TRIGGER set_updated_at BEFORE UPDATE ON %s - FOR EACH ROW EXECUTE PROCEDURE diesel_set_updated_at()', _tbl); -END; -$$ LANGUAGE plpgsql; - -CREATE OR REPLACE FUNCTION diesel_set_updated_at() RETURNS trigger AS $$ -BEGIN - IF ( - NEW IS DISTINCT FROM OLD AND - NEW.updated_at IS NOT DISTINCT FROM OLD.updated_at - ) THEN - NEW.updated_at := current_timestamp; - END IF; - RETURN NEW; -END; -$$ LANGUAGE plpgsql; diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-08-08-043603_core_tables/down.sql b/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-08-08-043603_core_tables/down.sql deleted file mode 100644 index fe8714894beeb..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-08-08-043603_core_tables/down.sql +++ /dev/null @@ -1,11 +0,0 @@ -DROP TABLE IF EXISTS block_metadata_transactions; -DROP TABLE IF EXISTS user_transactions; -DROP TABLE IF EXISTS signatures; -DROP TABLE IF EXISTS events; -DROP TABLE IF EXISTS write_set_changes; -DROP TABLE IF EXISTS move_modules; -DROP TABLE IF EXISTS move_resources; -DROP TABLE IF EXISTS table_items; -DROP TABLE IF EXISTS table_metadatas; -DROP TABLE IF EXISTS ledger_infos; -DROP TABLE IF EXISTS transactions; \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-08-08-043603_core_tables/up.sql b/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-08-08-043603_core_tables/up.sql deleted file mode 100644 index 9bcdd9c7961af..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-08-08-043603_core_tables/up.sql +++ /dev/null @@ -1,305 +0,0 @@ -/* Genesis Tx (doesn't have an entry in user_transactions or block_metadata_transactions) Ex: - { - "type":"genesis_transaction", - "version":"0", - "hash":"0x12180a4bbccf48de4d1e23b498add134328669ffc7741c8d529c6b2e3629ac99", - "state_root_hash":"0xb50adef3662d77e528be9e1cb5637fe5b7afd13eea317b330799f0c559c918c1", - "event_root_hash":"0xcbdbb1b830d1016d45a828bb3171ea81826e8315f14140acfbd7886f49fbcb40", - "gas_used":"0", - "success":true, - "vm_status":"Executed successfully", - "accumulator_root_hash":"0x188ed588547d551e652f04fccd5434c2977d6cff9e7443eb8e7c3038408caad4", - "payload":{ - "type":"write_set_payload", - "write_set":{ - "type":"direct_write_set", - "changes":[], - "events":[] - } - }, - "events":[ - { - "key":"0x0400000000000000000000000000000000000000000000000000000000000000000000000a550c18", - "sequence_number":"0", - "type":"0x1::reconfiguration::NewEpochEvent", - "data":{ - "epoch":"1" - } - } - ] - } - */ -CREATE TABLE transactions ( - version BIGINT UNIQUE PRIMARY KEY NOT NULL, - block_height BIGINT NOT NULL, - hash VARCHAR(66) UNIQUE NOT NULL, - type VARCHAR(50) NOT NULL, - payload jsonb, - state_change_hash VARCHAR(66) NOT NULL, - event_root_hash VARCHAR(66) NOT NULL, - state_checkpoint_hash VARCHAR(66), - gas_used NUMERIC NOT NULL, - success BOOLEAN NOT NULL, - vm_status TEXT NOT NULL, - accumulator_root_hash VARCHAR(66) NOT NULL, - num_events BIGINT NOT NULL, - 
num_write_set_changes BIGINT NOT NULL, - -- Default time columns - inserted_at TIMESTAMP NOT NULL DEFAULT NOW() -); -CREATE INDEX txn_insat_index ON transactions (inserted_at); -/* Ex: - { - "type":"block_metadata_transaction", - "version":"69158", - "hash":"0x2b7c58ed8524d228f9d0543a82e2793d04e8871df322f976b0e7bb8c5ced4ff5", - "state_root_hash":"0x3ead9eb40582fbc7df5e02f72280931dc3e6f1aae45dc832966b4cd972dac4b8", - "event_root_hash":"0x2e481956dea9c59b6fc9f823fe5f4c45efce173e42c551c1fe073b5d76a65504", - "gas_used":"0", - "success":true, - "vm_status":"Executed successfully", - "accumulator_root_hash":"0xb0ad602f805eb20c398f0f29a3504a9ef38bcc52c9c451deb9ec4a2d18807b49", - "id":"0xeef99391a3fc681f16963a6c03415bc0b1b12b56c00429308fa8bf46ac9eddf0", - "round":"57600", - "previous_block_votes":[ - "0x992da26d46e6d515a070c7f6e52376a1e674e850cb4d116babc6f870da9c258", - "0xfb4d785594a018bd980b4a20556d120c53a3f50b1cff9d5aa2e26eee582a587", - "0x2b7bce01a6f55e4a863c4822b154021a25588250c762ee01169b6208d6169208", - "0x43a2c4cefc4725e710dadf423dd9142057208e640c623b27c6bba704380825ab", - "0x4c91f3949924e988144550ece1da1bd9335cbecdd1c3ce1893f80e55376d018f", - "0x61616c1208b6b3491496370e7783d48426c674bdd7d04ed1a96afe2e4d8a3930", - "0x66ccccae2058641f136b79792d4d884419437826342ba84dfbbf3e52d8b3fc7d", - "0x68f04222bd9f8846cda028ea5ba3846a806b04a47e1f1a4f0939f350d713b2eb", - "0x6bbf2564ea4a6968df450da786b40b3f56b533a7b700c681c31b3714fc30256b", - "0x735c0a1cb33689ecba65907ba05a485f98831ff610955a44abf0a986f2904612", - "0x784a9514644c8ab6235aaff425381f2ea2719315a51388bc1f1e1c5afa2daaa9", - "0x7a8cee78757dfe0cee3631208cc81f171d27ca6004c63ebae5814e1754a03c79", - "0x803160c3a2f8e025df5a6e1110163493293dc974cc8abd43d4c1896000f4a1ec", - "0xcece26ebddbadfcfbc541baddc989fa73b919b82915164bbf77ebd86c7edbc90", - "0xe7be8996cbdf7db0f64abd17aa0968074b32e4b0df6560328921470e09fd608b" - ], - "proposer":"0x68f04222bd9f8846cda028ea5ba3846a806b04a47e1f1a4f0939f350d713b2eb", - "timestamp":"1649395495746947" - } - */ -CREATE TABLE block_metadata_transactions ( - version BIGINT UNIQUE PRIMARY KEY NOT NULL, - block_height BIGINT UNIQUE NOT NULL, - id VARCHAR(66) NOT NULL, - round BIGINT NOT NULL, - epoch BIGINT NOT NULL, - previous_block_votes_bitvec jsonb NOT NULL, - proposer VARCHAR(66) NOT NULL, - failed_proposer_indices jsonb NOT NULL, - "timestamp" TIMESTAMP NOT NULL, - -- Default time columns - inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), - -- Constraints - CONSTRAINT fk_versions FOREIGN KEY (version) REFERENCES transactions (version) -); -CREATE INDEX bmt_insat_index ON block_metadata_transactions (inserted_at); -/* Ex: - { - "type":"user_transaction", - "version":"691595", - "hash":"0xefd4c865e00c240da0c426a37ceeda10d9b030d0e8a4fb4fb7ff452ad63401fb", - "state_root_hash":"0xebfe1eb7aa5321e7a7d741d927487163c34c821eaab60646ae0efd02b286c97c", - "event_root_hash":"0x414343554d554c41544f525f504c414345484f4c4445525f4841534800000000", - "gas_used":"43", - "success":true, - "vm_status":"Executed successfully", - "accumulator_root_hash":"0x97bfd5949d32f6c9a9efad93411924bfda658a8829de384d531ee73c2f740971", - "sender":"0xdfd557c68c6c12b8c65908b3d3c7b95d34bb12ae6eae5a43ee30aa67a4c12494", - "sequence_number":"21386", - "max_gas_amount":"1000", - "gas_unit_price":"1", - "expiration_timestamp_secs":"1649713172", - "payload":{ - "type":"entry_function_payload", - "function":"0x1::aptos_coin::mint", - "type_arguments":[ - - ], - "arguments":[ - "0x45b44793724a5ecc6ad85fa60949d0824cfc7f61d6bd74490b13598379313142", - "20000" - ] - }, - 
"signature":{ - "type":"ed25519_signature", - "public_key":"0x14ff6646855dad4a2dab30db773cdd4b22d6f9e6813f3e50142adf4f3efcf9f8", - "signature":"0x70781112e78cc8b54b86805c016cef2478bccdef21b721542af0323276ab906c989172adffed5bf2f475f2ec3a5b284a0ac46a6aef0d79f0dbb6b85bfca0080a" - }, - "events":[ - { - "key":"0x040000000000000000000000000000000000000000000000000000000000000000000000fefefefe", - "sequence_number":"0", - "type":"0x1::Whatever::FakeEvent1", - "data":{ - "amazing":"1" - } - }, - { - "key":"0x040000000000000000000000000000000000000000000000000000000000000000000000fefefefe", - "sequence_number":"1", - "type":"0x1::Whatever::FakeEvent2", - "data":{ - "amazing":"2" - } - } - ], - "timestamp":"1649713141723410" - } - */ -CREATE TABLE user_transactions ( - version BIGINT UNIQUE PRIMARY KEY NOT NULL, - block_height BIGINT NOT NULL, - parent_signature_type VARCHAR(50) NOT NULL, - sender VARCHAR(66) NOT NULL, - sequence_number BIGINT NOT NULL, - max_gas_amount NUMERIC NOT NULL, - expiration_timestamp_secs TIMESTAMP NOT NULL, - gas_unit_price NUMERIC NOT NULL, - -- from UserTransaction - "timestamp" TIMESTAMP NOT NULL, - entry_function_id_str text NOT NULL, - -- Default time columns - inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), - -- Constraints - CONSTRAINT fk_versions FOREIGN KEY (version) REFERENCES transactions (version), - UNIQUE (sender, sequence_number) -); -CREATE INDEX ut_sender_seq_index ON user_transactions (sender, sequence_number); -CREATE INDEX ut_insat_index ON user_transactions (inserted_at); --- tracks signatures for user transactions -CREATE TABLE signatures ( - transaction_version BIGINT NOT NULL, - multi_agent_index BIGINT NOT NULL, - multi_sig_index BIGINT NOT NULL, - transaction_block_height BIGINT NOT NULL, - signer VARCHAR(66) NOT NULL, - is_sender_primary BOOLEAN NOT NULL, - type VARCHAR(50) NOT NULL, - public_key VARCHAR(66) NOT NULL, - signature VARCHAR(200) NOT NULL, - threshold BIGINT NOT NULL, - public_key_indices jsonb NOT NULL, - inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), - -- Constraints - PRIMARY KEY ( - transaction_version, - multi_agent_index, - multi_sig_index, - is_sender_primary - ), - CONSTRAINT fk_transaction_versions FOREIGN KEY (transaction_version) REFERENCES transactions (version) -); -CREATE INDEX sig_insat_index ON signatures (inserted_at); -/** Ex: - { - "key": "0x0400000000000000000000000000000000000000000000000000000000000000000000000a550c18", - "sequence_number": "0", - "type": "0x1::reconfiguration::NewEpochEvent", - "data": { - "epoch": "1" - } - } - */ -CREATE TABLE events ( - sequence_number BIGINT NOT NULL, - creation_number BIGINT NOT NULL, - account_address VARCHAR(66) NOT NULL, - transaction_version BIGINT NOT NULL, - transaction_block_height BIGINT NOT NULL, - type TEXT NOT NULL, - data jsonb NOT NULL, - inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), - -- Constraints - PRIMARY KEY ( - account_address, - creation_number, - sequence_number - ), - CONSTRAINT fk_transaction_versions FOREIGN KEY (transaction_version) REFERENCES transactions (version) -); -CREATE INDEX ev_addr_type_index ON events (account_address); -CREATE INDEX ev_insat_index ON events (inserted_at); --- write set changes -CREATE TABLE write_set_changes ( - transaction_version BIGINT NOT NULL, - index BIGINT NOT NULL, - hash VARCHAR(66) NOT NULL, - transaction_block_height BIGINT NOT NULL, - type TEXT NOT NULL, - address VARCHAR(66) NOT NULL, - inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), - -- Constraints - PRIMARY KEY (transaction_version, index), - CONSTRAINT 
fk_transaction_versions FOREIGN KEY (transaction_version) REFERENCES transactions (version) -); -CREATE INDEX wsc_addr_type_ver_index ON write_set_changes (address, transaction_version DESC); -CREATE INDEX wsc_insat_index ON write_set_changes (inserted_at); --- move modules in write set changes -CREATE TABLE move_modules ( - transaction_version BIGINT NOT NULL, - write_set_change_index BIGINT NOT NULL, - transaction_block_height BIGINT NOT NULL, - name TEXT NOT NULL, - address VARCHAR(66) NOT NULL, - bytecode bytea, - friends jsonb, - exposed_functions jsonb, - structs jsonb, - is_deleted BOOLEAN NOT NULL, - inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), - -- Constraints - PRIMARY KEY (transaction_version, write_set_change_index), - CONSTRAINT fk_transaction_versions FOREIGN KEY (transaction_version) REFERENCES transactions (version) -); -CREATE INDEX mm_addr_name_ver_index ON move_modules (address, name, transaction_version); -CREATE INDEX mm_insat_index ON move_modules (inserted_at); --- move resources in write set changes -CREATE TABLE move_resources ( - transaction_version BIGINT NOT NULL, - write_set_change_index BIGINT NOT NULL, - transaction_block_height BIGINT NOT NULL, - name TEXT NOT NULL, - address VARCHAR(66) NOT NULL, - type TEXT NOT NULL, - module TEXT NOT NULL, - generic_type_params jsonb, - data jsonb, - is_deleted BOOLEAN NOT NULL, - inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), - -- Constraints - PRIMARY KEY (transaction_version, write_set_change_index), - CONSTRAINT fk_transaction_versions FOREIGN KEY (transaction_version) REFERENCES transactions (version) -); -CREATE INDEX mr_addr_mod_name_ver_index ON move_resources (address, module, name, transaction_version); -CREATE INDEX mr_insat_index ON move_resources (inserted_at); --- table items in write set changes -CREATE TABLE table_items ( - key text NOT NULL, - transaction_version BIGINT NOT NULL, - write_set_change_index BIGINT NOT NULL, - transaction_block_height BIGINT NOT NULL, - table_handle VARCHAR(66) NOT NULL, - decoded_key jsonb NOT NULL, - decoded_value jsonb, - is_deleted BOOLEAN NOT NULL, - inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), - -- Constraints - PRIMARY KEY (transaction_version, write_set_change_index), - CONSTRAINT fk_transaction_versions FOREIGN KEY (transaction_version) REFERENCES transactions (version) -); -CREATE INDEX ti_hand_ver_key_index ON table_items (table_handle, transaction_version); -CREATE INDEX ti_insat_index ON table_items (inserted_at); --- table metadatas from table items -CREATE TABLE table_metadatas ( - handle VARCHAR(66) UNIQUE PRIMARY KEY NOT NULL, - key_type text NOT NULL, - value_type text NOT NULL, - inserted_at TIMESTAMP NOT NULL DEFAULT NOW() -); -CREATE INDEX tm_insat_index ON table_metadatas (inserted_at); -CREATE TABLE ledger_infos (chain_id BIGINT UNIQUE PRIMARY KEY NOT NULL); \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-09-04-194128_add_token_data/down.sql b/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-09-04-194128_add_token_data/down.sql deleted file mode 100644 index 591c0a65680d1..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-09-04-194128_add_token_data/down.sql +++ /dev/null @@ -1,5 +0,0 @@ --- This file should undo anything in `up.sql` -DROP TABLE IF EXISTS tokens; -DROP TABLE IF EXISTS token_ownerships; -DROP TABLE IF EXISTS token_datas; -DROP TABLE IF EXISTS collection_datas; \ No newline at end of file diff --git 
a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-09-04-194128_add_token_data/up.sql b/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-09-04-194128_add_token_data/up.sql deleted file mode 100644 index 22183c5982da3..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-09-04-194128_add_token_data/up.sql +++ /dev/null @@ -1,93 +0,0 @@ --- Your SQL goes here --- tracks tokens per version -CREATE TABLE tokens ( - -- sha256 of creator + collection_name + name - token_data_id_hash VARCHAR(64) NOT NULL, - property_version NUMERIC NOT NULL, - transaction_version BIGINT NOT NULL, - creator_address VARCHAR(66) NOT NULL, - collection_name VARCHAR(128) NOT NULL, - name VARCHAR(128) NOT NULL, - token_properties jsonb NOT NULL, - inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), - -- Constraints - PRIMARY KEY ( - token_data_id_hash, - property_version, - transaction_version - ) -); -CREATE INDEX token_crea_cn_name_index ON tokens (creator_address, collection_name, name); -CREATE INDEX token_insat_index ON tokens (inserted_at); --- tracks who owns tokens at certain version -CREATE TABLE token_ownerships ( - -- sha256 of creator + collection_name + name - token_data_id_hash VARCHAR(64) NOT NULL, - property_version NUMERIC NOT NULL, - transaction_version BIGINT NOT NULL, - table_handle VARCHAR(66) NOT NULL, - creator_address VARCHAR(66) NOT NULL, - collection_name VARCHAR(128) NOT NULL, - name VARCHAR(128) NOT NULL, - owner_address VARCHAR(66), - amount NUMERIC NOT NULL, - table_type TEXT, - inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), - -- Constraints - PRIMARY KEY ( - token_data_id_hash, - property_version, - transaction_version, - table_handle - ) -); -CREATE INDEX to_owner_index ON token_ownerships (owner_address); -CREATE INDEX to_crea_cn_name_index ON token_ownerships (creator_address, collection_name, name); -CREATE INDEX to_insat_index ON token_ownerships (inserted_at); --- tracks token metadata -CREATE TABLE token_datas ( - -- sha256 of creator + collection_name + name - token_data_id_hash VARCHAR(64) NOT NULL, - transaction_version BIGINT NOT NULL, - creator_address VARCHAR(66) NOT NULL, - collection_name VARCHAR(128) NOT NULL, - name VARCHAR(128) NOT NULL, - maximum NUMERIC NOT NULL, - supply NUMERIC NOT NULL, - largest_property_version NUMERIC NOT NULL, - metadata_uri VARCHAR(512) NOT NULL, - payee_address VARCHAR(66) NOT NULL, - royalty_points_numerator NUMERIC NOT NULL, - royalty_points_denominator NUMERIC NOT NULL, - maximum_mutable BOOLEAN NOT NULL, - uri_mutable BOOLEAN NOT NULL, - description_mutable BOOLEAN NOT NULL, - properties_mutable BOOLEAN NOT NULL, - royalty_mutable BOOLEAN NOT NULL, - default_properties jsonb NOT NULL, - inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), - -- Constraints - PRIMARY KEY (token_data_id_hash, transaction_version) -); -CREATE INDEX td_crea_cn_name_index ON token_datas (creator_address, collection_name, name); -CREATE INDEX td_insat_index ON token_datas (inserted_at); --- tracks collection metadata -CREATE TABLE collection_datas ( - -- sha256 of creator + collection_name - collection_data_id_hash VARCHAR(64) NOT NULL, - transaction_version BIGINT NOT NULL, - creator_address VARCHAR(66) NOT NULL, - collection_name VARCHAR(128) NOT NULL, - description TEXT NOT NULL, - metadata_uri VARCHAR(512) NOT NULL, - supply NUMERIC NOT NULL, - maximum NUMERIC NOT NULL, - maximum_mutable BOOLEAN NOT NULL, - uri_mutable BOOLEAN NOT NULL, - description_mutable BOOLEAN NOT NULL, - inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), - -- 
Constraints - PRIMARY KEY (collection_data_id_hash, transaction_version) -); -CREATE INDEX cd_crea_cn_index ON collection_datas (creator_address, collection_name); -CREATE INDEX cd_insat_index ON collection_datas (inserted_at); \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-09-20-055651_add_current_token_data/down.sql b/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-09-20-055651_add_current_token_data/down.sql deleted file mode 100644 index fac2b2c3d5037..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-09-20-055651_add_current_token_data/down.sql +++ /dev/null @@ -1,4 +0,0 @@ --- This file should undo anything in `up.sql` -DROP TABLE IF EXISTS current_token_ownerships; -DROP TABLE IF EXISTS current_token_datas; -DROP TABLE IF EXISTS current_collection_datas; \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-09-20-055651_add_current_token_data/up.sql b/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-09-20-055651_add_current_token_data/up.sql deleted file mode 100644 index 10dd6e595c660..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-09-20-055651_add_current_token_data/up.sql +++ /dev/null @@ -1,67 +0,0 @@ --- Your SQL goes here --- tracks tokens in owner's tokenstore -CREATE TABLE current_token_ownerships ( - -- sha256 of creator + collection_name + name - token_data_id_hash VARCHAR(64) NOT NULL, - property_version NUMERIC NOT NULL, - owner_address VARCHAR(66) NOT NULL, - creator_address VARCHAR(66) NOT NULL, - collection_name VARCHAR(128) NOT NULL, - name VARCHAR(128) NOT NULL, - amount NUMERIC NOT NULL, - token_properties jsonb NOT NULL, - last_transaction_version BIGINT NOT NULL, - inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), - -- Constraints - PRIMARY KEY ( - token_data_id_hash, - property_version, - owner_address - ) -); -CREATE INDEX curr_to_crea_cn_name_index ON current_token_ownerships (creator_address, collection_name, name); -CREATE INDEX curr_to_owner_index ON current_token_ownerships (owner_address); -CREATE INDEX curr_to_insat_index ON current_token_ownerships (inserted_at); --- tracks latest token metadata -CREATE TABLE current_token_datas ( - -- sha256 of creator + collection_name + name - token_data_id_hash VARCHAR(64) UNIQUE PRIMARY KEY NOT NULL, - creator_address VARCHAR(66) NOT NULL, - collection_name VARCHAR(128) NOT NULL, - name VARCHAR(128) NOT NULL, - maximum NUMERIC NOT NULL, - supply NUMERIC NOT NULL, - largest_property_version NUMERIC NOT NULL, - metadata_uri VARCHAR(512) NOT NULL, - payee_address VARCHAR(66) NOT NULL, - royalty_points_numerator NUMERIC NOT NULL, - royalty_points_denominator NUMERIC NOT NULL, - maximum_mutable BOOLEAN NOT NULL, - uri_mutable BOOLEAN NOT NULL, - description_mutable BOOLEAN NOT NULL, - properties_mutable BOOLEAN NOT NULL, - royalty_mutable BOOLEAN NOT NULL, - default_properties jsonb NOT NULL, - last_transaction_version BIGINT NOT NULL, - inserted_at TIMESTAMP NOT NULL DEFAULT NOW() -); -CREATE INDEX curr_td_crea_cn_name_index ON current_token_datas (creator_address, collection_name, name); -CREATE INDEX curr_td_insat_index ON current_token_datas (inserted_at); --- tracks latest collection metadata -CREATE TABLE current_collection_datas ( - -- sha256 of creator + collection_name - collection_data_id_hash VARCHAR(64) UNIQUE PRIMARY KEY NOT NULL, - creator_address VARCHAR(66) NOT NULL, - collection_name VARCHAR(128) NOT NULL, - description TEXT NOT 
NULL, - metadata_uri VARCHAR(512) NOT NULL, - supply NUMERIC NOT NULL, - maximum NUMERIC NOT NULL, - maximum_mutable BOOLEAN NOT NULL, - uri_mutable BOOLEAN NOT NULL, - description_mutable BOOLEAN NOT NULL, - last_transaction_version BIGINT NOT NULL, - inserted_at TIMESTAMP NOT NULL DEFAULT NOW() -); -CREATE INDEX curr_cd_crea_cn_index ON current_collection_datas (creator_address, collection_name); -CREATE INDEX curr_cd_insat_index ON current_collection_datas (inserted_at); \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-09-22-185845_token_offers/down.sql b/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-09-22-185845_token_offers/down.sql deleted file mode 100644 index 545b6fe1e3606..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-09-22-185845_token_offers/down.sql +++ /dev/null @@ -1,10 +0,0 @@ --- This file should undo anything in `up.sql` -ALTER TABLE current_token_ownerships DROP COLUMN collection_data_id_hash, - DROP COLUMN table_type; -ALTER TABLE current_token_datas DROP COLUMN collection_data_id_hash; -ALTER TABLE token_datas DROP COLUMN collection_data_id_hash; -ALTER TABLE tokens DROP COLUMN collection_data_id_hash; -ALTER TABLE token_ownerships DROP COLUMN collection_data_id_hash; -DROP INDEX IF EXISTS curr_to_owner_tt_am_index; -DROP TABLE IF EXISTS token_activities; -DROP TABLE IF EXISTS current_token_pending_claims; \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-09-22-185845_token_offers/up.sql b/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-09-22-185845_token_offers/up.sql deleted file mode 100644 index 21e095a2c3fac..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-09-22-185845_token_offers/up.sql +++ /dev/null @@ -1,80 +0,0 @@ --- Your SQL goes here -ALTER TABLE current_token_ownerships -ADD COLUMN collection_data_id_hash VARCHAR(64) NOT NULL, - ADD COLUMN table_type TEXT NOT NULL; -ALTER TABLE current_token_datas -ADD COLUMN collection_data_id_hash VARCHAR(64) NOT NULL; -ALTER TABLE token_datas -ADD COLUMN collection_data_id_hash VARCHAR(64) NOT NULL; -ALTER TABLE tokens -ADD COLUMN collection_data_id_hash VARCHAR(64) NOT NULL; -ALTER TABLE token_ownerships -ADD COLUMN collection_data_id_hash VARCHAR(64) NOT NULL; --- add indices for current ownership to speed up queries -CREATE INDEX curr_to_owner_tt_am_index ON current_token_ownerships (owner_address, table_type, amount); --- tracks all token activities -CREATE TABLE token_activities ( - transaction_version BIGINT NOT NULL, - event_account_address VARCHAR(66) NOT NULL, - event_creation_number BIGINT NOT NULL, - event_sequence_number BIGINT NOT NULL, - collection_data_id_hash VARCHAR(64) NOT NULL, - token_data_id_hash VARCHAR(64) NOT NULL, - property_version NUMERIC NOT NULL, - creator_address VARCHAR(66) NOT NULL, - collection_name VARCHAR(128) NOT NULL, - name VARCHAR(128) NOT NULL, - transfer_type VARCHAR(50) NOT NULL, - from_address VARCHAR(66), - to_address VARCHAR(66), - token_amount NUMERIC NOT NULL, - coin_type TEXT, - coin_amount NUMERIC, - inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), - -- Constraints - PRIMARY KEY ( - transaction_version, - event_account_address, - event_creation_number, - event_sequence_number - ) -); -CREATE INDEX ta_from_ttyp_index ON token_activities (from_address, transfer_type); -CREATE INDEX ta_to_ttyp_index ON token_activities (to_address, transfer_type); -CREATE INDEX ta_addr_coll_name_pv_index ON 
token_activities ( - creator_address, - collection_name, - name, - property_version -); -CREATE INDEX ta_tdih_pv_index ON token_activities (token_data_id_hash, property_version); -CREATE INDEX ta_version_index ON token_activities (transaction_version); -CREATE INDEX ta_insat_index ON token_activities (inserted_at); --- Tracks current pending claims -CREATE TABLE current_token_pending_claims ( - token_data_id_hash VARCHAR(64) NOT NULL, - property_version NUMERIC NOT NULL, - from_address VARCHAR(66) NOT NULL, - to_address VARCHAR(66) NOT NULL, - collection_data_id_hash VARCHAR(64) NOT NULL, - creator_address VARCHAR(66) NOT NULL, - collection_name VARCHAR(128) NOT NULL, - name VARCHAR(128) NOT NULL, - -- 0 means either claimed or canceled - amount NUMERIC NOT NULL, - table_handle VARCHAR(66) NOT NULL, - last_transaction_version BIGINT NOT NULL, - inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), - -- Constraints - PRIMARY KEY ( - -- This is basically the token offer id - token_data_id_hash, - property_version, - from_address, - to_address - ) -); -CREATE INDEX ctpc_th_index ON current_token_pending_claims (table_handle); -CREATE INDEX ctpc_from_am_index ON current_token_pending_claims (from_address, amount); -CREATE INDEX ctpc_to_am_index ON current_token_pending_claims (to_address, amount); -CREATE INDEX ctpc_insat_index ON current_token_pending_claims (inserted_at); \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-02-011015_add_table_handle_to_collection/down.sql b/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-02-011015_add_table_handle_to_collection/down.sql deleted file mode 100644 index c1cd8feec62b7..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-02-011015_add_table_handle_to_collection/down.sql +++ /dev/null @@ -1,5 +0,0 @@ --- This file should undo anything in `up.sql` -ALTER TABLE collection_datas -DROP COLUMN table_handle; -ALTER TABLE current_collection_datas -DROP COLUMN table_handle; \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-02-011015_add_table_handle_to_collection/up.sql b/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-02-011015_add_table_handle_to_collection/up.sql deleted file mode 100644 index a2c110c13db6b..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-02-011015_add_table_handle_to_collection/up.sql +++ /dev/null @@ -1,6 +0,0 @@ --- Your SQL goes here -ALTER TABLE collection_datas -ADD COLUMN table_handle VARCHAR(66) NOT NULL; -ALTER TABLE current_collection_datas -ADD COLUMN table_handle VARCHAR(66) NOT NULL; -CREATE INDEX curr_cd_th_index ON current_collection_datas (table_handle); \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-02-011020_ans_lookup_table/down.sql b/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-02-011020_ans_lookup_table/down.sql deleted file mode 100644 index ac543b764e8aa..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-02-011020_ans_lookup_table/down.sql +++ /dev/null @@ -1,2 +0,0 @@ --- This file should undo anything in `up.sql` -DROP TABLE IF EXISTS current_ans_lookup; \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-02-011020_ans_lookup_table/up.sql b/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-02-011020_ans_lookup_table/up.sql deleted file mode 100644 index 
37520b8b95223..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-02-011020_ans_lookup_table/up.sql +++ /dev/null @@ -1,29 +0,0 @@ --- Your SQL goes here --- add indices for current ownership to speed up queries -CREATE INDEX curr_to_oa_tt_am_ltv_index ON current_token_ownerships ( - owner_address, - table_type, - amount, - last_transaction_version DESC -); -CREATE INDEX curr_to_oa_tt_ltv_index ON current_token_ownerships ( - owner_address, - table_type, - last_transaction_version DESC -); --- allows quick lookup for aptos name services registered address -CREATE TABLE current_ans_lookup ( - domain VARCHAR(64) NOT NULL, - -- if subdomain is null set to empty string - subdomain VARCHAR(64) NOT NULL, - registered_address VARCHAR(66), - expiration_timestamp TIMESTAMP NOT NULL, - last_transaction_version BIGINT NOT NULL, - inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), - -- Constraints - PRIMARY KEY (domain, subdomain) -); -CREATE INDEX ans_et_index ON current_ans_lookup (expiration_timestamp); -CREATE INDEX ans_ra_et_index ON current_ans_lookup (registered_address, expiration_timestamp); -CREATE INDEX ans_d_s_et_index ON current_ans_lookup (domain, subdomain, expiration_timestamp); -CREATE INDEX ans_insat_index ON current_ans_lookup (inserted_at); \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-04-073529_add_coin_tables/down.sql b/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-04-073529_add_coin_tables/down.sql deleted file mode 100644 index 2aadda38b6e3b..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-04-073529_add_coin_tables/down.sql +++ /dev/null @@ -1,24 +0,0 @@ --- This file should undo anything in `up.sql` -DROP TABLE IF EXISTS coin_infos; -DROP TABLE IF EXISTS coin_balances; -DROP TABLE IF EXISTS current_coin_balances; -DROP TABLE IF EXISTS coin_activities; -ALTER TABLE token_activities -DROP COLUMN IF EXISTS transaction_timestamp; -ALTER TABLE current_token_pending_claims -DROP COLUMN IF EXISTS last_transaction_timestamp; -ALTER TABLE current_token_ownerships -DROP COLUMN IF EXISTS last_transaction_timestamp; -ALTER TABLE current_token_datas -DROP COLUMN IF EXISTS last_transaction_timestamp; -ALTER TABLE current_collection_datas -DROP COLUMN IF EXISTS last_transaction_timestamp; -ALTER TABLE tokens -DROP COLUMN IF EXISTS transaction_timestamp; -ALTER TABLE token_ownerships -DROP COLUMN IF EXISTS transaction_timestamp; -ALTER TABLE token_datas -DROP COLUMN IF EXISTS transaction_timestamp; -ALTER TABLE collection_datas -DROP COLUMN IF EXISTS transaction_timestamp; -DROP VIEW IF EXISTS move_resources_view; \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-04-073529_add_coin_tables/up.sql b/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-04-073529_add_coin_tables/up.sql deleted file mode 100644 index 1b21945d49651..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-04-073529_add_coin_tables/up.sql +++ /dev/null @@ -1,120 +0,0 @@ --- Your SQL goes here -CREATE VIEW move_resources_view AS -SELECT transaction_version, - write_set_change_index, - transaction_block_height, - name, - address, - "type", - "module", - generic_type_params, - data#>>'{}' as json_data, - is_deleted, - inserted_at -FROM move_resources; --- adding timestamp to all token tables -ALTER TABLE token_activities -ADD COLUMN transaction_timestamp TIMESTAMP NOT NULL; -ALTER TABLE 
current_token_pending_claims -ADD COLUMN last_transaction_timestamp TIMESTAMP NOT NULL; -ALTER TABLE current_token_ownerships -ADD COLUMN last_transaction_timestamp TIMESTAMP NOT NULL; -ALTER TABLE current_token_datas -ADD COLUMN last_transaction_timestamp TIMESTAMP NOT NULL; -ALTER TABLE current_collection_datas -ADD COLUMN last_transaction_timestamp TIMESTAMP NOT NULL; -ALTER TABLE tokens -ADD COLUMN transaction_timestamp TIMESTAMP NOT NULL; -ALTER TABLE token_ownerships -ADD COLUMN transaction_timestamp TIMESTAMP NOT NULL; -ALTER TABLE token_datas -ADD COLUMN transaction_timestamp TIMESTAMP NOT NULL; -ALTER TABLE collection_datas -ADD COLUMN transaction_timestamp TIMESTAMP NOT NULL; --- coin infos. Only first transaction matters -CREATE TABLE coin_infos ( - -- Hash of the non-truncated coin type - coin_type_hash VARCHAR(64) UNIQUE PRIMARY KEY NOT NULL, - -- creator_address::name::symbol - coin_type VARCHAR(5000) NOT NULL, - -- transaction version where coin info was first defined - transaction_version_created BIGINT NOT NULL, - creator_address VARCHAR(66) NOT NULL, - name VARCHAR(32) NOT NULL, - symbol VARCHAR(10) NOT NULL, - decimals INT NOT NULL, - transaction_created_timestamp TIMESTAMP NOT NULL, - inserted_at TIMESTAMP NOT NULL DEFAULT NOW() -); -CREATE INDEX ci_ct_index on coin_infos (coin_type); -CREATE INDEX ci_ca_name_symbol_index on coin_infos (creator_address, name, symbol); -CREATE INDEX ci_insat_index ON coin_infos (inserted_at); --- current coin owned by user -CREATE TABLE coin_balances ( - transaction_version BIGINT NOT NULL, - owner_address VARCHAR(66) NOT NULL, - -- Hash of the non-truncated coin type - coin_type_hash VARCHAR(64) NOT NULL, - -- creator_address::name::symbol - coin_type VARCHAR(5000) NOT NULL, - amount NUMERIC NOT NULL, - transaction_timestamp TIMESTAMP NOT NULL, - inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), - -- Constraints - PRIMARY KEY ( - transaction_version, - owner_address, - coin_type_hash - ) -); -CREATE INDEX cb_tv_oa_ct_index on coin_balances (transaction_version, owner_address, coin_type); -CREATE INDEX cb_oa_ct_index on coin_balances (owner_address, coin_type); -CREATE INDEX cb_ct_a_index on coin_balances (coin_type, amount); -CREATE INDEX cb_insat_index ON coin_balances (inserted_at); --- current coin owned by user -CREATE TABLE current_coin_balances ( - owner_address VARCHAR(66) NOT NULL, - -- Hash of the non-truncated coin type - coin_type_hash VARCHAR(64) NOT NULL, - -- creator_address::name::symbol - coin_type VARCHAR(5000) NOT NULL, - amount NUMERIC NOT NULL, - last_transaction_version BIGINT NOT NULL, - last_transaction_timestamp TIMESTAMP NOT NULL, - inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), - -- Constraints - PRIMARY KEY (owner_address, coin_type_hash) -); -CREATE INDEX ccb_oa_ct_index on current_coin_balances (owner_address, coin_type); -CREATE INDEX ccb_ct_a_index on current_coin_balances (coin_type, amount); -CREATE INDEX ccb_insat_index on current_coin_balances (inserted_at); --- coinstore activities (send, receive, gas fees). 
Mint/burn not supported because event missing -CREATE TABLE coin_activities ( - transaction_version BIGINT NOT NULL, - event_account_address VARCHAR(66) NOT NULL, - event_creation_number BIGINT NOT NULL, - event_sequence_number BIGINT NOT NULL, - owner_address VARCHAR(66) NOT NULL, - -- creator_address::name::symbol - coin_type VARCHAR(5000) NOT NULL, - amount NUMERIC NOT NULL, - activity_type VARCHAR(200) NOT NULL, - is_gas_fee BOOLEAN NOT NULL, - is_transaction_success BOOLEAN NOT NULL, - entry_function_id_str VARCHAR(100), - block_height BIGINT NOT NULL, - transaction_timestamp TIMESTAMP NOT NULL, - inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), - -- Constraints - PRIMARY KEY ( - transaction_version, - event_account_address, - event_creation_number, - event_sequence_number - ) -); -CREATE INDEX ca_oa_ct_at_index on coin_activities (owner_address, coin_type, activity_type, amount); -CREATE INDEX ca_oa_igf_index on coin_activities (owner_address, is_gas_fee); -CREATE INDEX ca_ct_at_a_index on coin_activities (coin_type, activity_type, amount); -CREATE INDEX ca_ct_a_index on coin_activities (coin_type, amount); -CREATE INDEX ca_insat_index on coin_activities (inserted_at); \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-06-193846_add_indexer_status/down.sql b/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-06-193846_add_indexer_status/down.sql deleted file mode 100644 index 145a770fc9892..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-06-193846_add_indexer_status/down.sql +++ /dev/null @@ -1,5 +0,0 @@ --- This file should undo anything in `up.sql` -DROP TABLE IF EXISTS indexer_status; -DROP VIEW IF EXISTS events_view; -DROP VIEW IF EXISTS table_items_view; -DROP VIEW IF EXISTS transactions_view; diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-06-193846_add_indexer_status/up.sql b/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-06-193846_add_indexer_status/up.sql deleted file mode 100644 index d0a1460fe5641..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-06-193846_add_indexer_status/up.sql +++ /dev/null @@ -1,48 +0,0 @@ --- Your SQL goes here --- manually toggle indexer status on/off -CREATE TABLE indexer_status ( - db VARCHAR(50) UNIQUE PRIMARY KEY NOT NULL, - is_indexer_up BOOLEAN NOT NULL, - inserted_at TIMESTAMP NOT NULL DEFAULT NOW() -); --- Create event view to avoid large jsonb -CREATE VIEW events_view AS -SELECT sequence_number, - creation_number, - account_address, - transaction_version, - transaction_block_height, - "type", - "data"#>>'{}' AS json_data, - inserted_at -FROM events; --- Create table_items view to avoid large jsonb -CREATE VIEW table_items_view AS -SELECT "key", - transaction_version, - write_set_change_index, - transaction_block_height, - table_handle, - decoded_key#>>'{}' AS json_decoded_key, - decoded_value#>>'{}' AS json_decoded_value, - is_deleted, - inserted_at -FROM table_items; --- Create transactions view to avoid large jsonb -CREATE VIEW transactions_view AS -SELECT "version", - block_height, - "hash", - "type", - payload#>>'{}' AS json_payload, - state_change_hash, - event_root_hash, - state_checkpoint_hash, - gas_used, - success, - vm_status, - accumulator_root_hash, - num_events, - num_write_set_changes, - inserted_at -FROM transactions; \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-07-231825_add_coin_supply/down.sql 
b/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-07-231825_add_coin_supply/down.sql deleted file mode 100644 index 453b6cdb4ede9..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-07-231825_add_coin_supply/down.sql +++ /dev/null @@ -1,12 +0,0 @@ --- This file should undo anything in `up.sql` -DROP TABLE IF EXISTS coin_supply; -DROP INDEX IF EXISTS cs_ct_tv_index; -DROP INDEX IF EXISTS cs_epoch_index; -ALTER TABLE coin_infos DROP COLUMN IF EXISTS supply_aggregator_table_handle, - DROP COLUMN IF EXISTS supply_aggregator_table_key; -ALTER TABLE token_datas DROP COLUMN IF EXISTS description; -ALTER TABLE current_token_datas DROP COLUMN IF EXISTS description; -ALTER TABLE user_transactions DROP COLUMN IF EXISTS epoch; -ALTER TABLE transactions DROP COLUMN IF EXISTS epoch; -DROP INDEX IF EXISTS ut_epoch_index; -DROP INDEX IF EXISTS txn_epoch_index; \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-07-231825_add_coin_supply/up.sql b/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-07-231825_add_coin_supply/up.sql deleted file mode 100644 index ba200264ee152..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-07-231825_add_coin_supply/up.sql +++ /dev/null @@ -1,33 +0,0 @@ --- Your SQL goes here --- coin supply, currently aptos coin only -CREATE TABLE coin_supply ( - transaction_version BIGINT NOT NULL, - -- Hash of the non-truncated coin type - coin_type_hash VARCHAR(64) NOT NULL, - coin_type VARCHAR(5000) NOT NULL, - supply NUMERIC NOT NULL, - transaction_timestamp TIMESTAMP NOT NULL, - transaction_epoch BIGINT NOT NULL, - inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), - -- Constraints - PRIMARY KEY (transaction_version, coin_type_hash) -); -CREATE INDEX cs_ct_tv_index on coin_supply (coin_type, transaction_version desc); -CREATE INDEX cs_epoch_index on coin_supply (transaction_epoch); --- Add coin supply aggregator handle to coin infos to be able to access total supply data -ALTER TABLE coin_infos -ADD COLUMN supply_aggregator_table_handle VARCHAR(66), - ADD COLUMN supply_aggregator_table_key TEXT; --- Add description to token_datas and current_token_datas -ALTER TABLE token_datas -ADD COLUMN description TEXT NOT NULL; -ALTER TABLE current_token_datas -ADD COLUMN description TEXT NOT NULL; --- Add epoch to user transactions and transactions -ALTER TABLE user_transactions -ADD COLUMN epoch BIGINT NOT NULL; -ALTER TABLE transactions -ADD COLUMN epoch BIGINT NOT NULL; --- Create index on epoch for easy queries -CREATE INDEX ut_epoch_index ON user_transactions (epoch); -CREATE INDEX txn_epoch_index ON transactions (epoch); \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-15-185912_improve_processor_recovery/down.sql b/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-15-185912_improve_processor_recovery/down.sql deleted file mode 100644 index 05524e94c18a5..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-15-185912_improve_processor_recovery/down.sql +++ /dev/null @@ -1,2 +0,0 @@ --- This file should undo anything in `up.sql` -DROP TABLE IF EXISTS processor_status; \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-15-185912_improve_processor_recovery/up.sql b/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-15-185912_improve_processor_recovery/up.sql deleted file mode 100644 index 
0ad64122d962b..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-15-185912_improve_processor_recovery/up.sql +++ /dev/null @@ -1,7 +0,0 @@ --- Your SQL goes here --- Tracks latest processed version per processor -CREATE TABLE processor_status ( - processor VARCHAR(50) UNIQUE PRIMARY KEY NOT NULL, - last_success_version BIGINT NOT NULL, - last_updated TIMESTAMP NOT NULL DEFAULT NOW() -); \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-21-055518_stake_to_voter/down.sql b/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-21-055518_stake_to_voter/down.sql deleted file mode 100644 index cb3805945d4dd..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-21-055518_stake_to_voter/down.sql +++ /dev/null @@ -1,2 +0,0 @@ --- This file should undo anything in `up.sql` -DROP TABLE IF EXISTS current_staking_pool_voter; \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-21-055518_stake_to_voter/up.sql b/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-21-055518_stake_to_voter/up.sql deleted file mode 100644 index 7a44aa574ac0b..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-21-055518_stake_to_voter/up.sql +++ /dev/null @@ -1,11 +0,0 @@ --- Your SQL goes here --- allows quick lookup of staking pool address to voter address and vice versa. Each staking pool --- can only be mapped to one voter address at a time. -CREATE TABLE current_staking_pool_voter ( - staking_pool_address VARCHAR(66) UNIQUE PRIMARY KEY NOT NULL, - voter_address VARCHAR(66) NOT NULL, - last_transaction_version BIGINT NOT NULL, - inserted_at TIMESTAMP NOT NULL DEFAULT NOW() -); -CREATE INDEX ctpv_va_index ON current_staking_pool_voter (voter_address); -CREATE INDEX ctpv_insat_index ON current_staking_pool_voter (inserted_at); \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-30-053525_add_vote_data/down.sql b/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-30-053525_add_vote_data/down.sql deleted file mode 100644 index 929411b9235f8..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-30-053525_add_vote_data/down.sql +++ /dev/null @@ -1,8 +0,0 @@ --- This file should undo anything in `up.sql` -DROP INDEX IF EXISTS ans_tn_index; -ALTER TABLE current_ans_lookup DROP COLUMN IF EXISTS token_name; -DROP INDEX IF EXISTS pv_pi_va_index; -DROP INDEX IF EXISTS pv_va_index; -DROP INDEX IF EXISTS pv_spa_index; -DROP INDEX IF EXISTS pv_ia_index; -DROP TABLE IF EXISTS proposal_votes; \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-30-053525_add_vote_data/up.sql b/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-30-053525_add_vote_data/up.sql deleted file mode 100644 index 2f13904c1c805..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-10-30-053525_add_vote_data/up.sql +++ /dev/null @@ -1,22 +0,0 @@ --- Your SQL goes here --- Add token_name to join with token tables, {subdomain}.{domain}.apt -ALTER TABLE current_ans_lookup -ADD COLUMN token_name VARCHAR(140) NOT NULL DEFAULT ''; -CREATE INDEX ans_tn_index ON current_ans_lookup (token_name); --- Add voting table -CREATE TABLE proposal_votes ( - transaction_version BIGINT NOT NULL, - proposal_id BIGINT NOT NULL, - voter_address VARCHAR(66) NOT NULL, - staking_pool_address VARCHAR(66) NOT 
NULL, - num_votes NUMERIC NOT NULL, - should_pass BOOLEAN NOT NULL, - transaction_timestamp TIMESTAMP NOT NULL, - inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), - -- Constraints - PRIMARY KEY (transaction_version, proposal_id, voter_address) -); -CREATE INDEX pv_pi_va_index ON proposal_votes (proposal_id, voter_address); -CREATE INDEX pv_va_index ON proposal_votes (voter_address); -CREATE INDEX pv_spa_index ON proposal_votes (staking_pool_address); -CREATE INDEX pv_ia_index ON proposal_votes (inserted_at); \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-12-29-222902_curr_table_items/down.sql b/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-12-29-222902_curr_table_items/down.sql deleted file mode 100644 index 459971ea0fbe5..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-12-29-222902_curr_table_items/down.sql +++ /dev/null @@ -1,7 +0,0 @@ --- This file should undo anything in `up.sql` -DROP VIEW IF EXISTS current_table_items_view; -DROP INDEX IF EXISTS cti_insat_index; -DROP TABLE IF EXISTS current_table_items; -ALTER TABLE events DROP COLUMN IF EXISTS event_index; -ALTER TABLE token_activities DROP COLUMN IF EXISTS event_index; -ALTER TABLE coin_activities DROP COLUMN IF EXISTS event_index; \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-12-29-222902_curr_table_items/up.sql b/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-12-29-222902_curr_table_items/up.sql deleted file mode 100644 index b4c7de689c59c..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2022-12-29-222902_curr_table_items/up.sql +++ /dev/null @@ -1,35 +0,0 @@ --- Your SQL goes here -CREATE TABLE current_table_items ( - table_handle VARCHAR(66) NOT NULL, - -- Hash of the key for pk since key is unbounded - key_hash VARCHAR(64) NOT NULL, - key text NOT NULL, - decoded_key jsonb NOT NULL, - decoded_value jsonb, - is_deleted BOOLEAN NOT NULL, - last_transaction_version BIGINT NOT NULL, - inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), - -- Constraints - PRIMARY KEY ( - table_handle, - key_hash - ) -); -CREATE INDEX cti_insat_index ON current_table_items (inserted_at); --- Create view to avoid large jsonb in bigquery -CREATE VIEW current_table_items_view AS -SELECT "key", - table_handle, - key_hash, - decoded_key#>>'{}' AS json_decoded_key, - decoded_value#>>'{}' AS json_decoded_value, - is_deleted, - last_transaction_version, - inserted_at -FROM current_table_items; -ALTER TABLE events -ADD COLUMN event_index BIGINT; -ALTER TABLE token_activities -ADD COLUMN event_index BIGINT; -ALTER TABLE coin_activities -ADD COLUMN event_index BIGINT; \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-02-15-070116_stake_delegation/down.sql b/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-02-15-070116_stake_delegation/down.sql deleted file mode 100644 index 8e9f8b0bc9df7..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-02-15-070116_stake_delegation/down.sql +++ /dev/null @@ -1,6 +0,0 @@ --- This file should undo anything in `up.sql` -DROP TABLE IF EXISTS delegated_staking_activities; -DROP INDEX IF EXISTS dsa_pa_da_index; -DROP INDEX IF EXISTS dsa_insat_index; -DROP TABLE IF EXISTS current_delegator_balances; -DROP INDEX IF EXISTS cdb_insat_index; \ No newline at end of file diff --git 
a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-02-15-070116_stake_delegation/up.sql b/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-02-15-070116_stake_delegation/up.sql deleted file mode 100644 index 6343a2af2b412..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-02-15-070116_stake_delegation/up.sql +++ /dev/null @@ -1,33 +0,0 @@ --- Your SQL goes here --- get delegated staking events such as withdraw , unlock, add stake, etc. -CREATE TABLE delegated_staking_activities ( - transaction_version BIGINT NOT NULL, - event_index BIGINT NOT NULL, - delegator_address VARCHAR(66) NOT NULL, - pool_address VARCHAR(66) NOT NULL, - event_type text NOT NULL, - amount NUMERIC NOT NULL, - inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), - -- Constraints - PRIMARY KEY (transaction_version, event_index) -); -CREATE INDEX dsa_pa_da_index ON delegated_staking_activities ( - pool_address, - delegator_address, - transaction_version asc, - event_index asc -); -CREATE INDEX dsa_insat_index ON delegated_staking_activities (inserted_at); --- estimates how much delegator has staked in a pool (currently supports active only) -CREATE TABLE current_delegator_balances ( - delegator_address VARCHAR(66) NOT NULL, - pool_address VARCHAR(66) NOT NULL, - pool_type VARCHAR(100) NOT NULL, - table_handle VARCHAR(66) NOT NULL, - amount NUMERIC NOT NULL, - last_transaction_version BIGINT NOT NULL, - inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), - -- Constraints - PRIMARY KEY (delegator_address, pool_address, pool_type) -); -CREATE INDEX cdb_insat_index ON delegated_staking_activities (inserted_at); \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-03-08-205402_nft_points/down.sql b/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-03-08-205402_nft_points/down.sql deleted file mode 100644 index 5a0b650f1d648..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-03-08-205402_nft_points/down.sql +++ /dev/null @@ -1,5 +0,0 @@ --- This file should undo anything in `up.sql`\ -DROP TABLE IF EXISTS nft_points; -DROP INDEX IF EXISTS np_oa_idx; -DROP INDEX IF EXISTS np_tt_oa_idx; -DROP INDEX IF EXISTS np_insat_idx; \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-03-08-205402_nft_points/up.sql b/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-03-08-205402_nft_points/up.sql deleted file mode 100644 index 5a458b84761e5..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-03-08-205402_nft_points/up.sql +++ /dev/null @@ -1,13 +0,0 @@ --- Your SQL goes here -CREATE TABLE nft_points ( - transaction_version BIGINT UNIQUE PRIMARY KEY NOT NULL, - owner_address VARCHAR(66) NOT NULL, - token_name TEXT NOT NULL, - point_type TEXT NOT NULL, - amount NUMERIC NOT NULL, - transaction_timestamp TIMESTAMP NOT NULL, - inserted_at TIMESTAMP NOT NULL DEFAULT NOW() -); -CREATE INDEX np_oa_idx ON nft_points (owner_address); -CREATE INDEX np_tt_oa_idx ON nft_points (transaction_timestamp, owner_address); -CREATE INDEX np_insat_idx ON nft_points (inserted_at); diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-04-02-032121_delegator_pools/down.sql b/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-04-02-032121_delegator_pools/down.sql deleted file mode 100644 index 271dec8e6bb77..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-04-02-032121_delegator_pools/down.sql +++ /dev/null 
@@ -1,6 +0,0 @@ --- This file should undo anything in `up.sql` -DROP TABLE IF EXISTS delegated_staking_pools; -DROP INDEX IF EXISTS dsp_oa_index; -DROP INDEX IF EXISTS dsp_insat_index; -ALTER TABLE current_staking_pool_voter -DROP COLUMN IF EXISTS operator_address; \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-04-02-032121_delegator_pools/up.sql b/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-04-02-032121_delegator_pools/up.sql deleted file mode 100644 index 9fca47c1fd895..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-04-02-032121_delegator_pools/up.sql +++ /dev/null @@ -1,9 +0,0 @@ --- Your SQL goes here -CREATE TABLE IF NOT EXISTS delegated_staking_pools ( - staking_pool_address VARCHAR(66) UNIQUE PRIMARY KEY NOT NULL, - first_transaction_version BIGINT NOT NULL, - inserted_at TIMESTAMP NOT NULL DEFAULT NOW() -); -CREATE INDEX dsp_insat_index ON delegated_staking_pools (inserted_at); -ALTER TABLE current_staking_pool_voter -ADD COLUMN IF NOT EXISTS operator_address VARCHAR(66) NOT NULL; \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-04-14-033932_optimize_queries/down.sql b/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-04-14-033932_optimize_queries/down.sql deleted file mode 100644 index fad4eba1abe86..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-04-14-033932_optimize_queries/down.sql +++ /dev/null @@ -1,6 +0,0 @@ --- This file should undo anything in `up.sql` -DROP VIEW IF EXISTS address_version_from_events; -DROP VIEW IF EXISTS address_version_from_move_resources; -DROP VIEW IF EXISTS current_collection_ownership_view; -DROP VIEW IF EXISTS num_active_delegator_per_pool; -DROP INDEX IF EXISTS curr_to_collection_hash_owner_index; \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-04-14-033932_optimize_queries/up.sql b/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-04-14-033932_optimize_queries/up.sql deleted file mode 100644 index 286edb5e6d09f..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-04-14-033932_optimize_queries/up.sql +++ /dev/null @@ -1,38 +0,0 @@ --- Your SQL goes here --- need this to query transactions that touch an account's events -CREATE OR REPLACE VIEW address_version_from_events AS -SELECT account_address, - transaction_version -FROM events -GROUP BY 1, - 2; --- need this to query transactions that touch an account's move resources -CREATE OR REPLACE VIEW address_version_from_move_resources AS -SELECT address, - transaction_version -FROM move_resources -GROUP BY 1, - 2; --- need this for getting NFTs grouped by collections -CREATE OR REPLACE VIEW current_collection_ownership_view AS -SELECT owner_address, - creator_address, - collection_name, - collection_data_id_hash, - MAX(last_transaction_version) AS last_transaction_version, - COUNT(DISTINCT name) AS distinct_tokens -FROM current_token_ownerships -WHERE amount > 0 -GROUP BY 1, - 2, - 3, - 4; --- need this for delegation staking -CREATE OR REPLACE VIEW num_active_delegator_per_pool AS -SELECT pool_address, - COUNT(DISTINCT delegator_address) AS num_active_delegator -FROM current_delegator_balances -WHERE amount > 0 -GROUP BY 1; --- indices -CREATE INDEX IF NOT EXISTS curr_to_collection_hash_owner_index ON current_token_ownerships (collection_data_id_hash, owner_address); \ No newline at end of file diff --git 
a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-04-27-233343_delegation_pool_balances/down.sql b/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-04-27-233343_delegation_pool_balances/down.sql deleted file mode 100644 index 6160df2ba68fc..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-04-27-233343_delegation_pool_balances/down.sql +++ /dev/null @@ -1,14 +0,0 @@ --- This file should undo anything in `up.sql` -DROP TABLE IF EXISTS delegated_staking_pool_balances; -DROP TABLE IF EXISTS current_delegated_staking_pool_balances; -DROP INDEX IF EXISTS dspb_insat_index; -ALTER TABLE current_delegator_balances -ADD COLUMN IF NOT EXISTS amount NUMERIC NOT NULL DEFAULT 0; --- need this for delegation staking, changing to amount -CREATE OR REPLACE VIEW num_active_delegator_per_pool AS -SELECT pool_address, - COUNT(DISTINCT delegator_address) AS num_active_delegator -FROM current_delegator_balances -WHERE amount > 0 -GROUP BY 1; -ALTER TABLE current_delegator_balances DROP COLUMN IF EXISTS shares; \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-04-27-233343_delegation_pool_balances/up.sql b/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-04-27-233343_delegation_pool_balances/up.sql deleted file mode 100644 index 13d7ae07926bc..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-04-27-233343_delegation_pool_balances/up.sql +++ /dev/null @@ -1,29 +0,0 @@ --- Your SQL goes here -CREATE TABLE IF NOT EXISTS delegated_staking_pool_balances ( - transaction_version BIGINT NOT NULL, - staking_pool_address VARCHAR(66) NOT NULL, - total_coins NUMERIC NOT NULL, - total_shares NUMERIC NOT NULL, - inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), - -- Constraints - PRIMARY KEY (transaction_version, staking_pool_address) -); -CREATE INDEX dspb_insat_index ON delegated_staking_pool_balances (inserted_at); -CREATE TABLE IF NOT EXISTS current_delegated_staking_pool_balances ( - staking_pool_address VARCHAR(66) UNIQUE PRIMARY KEY NOT NULL, - total_coins NUMERIC NOT NULL, - total_shares NUMERIC NOT NULL, - last_transaction_version BIGINT NOT NULL, - inserted_at TIMESTAMP NOT NULL DEFAULT NOW() -); -CREATE INDEX cdspb_insat_index ON current_delegated_staking_pool_balances (inserted_at); -ALTER TABLE current_delegator_balances -ADD COLUMN IF NOT EXISTS shares NUMERIC NOT NULL; --- need this for delegation staking, changing to shares -CREATE OR REPLACE VIEW num_active_delegator_per_pool AS -SELECT pool_address, - COUNT(DISTINCT delegator_address) AS num_active_delegator -FROM current_delegator_balances -WHERE shares > 0 -GROUP BY 1; -ALTER TABLE current_delegator_balances DROP COLUMN IF EXISTS amount; \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-04-28-053048_object_token_v2/down.sql b/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-04-28-053048_object_token_v2/down.sql deleted file mode 100644 index bce48fd6ab577..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-04-28-053048_object_token_v2/down.sql +++ /dev/null @@ -1,36 +0,0 @@ --- This file should undo anything in `up.sql` -DROP TABLE IF EXISTS objects; -DROP INDEX IF EXISTS o_owner_idx; -DROP INDEX IF EXISTS o_object_skh_idx; -DROP INDEX IF EXISTS o_skh_idx; -DROP INDEX IF EXISTS o_insat_idx; -DROP TABLE IF EXISTS current_objects; -DROP INDEX IF EXISTS co_owner_idx; -DROP INDEX IF EXISTS co_object_skh_idx; -DROP INDEX IF EXISTS 
co_skh_idx; -DROP INDEX IF EXISTS co_insat_idx; -ALTER TABLE move_resources DROP COLUMN IF EXISTS state_key_hash; -DROP TABLE IF EXISTS token_ownerships_v2; -DROP INDEX IF EXISTS to2_id_index; -DROP INDEX IF EXISTS to2_owner_index; -DROP INDEX IF EXISTS to2_insat_index; -DROP TABLE IF EXISTS current_token_ownerships_v2; -DROP INDEX IF EXISTS curr_to2_owner_index; -DROP INDEX IF EXISTS curr_to2_wa_index; -DROP INDEX IF EXISTS curr_to2_insat_index; -DROP TABLE IF EXISTS collections_v2; -DROP INDEX IF EXISTS col2_id_index; -DROP INDEX IF EXISTS col2_crea_cn_index; -DROP INDEX IF EXISTS col2_insat_index; -DROP TABLE IF EXISTS current_collections_v2; -DROP INDEX IF EXISTS cur_col2_crea_cn_index; -DROP INDEX IF EXISTS cur_col2_insat_index; -DROP TABLE IF EXISTS token_datas_v2; -DROP INDEX IF EXISTS td2_id_index; -DROP INDEX IF EXISTS td2_cid_name_index; -DROP INDEX IF EXISTS td2_insat_index; -DROP TABLE IF EXISTS current_token_datas_v2; -DROP INDEX IF EXISTS cur_td2_cid_name_index; -DROP INDEX IF EXISTS cur_td2_insat_index; -ALTER TABLE current_token_pending_claims DROP COLUMN IF EXISTS token_data_id; -ALTER TABLE current_token_pending_claims DROP COLUMN IF EXISTS collection_id; \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-04-28-053048_object_token_v2/up.sql b/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-04-28-053048_object_token_v2/up.sql deleted file mode 100644 index bf8f5f4d7929e..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-04-28-053048_object_token_v2/up.sql +++ /dev/null @@ -1,170 +0,0 @@ --- Your SQL goes here --- objects, basically normalizing ObjectCore -CREATE TABLE IF NOT EXISTS objects ( - transaction_version BIGINT NOT NULL, - write_set_change_index BIGINT NOT NULL, - object_address VARCHAR(66) NOT NULL, - owner_address VARCHAR(66), - state_key_hash VARCHAR(66) NOT NULL, - guid_creation_num NUMERIC, - allow_ungated_transfer BOOLEAN, - is_deleted BOOLEAN NOT NULL, - inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), - -- constraints - PRIMARY KEY (transaction_version, write_set_change_index) -); -CREATE INDEX IF NOT EXISTS o_owner_idx ON objects (owner_address); -CREATE INDEX IF NOT EXISTS o_object_skh_idx ON objects (object_address, state_key_hash); -CREATE INDEX IF NOT EXISTS o_skh_idx ON objects (state_key_hash); -CREATE INDEX IF NOT EXISTS o_insat_idx ON objects (inserted_at); --- latest instance of objects -CREATE TABLE IF NOT EXISTS current_objects ( - object_address VARCHAR(66) UNIQUE PRIMARY KEY NOT NULL, - owner_address VARCHAR(66) NOT NULL, - state_key_hash VARCHAR(66) NOT NULL, - allow_ungated_transfer BOOLEAN NOT NULL, - last_guid_creation_num NUMERIC NOT NULL, - last_transaction_version BIGINT NOT NULL, - is_deleted BOOLEAN NOT NULL, - inserted_at TIMESTAMP NOT NULL DEFAULT NOW() -); -CREATE INDEX IF NOT EXISTS co_owner_idx ON current_objects (owner_address); -CREATE INDEX IF NOT EXISTS co_object_skh_idx ON current_objects (object_address, state_key_hash); -CREATE INDEX IF NOT EXISTS co_skh_idx ON current_objects (state_key_hash); -CREATE INDEX IF NOT EXISTS co_insat_idx ON current_objects (inserted_at); --- Add this so that we can find resource groups by their state_key_hash -ALTER TABLE move_resources -ADD COLUMN IF NOT EXISTS state_key_hash VARCHAR(66) NOT NULL DEFAULT ''; --- NFT stuff --- tracks who owns tokens -CREATE TABLE IF NOT EXISTS token_ownerships_v2 ( - transaction_version BIGINT NOT NULL, - write_set_change_index BIGINT NOT NULL, - token_data_id VARCHAR(66) 
NOT NULL, - property_version_v1 NUMERIC NOT NULL, - owner_address VARCHAR(66), - storage_id VARCHAR(66) NOT NULL, - amount NUMERIC NOT NULL, - table_type_v1 VARCHAR(66), - token_properties_mutated_v1 JSONB, - is_soulbound_v2 BOOLEAN, - token_standard VARCHAR(10) NOT NULL, - is_fungible_v2 BOOLEAN, - transaction_timestamp TIMESTAMP NOT NULL, - inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), - PRIMARY KEY (transaction_version, write_set_change_index) -); -CREATE INDEX IF NOT EXISTS to2_id_index ON token_ownerships_v2 (token_data_id); -CREATE INDEX IF NOT EXISTS to2_owner_index ON token_ownerships_v2 (owner_address); -CREATE INDEX IF NOT EXISTS to2_insat_index ON token_ownerships_v2 (inserted_at); -CREATE TABLE IF NOT EXISTS current_token_ownerships_v2 ( - token_data_id VARCHAR(66) NOT NULL, - property_version_v1 NUMERIC NOT NULL, - owner_address VARCHAR(66) NOT NULL, - storage_id VARCHAR(66) NOT NULL, - amount NUMERIC NOT NULL, - table_type_v1 VARCHAR(66), - token_properties_mutated_v1 JSONB, - is_soulbound_v2 BOOLEAN, - token_standard VARCHAR(10) NOT NULL, - is_fungible_v2 BOOLEAN, - last_transaction_version BIGINT NOT NULL, - last_transaction_timestamp TIMESTAMP NOT NULL, - inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), - PRIMARY KEY ( - token_data_id, - property_version_v1, - owner_address, - storage_id - ) -); -CREATE INDEX IF NOT EXISTS curr_to2_owner_index ON current_token_ownerships_v2 (owner_address); -CREATE INDEX IF NOT EXISTS curr_to2_wa_index ON current_token_ownerships_v2 (storage_id); -CREATE INDEX IF NOT EXISTS curr_to2_insat_index ON current_token_ownerships_v2 (inserted_at); --- tracks collections -CREATE TABLE IF NOT EXISTS collections_v2 ( - transaction_version BIGINT NOT NULL, - write_set_change_index BIGINT NOT NULL, - collection_id VARCHAR(66) NOT NULL, - creator_address VARCHAR(66) NOT NULL, - collection_name VARCHAR(128) NOT NULL, - description TEXT NOT NULL, - uri VARCHAR(512) NOT NULL, - current_supply NUMERIC NOT NULL, - max_supply NUMERIC, - total_minted_v2 NUMERIC, - mutable_description BOOLEAN, - mutable_uri BOOLEAN, - table_handle_v1 VARCHAR(66), - token_standard VARCHAR(10) NOT NULL, - transaction_timestamp TIMESTAMP NOT NULL, - inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), - PRIMARY KEY (transaction_version, write_set_change_index) -); -CREATE INDEX IF NOT EXISTS col2_id_index ON collections_v2 (collection_id); -CREATE INDEX IF NOT EXISTS col2_crea_cn_index ON collections_v2 (creator_address, collection_name); -CREATE INDEX IF NOT EXISTS col2_insat_index ON collections_v2 (inserted_at); -CREATE TABLE IF NOT EXISTS current_collections_v2 ( - collection_id VARCHAR(66) UNIQUE PRIMARY KEY NOT NULL, - creator_address VARCHAR(66) NOT NULL, - collection_name VARCHAR(128) NOT NULL, - description TEXT NOT NULL, - uri VARCHAR(512) NOT NULL, - current_supply NUMERIC NOT NULL, - max_supply NUMERIC, - total_minted_v2 NUMERIC, - mutable_description BOOLEAN, - mutable_uri BOOLEAN, - table_handle_v1 VARCHAR(66), - token_standard VARCHAR(10) NOT NULL, - last_transaction_version BIGINT NOT NULL, - last_transaction_timestamp TIMESTAMP NOT NULL, - inserted_at TIMESTAMP NOT NULL DEFAULT NOW() -); -CREATE INDEX IF NOT EXISTS cur_col2_crea_cn_index ON current_collections_v2 (creator_address, collection_name); -CREATE INDEX IF NOT EXISTS cur_col2_insat_index ON current_collections_v2 (inserted_at); --- tracks token metadata -CREATE TABLE IF NOT EXISTS token_datas_v2 ( - transaction_version BIGINT NOT NULL, - write_set_change_index BIGINT NOT NULL, - token_data_id VARCHAR(66) NOT NULL, 
- collection_id VARCHAR(66) NOT NULL, - token_name VARCHAR(128) NOT NULL, - maximum NUMERIC, - supply NUMERIC NOT NULL, - largest_property_version_v1 NUMERIC, - token_uri VARCHAR(512) NOT NULL, - token_properties JSONB NOT NULL, - description TEXT NOT NULL, - token_standard VARCHAR(10) NOT NULL, - is_fungible_v2 BOOLEAN, - transaction_timestamp TIMESTAMP NOT NULL, - inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), - PRIMARY KEY (transaction_version, write_set_change_index) -); -CREATE INDEX IF NOT EXISTS td2_id_index ON token_datas_v2 (token_data_id); -CREATE INDEX IF NOT EXISTS td2_cid_name_index ON token_datas_v2 (collection_id, token_name); -CREATE INDEX IF NOT EXISTS td2_insat_index ON token_datas_v2 (inserted_at); -CREATE TABLE IF NOT EXISTS current_token_datas_v2 ( - token_data_id VARCHAR(66) UNIQUE PRIMARY KEY NOT NULL, - collection_id VARCHAR(66) NOT NULL, - token_name VARCHAR(128) NOT NULL, - maximum NUMERIC, - supply NUMERIC NOT NULL, - largest_property_version_v1 NUMERIC, - token_uri VARCHAR(512) NOT NULL, - description TEXT NOT NULL, - token_properties JSONB NOT NULL, - token_standard VARCHAR(10) NOT NULL, - is_fungible_v2 BOOLEAN, - last_transaction_version BIGINT NOT NULL, - last_transaction_timestamp TIMESTAMP NOT NULL, - inserted_at TIMESTAMP NOT NULL DEFAULT NOW() -); -CREATE INDEX IF NOT EXISTS cur_td2_cid_name_index ON current_token_datas_v2 (collection_id, token_name); -CREATE INDEX IF NOT EXISTS cur_td2_insat_index ON current_token_datas_v2 (inserted_at); --- Add ID (with 0x prefix) -ALTER TABLE current_token_pending_claims -ADD COLUMN IF NOT EXISTS token_data_id VARCHAR(66) NOT NULL DEFAULT ''; -ALTER TABLE current_token_pending_claims -ADD COLUMN IF NOT EXISTS collection_id VARCHAR(66) NOT NULL DEFAULT ''; \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-05-17-010107_activities_v2/down.sql b/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-05-17-010107_activities_v2/down.sql deleted file mode 100644 index eac13ae1d581e..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-05-17-010107_activities_v2/down.sql +++ /dev/null @@ -1,8 +0,0 @@ --- This file should undo anything in `up.sql` -DROP TABLE IF EXISTS token_activities_v2; -DROP INDEX IF EXISTS ta2_owner_type_index; -DROP INDEX IF EXISTS ta2_from_type_index; -DROP INDEX IF EXISTS ta2_to_type_index; -DROP INDEX IF EXISTS ta2_tid_index; -DROP INDEX IF EXISTS ta2_cid_index; -DROP INDEX IF EXISTS ta2_insat_index; \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-05-17-010107_activities_v2/up.sql b/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-05-17-010107_activities_v2/up.sql deleted file mode 100644 index 6a753b82f9b11..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-05-17-010107_activities_v2/up.sql +++ /dev/null @@ -1,25 +0,0 @@ --- Your SQL goes here -CREATE TABLE IF NOT EXISTS token_activities_v2 ( - transaction_version BIGINT NOT NULL, - event_index BIGINT NOT NULL, - event_account_address VARCHAR(66) NOT NULL, - token_data_id VARCHAR(66) NOT NULL, - property_version_v1 NUMERIC NOT NULL, - type VARCHAR(50) NOT NULL, - from_address VARCHAR(66), - to_address VARCHAR(66), - token_amount NUMERIC NOT NULL, - before_value TEXT, - after_value TEXT, - entry_function_id_str VARCHAR(100), - token_standard VARCHAR(10) NOT NULL, - is_fungible_v2 BOOLEAN, - transaction_timestamp TIMESTAMP NOT NULL, - inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), - 
PRIMARY KEY (transaction_version, event_index) -); -CREATE INDEX IF NOT EXISTS ta2_owner_type_index ON token_activities_v2 (event_account_address, type); -CREATE INDEX IF NOT EXISTS ta2_from_type_index ON token_activities_v2 (from_address, type); -CREATE INDEX IF NOT EXISTS ta2_to_type_index ON token_activities_v2 (to_address, type); -CREATE INDEX IF NOT EXISTS ta2_tid_index ON token_activities_v2 (token_data_id); -CREATE INDEX IF NOT EXISTS ta2_insat_index ON token_activities_v2 (inserted_at); \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-05-22-234344_delegated_staking_improvements/down.sql b/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-05-22-234344_delegated_staking_improvements/down.sql deleted file mode 100644 index 4337e764c79f1..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-05-22-234344_delegated_staking_improvements/down.sql +++ /dev/null @@ -1,24 +0,0 @@ --- This file should undo anything in `up.sql` -ALTER TABLE current_delegated_staking_pool_balances DROP COLUMN IF EXISTS operator_commission_percentage, - DROP COLUMN IF EXISTS inactive_table_handle, - DROP COLUMN IF EXISTS active_table_handle; -DROP INDEX IF EXISTS cdspb_inactive_index; -ALTER TABLE delegated_staking_pool_balances DROP COLUMN IF EXISTS operator_commission_percentage, - DROP COLUMN IF EXISTS inactive_table_handle, - DROP COLUMN IF EXISTS active_table_handle; -ALTER TABLE current_delegator_balances DROP COLUMN IF EXISTS parent_table_handle; -ALTER TABLE current_delegator_balances DROP CONSTRAINT current_delegator_balances_pkey; -ALTER TABLE current_delegator_balances -ADD CONSTRAINT current_delegator_balances_pkey PRIMARY KEY ( - delegator_address, - pool_address, - pool_type - ); -CREATE OR REPLACE VIEW num_active_delegator_per_pool AS -SELECT pool_address, - COUNT(DISTINCT delegator_address) AS num_active_delegator -FROM current_delegator_balances -WHERE shares > 0 -GROUP BY 1; -DROP VIEW IF EXISTS delegator_distinct_pool; -DROP VIEW IF EXISTS address_events_summary; \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-05-22-234344_delegated_staking_improvements/up.sql b/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-05-22-234344_delegated_staking_improvements/up.sql deleted file mode 100644 index 4b5c32ad9e749..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-05-22-234344_delegated_staking_improvements/up.sql +++ /dev/null @@ -1,46 +0,0 @@ --- Your SQL goes here --- adding new fields to staking pool balances for display and handling inactive pools -ALTER TABLE current_delegated_staking_pool_balances -ADD COLUMN IF NOT EXISTS operator_commission_percentage NUMERIC NOT NULL, - ADD COLUMN IF NOT EXISTS inactive_table_handle VARCHAR(66) NOT NULL, - ADD COLUMN IF NOT EXISTS active_table_handle VARCHAR(66) NOT NULL; -CREATE INDEX IF NOT EXISTS cdspb_inactive_index ON current_delegated_staking_pool_balances (inactive_table_handle); --- adding new fields to staking pool balances for display and handling inactive pools -ALTER TABLE delegated_staking_pool_balances -ADD COLUMN IF NOT EXISTS operator_commission_percentage NUMERIC NOT NULL, - ADD COLUMN IF NOT EXISTS inactive_table_handle VARCHAR(66) NOT NULL, - ADD COLUMN IF NOT EXISTS active_table_handle VARCHAR(66) NOT NULL; --- add new field to composite primary key because technically a user could have inactive pools -ALTER TABLE current_delegator_balances -ADD COLUMN IF NOT EXISTS 
parent_table_handle VARCHAR(66) NOT NULL; -ALTER TABLE current_delegator_balances DROP CONSTRAINT current_delegator_balances_pkey; -ALTER TABLE current_delegator_balances -ADD CONSTRAINT current_delegator_balances_pkey PRIMARY KEY ( - delegator_address, - pool_address, - pool_type, - table_handle - ); --- need this for delegation staking -CREATE OR REPLACE VIEW num_active_delegator_per_pool AS -SELECT pool_address, - COUNT(DISTINCT delegator_address) AS num_active_delegator -FROM current_delegator_balances -WHERE shares > 0 - AND pool_type = 'active_shares' -GROUP BY 1; --- need this for delegation staking -CREATE OR REPLACE VIEW delegator_distinct_pool AS -SELECT delegator_address, - pool_address -FROM current_delegator_balances -WHERE shares > 0 -GROUP BY 1, - 2; --- new query for wallet -CREATE OR REPLACE VIEW address_events_summary AS -SELECT account_address, - min(transaction_block_height) AS min_block_height, - count(DISTINCT transaction_version) AS num_distinct_versions -FROM events -GROUP BY 1 \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-05-24-052435_token_properties_v2/down.sql b/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-05-24-052435_token_properties_v2/down.sql deleted file mode 100644 index 759d57bc385cb..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-05-24-052435_token_properties_v2/down.sql +++ /dev/null @@ -1,7 +0,0 @@ --- This file should undo anything in `up.sql` -DROP VIEW IF EXISTS current_collection_ownership_v2_view; -DROP TABLE IF EXISTS current_token_v2_metadata; -ALTER TABLE token_datas_v2 DROP COLUMN IF EXISTS decimals; -ALTER TABLE current_token_datas_v2 DROP COLUMN IF EXISTS decimals; -ALTER TABLE token_ownerships_v2 DROP COLUMN IF EXISTS non_transferrable_by_owner; -ALTER TABLE current_token_ownerships_v2 DROP COLUMN IF EXISTS non_transferrable_by_owner; \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-05-24-052435_token_properties_v2/up.sql b/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-05-24-052435_token_properties_v2/up.sql deleted file mode 100644 index 9703d87425be2..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-05-24-052435_token_properties_v2/up.sql +++ /dev/null @@ -1,58 +0,0 @@ --- Your SQL goes here --- need this for getting NFTs grouped by collections -create or replace view current_collection_ownership_v2_view as -select owner_address, - b.collection_id, - MAX(a.last_transaction_version) as last_transaction_version, - COUNT(distinct a.token_data_id) as distinct_tokens -from current_token_ownerships_v2 a - join current_token_datas_v2 b on a.token_data_id = b.token_data_id -where a.amount > 0 -group by 1, - 2; --- create table for all structs in token object core -CREATE TABLE IF NOT EXISTS current_token_v2_metadata ( - object_address VARCHAR(66) NOT NULL, - resource_type VARCHAR(128) NOT NULL, - data jsonb NOT NULL, - state_key_hash VARCHAR(66) NOT NULL, - last_transaction_version BIGINT NOT NULL, - inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), - -- constraints - PRIMARY KEY (object_address, resource_type) -); --- create table for all structs in token object core -ALTER TABLE token_datas_v2 -ADD COLUMN IF NOT EXISTS decimals BIGINT NOT NULL DEFAULT 0; -ALTER TABLE current_token_datas_v2 -ADD COLUMN IF NOT EXISTS decimals BIGINT NOT NULL DEFAULT 0; -ALTER TABLE token_ownerships_v2 -ADD COLUMN IF NOT EXISTS non_transferrable_by_owner BOOLEAN; -ALTER TABLE 
current_token_ownerships_v2 -ADD COLUMN IF NOT EXISTS non_transferrable_by_owner BOOLEAN; --- These are needed b/c for some reason we're getting build errors when setting --- type field with a length limit -ALTER TABLE signatures -ALTER COLUMN type TYPE VARCHAR; -ALTER TABLE token_activities_v2 -ALTER COLUMN type TYPE VARCHAR; -DROP VIEW IF EXISTS transactions_view; -ALTER TABLE transactions -ALTER COLUMN type TYPE VARCHAR; -CREATE VIEW transactions_view AS -SELECT "version", - block_height, - "hash", - "type", - payload#>>'{}' AS json_payload, - state_change_hash, - event_root_hash, - state_checkpoint_hash, - gas_used, - success, - vm_status, - accumulator_root_hash, - num_events, - num_write_set_changes, - inserted_at -FROM transactions; \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-07-06-042159_minor_optimizations/down.sql b/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-07-06-042159_minor_optimizations/down.sql deleted file mode 100644 index 81190795c2fa0..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-07-06-042159_minor_optimizations/down.sql +++ /dev/null @@ -1,2 +0,0 @@ --- This file should undo anything in `up.sql` -DROP INDEX IF EXISTS mr_ver_index; \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-07-06-042159_minor_optimizations/up.sql b/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-07-06-042159_minor_optimizations/up.sql deleted file mode 100644 index c40d650f08008..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-07-06-042159_minor_optimizations/up.sql +++ /dev/null @@ -1,29 +0,0 @@ --- Your SQL goes here --- This is needed to improve performance when querying an account with a large number of transactions -CREATE INDEX IF NOT EXISTS mr_ver_index ON move_resources(transaction_version DESC); --- These are needed b/c for some reason we're getting build errors when setting --- type field with a length limit -ALTER TABLE signatures -ALTER COLUMN type TYPE VARCHAR; -ALTER TABLE token_activities_v2 -ALTER COLUMN type TYPE VARCHAR; -DROP VIEW IF EXISTS transactions_view; -ALTER TABLE transactions -ALTER COLUMN type TYPE VARCHAR; -CREATE VIEW transactions_view AS -SELECT "version", - block_height, - "hash", - "type", - payload#>>'{}' AS json_payload, - state_change_hash, - event_root_hash, - state_checkpoint_hash, - gas_used, - success, - vm_status, - accumulator_root_hash, - num_events, - num_write_set_changes, - inserted_at -FROM transactions; \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-07-13-060328_transactions_by_address/down.sql b/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-07-13-060328_transactions_by_address/down.sql deleted file mode 100644 index 4f1714472cfee..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-07-13-060328_transactions_by_address/down.sql +++ /dev/null @@ -1,10 +0,0 @@ --- This file should undo anything in `up.sql` -DROP INDEX IF EXISTS at_version_index; -DROP INDEX IF EXISTS at_insat_index; -DROP TABLE IF EXISTS account_transactions; -ALTER TABLE objects -ALTER COLUMN owner_address DROP NOT NULL; -ALTER TABLE objects -ALTER COLUMN guid_creation_num DROP NOT NULL; -ALTER TABLE objects -ALTER COLUMN allow_ungated_transfer DROP NOT NULL; \ No newline at end of file diff --git 
a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-07-13-060328_transactions_by_address/up.sql b/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-07-13-060328_transactions_by_address/up.sql deleted file mode 100644 index 8672514c8c377..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/migrations/2023-07-13-060328_transactions_by_address/up.sql +++ /dev/null @@ -1,20 +0,0 @@ --- Your SQL goes here --- Records transactions - account pairs. Account here can represent --- user account, resource account, or object account. -CREATE TABLE IF NOT EXISTS account_transactions ( - transaction_version BIGINT NOT NULL, - account_address VARCHAR(66) NOT NULL, - inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), - PRIMARY KEY (account_address, transaction_version) -); -CREATE INDEX IF NOT EXISTS at_version_index ON account_transactions (transaction_version DESC); -CREATE INDEX IF NOT EXISTS at_insat_index ON account_transactions (inserted_at); -ALTER TABLE objects -ALTER COLUMN owner_address -SET NOT NULL; -ALTER TABLE objects -ALTER COLUMN guid_creation_num -SET NOT NULL; -ALTER TABLE objects -ALTER COLUMN allow_ungated_transfer -SET NOT NULL; \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/parser.yaml b/ecosystem/indexer-grpc/indexer-grpc-parser/parser.yaml deleted file mode 100644 index 1bdb9c6a73736..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/parser.yaml +++ /dev/null @@ -1,7 +0,0 @@ -# This is a template yaml for the indexer-grpc parser. -health_check_port: 8084 -server_config: - processor_name: default_processor - postgres_connection_string: postgresql://postgres:@localhost:5432/postgres_v2 - indexer_grpc_data_service_address: 127.0.0.1:50051:50051 - auth_token: AUTH_TOKEN \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/src/lib.rs b/ecosystem/indexer-grpc/indexer-grpc-parser/src/lib.rs deleted file mode 100644 index d2e7b5ddc8594..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/src/lib.rs +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -// Increase recursion limit for `serde_json::json!` macro parsing -#![recursion_limit = "256"] - -// #[macro_use] -// extern crate diesel_migrations; - -// Need to use this for because src/schema.rs uses the macros and is autogenerated -#[macro_use] -extern crate diesel; - -pub mod models; -pub mod processors; -pub mod schema; -mod utils; -pub mod worker; diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/src/main.rs b/ecosystem/indexer-grpc/indexer-grpc-parser/src/main.rs deleted file mode 100644 index 2171b9dbc538e..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/src/main.rs +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -use anyhow::{Ok, Result}; -use aptos_indexer_grpc_parser::worker::Worker; -use aptos_indexer_grpc_server_framework::{RunnableConfig, ServerArgs}; -use clap::Parser; -use serde::{Deserialize, Serialize}; - -#[derive(Clone, Debug, Deserialize, Serialize)] -#[serde(deny_unknown_fields)] -pub struct IndexerGrpcProcessorConfig { - pub processor_name: String, - pub postgres_connection_string: String, - // TODO: add tls support. - pub indexer_grpc_data_service_address: String, - // Indexer GRPC http2 ping interval in seconds; default to 30. 
- // tonic ref: https://docs.rs/tonic/latest/tonic/transport/channel/struct.Endpoint.html#method.http2_keep_alive_interval - pub indexer_grpc_http2_ping_interval_in_secs: Option<u64>, - // Indexer GRPC http2 ping timeout in seconds; default to 10. - pub indexer_grpc_http2_ping_timeout_in_secs: Option<u64>, - pub auth_token: String, - pub starting_version: Option<u64>, - pub ending_version: Option<u64>, - pub number_concurrent_processing_tasks: Option<usize>, - pub ans_address: Option<String>, - pub nft_points_contract: Option<String>, -} - -#[async_trait::async_trait] -impl RunnableConfig for IndexerGrpcProcessorConfig { - async fn run(&self) -> Result<()> { - let mut worker = Worker::new( - self.processor_name.clone(), - self.postgres_connection_string.clone(), - self.indexer_grpc_data_service_address.clone(), - std::time::Duration::from_secs( - self.indexer_grpc_http2_ping_interval_in_secs.unwrap_or(30), - ), - std::time::Duration::from_secs( - self.indexer_grpc_http2_ping_timeout_in_secs.unwrap_or(10), - ), - self.auth_token.clone(), - self.starting_version, - self.ending_version, - self.number_concurrent_processing_tasks, - self.ans_address.clone(), - self.nft_points_contract.clone(), - ) - .await; - worker.run().await; - Ok(()) - } - - fn get_server_name(&self) -> String { - "idxproc".to_string() - } -} - -#[tokio::main] -async fn main() -> Result<()> { - let args = ServerArgs::parse(); - args.run::<IndexerGrpcProcessorConfig>().await -} diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/coin_models/account_transactions.rs b/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/coin_models/account_transactions.rs deleted file mode 100644 index 40a68f4a3035c..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/coin_models/account_transactions.rs +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -// This is required because a diesel macro makes clippy sad -#![allow(clippy::extra_unused_lifetimes)] -#![allow(clippy::unused_unit)] - -use crate::{ - models::{ - default_models::user_transactions::UserTransaction, - token_models::v2_token_utils::ObjectWithMetadata, - }, - schema::account_transactions, - utils::util::standardize_address, -}; -use aptos_protos::transaction::v1::{ - transaction::TxnData, write_set_change::Change, DeleteResource, Event, Transaction, - WriteResource, -}; -use field_count::FieldCount; -use serde::{Deserialize, Serialize}; -use std::collections::HashMap; - -pub type AccountTransactionPK = (String, i64); - -#[derive(Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] -#[diesel(primary_key(account_address, transaction_version))] -#[diesel(table_name = account_transactions)] -pub struct AccountTransaction { - pub transaction_version: i64, - pub account_address: String, -} - -impl AccountTransaction { - /// This table will record every transaction that touch an account which could be - /// a user account, an object, or a resource account. - /// We will consider all transactions that modify a resource or event associated with a particular account. - /// We will do 1 level of redirection for now (e.g. if it's an object, we will record the owner as account address). - /// We will also consider transactions that the account signed or is part of a multi sig / multi agent.
- /// TODO: recursively find the parent account of an object - /// TODO: include table items in the detection path - pub fn from_transaction(transaction: &Transaction) -> HashMap<AccountTransactionPK, Self> { - let txn_version = transaction.version as i64; - let txn_data = transaction - .txn_data - .as_ref() - .unwrap_or_else(|| panic!("Txn Data doesn't exit for version {}", txn_version)); - let transaction_info = transaction.info.as_ref().unwrap_or_else(|| { - panic!("Transaction info doesn't exist for version {}", txn_version) - }); - let wscs = &transaction_info.changes; - let (events, signatures) = match txn_data { - TxnData::User(inner) => ( - &inner.events, - UserTransaction::get_signatures( - inner.request.as_ref().unwrap_or_else(|| { - panic!("User request doesn't exist for version {}", txn_version) - }), - txn_version, - transaction.block_height as i64, - ), - ), - TxnData::Genesis(inner) => (&inner.events, vec![]), - TxnData::BlockMetadata(inner) => (&inner.events, vec![]), - _ => { - return HashMap::new(); - }, - }; - let mut account_transactions = HashMap::new(); - for sig in &signatures { - account_transactions.insert((sig.signer.clone(), txn_version), Self { - transaction_version: txn_version, - account_address: sig.signer.clone(), - }); - } - for event in events { - account_transactions.extend(Self::from_event(event, txn_version)); - } - for wsc in wscs { - match wsc.change.as_ref().unwrap() { - Change::DeleteResource(res) => { - account_transactions - .extend(Self::from_delete_resource(res, txn_version).unwrap()); - }, - Change::WriteResource(res) => { - account_transactions - .extend(Self::from_write_resource(res, txn_version).unwrap()); - }, - _ => {}, - } - } - account_transactions - } - - /// Base case, record event account address. We don't really have to worry about - /// objects here because it'll be taken care of in the resource section - fn from_event(event: &Event, txn_version: i64) -> HashMap<AccountTransactionPK, Self> { - let account_address = - standardize_address(event.key.as_ref().unwrap().account_address.as_str()); - HashMap::from([((account_address.clone(), txn_version), Self { - transaction_version: txn_version, - account_address, - })]) - } - - /// Base case, record resource account. If the resource is an object, then we record the owner as well - /// This handles partial deletes as well - fn from_write_resource( - write_resource: &WriteResource, - txn_version: i64, - ) -> anyhow::Result<HashMap<AccountTransactionPK, Self>> { - let mut result = HashMap::new(); - let account_address = standardize_address(write_resource.address.as_str()); - result.insert((account_address.clone(), txn_version), Self { - transaction_version: txn_version, - account_address, - }); - if let Some(inner) = &ObjectWithMetadata::from_write_resource(write_resource, txn_version)? - { - result.insert((inner.object_core.get_owner_address(), txn_version), Self { - transaction_version: txn_version, - account_address: inner.object_core.get_owner_address(), - }); - } - Ok(result) - } - - /// Base case, record resource account. - /// TODO: If the resource is an object, then we need to look for the latest owner.
This isn't really possible - /// right now given we have parallel threads so it'll be very difficult to ensure that we have the correct - /// latest owner - fn from_delete_resource( - delete_resource: &DeleteResource, - txn_version: i64, - ) -> anyhow::Result> { - let mut result = HashMap::new(); - let account_address = standardize_address(delete_resource.address.as_str()); - result.insert((account_address.clone(), txn_version), Self { - transaction_version: txn_version, - account_address, - }); - Ok(result) - } -} diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/coin_models/coin_activities.rs b/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/coin_models/coin_activities.rs deleted file mode 100644 index 45a28a9e9107b..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/coin_models/coin_activities.rs +++ /dev/null @@ -1,293 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -// This is required because a diesel macro makes clippy sad -#![allow(clippy::extra_unused_lifetimes)] -#![allow(clippy::unused_unit)] - -use super::{ - coin_balances::{CoinBalance, CurrentCoinBalance}, - coin_infos::{CoinInfo, CoinInfoQuery}, - coin_supply::CoinSupply, - coin_utils::{CoinEvent, EventGuidResource}, -}; -use crate::{ - schema::coin_activities, - utils::util::{get_entry_function_from_user_request, standardize_address}, -}; -use aptos_protos::transaction::v1::{ - transaction::TxnData, write_set_change::Change as WriteSetChangeEnum, Event as EventPB, - Transaction as TransactionPB, TransactionInfo, UserTransactionRequest, -}; -use bigdecimal::BigDecimal; -use chrono::NaiveDateTime; -use field_count::FieldCount; -use serde::{Deserialize, Serialize}; -use std::collections::HashMap; - -const GAS_FEE_EVENT: &str = "0x1::aptos_coin::GasFeeEvent"; -const APTOS_COIN_TYPE_STR: &str = "0x1::aptos_coin::AptosCoin"; -// We will never have a negative number on chain so this will avoid collision in postgres -const BURN_GAS_EVENT_CREATION_NUM: i64 = -1; -const BURN_GAS_EVENT_INDEX: i64 = -1; - -type OwnerAddress = String; -type CoinType = String; -// Primary key of the current_coin_balances table, i.e. (owner_address, coin_type) -pub type CurrentCoinBalancePK = (OwnerAddress, CoinType); -pub type EventToCoinType = HashMap; - -#[derive(Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] -#[diesel(primary_key( - transaction_version, - event_account_address, - event_creation_number, - event_sequence_number -))] -#[diesel(table_name = coin_activities)] -pub struct CoinActivity { - pub transaction_version: i64, - pub event_account_address: String, - pub event_creation_number: i64, - pub event_sequence_number: i64, - pub owner_address: String, - pub coin_type: String, - pub amount: BigDecimal, - pub activity_type: String, - pub is_gas_fee: bool, - pub is_transaction_success: bool, - pub entry_function_id_str: Option, - pub block_height: i64, - pub transaction_timestamp: chrono::NaiveDateTime, - pub event_index: Option, -} - -impl CoinActivity { - /// There are different objects containing different information about balances and coins. - /// Events: Withdraw and Deposit event containing amounts. There is no coin type so we need to get that from Resources. (from event guid) - /// CoinInfo Resource: Contains name, symbol, decimals and supply. 
(if supply is aggregator, however, actual supply amount will live in a separate table) - /// CoinStore Resource: Contains owner address and coin type information used to complete events - /// Aggregator Table Item: Contains current supply of a coin - /// Note, we're not currently tracking supply - pub fn from_transaction( - transaction: &TransactionPB, - maybe_aptos_coin_info: &Option, - ) -> ( - Vec, - Vec, - HashMap, - HashMap, - Vec, - ) { - // All the items we want to track - let mut coin_activities = Vec::new(); - let mut coin_balances = Vec::new(); - let mut coin_infos: HashMap = HashMap::new(); - let mut current_coin_balances: HashMap = - HashMap::new(); - let mut all_event_to_coin_type: EventToCoinType = HashMap::new(); - let mut all_coin_supply = Vec::new(); - - // Extracts events and user request from genesis and user transactions. Other transactions won't have coin events - let txn_data = transaction - .txn_data - .as_ref() - .expect("Txn Data doesn't exit!"); - let (events, maybe_user_request): (&Vec, Option<&UserTransactionRequest>) = - match txn_data { - TxnData::Genesis(inner) => (&inner.events, None), - TxnData::User(inner) => (&inner.events, inner.request.as_ref()), - _ => return Default::default(), - }; - - // The rest are fields common to all transactions - let txn_version = transaction.version as i64; - let txn_epoch = transaction.epoch as i64; - let block_height = transaction.block_height as i64; - let transaction_info = transaction - .info - .as_ref() - .expect("Transaction info doesn't exist!"); - let txn_timestamp = transaction - .timestamp - .as_ref() - .expect("Transaction timestamp doesn't exist!") - .seconds; - let txn_timestamp = NaiveDateTime::from_timestamp(txn_timestamp, 0); - - // Handling gas first - let mut entry_function_id_str = None; - if let Some(user_request) = maybe_user_request { - entry_function_id_str = get_entry_function_from_user_request(user_request); - coin_activities.push(Self::get_gas_event( - transaction_info, - user_request, - &entry_function_id_str, - txn_version, - txn_timestamp, - block_height, - )); - } - - // Need coin info from move resources - for wsc in &transaction_info.changes { - let (maybe_coin_info, maybe_coin_balance_data) = - if let WriteSetChangeEnum::WriteResource(write_resource) = - &wsc.change.as_ref().unwrap() - { - ( - CoinInfo::from_write_resource(write_resource, txn_version, txn_timestamp) - .unwrap(), - CoinBalance::from_write_resource( - write_resource, - txn_version, - txn_timestamp, - ) - .unwrap(), - ) - } else { - (None, None) - }; - - let maybe_coin_supply = if let WriteSetChangeEnum::WriteTableItem(table_item) = - wsc.change.as_ref().unwrap() - { - CoinSupply::from_write_table_item( - table_item, - maybe_aptos_coin_info, - txn_version, - txn_timestamp, - txn_epoch, - ) - .unwrap() - } else { - None - }; - - if let Some(coin_info) = maybe_coin_info { - coin_infos.insert(coin_info.coin_type.clone(), coin_info); - } - if let Some((coin_balance, current_coin_balance, event_to_coin_type)) = - maybe_coin_balance_data - { - current_coin_balances.insert( - ( - coin_balance.owner_address.clone(), - coin_balance.coin_type.clone(), - ), - current_coin_balance, - ); - coin_balances.push(coin_balance); - all_event_to_coin_type.extend(event_to_coin_type); - } - if let Some(coin_supply) = maybe_coin_supply { - all_coin_supply.push(coin_supply); - } - } - for (index, event) in events.iter().enumerate() { - let event_type = event.type_str.clone(); - if let Some(parsed_event) = - CoinEvent::from_event(event_type.as_str(), 
&event.data, txn_version).unwrap() - { - coin_activities.push(Self::from_parsed_event( - &event_type, - event, - &parsed_event, - txn_version, - &all_event_to_coin_type, - block_height, - &entry_function_id_str, - txn_timestamp, - index as i64, - )); - }; - } - ( - coin_activities, - coin_balances, - coin_infos, - current_coin_balances, - all_coin_supply, - ) - } - - fn from_parsed_event( - event_type: &str, - event: &EventPB, - coin_event: &CoinEvent, - txn_version: i64, - event_to_coin_type: &EventToCoinType, - block_height: i64, - entry_function_id_str: &Option, - transaction_timestamp: chrono::NaiveDateTime, - event_index: i64, - ) -> Self { - let amount = match coin_event { - CoinEvent::WithdrawCoinEvent(inner) => inner.amount.clone(), - CoinEvent::DepositCoinEvent(inner) => inner.amount.clone(), - }; - let event_move_guid = EventGuidResource { - addr: standardize_address(event.key.as_ref().unwrap().account_address.as_str()), - creation_num: event.key.as_ref().unwrap().creation_number as i64, - }; - let coin_type = - event_to_coin_type - .get(&event_move_guid) - .unwrap_or_else(|| { - panic!( - "Could not find event in resources (CoinStore), version: {}, event guid: {:?}, mapping: {:?}", - txn_version, event_move_guid, event_to_coin_type - ) - }).clone(); - - Self { - transaction_version: txn_version, - event_account_address: standardize_address( - &event.key.as_ref().unwrap().account_address, - ), - event_creation_number: event.key.as_ref().unwrap().creation_number as i64, - event_sequence_number: event.sequence_number as i64, - owner_address: standardize_address(&event.key.as_ref().unwrap().account_address), - coin_type, - amount, - activity_type: event_type.to_string(), - is_gas_fee: false, - is_transaction_success: true, - entry_function_id_str: entry_function_id_str.clone(), - block_height, - transaction_timestamp, - event_index: Some(event_index), - } - } - - fn get_gas_event( - txn_info: &TransactionInfo, - user_transaction_request: &UserTransactionRequest, - entry_function_id_str: &Option, - transaction_version: i64, - transaction_timestamp: chrono::NaiveDateTime, - block_height: i64, - ) -> Self { - let aptos_coin_burned = - BigDecimal::from(txn_info.gas_used * user_transaction_request.gas_unit_price); - - Self { - transaction_version, - event_account_address: standardize_address( - &user_transaction_request.sender.to_string(), - ), - event_creation_number: BURN_GAS_EVENT_CREATION_NUM, - event_sequence_number: user_transaction_request.sequence_number as i64, - owner_address: standardize_address(&user_transaction_request.sender.to_string()), - coin_type: APTOS_COIN_TYPE_STR.to_string(), - amount: aptos_coin_burned, - activity_type: GAS_FEE_EVENT.to_string(), - is_gas_fee: true, - is_transaction_success: txn_info.success, - entry_function_id_str: entry_function_id_str.clone(), - block_height, - transaction_timestamp, - event_index: Some(BURN_GAS_EVENT_INDEX), - } - } -} diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/coin_models/coin_balances.rs b/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/coin_models/coin_balances.rs deleted file mode 100644 index 634261529f970..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/coin_models/coin_balances.rs +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -// This is required because a diesel macro makes clippy sad -#![allow(clippy::extra_unused_lifetimes)] -#![allow(clippy::unused_unit)] - -use super::{ - 
coin_activities::EventToCoinType, - coin_utils::{CoinInfoType, CoinResource}, -}; -use crate::{ - schema::{coin_balances, current_coin_balances}, - utils::util::standardize_address, -}; -use aptos_protos::transaction::v1::WriteResource; -use bigdecimal::BigDecimal; -use field_count::FieldCount; -use serde::{Deserialize, Serialize}; -use std::collections::HashMap; - -#[derive(Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] -#[diesel(primary_key(transaction_version, owner_address, coin_type))] -#[diesel(table_name = coin_balances)] -pub struct CoinBalance { - pub transaction_version: i64, - pub owner_address: String, - pub coin_type_hash: String, - pub coin_type: String, - pub amount: BigDecimal, - pub transaction_timestamp: chrono::NaiveDateTime, -} - -#[derive(Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] -#[diesel(primary_key(owner_address, coin_type))] -#[diesel(table_name = current_coin_balances)] -pub struct CurrentCoinBalance { - pub owner_address: String, - pub coin_type_hash: String, - pub coin_type: String, - pub amount: BigDecimal, - pub last_transaction_version: i64, - pub last_transaction_timestamp: chrono::NaiveDateTime, -} - -impl CoinBalance { - /// Getting coin balances from resources - pub fn from_write_resource( - write_resource: &WriteResource, - txn_version: i64, - txn_timestamp: chrono::NaiveDateTime, - ) -> anyhow::Result> { - match &CoinResource::from_write_resource(write_resource, txn_version)? { - Some(CoinResource::CoinStoreResource(inner)) => { - let coin_info_type = &CoinInfoType::from_move_type( - &write_resource.r#type.as_ref().unwrap().generic_type_params[0], - write_resource.type_str.as_ref(), - txn_version, - ); - let owner_address = standardize_address(write_resource.address.as_str()); - let coin_balance = Self { - transaction_version: txn_version, - owner_address: owner_address.clone(), - coin_type_hash: coin_info_type.to_hash(), - coin_type: coin_info_type.get_coin_type_trunc(), - amount: inner.coin.value.clone(), - transaction_timestamp: txn_timestamp, - }; - let current_coin_balance = CurrentCoinBalance { - owner_address, - coin_type_hash: coin_info_type.to_hash(), - coin_type: coin_info_type.get_coin_type_trunc(), - amount: inner.coin.value.clone(), - last_transaction_version: txn_version, - last_transaction_timestamp: txn_timestamp, - }; - let event_to_coin_mapping: EventToCoinType = HashMap::from([ - ( - inner.withdraw_events.guid.id.get_standardized(), - coin_balance.coin_type.clone(), - ), - ( - inner.deposit_events.guid.id.get_standardized(), - coin_balance.coin_type.clone(), - ), - ]); - Ok(Some(( - coin_balance, - current_coin_balance, - event_to_coin_mapping, - ))) - }, - _ => Ok(None), - } - } -} diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/coin_models/coin_infos.rs b/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/coin_models/coin_infos.rs deleted file mode 100644 index 40e0ab146beed..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/coin_models/coin_infos.rs +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -// This is required because a diesel macro makes clippy sad -#![allow(clippy::extra_unused_lifetimes)] -#![allow(clippy::unused_unit)] - -use super::coin_utils::{CoinInfoType, CoinResource}; -use crate::{schema::coin_infos, utils::database::PgPoolConnection}; -use aptos_protos::transaction::v1::WriteResource; -use diesel::{ExpressionMethods, OptionalExtension, QueryDsl, RunQueryDsl}; 
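// Editor's illustration (not part of the original patch): the CoinStore
// handling just above builds an event-GUID -> coin-type map so that later
// Withdraw/Deposit events, which carry only an amount, can be attributed to
// a coin type. A minimal sketch of that lookup with simplified stand-in
// types (`Guid` and `coin_type_for_event` are assumptions, not the deleted code):
use std::collections::HashMap;

#[derive(Clone, Debug, Eq, Hash, PartialEq)]
struct Guid {
    addr: String,
    creation_num: i64,
}

// Resolve the coin type for an event GUID; a miss is treated as fatal,
// mirroring the panic in the deleted coin activity processor.
fn coin_type_for_event(guid: &Guid, mapping: &HashMap<Guid, String>) -> String {
    mapping
        .get(guid)
        .unwrap_or_else(|| panic!("no CoinStore mapping for event guid {:?}", guid))
        .clone()
}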
-use field_count::FieldCount; -use serde::{Deserialize, Serialize}; - -#[derive(Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] -#[diesel(primary_key(coin_type_hash))] -#[diesel(table_name = coin_infos)] -pub struct CoinInfo { - pub coin_type_hash: String, - pub coin_type: String, - pub transaction_version_created: i64, - pub creator_address: String, - pub name: String, - pub symbol: String, - pub decimals: i32, - pub transaction_created_timestamp: chrono::NaiveDateTime, - pub supply_aggregator_table_handle: Option, - pub supply_aggregator_table_key: Option, -} - -#[derive(Debug, Deserialize, Identifiable, Queryable, Serialize)] -#[diesel(primary_key(coin_type_hash))] -#[diesel(table_name = coin_infos)] -pub struct CoinInfoQuery { - pub coin_type_hash: String, - pub coin_type: String, - pub transaction_version_created: i64, - pub creator_address: String, - pub name: String, - pub symbol: String, - pub decimals: i32, - pub transaction_created_timestamp: chrono::NaiveDateTime, - pub inserted_at: chrono::NaiveDateTime, - pub supply_aggregator_table_handle: Option, - pub supply_aggregator_table_key: Option, -} - -impl CoinInfo { - /// We can find coin info from resources. If the coin info appears multiple times we will only keep the first transaction because it can't be modified. - pub fn from_write_resource( - write_resource: &WriteResource, - txn_version: i64, - txn_timestamp: chrono::NaiveDateTime, - ) -> anyhow::Result> { - match &CoinResource::from_write_resource(write_resource, txn_version)? { - Some(CoinResource::CoinInfoResource(inner)) => { - let coin_info_type = &CoinInfoType::from_move_type( - &write_resource.r#type.as_ref().unwrap().generic_type_params[0], - write_resource.type_str.as_ref(), - txn_version, - ); - let (supply_aggregator_table_handle, supply_aggregator_table_key) = inner - .get_aggregator_metadata() - .map(|agg| (Some(agg.handle), Some(agg.key))) - .unwrap_or((None, None)); - - Ok(Some(Self { - coin_type_hash: coin_info_type.to_hash(), - coin_type: coin_info_type.get_coin_type_trunc(), - transaction_version_created: txn_version, - creator_address: coin_info_type.get_creator_address(), - name: inner.get_name_trunc(), - symbol: inner.get_symbol_trunc(), - decimals: inner.decimals, - transaction_created_timestamp: txn_timestamp, - supply_aggregator_table_handle, - supply_aggregator_table_key, - })) - }, - _ => Ok(None), - } - } -} - -impl CoinInfoQuery { - pub fn get_by_coin_type( - coin_type: String, - conn: &mut PgPoolConnection, - ) -> diesel::QueryResult> { - coin_infos::table - .filter(coin_infos::coin_type.eq(coin_type)) - .first::(conn) - .optional() - } -} diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/coin_models/coin_supply.rs b/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/coin_models/coin_supply.rs deleted file mode 100644 index b2a60374d467f..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/coin_models/coin_supply.rs +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -// This is required because a diesel macro makes clippy sad -#![allow(clippy::extra_unused_lifetimes)] -#![allow(clippy::unused_unit)] - -use super::coin_infos::CoinInfoQuery; -use crate::{models::default_models::move_tables::TableItem, schema::coin_supply}; -use anyhow::Context; -use aptos_protos::transaction::v1::WriteTableItem; -use bigdecimal::BigDecimal; -use field_count::FieldCount; -use serde::{Deserialize, Serialize}; - -#[derive(Debug, Deserialize, 
FieldCount, Identifiable, Insertable, Serialize)] -#[diesel(primary_key(transaction_version, coin_type_hash))] -#[diesel(table_name = coin_supply)] -pub struct CoinSupply { - pub transaction_version: i64, - pub coin_type_hash: String, - pub coin_type: String, - pub supply: BigDecimal, - pub transaction_timestamp: chrono::NaiveDateTime, - pub transaction_epoch: i64, -} - -impl CoinSupply { - /// Currently only supports aptos_coin. Aggregator table detail is in CoinInfo which for aptos coin appears during genesis. - /// We query for the aggregator table details (handle and key) once upon indexer initiation and use it to fetch supply. - pub fn from_write_table_item( - write_table_item: &WriteTableItem, - maybe_aptos_coin_info: &Option, - txn_version: i64, - txn_timestamp: chrono::NaiveDateTime, - txn_epoch: i64, - ) -> anyhow::Result> { - if let Some(aptos_coin_info) = maybe_aptos_coin_info { - // Return early if we don't have the aptos aggregator table info - if aptos_coin_info.supply_aggregator_table_key.is_none() - || aptos_coin_info.supply_aggregator_table_handle.is_none() - { - return Ok(None); - } - if let Some(data) = &write_table_item.data { - // Return early if not aggregator table type - if !(data.key_type == "address" && data.value_type == "u128") { - return Ok(None); - } - // Return early if not aggregator table handle - if &write_table_item.handle.to_string() - != aptos_coin_info - .supply_aggregator_table_handle - .as_ref() - .unwrap() - { - return Ok(None); - } - - // Convert to TableItem model. Some fields are just placeholders - let (table_item_model, _) = - TableItem::from_write_table_item(write_table_item, 0, txn_version, 0); - - // Return early if not aptos coin aggregator key - let table_key = &table_item_model.decoded_key.as_str().unwrap(); - if table_key - != aptos_coin_info - .supply_aggregator_table_key - .as_ref() - .unwrap() - { - return Ok(None); - } - // Everything matches. 
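// Editor's illustration (not part of the original patch): at this step the
// deleted code parses the aggregator table item's decoded value, a JSON
// string, into the total coin supply. A std-only sketch of that parse,
// returning u128 instead of BigDecimal; `parse_supply` is a stand-in name.
fn parse_supply(decoded_value: &str, txn_version: i64) -> Result<u128, String> {
    decoded_value.parse::<u128>().map_err(|e| {
        format!(
            "cannot parse string as u128: {:?}, version {}: {}",
            decoded_value, txn_version, e
        )
    })
}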
Get the coin supply - let supply = table_item_model - .decoded_value - .as_ref() - .unwrap() - .as_str() - .unwrap() - .parse::() - .context(format!( - "cannot parse string as u128: {:?}, version {}", - table_item_model.decoded_value.as_ref(), - txn_version - ))?; - return Ok(Some(Self { - transaction_version: txn_version, - coin_type_hash: aptos_coin_info.coin_type_hash.clone(), - coin_type: aptos_coin_info.coin_type.clone(), - supply, - transaction_timestamp: txn_timestamp, - transaction_epoch: txn_epoch, - })); - } - } - Ok(None) - } -} diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/coin_models/coin_utils.rs b/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/coin_models/coin_utils.rs deleted file mode 100644 index 9ee426d6e5cf4..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/coin_models/coin_utils.rs +++ /dev/null @@ -1,287 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -// This is required because a diesel macro makes clippy sad -#![allow(clippy::extra_unused_lifetimes)] - -use crate::{ - models::default_models::move_resources::MoveResource, - utils::util::{deserialize_from_string, hash_str, standardize_address, truncate_str}, -}; -use anyhow::{Context, Result}; -use aptos_protos::transaction::v1::{move_type::Content, MoveType, WriteResource}; -use bigdecimal::BigDecimal; -use regex::Regex; -use serde::{Deserialize, Serialize}; -use tracing::error; - -pub const COIN_ADDR: &str = "0x0000000000000000000000000000000000000000000000000000000000000001"; -const COIN_TYPE_HASH_LENGTH: usize = 5000; -/** - * This file defines deserialized coin types as defined in our 0x1 contracts. - */ -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct CoinInfoResource { - name: String, - symbol: String, - pub decimals: i32, - pub supply: OptionalAggregatorWrapperResource, -} - -impl CoinInfoResource { - pub fn get_name_trunc(&self) -> String { - truncate_str(&self.name, 32) - } - - pub fn get_symbol_trunc(&self) -> String { - truncate_str(&self.symbol, 10) - } - - /// Getting the table item location of the supply aggregator - pub fn get_aggregator_metadata(&self) -> Option { - if let Some(inner) = self.supply.vec.get(0) { - inner.aggregator.get_aggregator_metadata() - } else { - None - } - } -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct OptionalAggregatorWrapperResource { - pub vec: Vec, -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct OptionalAggregatorResource { - pub aggregator: AggregatorWrapperResource, - pub integer: IntegerWrapperResource, -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct AggregatorWrapperResource { - pub vec: Vec, -} - -impl AggregatorWrapperResource { - /// In case we do want to track supply - pub fn get_aggregator_metadata(&self) -> Option { - self.vec.get(0).cloned() - } -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct IntegerWrapperResource { - pub vec: Vec, -} - -impl IntegerWrapperResource { - /// In case we do want to track supply - pub fn get_supply(&self) -> Option { - self.vec.get(0).map(|inner| inner.value.clone()) - } -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct AggregatorResource { - pub handle: String, - pub key: String, -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct IntegerResource { - #[serde(deserialize_with = "deserialize_from_string")] - pub value: BigDecimal, -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct CoinStoreResource { - pub coin: Coin, 
- pub deposit_events: DepositEventResource, - pub withdraw_events: WithdrawEventResource, -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct Coin { - #[serde(deserialize_with = "deserialize_from_string")] - pub value: BigDecimal, -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct DepositEventResource { - pub guid: EventGuidResourceWrapper, -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct WithdrawEventResource { - pub guid: EventGuidResourceWrapper, -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct EventGuidResourceWrapper { - pub id: EventGuidResource, -} - -#[derive(Serialize, Deserialize, Debug, Clone, Hash, Eq, PartialEq)] -pub struct EventGuidResource { - pub addr: String, - #[serde(deserialize_with = "deserialize_from_string")] - pub creation_num: i64, -} - -impl EventGuidResource { - pub fn get_address(&self) -> String { - standardize_address(&self.addr) - } - - pub fn get_standardized(&self) -> Self { - Self { - addr: self.get_address(), - creation_num: self.creation_num, - } - } -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct WithdrawCoinEvent { - #[serde(deserialize_with = "deserialize_from_string")] - pub amount: BigDecimal, -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct DepositCoinEvent { - #[serde(deserialize_with = "deserialize_from_string")] - pub amount: BigDecimal, -} - -pub struct CoinInfoType { - coin_type: String, - creator_address: String, -} - -impl CoinInfoType { - /// get creator address from move_type, and get coin type from move_type_str - /// Since move_type_str will contain things we don't need, e.g. 0x1::coin::CoinInfo. We will use - /// regex to extract T. - pub fn from_move_type(move_type: &MoveType, move_type_str: &str, txn_version: i64) -> Self { - if let Content::Struct(struct_tag) = move_type.content.as_ref().unwrap() { - let re = Regex::new(r"(<(.*)>)").unwrap(); - - let matched = re.captures(move_type_str).unwrap_or_else(|| { - error!( - txn_version = txn_version, - move_type_str = move_type_str, - "move_type should look like 0x1::coin::CoinInfo" - ); - panic!(); - }); - let coin_type = matched.get(2).unwrap().as_str(); - Self { - coin_type: coin_type.to_string(), - creator_address: struct_tag.address.clone(), - } - } else { - error!(txn_version = txn_version, move_type = ?move_type, "Expected struct tag"); - panic!(); - } - } - - pub fn get_creator_address(&self) -> String { - standardize_address(&self.creator_address) - } - - pub fn to_hash(&self) -> String { - hash_str(&self.coin_type.to_string()) - } - - pub fn get_coin_type_trunc(&self) -> String { - truncate_str(&self.coin_type, COIN_TYPE_HASH_LENGTH) - } -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub enum CoinResource { - CoinInfoResource(CoinInfoResource), - CoinStoreResource(CoinStoreResource), -} - -impl CoinResource { - pub fn is_resource_supported(data_type: &str) -> bool { - [ - format!("{}::coin::CoinInfo", COIN_ADDR), - format!("{}::coin::CoinStore", COIN_ADDR), - ] - .contains(&data_type.to_string()) - } - - pub fn from_resource( - data_type: &str, - data: &serde_json::Value, - txn_version: i64, - ) -> Result { - match data_type { - x if x == format!("{}::coin::CoinInfo", COIN_ADDR) => { - serde_json::from_value(data.clone()) - .map(|inner| Some(CoinResource::CoinInfoResource(inner))) - }, - x if x == format!("{}::coin::CoinStore", COIN_ADDR) => { - serde_json::from_value(data.clone()) - .map(|inner| Some(CoinResource::CoinStoreResource(inner))) - }, - _ => Ok(None), - } - 
.context(format!( - "version {} failed! failed to parse type {}, data {:?}", - txn_version, data_type, data - ))? - .context(format!( - "Resource unsupported! Call is_resource_supported first. version {} type {}", - txn_version, data_type - )) - } - - pub fn from_write_resource( - write_resource: &WriteResource, - txn_version: i64, - ) -> Result> { - let type_str = MoveResource::get_outer_type_from_resource(write_resource); - if !CoinResource::is_resource_supported(type_str.as_str()) { - return Ok(None); - } - let resource = MoveResource::from_write_resource( - write_resource, - 0, // Placeholder, this isn't used anyway - txn_version, - 0, // Placeholder, this isn't used anyway - ); - Ok(Some(Self::from_resource( - &type_str, - resource.data.as_ref().unwrap(), - txn_version, - )?)) - } -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub enum CoinEvent { - WithdrawCoinEvent(WithdrawCoinEvent), - DepositCoinEvent(DepositCoinEvent), -} - -impl CoinEvent { - pub fn from_event(data_type: &str, data: &str, txn_version: i64) -> Result> { - match data_type { - "0x1::coin::WithdrawEvent" => { - serde_json::from_str(data).map(|inner| Some(CoinEvent::WithdrawCoinEvent(inner))) - }, - "0x1::coin::DepositEvent" => { - serde_json::from_str(data).map(|inner| Some(CoinEvent::DepositCoinEvent(inner))) - }, - _ => Ok(None), - } - .context(format!( - "version {} failed! failed to parse type {}, data {:?}", - txn_version, data_type, data - )) - } -} diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/coin_models/mod.rs b/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/coin_models/mod.rs deleted file mode 100644 index 81fcbf9aa0a91..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/coin_models/mod.rs +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -pub mod account_transactions; -pub mod coin_activities; -pub mod coin_balances; -pub mod coin_infos; -pub mod coin_supply; -pub mod coin_utils; -pub mod v2_fungible_asset_utils; diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/coin_models/v2_fungible_asset_utils.rs b/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/coin_models/v2_fungible_asset_utils.rs deleted file mode 100644 index 510094b7bac5e..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/coin_models/v2_fungible_asset_utils.rs +++ /dev/null @@ -1,282 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -// This is required because a diesel macro makes clippy sad -#![allow(clippy::extra_unused_lifetimes)] - -use super::coin_utils::COIN_ADDR; -use crate::{ - models::{ - default_models::move_resources::MoveResource, - token_models::{token_utils::URI_LENGTH, v2_token_utils::ResourceReference}, - }, - utils::util::{deserialize_from_string, truncate_str}, -}; -use anyhow::{Context, Result}; -use aptos_protos::transaction::v1::WriteResource; -use bigdecimal::BigDecimal; -use serde::{Deserialize, Serialize}; - -const FUNGIBLE_ASSET_LENGTH: usize = 32; -const FUNGIBLE_ASSET_SYMBOL: usize = 10; - -/* Section on fungible assets resources */ -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct FungibleAssetMetadata { - name: String, - symbol: String, - pub decimals: i32, - icon_uri: String, - project_uri: String, -} - -impl FungibleAssetMetadata { - pub fn from_write_resource( - write_resource: &WriteResource, - txn_version: i64, - ) -> anyhow::Result> { - let type_str = 
MoveResource::get_outer_type_from_resource(write_resource); - if !V2FungibleAssetResource::is_resource_supported(type_str.as_str()) { - return Ok(None); - } - let resource = MoveResource::from_write_resource( - write_resource, - 0, // Placeholder, this isn't used anyway - txn_version, - 0, // Placeholder, this isn't used anyway - ); - - if let V2FungibleAssetResource::FungibleAssetMetadata(inner) = - V2FungibleAssetResource::from_resource( - &type_str, - resource.data.as_ref().unwrap(), - txn_version, - )? - { - Ok(Some(inner)) - } else { - Ok(None) - } - } - - pub fn get_name(&self) -> String { - truncate_str(&self.name, FUNGIBLE_ASSET_LENGTH) - } - - pub fn get_symbol(&self) -> String { - truncate_str(&self.name, FUNGIBLE_ASSET_SYMBOL) - } - - pub fn get_icon_uri(&self) -> String { - truncate_str(&self.icon_uri, URI_LENGTH) - } - - pub fn get_project_uri(&self) -> String { - truncate_str(&self.project_uri, URI_LENGTH) - } -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct FungibleAssetStore { - pub metadata: ResourceReference, - #[serde(deserialize_with = "deserialize_from_string")] - pub balance: BigDecimal, - pub frozen: bool, -} - -impl FungibleAssetStore { - pub fn from_write_resource( - write_resource: &WriteResource, - txn_version: i64, - ) -> anyhow::Result> { - let type_str = MoveResource::get_outer_type_from_resource(write_resource); - if !V2FungibleAssetResource::is_resource_supported(type_str.as_str()) { - return Ok(None); - } - let resource = MoveResource::from_write_resource( - write_resource, - 0, // Placeholder, this isn't used anyway - txn_version, - 0, // Placeholder, this isn't used anyway - ); - - if let V2FungibleAssetResource::FungibleAssetStore(inner) = - V2FungibleAssetResource::from_resource( - &type_str, - resource.data.as_ref().unwrap(), - txn_version, - )? - { - Ok(Some(inner)) - } else { - Ok(None) - } - } -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct FungibleAssetSupply { - #[serde(deserialize_with = "deserialize_from_string")] - pub current: BigDecimal, - pub maximum: OptionalBigDecimal, -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct OptionalBigDecimal { - vec: Vec, -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -struct BigDecimalWrapper(#[serde(deserialize_with = "deserialize_from_string")] pub BigDecimal); - -impl FungibleAssetSupply { - pub fn from_write_resource( - write_resource: &WriteResource, - txn_version: i64, - ) -> anyhow::Result> { - let type_str: String = MoveResource::get_outer_type_from_resource(write_resource); - if !V2FungibleAssetResource::is_resource_supported(type_str.as_str()) { - return Ok(None); - } - let resource = MoveResource::from_write_resource( - write_resource, - 0, // Placeholder, this isn't used anyway - txn_version, - 0, // Placeholder, this isn't used anyway - ); - - if let V2FungibleAssetResource::FungibleAssetSupply(inner) = - V2FungibleAssetResource::from_resource( - &type_str, - resource.data.as_ref().unwrap(), - txn_version, - )? 
- { - Ok(Some(inner)) - } else { - Ok(None) - } - } - - pub fn get_maximum(&self) -> Option { - self.maximum.vec.first().map(|x| x.0.clone()) - } -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct DepositEvent { - #[serde(deserialize_with = "deserialize_from_string")] - pub amount: BigDecimal, -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct WithdrawEvent { - #[serde(deserialize_with = "deserialize_from_string")] - pub amount: BigDecimal, -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub enum V2FungibleAssetResource { - FungibleAssetMetadata(FungibleAssetMetadata), - FungibleAssetStore(FungibleAssetStore), - FungibleAssetSupply(FungibleAssetSupply), -} - -impl V2FungibleAssetResource { - pub fn is_resource_supported(data_type: &str) -> bool { - [ - format!("{}::fungible_asset::Supply", COIN_ADDR), - format!("{}::fungible_asset::Metadata", COIN_ADDR), - format!("{}::fungible_asset::FungibleStore", COIN_ADDR), - ] - .contains(&data_type.to_string()) - } - - pub fn from_resource( - data_type: &str, - data: &serde_json::Value, - txn_version: i64, - ) -> Result { - match data_type { - x if x == format!("{}::fungible_asset::Supply", COIN_ADDR) => { - serde_json::from_value(data.clone()) - .map(|inner| Some(Self::FungibleAssetSupply(inner))) - }, - x if x == format!("{}::fungible_asset::Metadata", COIN_ADDR) => { - serde_json::from_value(data.clone()) - .map(|inner| Some(Self::FungibleAssetMetadata(inner))) - }, - x if x == format!("{}::fungible_asset::FungibleStore", COIN_ADDR) => { - serde_json::from_value(data.clone()) - .map(|inner| Some(Self::FungibleAssetStore(inner))) - }, - _ => Ok(None), - } - .context(format!( - "version {} failed! failed to parse type {}, data {:?}", - txn_version, data_type, data - ))? - .context(format!( - "Resource unsupported! Call is_resource_supported first. version {} type {}", - txn_version, data_type - )) - } -} - -pub enum FungibleAssetEvent { - DepositEvent(DepositEvent), - WithdrawEvent(WithdrawEvent), -} - -impl FungibleAssetEvent { - pub fn from_event(data_type: &str, data: &str, txn_version: i64) -> Result> { - match data_type { - "0x1::fungible_asset::DepositEvent" => { - serde_json::from_str(data).map(|inner| Some(Self::DepositEvent(inner))) - }, - "0x1::fungible_asset::WithdrawEvent" => { - serde_json::from_str(data).map(|inner| Some(Self::WithdrawEvent(inner))) - }, - _ => Ok(None), - } - .context(format!( - "version {} failed! 
failed to parse type {}, data {:?}", - txn_version, data_type, data - )) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_fungible_asset_supply_null() { - let test = r#"{"current": "0", "maximum": {"vec": []}}"#; - let test: serde_json::Value = serde_json::from_str(test).unwrap(); - let supply = serde_json::from_value(test) - .map(V2FungibleAssetResource::FungibleAssetSupply) - .unwrap(); - if let V2FungibleAssetResource::FungibleAssetSupply(supply) = supply { - assert_eq!(supply.current, BigDecimal::from(0)); - assert_eq!(supply.get_maximum(), None); - } else { - panic!("Wrong type") - } - } - - #[test] - fn test_fungible_asset_supply_nonnull() { - let test = r#"{"current": "100", "maximum": {"vec": ["5000"]}}"#; - let test: serde_json::Value = serde_json::from_str(test).unwrap(); - let supply = serde_json::from_value(test) - .map(V2FungibleAssetResource::FungibleAssetSupply) - .unwrap(); - if let V2FungibleAssetResource::FungibleAssetSupply(supply) = supply { - assert_eq!(supply.current, BigDecimal::from(100)); - assert_eq!(supply.get_maximum(), Some(BigDecimal::from(5000))); - } else { - panic!("Wrong type") - } - } -} diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/default_models/block_metadata_transactions.rs b/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/default_models/block_metadata_transactions.rs deleted file mode 100644 index 4446f706fe00c..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/default_models/block_metadata_transactions.rs +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -// This is required because a diesel macro makes clippy sad -#![allow(clippy::extra_unused_lifetimes)] -#![allow(clippy::unused_unit)] - -use super::transactions::{Transaction, TransactionQuery}; -use crate::{ - schema::block_metadata_transactions, - utils::util::{parse_timestamp, standardize_address}, -}; -use aptos_protos::{ - transaction::v1::BlockMetadataTransaction as BlockMetadataTransactionPB, - util::timestamp::Timestamp, -}; -use field_count::FieldCount; -use serde::{Deserialize, Serialize}; - -#[derive( - Associations, Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize, -)] -#[diesel(belongs_to(Transaction, foreign_key = version))] -#[diesel(primary_key(version))] -#[diesel(table_name = block_metadata_transactions)] -pub struct BlockMetadataTransaction { - pub version: i64, - pub block_height: i64, - pub id: String, - pub round: i64, - pub epoch: i64, - pub previous_block_votes_bitvec: serde_json::Value, - pub proposer: String, - pub failed_proposer_indices: serde_json::Value, - pub timestamp: chrono::NaiveDateTime, -} - -/// Need a separate struct for queryable because we don't want to define the inserted_at column (letting DB fill) -#[derive( - Associations, Clone, Debug, Deserialize, FieldCount, Identifiable, Queryable, Serialize, -)] -#[diesel(belongs_to(TransactionQuery, foreign_key = version))] -#[diesel(primary_key(version))] -#[diesel(table_name = block_metadata_transactions)] -pub struct BlockMetadataTransactionQuery { - pub version: i64, - pub block_height: i64, - pub id: String, - pub round: i64, - pub epoch: i64, - pub previous_block_votes_bitvec: serde_json::Value, - pub proposer: String, - pub failed_proposer_indices: serde_json::Value, - pub timestamp: chrono::NaiveDateTime, - pub inserted_at: chrono::NaiveDateTime, -} - -impl BlockMetadataTransaction { - pub fn from_transaction( - txn: &BlockMetadataTransactionPB, - 
version: i64, - block_height: i64, - epoch: i64, - timestamp: &Timestamp, - ) -> Self { - Self { - version, - block_height, - id: txn.id.to_string(), - epoch, - round: txn.round as i64, - proposer: standardize_address(txn.proposer.as_str()), - failed_proposer_indices: serde_json::to_value(&txn.failed_proposer_indices).unwrap(), - previous_block_votes_bitvec: serde_json::to_value(&txn.previous_block_votes_bitvec) - .unwrap(), - // time is in microseconds - timestamp: parse_timestamp(timestamp, version), - } - } -} - -// Prevent conflicts with other things named `Transaction` -pub type BlockMetadataTransactionModel = BlockMetadataTransaction; diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/default_models/events.rs b/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/default_models/events.rs deleted file mode 100644 index 9a84ef5a42169..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/default_models/events.rs +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -#![allow(clippy::extra_unused_lifetimes)] -use super::transactions::{Transaction, TransactionQuery}; -use crate::{schema::events, utils::util::standardize_address}; -use aptos_protos::transaction::v1::Event as EventPB; -use field_count::FieldCount; -use serde::{Deserialize, Serialize}; - -#[derive(Associations, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] -#[diesel(belongs_to(Transaction, foreign_key = transaction_version))] -#[diesel(primary_key(account_address, creation_number, sequence_number))] -#[diesel(table_name = events)] -pub struct Event { - pub sequence_number: i64, - pub creation_number: i64, - pub account_address: String, - pub transaction_version: i64, - pub transaction_block_height: i64, - pub type_: String, - pub data: serde_json::Value, - pub event_index: Option, -} - -/// Need a separate struct for queryable because we don't want to define the inserted_at column (letting DB fill) -#[derive(Associations, Debug, Deserialize, Identifiable, Queryable, Serialize)] -#[diesel(belongs_to(TransactionQuery, foreign_key = transaction_version))] -#[diesel(primary_key(account_address, creation_number, sequence_number))] -#[diesel(table_name = events)] -pub struct EventQuery { - pub sequence_number: i64, - pub creation_number: i64, - pub account_address: String, - pub transaction_version: i64, - pub transaction_block_height: i64, - pub type_: String, - pub data: serde_json::Value, - pub inserted_at: chrono::NaiveDateTime, - pub event_index: Option, -} - -impl Event { - pub fn from_event( - event: &EventPB, - transaction_version: i64, - transaction_block_height: i64, - event_index: i64, - ) -> Self { - Event { - account_address: standardize_address( - event.key.as_ref().unwrap().account_address.as_str(), - ), - creation_number: event.key.as_ref().unwrap().creation_number as i64, - sequence_number: event.sequence_number as i64, - transaction_version, - transaction_block_height, - type_: event.type_str.clone(), - data: serde_json::from_str(event.data.as_str()).unwrap(), - event_index: Some(event_index), - } - } - - pub fn from_events( - events: &[EventPB], - transaction_version: i64, - transaction_block_height: i64, - ) -> Vec { - events - .iter() - .enumerate() - .map(|(index, event)| { - Self::from_event( - event, - transaction_version, - transaction_block_height, - index as i64, - ) - }) - .collect::>() - } -} - -// Prevent conflicts with other things named `Event` -pub type EventModel = Event; diff --git 
a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/default_models/mod.rs b/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/default_models/mod.rs deleted file mode 100644 index c4cd682b69370..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/default_models/mod.rs +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -pub mod block_metadata_transactions; -pub mod events; -pub mod move_modules; -pub mod move_resources; -pub mod move_tables; -pub mod signatures; -pub mod transactions; -pub mod user_transactions; -pub mod v2_objects; -pub mod write_set_changes; diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/default_models/move_modules.rs b/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/default_models/move_modules.rs deleted file mode 100644 index 95e930d704b58..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/default_models/move_modules.rs +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -#![allow(clippy::extra_unused_lifetimes)] -use super::transactions::Transaction; -use crate::{schema::move_modules, utils::util::standardize_address}; -use aptos_protos::transaction::v1::{ - DeleteModule, MoveModule as MoveModulePB, MoveModuleBytecode, WriteModule, -}; -use field_count::FieldCount; -use serde::{Deserialize, Serialize}; - -#[derive( - Associations, Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize, -)] -#[diesel(belongs_to(Transaction, foreign_key = transaction_version))] -#[diesel(primary_key(transaction_version, write_set_change_index))] -#[diesel(table_name = move_modules)] -pub struct MoveModule { - pub transaction_version: i64, - pub write_set_change_index: i64, - pub transaction_block_height: i64, - pub name: String, - pub address: String, - pub bytecode: Option>, - pub exposed_functions: Option, - pub friends: Option, - pub structs: Option, - pub is_deleted: bool, -} - -pub struct MoveModuleByteCodeParsed { - pub address: String, - pub name: String, - pub bytecode: Vec, - pub exposed_functions: serde_json::Value, - pub friends: serde_json::Value, - pub structs: serde_json::Value, -} - -impl MoveModule { - pub fn from_write_module( - write_module: &WriteModule, - write_set_change_index: i64, - transaction_version: i64, - transaction_block_height: i64, - ) -> Self { - let parsed_data = Self::convert_move_module_bytecode(write_module.data.as_ref().unwrap()); - Self { - transaction_version, - transaction_block_height, - write_set_change_index, - name: parsed_data - .as_ref() - .map(|d| d.name.clone()) - .unwrap_or_default(), - address: standardize_address(&write_module.address.to_string()), - bytecode: parsed_data.as_ref().map(|d| d.bytecode.clone()), - exposed_functions: parsed_data.as_ref().map(|d| d.exposed_functions.clone()), - friends: parsed_data.as_ref().map(|d| d.friends.clone()), - structs: parsed_data.as_ref().map(|d| d.structs.clone()), - is_deleted: false, - } - } - - pub fn from_delete_module( - delete_module: &DeleteModule, - write_set_change_index: i64, - transaction_version: i64, - transaction_block_height: i64, - ) -> Self { - Self { - transaction_version, - transaction_block_height, - write_set_change_index, - name: delete_module.module.as_ref().unwrap().name.to_string(), - address: standardize_address(&delete_module.address.to_string()), - bytecode: None, - exposed_functions: None, - friends: None, - structs: None, - is_deleted: true, - } - } - - pub fn 
convert_move_module_bytecode( - mmb: &MoveModuleBytecode, - ) -> Option { - Some(Self::convert_move_module( - mmb.abi.as_ref().unwrap(), - mmb.bytecode.clone(), - )) - } - - pub fn convert_move_module( - move_module: &MoveModulePB, - bytecode: Vec, - ) -> MoveModuleByteCodeParsed { - MoveModuleByteCodeParsed { - address: standardize_address(&move_module.address.to_string()), - name: move_module.name.clone(), - bytecode, - exposed_functions: move_module - .exposed_functions - .iter() - .map(|move_func| serde_json::to_value(move_func).unwrap()) - .collect(), - friends: move_module - .friends - .iter() - .map(|move_module_id| serde_json::to_value(move_module_id).unwrap()) - .collect(), - structs: move_module - .structs - .iter() - .map(|move_struct| serde_json::to_value(move_struct).unwrap()) - .collect(), - } - } -} diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/default_models/move_resources.rs b/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/default_models/move_resources.rs deleted file mode 100644 index 39971fa4fb7d4..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/default_models/move_resources.rs +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -#![allow(clippy::extra_unused_lifetimes)] -use super::transactions::Transaction; -use crate::{schema::move_resources, utils::util::standardize_address}; -use anyhow::{Context, Result}; -use aptos_protos::transaction::v1::{ - DeleteResource, MoveStructTag as MoveStructTagPB, WriteResource, -}; -use field_count::FieldCount; -use serde::{Deserialize, Serialize}; -#[derive( - Associations, Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize, -)] -#[diesel(belongs_to(Transaction, foreign_key = transaction_version))] -#[diesel(primary_key(transaction_version, write_set_change_index))] -#[diesel(table_name = move_resources)] -pub struct MoveResource { - pub transaction_version: i64, - pub write_set_change_index: i64, - pub transaction_block_height: i64, - pub name: String, - pub type_: String, - pub address: String, - pub module: String, - pub generic_type_params: Option, - pub data: Option, - pub is_deleted: bool, - pub state_key_hash: String, -} - -pub struct MoveStructTag { - address: String, - pub module: String, - pub name: String, - pub generic_type_params: Option, -} - -impl MoveResource { - pub fn from_write_resource( - write_resource: &WriteResource, - write_set_change_index: i64, - transaction_version: i64, - transaction_block_height: i64, - ) -> Self { - let parsed_data = Self::convert_move_struct_tag( - write_resource - .r#type - .as_ref() - .expect("MoveStructTag Not Exists."), - ); - Self { - transaction_version, - transaction_block_height, - write_set_change_index, - type_: write_resource.type_str.clone(), - name: parsed_data.name.clone(), - address: standardize_address(&write_resource.address.to_string()), - module: parsed_data.module.clone(), - generic_type_params: parsed_data.generic_type_params, - data: Some(serde_json::from_str(write_resource.data.as_str()).unwrap()), - is_deleted: false, - state_key_hash: standardize_address( - hex::encode(write_resource.state_key_hash.as_slice()).as_str(), - ), - } - } - - pub fn from_delete_resource( - delete_resource: &DeleteResource, - write_set_change_index: i64, - transaction_version: i64, - transaction_block_height: i64, - ) -> Self { - let parsed_data = Self::convert_move_struct_tag( - delete_resource - .r#type - .as_ref() - .expect("MoveStructTag Not 
Exists."), - ); - Self { - transaction_version, - transaction_block_height, - write_set_change_index, - type_: delete_resource.type_str.clone(), - name: parsed_data.name.clone(), - address: standardize_address(&delete_resource.address.to_string()), - module: parsed_data.module.clone(), - generic_type_params: parsed_data.generic_type_params, - data: None, - is_deleted: true, - state_key_hash: standardize_address( - hex::encode(delete_resource.state_key_hash.as_slice()).as_str(), - ), - } - } - - pub fn convert_move_struct_tag(struct_tag: &MoveStructTagPB) -> MoveStructTag { - MoveStructTag { - address: standardize_address(struct_tag.address.as_str()), - module: struct_tag.module.to_string(), - name: struct_tag.name.to_string(), - generic_type_params: struct_tag - .generic_type_params - .iter() - .map(|move_type| -> Result> { - Ok(Some( - serde_json::to_value(move_type).context("Failed to parse move type")?, - )) - }) - .collect::>>() - .unwrap_or(None), - } - } - - pub fn get_outer_type_from_resource(write_resource: &WriteResource) -> String { - let move_struct_tag = - Self::convert_move_struct_tag(write_resource.r#type.as_ref().unwrap()); - - format!( - "{}::{}::{}", - move_struct_tag.get_address(), - move_struct_tag.module, - move_struct_tag.name, - ) - } -} - -impl MoveStructTag { - pub fn get_address(&self) -> String { - standardize_address(self.address.as_str()) - } -} diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/default_models/move_tables.rs b/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/default_models/move_tables.rs deleted file mode 100644 index a75b65177ab46..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/default_models/move_tables.rs +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -#![allow(clippy::extra_unused_lifetimes)] -use super::transactions::Transaction; -use crate::{ - schema::{current_table_items, table_items, table_metadatas}, - utils::util::{hash_str, standardize_address}, -}; -use aptos_protos::transaction::v1::{DeleteTableItem, WriteTableItem}; -use field_count::FieldCount; -use serde::{Deserialize, Serialize}; - -#[derive(Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] -#[diesel(primary_key(table_handle, key_hash))] -#[diesel(table_name = current_table_items)] -pub struct CurrentTableItem { - pub table_handle: String, - pub key_hash: String, - pub key: String, - pub decoded_key: serde_json::Value, - pub decoded_value: Option, - pub last_transaction_version: i64, - pub is_deleted: bool, -} - -#[derive( - Associations, Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize, -)] -#[diesel(belongs_to(Transaction, foreign_key = transaction_version))] -#[diesel(primary_key(transaction_version, write_set_change_index))] -#[diesel(table_name = table_items)] -pub struct TableItem { - pub transaction_version: i64, - pub write_set_change_index: i64, - pub transaction_block_height: i64, - pub key: String, - pub table_handle: String, - pub decoded_key: serde_json::Value, - pub decoded_value: Option, - pub is_deleted: bool, -} - -#[derive(Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] -#[diesel(primary_key(handle))] -#[diesel(table_name = table_metadatas)] -pub struct TableMetadata { - pub handle: String, - pub key_type: String, - pub value_type: String, -} - -impl TableItem { - pub fn from_write_table_item( - write_table_item: &WriteTableItem, - write_set_change_index: i64, - 
transaction_version: i64, - transaction_block_height: i64, - ) -> (Self, CurrentTableItem) { - ( - Self { - transaction_version, - write_set_change_index, - transaction_block_height, - key: write_table_item.key.to_string(), - table_handle: standardize_address(&write_table_item.handle.to_string()), - decoded_key: serde_json::from_str( - write_table_item.data.as_ref().unwrap().key.as_str(), - ) - .unwrap(), - decoded_value: serde_json::from_str( - write_table_item.data.as_ref().unwrap().value.as_str(), - ) - .unwrap(), - is_deleted: false, - }, - CurrentTableItem { - table_handle: standardize_address(&write_table_item.handle.to_string()), - key_hash: hash_str(&write_table_item.key.to_string()), - key: write_table_item.key.to_string(), - decoded_key: serde_json::from_str( - write_table_item.data.as_ref().unwrap().key.as_str(), - ) - .unwrap(), - decoded_value: serde_json::from_str( - write_table_item.data.as_ref().unwrap().value.as_str(), - ) - .unwrap(), - last_transaction_version: transaction_version, - is_deleted: false, - }, - ) - } - - pub fn from_delete_table_item( - delete_table_item: &DeleteTableItem, - write_set_change_index: i64, - transaction_version: i64, - transaction_block_height: i64, - ) -> (Self, CurrentTableItem) { - ( - Self { - transaction_version, - write_set_change_index, - transaction_block_height, - key: delete_table_item.key.to_string(), - table_handle: standardize_address(&delete_table_item.handle.to_string()), - decoded_key: serde_json::from_str( - delete_table_item.data.as_ref().unwrap().key.as_str(), - ) - .unwrap(), - - decoded_value: None, - is_deleted: true, - }, - CurrentTableItem { - table_handle: standardize_address(&delete_table_item.handle.to_string()), - key_hash: hash_str(&delete_table_item.key.to_string()), - key: delete_table_item.key.to_string(), - decoded_key: serde_json::from_str( - delete_table_item.data.as_ref().unwrap().key.as_str(), - ) - .unwrap(), - decoded_value: None, - last_transaction_version: transaction_version, - is_deleted: true, - }, - ) - } -} - -impl TableMetadata { - pub fn from_write_table_item(table_item: &WriteTableItem) -> Self { - Self { - handle: table_item.handle.to_string(), - key_type: table_item.data.as_ref().unwrap().key_type.clone(), - value_type: table_item.data.as_ref().unwrap().value_type.clone(), - } - } -} diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/default_models/signatures.rs b/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/default_models/signatures.rs deleted file mode 100644 index bfaa17e1d7b96..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/default_models/signatures.rs +++ /dev/null @@ -1,276 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -#![allow(clippy::extra_unused_lifetimes)] - -use super::transactions::Transaction; -use crate::{schema::signatures, utils::util::standardize_address}; -use anyhow::{Context, Result}; -use aptos_protos::transaction::v1::{ - account_signature::Signature as AccountSignatureEnum, signature::Signature as SignatureEnum, - AccountSignature as ProtoAccountSignature, Ed25519Signature as Ed25519SignaturePB, - FeePayerSignature as ProtoFeePayerSignature, MultiAgentSignature as ProtoMultiAgentSignature, - MultiEd25519Signature as ProtoMultiEd25519Signature, Signature as TransactionSignaturePB, -}; -use field_count::FieldCount; -use serde::{Deserialize, Serialize}; - -#[derive( - Associations, Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize, -)] 
-#[diesel(belongs_to(Transaction, foreign_key = transaction_version))] -#[diesel(primary_key( - transaction_version, - multi_agent_index, - multi_sig_index, - is_sender_primary -))] -#[diesel(table_name = signatures)] -pub struct Signature { - pub transaction_version: i64, - pub multi_agent_index: i64, - pub multi_sig_index: i64, - pub transaction_block_height: i64, - pub signer: String, - pub is_sender_primary: bool, - pub type_: String, - pub public_key: String, - pub signature: String, - pub threshold: i64, - pub public_key_indices: serde_json::Value, -} - -impl Signature { - /// Returns a flattened list of signatures. If signature is a Ed25519Signature, then return a vector of 1 signature - pub fn from_user_transaction( - s: &TransactionSignaturePB, - sender: &String, - transaction_version: i64, - transaction_block_height: i64, - ) -> Result> { - match s.signature.as_ref().unwrap() { - SignatureEnum::Ed25519(sig) => Ok(vec![Self::parse_single_signature( - sig, - sender, - transaction_version, - transaction_block_height, - true, - 0, - None, - )]), - SignatureEnum::MultiEd25519(sig) => Ok(Self::parse_multi_signature( - sig, - sender, - transaction_version, - transaction_block_height, - true, - 0, - None, - )), - SignatureEnum::MultiAgent(sig) => Self::parse_multi_agent_signature( - sig, - sender, - transaction_version, - transaction_block_height, - ), - SignatureEnum::FeePayer(sig) => Self::parse_fee_payer_signature( - sig, - sender, - transaction_version, - transaction_block_height, - ), - } - } - - pub fn get_signature_type(t: &TransactionSignaturePB) -> String { - match t.signature.as_ref().unwrap() { - SignatureEnum::Ed25519(_) => String::from("ed25519_signature"), - SignatureEnum::MultiEd25519(_) => String::from("multi_ed25519_signature"), - SignatureEnum::MultiAgent(_) => String::from("multi_agent_signature"), - SignatureEnum::FeePayer(_) => String::from("fee_payer_signature"), - } - } - - fn parse_single_signature( - s: &Ed25519SignaturePB, - sender: &String, - transaction_version: i64, - transaction_block_height: i64, - is_sender_primary: bool, - multi_agent_index: i64, - override_address: Option<&String>, - ) -> Self { - let signer = standardize_address(override_address.unwrap_or(sender)); - Self { - transaction_version, - transaction_block_height, - signer, - is_sender_primary, - type_: String::from("ed25519_signature"), - public_key: format!("0x{}", hex::encode(s.public_key.as_slice())), - threshold: 1, - public_key_indices: serde_json::Value::Array(vec![]), - signature: format!("0x{}", hex::encode(s.signature.as_slice())), - multi_agent_index, - multi_sig_index: 0, - } - } - - fn parse_multi_signature( - s: &ProtoMultiEd25519Signature, - sender: &String, - transaction_version: i64, - transaction_block_height: i64, - is_sender_primary: bool, - multi_agent_index: i64, - override_address: Option<&String>, - ) -> Vec { - let mut signatures = Vec::default(); - let signer = standardize_address(override_address.unwrap_or(sender)); - - let public_key_indices: Vec = s - .public_key_indices - .iter() - .map(|index| *index as usize) - .collect(); - for (index, signature) in s.signatures.iter().enumerate() { - let public_key = s - .public_keys - .get(public_key_indices.clone()[index]) - .unwrap() - .clone(); - signatures.push(Self { - transaction_version, - transaction_block_height, - signer: signer.clone(), - is_sender_primary, - type_: String::from("multi_ed25519_signature"), - public_key: format!("0x{}", hex::encode(public_key.as_slice())), - threshold: s.threshold as i64, - 
signature: format!("0x{}", hex::encode(signature.as_slice())), - public_key_indices: serde_json::Value::Array( - public_key_indices - .iter() - .map(|index| { - serde_json::Value::Number(serde_json::Number::from(*index as i64)) - }) - .collect(), - ), - multi_agent_index, - multi_sig_index: index as i64, - }); - } - signatures - } - - fn parse_multi_agent_signature( - s: &ProtoMultiAgentSignature, - sender: &String, - transaction_version: i64, - transaction_block_height: i64, - ) -> Result> { - let mut signatures = Vec::default(); - // process sender signature - signatures.append(&mut Self::parse_multi_agent_signature_helper( - s.sender.as_ref().unwrap(), - sender, - transaction_version, - transaction_block_height, - true, - 0, - None, - )); - for (index, address) in s.secondary_signer_addresses.iter().enumerate() { - let secondary_sig = s.secondary_signers.get(index).context(format!( - "Failed to parse index {} for multi agent secondary signers", - index - ))?; - signatures.append(&mut Self::parse_multi_agent_signature_helper( - secondary_sig, - sender, - transaction_version, - transaction_block_height, - false, - index as i64, - Some(&address.to_string()), - )); - } - Ok(signatures) - } - - fn parse_fee_payer_signature( - s: &ProtoFeePayerSignature, - sender: &String, - transaction_version: i64, - transaction_block_height: i64, - ) -> Result> { - let mut signatures = Vec::default(); - // process sender signature - signatures.append(&mut Self::parse_multi_agent_signature_helper( - s.sender.as_ref().unwrap(), - sender, - transaction_version, - transaction_block_height, - true, - 0, - None, - )); - for (index, address) in s.secondary_signer_addresses.iter().enumerate() { - let secondary_sig = s.secondary_signers.get(index).context(format!( - "Failed to parse index {} for multi agent secondary signers", - index - ))?; - signatures.append(&mut Self::parse_multi_agent_signature_helper( - secondary_sig, - sender, - transaction_version, - transaction_block_height, - false, - index as i64, - Some(&address.to_string()), - )); - } - signatures.append(&mut Self::parse_multi_agent_signature_helper( - s.fee_payer_signer.as_ref().unwrap(), - sender, - transaction_version, - transaction_block_height, - true, - (s.secondary_signer_addresses.len() + 1) as i64, - Some(&s.fee_payer_address.to_string()), - )); - Ok(signatures) - } - - fn parse_multi_agent_signature_helper( - s: &ProtoAccountSignature, - sender: &String, - transaction_version: i64, - transaction_block_height: i64, - is_sender_primary: bool, - multi_agent_index: i64, - override_address: Option<&String>, - ) -> Vec { - let signature = s.signature.as_ref().unwrap(); - match signature { - AccountSignatureEnum::Ed25519(sig) => vec![Self::parse_single_signature( - sig, - sender, - transaction_version, - transaction_block_height, - is_sender_primary, - multi_agent_index, - override_address, - )], - AccountSignatureEnum::MultiEd25519(sig) => Self::parse_multi_signature( - sig, - sender, - transaction_version, - transaction_block_height, - is_sender_primary, - multi_agent_index, - override_address, - ), - } - } -} diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/default_models/transactions.rs b/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/default_models/transactions.rs deleted file mode 100644 index bb975a8ab55ca..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/default_models/transactions.rs +++ /dev/null @@ -1,444 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - 
-// This is required because a diesel macro makes clippy sad -#![allow(clippy::extra_unused_lifetimes)] -#![allow(clippy::unused_unit)] - -use super::{ - block_metadata_transactions::{BlockMetadataTransaction, BlockMetadataTransactionQuery}, - events::{EventModel, EventQuery}, - signatures::Signature, - user_transactions::{UserTransaction, UserTransactionQuery}, - write_set_changes::{WriteSetChangeDetail, WriteSetChangeModel, WriteSetChangeQuery}, -}; -use crate::{ - schema::{block_metadata_transactions, transactions, user_transactions}, - utils::{ - database::PgPoolConnection, - util::{get_clean_payload, get_clean_writeset, standardize_address, u64_to_bigdecimal}, - }, -}; -use aptos_protos::transaction::v1::{ - transaction::{TransactionType, TxnData}, - Transaction as TransactionPB, TransactionInfo, -}; -use bigdecimal::BigDecimal; -use diesel::{ - BelongingToDsl, ExpressionMethods, GroupedBy, OptionalExtension, QueryDsl, RunQueryDsl, -}; -use field_count::FieldCount; -use serde::{Deserialize, Serialize}; - -#[derive(Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] -#[diesel(primary_key(version))] -#[diesel(table_name = transactions)] -pub struct Transaction { - pub version: i64, - pub block_height: i64, - pub hash: String, - pub type_: String, - pub payload: Option, - pub state_change_hash: String, - pub event_root_hash: String, - pub state_checkpoint_hash: Option, - pub gas_used: BigDecimal, - pub success: bool, - pub vm_status: String, - pub accumulator_root_hash: String, - pub num_events: i64, - pub num_write_set_changes: i64, - pub epoch: i64, -} - -/// Need a separate struct for queryable because we don't want to define the inserted_at column (letting DB fill) -#[derive(Debug, Deserialize, Identifiable, Queryable, Serialize)] -#[diesel(primary_key(version))] -#[diesel(table_name = transactions)] -pub struct TransactionQuery { - pub version: i64, - pub block_height: i64, - pub hash: String, - pub type_: String, - pub payload: Option, - pub state_change_hash: String, - pub event_root_hash: String, - pub state_checkpoint_hash: Option, - pub gas_used: BigDecimal, - pub success: bool, - pub vm_status: String, - pub accumulator_root_hash: String, - pub num_events: i64, - pub num_write_set_changes: i64, - pub inserted_at: chrono::NaiveDateTime, - pub epoch: i64, -} - -impl Transaction { - fn from_transaction_info( - info: &TransactionInfo, - payload: Option, - version: i64, - type_: String, - num_events: i64, - block_height: i64, - epoch: i64, - ) -> Self { - Self { - type_, - payload, - version, - block_height, - hash: standardize_address(hex::encode(info.hash.as_slice()).as_str()), - state_change_hash: standardize_address( - hex::encode(info.state_change_hash.as_slice()).as_str(), - ), - event_root_hash: standardize_address( - hex::encode(info.event_root_hash.as_slice()).as_str(), - ), - state_checkpoint_hash: info - .state_checkpoint_hash - .as_ref() - .map(|hash| standardize_address(hex::encode(hash).as_str())), - gas_used: u64_to_bigdecimal(info.gas_used), - success: info.success, - vm_status: info.vm_status.clone(), - accumulator_root_hash: standardize_address( - hex::encode(info.accumulator_root_hash.as_slice()).as_str(), - ), - num_events, - num_write_set_changes: info.changes.len() as i64, - epoch, - } - } - - pub fn from_transaction( - transaction: &TransactionPB, - ) -> ( - Self, - Option, - Vec, - Vec, - Vec, - ) { - let block_height = transaction.block_height as i64; - let epoch = transaction.epoch as i64; - let txn_data = transaction - .txn_data - 
.as_ref() - .expect("Txn Data doesn't exit!"); - let version = transaction.version as i64; - let transaction_type = TransactionType::from_i32(transaction.r#type) - .expect("Transaction type doesn't exist!") - .as_str_name() - .to_string(); - let transaction_info = transaction - .info - .as_ref() - .expect("Transaction info doesn't exist!"); - let timestamp = transaction - .timestamp - .as_ref() - .expect("Transaction timestamp doesn't exist!"); - match txn_data { - TxnData::User(user_txn) => { - let (user_txn_output, signatures) = UserTransaction::from_transaction( - user_txn, - timestamp, - block_height, - epoch, - version, - ); - - let (wsc, wsc_detail) = WriteSetChangeModel::from_write_set_changes( - &transaction_info.changes, - version, - block_height, - ); - let payload = user_txn - .request - .as_ref() - .expect("Getting user request failed.") - .payload - .as_ref() - .expect("Getting payload failed."); - let payload_cleaned = get_clean_payload(payload, version); - - ( - Self::from_transaction_info( - transaction_info, - payload_cleaned, - version, - transaction_type, - user_txn.events.len() as i64, - block_height, - epoch, - ), - Some(TransactionDetail::User(user_txn_output, signatures)), - EventModel::from_events(&user_txn.events, version, block_height), - wsc, - wsc_detail, - ) - }, - TxnData::Genesis(genesis_txn) => { - let (wsc, wsc_detail) = WriteSetChangeModel::from_write_set_changes( - &transaction_info.changes, - version, - block_height, - ); - let payload = genesis_txn.payload.as_ref().unwrap(); - let payload_cleaned = get_clean_writeset(payload, version); - ( - Self::from_transaction_info( - transaction_info, - payload_cleaned, - version, - transaction_type, - genesis_txn.events.len() as i64, - block_height, - epoch, - ), - None, - EventModel::from_events(&genesis_txn.events, version, block_height), - wsc, - wsc_detail, - ) - }, - TxnData::BlockMetadata(block_metadata_txn) => { - let (wsc, wsc_detail) = WriteSetChangeModel::from_write_set_changes( - &transaction_info.changes, - version, - block_height, - ); - ( - Self::from_transaction_info( - transaction_info, - None, - version, - transaction_type, - block_metadata_txn.events.len() as i64, - block_height, - epoch, - ), - Some(TransactionDetail::BlockMetadata( - BlockMetadataTransaction::from_transaction( - block_metadata_txn, - version, - block_height, - epoch, - timestamp, - ), - )), - EventModel::from_events(&block_metadata_txn.events, version, block_height), - wsc, - wsc_detail, - ) - }, - TxnData::StateCheckpoint(_state_checkpoint_txn) => ( - Self::from_transaction_info( - transaction_info, - None, - version, - transaction_type, - 0, - block_height, - epoch, - ), - None, - vec![], - vec![], - vec![], - ), - } - } - - pub fn from_transactions( - transactions: &[TransactionPB], - ) -> ( - Vec, - Vec, - Vec, - Vec, - Vec, - ) { - let mut txns = vec![]; - let mut txn_details = vec![]; - let mut events = vec![]; - let mut wscs = vec![]; - let mut wsc_details = vec![]; - - for txn in transactions { - let (txn, txn_detail, mut event_list, mut wsc_list, mut wsc_detail_list) = - Self::from_transaction(txn); - txns.push(txn); - if let Some(a) = txn_detail { - txn_details.push(a); - } - events.append(&mut event_list); - wscs.append(&mut wsc_list); - wsc_details.append(&mut wsc_detail_list); - } - (txns, txn_details, events, wscs, wsc_details) - } -} - -impl TransactionQuery { - pub fn get_many_by_version( - start_version: u64, - number_to_get: i64, - conn: &mut PgPoolConnection, - ) -> diesel::QueryResult< - Vec<( - Self, - 
Option, - Option, - Vec, - Vec, - )>, - > { - let mut txs = transactions::table - .filter(transactions::version.ge(start_version as i64)) - .order(transactions::version.asc()) - .limit(number_to_get) - .load::(conn)?; - - let mut user_transactions: Vec> = - UserTransactionQuery::belonging_to(&txs) - .load::(conn)? - .grouped_by(&txs); - - let mut block_metadata_transactions: Vec> = - BlockMetadataTransactionQuery::belonging_to(&txs) - .load::(conn)? - .grouped_by(&txs); - - let mut events: Vec> = EventQuery::belonging_to(&txs) - .load::(conn)? - .grouped_by(&txs); - - let mut write_set_changes: Vec> = - WriteSetChangeQuery::belonging_to(&txs) - .load::(conn)? - .grouped_by(&txs); - - // Convert to the nice result tuple - let mut result = vec![]; - while !txs.is_empty() { - result.push(( - txs.pop().unwrap(), - user_transactions.pop().unwrap().pop(), - block_metadata_transactions.pop().unwrap().pop(), - events.pop().unwrap(), - write_set_changes.pop().unwrap(), - )) - } - - Ok(result) - } - - pub fn get_by_version( - version: u64, - conn: &mut PgPoolConnection, - ) -> diesel::QueryResult<( - Self, - Option, - Option, - Vec, - Vec, - )> { - let transaction = transactions::table - .filter(transactions::version.eq(version as i64)) - .first::(conn)?; - - let (user_transaction, block_metadata_transaction, events, write_set_changes) = - transaction.get_details_for_transaction(conn)?; - - Ok(( - transaction, - user_transaction, - block_metadata_transaction, - events, - write_set_changes, - )) - } - - pub fn get_by_hash( - transaction_hash: &str, - conn: &mut PgPoolConnection, - ) -> diesel::QueryResult<( - Self, - Option, - Option, - Vec, - Vec, - )> { - let transaction = transactions::table - .filter(transactions::hash.eq(&transaction_hash)) - .first::(conn)?; - - let (user_transaction, block_metadata_transaction, events, write_set_changes) = - transaction.get_details_for_transaction(conn)?; - - Ok(( - transaction, - user_transaction, - block_metadata_transaction, - events, - write_set_changes, - )) - } - - fn get_details_for_transaction( - &self, - conn: &mut PgPoolConnection, - ) -> diesel::QueryResult<( - Option, - Option, - Vec, - Vec, - )> { - let mut user_transaction: Option = None; - let mut block_metadata_transaction: Option = None; - - let events = crate::schema::events::table - .filter(crate::schema::events::transaction_version.eq(&self.version)) - .load::(conn)?; - - let write_set_changes = crate::schema::write_set_changes::table - .filter(crate::schema::write_set_changes::transaction_version.eq(&self.version)) - .load::(conn)?; - - match self.type_.as_str() { - "user_transaction" => { - user_transaction = user_transactions::table - .filter(user_transactions::version.eq(&self.version)) - .first::(conn) - .optional()?; - }, - "block_metadata_transaction" => { - block_metadata_transaction = block_metadata_transactions::table - .filter(block_metadata_transactions::version.eq(&self.version)) - .first::(conn) - .optional()?; - }, - "genesis_transaction" => {}, - "state_checkpoint_transaction" => {}, - _ => unreachable!("Unknown transaction type: {}", &self.type_), - }; - Ok(( - user_transaction, - block_metadata_transaction, - events, - write_set_changes, - )) - } -} - -#[derive(Deserialize, Serialize)] -pub enum TransactionDetail { - User(UserTransaction, Vec), - BlockMetadata(BlockMetadataTransaction), -} - -// Prevent conflicts with other things named `Transaction` -pub type TransactionModel = Transaction; diff --git 
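
The transaction model deleted above fans each protobuf transaction out into one base row plus an optional per-variant detail and child vectors (events, write-set changes), and from_transactions then folds those per-transaction results into flat vectors for bulk insertion. A small generic sketch of that folding step, with stand-in type parameters rather than the original signature:

    fn accumulate<Txn, Detail, Child>(
        parsed: Vec<(Txn, Option<Detail>, Vec<Child>)>,
    ) -> (Vec<Txn>, Vec<Detail>, Vec<Child>) {
        let mut txns = Vec::new();
        let mut details = Vec::new();
        let mut children = Vec::new();
        for (txn, detail, mut child_rows) in parsed {
            txns.push(txn);
            if let Some(detail) = detail {
                details.push(detail);
            }
            children.append(&mut child_rows);
        }
        (txns, details, children)
    }
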
a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/default_models/user_transactions.rs b/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/default_models/user_transactions.rs deleted file mode 100644 index ac6c5e2033da6..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/default_models/user_transactions.rs +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright © Aptos Foundation - -// Copyright (c) Aptos -// SPDX-License-Identifier: Apache-2.0 - -// This is required because a diesel macro makes clippy sad -#![allow(clippy::extra_unused_lifetimes)] -#![allow(clippy::unused_unit)] - -use super::{ - signatures::Signature, - transactions::{Transaction, TransactionQuery}, -}; -use crate::{ - schema::user_transactions, - utils::util::{ - get_entry_function_from_user_request, parse_timestamp, standardize_address, - u64_to_bigdecimal, - }, -}; -use aptos_protos::{ - transaction::v1::{UserTransaction as UserTransactionPB, UserTransactionRequest}, - util::timestamp::Timestamp, -}; -use bigdecimal::BigDecimal; -use field_count::FieldCount; -use serde::{Deserialize, Serialize}; - -#[derive( - Associations, Clone, Deserialize, Debug, FieldCount, Identifiable, Insertable, Serialize, -)] -#[diesel(belongs_to(Transaction, foreign_key = version))] -#[diesel(primary_key(version))] -#[diesel(table_name = user_transactions)] -pub struct UserTransaction { - pub version: i64, - pub block_height: i64, - pub parent_signature_type: String, - pub sender: String, - pub sequence_number: i64, - pub max_gas_amount: BigDecimal, - pub expiration_timestamp_secs: chrono::NaiveDateTime, - pub gas_unit_price: BigDecimal, - pub timestamp: chrono::NaiveDateTime, - pub entry_function_id_str: String, - pub epoch: i64, -} - -/// Need a separate struct for queryable because we don't want to define the inserted_at column (letting DB fill) -#[derive(Associations, Clone, Deserialize, Debug, Identifiable, Queryable, Serialize)] -#[diesel(belongs_to(TransactionQuery, foreign_key = version))] -#[diesel(primary_key(version))] -#[diesel(table_name = user_transactions)] -pub struct UserTransactionQuery { - pub version: i64, - pub block_height: i64, - pub parent_signature_type: String, - pub sender: String, - pub sequence_number: i64, - pub max_gas_amount: BigDecimal, - pub expiration_timestamp_secs: chrono::NaiveDateTime, - pub gas_unit_price: BigDecimal, - pub timestamp: chrono::NaiveDateTime, - pub entry_function_id_str: String, - pub inserted_at: chrono::NaiveDateTime, - pub epoch: i64, -} - -impl UserTransaction { - pub fn from_transaction( - txn: &UserTransactionPB, - timestamp: &Timestamp, - block_height: i64, - epoch: i64, - version: i64, - ) -> (Self, Vec) { - let user_request = txn - .request - .as_ref() - .expect("Sends is not present in user txn"); - ( - Self { - version, - block_height, - parent_signature_type: txn - .request - .as_ref() - .unwrap() - .signature - .as_ref() - .map(Signature::get_signature_type) - .unwrap_or_default(), - sender: standardize_address(&user_request.sender), - sequence_number: user_request.sequence_number as i64, - max_gas_amount: u64_to_bigdecimal(user_request.max_gas_amount), - expiration_timestamp_secs: parse_timestamp( - user_request - .expiration_timestamp_secs - .as_ref() - .expect("Expiration timestamp is not present in user txn"), - version, - ), - gas_unit_price: u64_to_bigdecimal(user_request.gas_unit_price), - timestamp: parse_timestamp(timestamp, version), - entry_function_id_str: get_entry_function_from_user_request(user_request) - .unwrap_or_default(), - epoch, - 
}, - Self::get_signatures(user_request, version, block_height), - ) - } - - /// Empty vec if signature is None - pub fn get_signatures( - user_request: &UserTransactionRequest, - version: i64, - block_height: i64, - ) -> Vec { - user_request - .signature - .as_ref() - .map(|s| { - Signature::from_user_transaction(s, &user_request.sender, version, block_height) - .unwrap() - }) - .unwrap_or_default() - } -} - -// Prevent conflicts with other things named `Transaction` -pub type UserTransactionModel = UserTransaction; diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/default_models/v2_objects.rs b/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/default_models/v2_objects.rs deleted file mode 100644 index 404e8a14e524c..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/default_models/v2_objects.rs +++ /dev/null @@ -1,205 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -// This is required because a diesel macro makes clippy sad -#![allow(clippy::extra_unused_lifetimes)] -#![allow(clippy::unused_unit)] - -use super::move_resources::MoveResource; -use crate::{ - models::token_models::{ - collection_datas::{QUERY_RETRIES, QUERY_RETRY_DELAY_MS}, - v2_token_utils::ObjectWithMetadata, - }, - schema::{current_objects, objects}, - utils::database::PgPoolConnection, -}; -use aptos_protos::transaction::v1::{DeleteResource, WriteResource}; -use bigdecimal::BigDecimal; -use diesel::prelude::*; -use field_count::FieldCount; -use serde::{Deserialize, Serialize}; -use std::collections::HashMap; - -// PK of current_objects, i.e. object_address -pub type CurrentObjectPK = String; - -#[derive(Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] -#[diesel(primary_key(transaction_version, write_set_change_index))] -#[diesel(table_name = objects)] -pub struct Object { - pub transaction_version: i64, - pub write_set_change_index: i64, - pub object_address: String, - pub owner_address: String, - pub state_key_hash: String, - pub guid_creation_num: BigDecimal, - pub allow_ungated_transfer: bool, - pub is_deleted: bool, -} - -#[derive(Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] -#[diesel(primary_key(object_address))] -#[diesel(table_name = current_objects)] -pub struct CurrentObject { - pub object_address: String, - pub owner_address: String, - pub state_key_hash: String, - pub allow_ungated_transfer: bool, - pub last_guid_creation_num: BigDecimal, - pub last_transaction_version: i64, - pub is_deleted: bool, -} - -#[derive(Debug, Deserialize, Identifiable, Queryable, Serialize)] -#[diesel(primary_key(object_address))] -#[diesel(table_name = current_objects)] -pub struct CurrentObjectQuery { - pub object_address: String, - pub owner_address: String, - pub state_key_hash: String, - pub allow_ungated_transfer: bool, - pub last_guid_creation_num: BigDecimal, - pub last_transaction_version: i64, - pub is_deleted: bool, - pub inserted_at: chrono::NaiveDateTime, -} - -impl Object { - pub fn from_write_resource( - write_resource: &WriteResource, - txn_version: i64, - write_set_change_index: i64, - ) -> anyhow::Result> { - if let Some(inner) = ObjectWithMetadata::from_write_resource(write_resource, txn_version)? 
{ - let resource = MoveResource::from_write_resource( - write_resource, - 0, // Placeholder, this isn't used anyway - txn_version, - 0, // Placeholder, this isn't used anyway - ); - let object_core = &inner.object_core; - Ok(Some(( - Self { - transaction_version: txn_version, - write_set_change_index, - object_address: resource.address.clone(), - owner_address: object_core.get_owner_address(), - state_key_hash: resource.state_key_hash.clone(), - guid_creation_num: object_core.guid_creation_num.clone(), - allow_ungated_transfer: object_core.allow_ungated_transfer, - is_deleted: false, - }, - CurrentObject { - object_address: resource.address, - owner_address: object_core.get_owner_address(), - state_key_hash: resource.state_key_hash, - allow_ungated_transfer: object_core.allow_ungated_transfer, - last_guid_creation_num: object_core.guid_creation_num.clone(), - last_transaction_version: txn_version, - is_deleted: false, - }, - ))) - } else { - Ok(None) - } - } - - /// This handles the case where the entire object is deleted - /// TODO: We need to detect if an object is only partially deleted - /// using KV store - pub fn from_delete_resource( - delete_resource: &DeleteResource, - txn_version: i64, - write_set_change_index: i64, - object_mapping: &HashMap, - conn: &mut PgPoolConnection, - ) -> anyhow::Result> { - if delete_resource.type_str == "0x1::object::ObjectGroup" { - let resource = MoveResource::from_delete_resource( - delete_resource, - 0, // Placeholder, this isn't used anyway - txn_version, - 0, // Placeholder, this isn't used anyway - ); - let previous_object = if let Some(object) = object_mapping.get(&resource.address) { - object.clone() - } else { - match Self::get_object_owner(conn, &resource.address) { - Ok(owner) => owner, - Err(_) => { - tracing::error!( - transaction_version = txn_version, - lookup_key = &resource.address, - "Missing object owner for object. You probably should backfill db.", - ); - return Ok(None); - }, - } - }; - Ok(Some(( - Self { - transaction_version: txn_version, - write_set_change_index, - object_address: resource.address.clone(), - owner_address: previous_object.owner_address.clone(), - state_key_hash: resource.state_key_hash.clone(), - guid_creation_num: previous_object.last_guid_creation_num.clone(), - allow_ungated_transfer: previous_object.allow_ungated_transfer, - is_deleted: true, - }, - CurrentObject { - object_address: resource.address, - owner_address: previous_object.owner_address.clone(), - state_key_hash: resource.state_key_hash, - last_guid_creation_num: previous_object.last_guid_creation_num.clone(), - allow_ungated_transfer: previous_object.allow_ungated_transfer, - last_transaction_version: txn_version, - is_deleted: true, - }, - ))) - } else { - Ok(None) - } - } - - /// This is actually not great because object owner can change. 
The best we can do now though - fn get_object_owner( - conn: &mut PgPoolConnection, - object_address: &str, - ) -> anyhow::Result { - let mut retried = 0; - while retried < QUERY_RETRIES { - retried += 1; - match CurrentObjectQuery::get_by_address(object_address, conn) { - Ok(res) => { - return Ok(CurrentObject { - object_address: res.object_address, - owner_address: res.owner_address, - state_key_hash: res.state_key_hash, - allow_ungated_transfer: res.allow_ungated_transfer, - last_guid_creation_num: res.last_guid_creation_num, - last_transaction_version: res.last_transaction_version, - is_deleted: res.is_deleted, - }) - }, - Err(_) => { - std::thread::sleep(std::time::Duration::from_millis(QUERY_RETRY_DELAY_MS)); - }, - } - } - Err(anyhow::anyhow!("Failed to get object owner")) - } -} - -impl CurrentObjectQuery { - /// TODO: Change this to a KV store - pub fn get_by_address( - object_address: &str, - conn: &mut PgPoolConnection, - ) -> diesel::QueryResult { - current_objects::table - .filter(current_objects::object_address.eq(object_address)) - .first::(conn) - } -} diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/default_models/write_set_changes.rs b/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/default_models/write_set_changes.rs deleted file mode 100644 index 3a5d93003a295..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/default_models/write_set_changes.rs +++ /dev/null @@ -1,227 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -#![allow(clippy::extra_unused_lifetimes)] -use super::{ - move_modules::MoveModule, - move_resources::MoveResource, - move_tables::{CurrentTableItem, TableItem, TableMetadata}, - transactions::{Transaction, TransactionQuery}, -}; -use crate::{schema::write_set_changes, utils::util::standardize_address}; -use aptos_protos::transaction::v1::{ - write_set_change::{Change as WriteSetChangeEnum, Type as WriteSetChangeTypeEnum}, - WriteSetChange as WriteSetChangePB, -}; -use field_count::FieldCount; -use serde::{Deserialize, Serialize}; - -#[derive(Associations, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] -#[diesel(belongs_to(Transaction, foreign_key = transaction_version))] -#[diesel(primary_key(transaction_version, index))] -#[diesel(table_name = write_set_changes)] -pub struct WriteSetChange { - pub transaction_version: i64, - pub index: i64, - pub hash: String, - transaction_block_height: i64, - pub type_: String, - pub address: String, -} - -/// Need a separate struct for queryable because we don't want to define the inserted_at column (letting DB fill) -#[derive(Associations, Debug, Deserialize, Identifiable, Queryable, Serialize)] -#[diesel(belongs_to(TransactionQuery, foreign_key = transaction_version))] -#[diesel(primary_key(transaction_version, index))] -#[diesel(table_name = write_set_changes)] -pub struct WriteSetChangeQuery { - pub transaction_version: i64, - pub index: i64, - pub hash: String, - transaction_block_height: i64, - pub type_: String, - pub address: String, - pub inserted_at: chrono::NaiveDateTime, -} - -impl WriteSetChange { - pub fn from_write_set_change( - write_set_change: &WriteSetChangePB, - index: i64, - transaction_version: i64, - transaction_block_height: i64, - ) -> (Self, WriteSetChangeDetail) { - let type_ = Self::get_write_set_change_type(write_set_change); - let change = write_set_change - .change - .as_ref() - .expect("WriteSetChange must have a change"); - match change { - WriteSetChangeEnum::WriteModule(inner) => 
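
When an object is deleted and its prior state is not in the in-memory map, the code deleted above falls back to a retried database lookup before giving up and logging a backfill warning. A condensed sketch of that retry loop follows; the concrete retry count and delay are placeholders here, since QUERY_RETRIES and QUERY_RETRY_DELAY_MS are defined in a file outside this hunk.

    const QUERY_RETRIES: u32 = 5; // placeholder value
    const QUERY_RETRY_DELAY_MS: u64 = 500; // placeholder value

    fn lookup_with_retries<T, E>(mut query: impl FnMut() -> Result<T, E>) -> anyhow::Result<T> {
        for _ in 0..QUERY_RETRIES {
            if let Ok(found) = query() {
                return Ok(found);
            }
            std::thread::sleep(std::time::Duration::from_millis(QUERY_RETRY_DELAY_MS));
        }
        Err(anyhow::anyhow!(
            "lookup still failing after {} retries",
            QUERY_RETRIES
        ))
    }
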
( - Self { - transaction_version, - hash: standardize_address( - hex::encode(inner.state_key_hash.as_slice()).as_str(), - ), - transaction_block_height, - type_, - address: standardize_address(&inner.address.to_string()), - index, - }, - WriteSetChangeDetail::Module(MoveModule::from_write_module( - inner, - index, - transaction_version, - transaction_block_height, - )), - ), - WriteSetChangeEnum::DeleteModule(inner) => ( - Self { - transaction_version, - hash: standardize_address( - hex::encode(inner.state_key_hash.as_slice()).as_str(), - ), - transaction_block_height, - type_, - address: standardize_address(&inner.address.to_string()), - index, - }, - WriteSetChangeDetail::Module(MoveModule::from_delete_module( - inner, - index, - transaction_version, - transaction_block_height, - )), - ), - WriteSetChangeEnum::WriteResource(inner) => ( - Self { - transaction_version, - hash: standardize_address( - hex::encode(inner.state_key_hash.as_slice()).as_str(), - ), - transaction_block_height, - type_, - address: standardize_address(&inner.address.to_string()), - index, - }, - WriteSetChangeDetail::Resource(MoveResource::from_write_resource( - inner, - index, - transaction_version, - transaction_block_height, - )), - ), - WriteSetChangeEnum::DeleteResource(inner) => ( - Self { - transaction_version, - hash: standardize_address( - hex::encode(inner.state_key_hash.as_slice()).as_str(), - ), - transaction_block_height, - type_, - address: standardize_address(&inner.address.to_string()), - index, - }, - WriteSetChangeDetail::Resource(MoveResource::from_delete_resource( - inner, - index, - transaction_version, - transaction_block_height, - )), - ), - WriteSetChangeEnum::WriteTableItem(inner) => { - let (ti, cti) = TableItem::from_write_table_item( - inner, - index, - transaction_version, - transaction_block_height, - ); - ( - Self { - transaction_version, - hash: standardize_address( - hex::encode(inner.state_key_hash.as_slice()).as_str(), - ), - transaction_block_height, - type_, - address: String::default(), - index, - }, - WriteSetChangeDetail::Table( - ti, - cti, - Some(TableMetadata::from_write_table_item(inner)), - ), - ) - }, - WriteSetChangeEnum::DeleteTableItem(inner) => { - let (ti, cti) = TableItem::from_delete_table_item( - inner, - index, - transaction_version, - transaction_block_height, - ); - ( - Self { - transaction_version, - hash: standardize_address( - hex::encode(inner.state_key_hash.as_slice()).as_str(), - ), - transaction_block_height, - type_, - address: String::default(), - index, - }, - WriteSetChangeDetail::Table(ti, cti, None), - ) - }, - } - } - - pub fn from_write_set_changes( - write_set_changes: &[WriteSetChangePB], - transaction_version: i64, - transaction_block_height: i64, - ) -> (Vec, Vec) { - write_set_changes - .iter() - .enumerate() - .map(|(index, write_set_change)| { - Self::from_write_set_change( - write_set_change, - index as i64, - transaction_version, - transaction_block_height, - ) - }) - .collect::>() - .into_iter() - .unzip() - } - - fn get_write_set_change_type(t: &WriteSetChangePB) -> String { - match WriteSetChangeTypeEnum::from_i32(t.r#type) - .expect("WriteSetChange must have a valid type.") - { - WriteSetChangeTypeEnum::DeleteModule => "delete_module".to_string(), - WriteSetChangeTypeEnum::DeleteResource => "delete_resource".to_string(), - WriteSetChangeTypeEnum::DeleteTableItem => "delete_table_item".to_string(), - WriteSetChangeTypeEnum::WriteModule => "write_module".to_string(), - WriteSetChangeTypeEnum::WriteResource => 
"write_resource".to_string(), - WriteSetChangeTypeEnum::WriteTableItem => "write_table_item".to_string(), - WriteSetChangeTypeEnum::Unspecified => { - panic!("WriteSetChange type must be specified.") - }, - } - } -} - -#[derive(Deserialize, Serialize)] -pub enum WriteSetChangeDetail { - Module(MoveModule), - Resource(MoveResource), - Table(TableItem, CurrentTableItem, Option), -} - -// Prevent conflicts with other things named `WriteSetChange` -pub type WriteSetChangeModel = WriteSetChange; diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/ledger_info.rs b/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/ledger_info.rs deleted file mode 100644 index 3e8510e034883..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/ledger_info.rs +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -#![allow(clippy::extra_unused_lifetimes)] -use crate::{schema::ledger_infos, utils::database::PgPoolConnection}; -use diesel::{OptionalExtension, QueryDsl, RunQueryDsl}; - -#[derive(Debug, Identifiable, Insertable, Queryable)] -#[diesel(table_name = ledger_infos)] -#[diesel(primary_key(chain_id))] -pub struct LedgerInfo { - pub chain_id: i64, -} - -impl LedgerInfo { - pub fn get(conn: &mut PgPoolConnection) -> diesel::QueryResult> { - ledger_infos::table - .select(ledger_infos::all_columns) - .first::(conn) - .optional() - } -} diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/mod.rs b/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/mod.rs deleted file mode 100644 index 24f8152e86c99..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/mod.rs +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -pub mod coin_models; -pub mod default_models; -pub mod ledger_info; -pub mod processor_status; -pub mod property_map; -pub mod stake_models; -pub mod token_models; diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/processor_status.rs b/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/processor_status.rs deleted file mode 100644 index 2a836e3f4e297..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/processor_status.rs +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -#![allow(clippy::extra_unused_lifetimes)] -use crate::{schema::processor_status, utils::database::PgPoolConnection}; -use diesel::{ExpressionMethods, OptionalExtension, QueryDsl, RunQueryDsl}; - -#[derive(AsChangeset, Debug, Insertable)] -#[diesel(table_name = processor_status)] -/// Only tracking the latest version successfully processed -pub struct ProcessorStatus { - pub processor: String, - pub last_success_version: i64, -} - -#[derive(AsChangeset, Debug, Queryable)] -#[diesel(table_name = processor_status)] -/// Only tracking the latest version successfully processed -pub struct ProcessorStatusQuery { - pub processor: String, - pub last_success_version: i64, - pub last_updated: chrono::NaiveDateTime, -} - -impl ProcessorStatusQuery { - pub fn get_by_processor( - processor_name: &String, - conn: &mut PgPoolConnection, - ) -> diesel::QueryResult> { - processor_status::table - .filter(processor_status::processor.eq(processor_name)) - .first::(conn) - .optional() - } -} diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/property_map.rs b/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/property_map.rs deleted file mode 100644 index 
ddec272ec1e7d..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/property_map.rs +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -use crate::utils::util; -use serde::{Deserialize, Serialize}; -use serde_json::{Result, Value}; -use std::collections::HashMap; - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct PropertyValue { - value: String, - typ: String, -} - -pub fn create_property_value(typ: String, value: String) -> Result { - Ok(PropertyValue { - value: util::convert_bcs_hex(typ.clone(), value.clone()).unwrap_or(value), - typ, - }) -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct PropertyMap { - data: HashMap, -} - -impl PropertyMap { - /// Deserializes PropertyValue from bcs encoded json - pub fn from_bcs_encode_str(val: Value) -> Option { - let mut pm = PropertyMap { - data: HashMap::new(), - }; - let records: &Vec = val.get("map")?.get("data")?.as_array()?; - for entry in records { - let key = entry.get("key")?.as_str()?; - let val = entry.get("value")?.get("value")?.as_str()?; - let typ = entry.get("value")?.get("type")?.as_str()?; - let pv = create_property_value(typ.to_string(), val.to_string()).ok()?; - pm.data.insert(key.to_string(), pv); - } - Some(Self::to_flat_json(pm)) - } - - /// Flattens PropertyMap which can't be easily consumable by downstream. - /// For example: Object {"data": Object {"creation_time_sec": Object {"value": String("1666125588")}}} - /// becomes Object {"creation_time_sec": "1666125588"} - fn to_flat_json(val: PropertyMap) -> Value { - let mut map = HashMap::new(); - for (k, v) in val.data { - map.insert(k, v.value); - } - serde_json::to_value(map).unwrap() - } -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct TokenObjectPropertyValue { - value: String, - typ: u8, -} - -pub fn create_token_object_property_value( - typ: u8, - value: String, -) -> Result { - Ok(TokenObjectPropertyValue { - value: util::convert_bcs_hex_new(typ, value.clone()).unwrap_or(value), - typ, - }) -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct TokenObjectPropertyMap { - data: HashMap, -} - -impl TokenObjectPropertyMap { - /// Deserializes PropertyValue from bcs encoded json - pub fn from_bcs_encode_str(val: Value) -> Option { - let mut pm = TokenObjectPropertyMap { - data: HashMap::new(), - }; - let records: &Vec = val.get("data")?.as_array()?; - for entry in records { - let key = entry.get("key")?.as_str()?; - let val = entry.get("value")?.get("value")?.as_str()?; - let typ = entry.get("value")?.get("type")?.as_u64()?; - let pv = create_token_object_property_value(typ as u8, val.to_string()).ok()?; - pm.data.insert(key.to_string(), pv); - } - Some(Self::to_flat_json_new(pm)) - } - - /// Flattens PropertyMap which can't be easily consumable by downstream. 
- /// For example: Object {"data": Object {"creation_time_sec": Object {"value": String("1666125588")}}} - /// becomes Object {"creation_time_sec": "1666125588"} - fn to_flat_json_new(val: TokenObjectPropertyMap) -> Value { - let mut map = HashMap::new(); - for (k, v) in val.data { - map.insert(k, v.value); - } - serde_json::to_value(map).unwrap() - } -} diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/stake_models/delegator_activities.rs b/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/stake_models/delegator_activities.rs deleted file mode 100644 index 74420a5ac7969..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/stake_models/delegator_activities.rs +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright © Aptos Foundation - -// This is required because a diesel macro makes clippy sad -#![allow(clippy::extra_unused_lifetimes)] - -use super::stake_utils::StakeEvent; -use crate::{ - schema::delegated_staking_activities, - utils::util::{standardize_address, u64_to_bigdecimal}, -}; -use aptos_protos::transaction::v1::{transaction::TxnData, Transaction}; -use bigdecimal::BigDecimal; -use field_count::FieldCount; -use serde::{Deserialize, Serialize}; - -#[derive(Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] -#[diesel(primary_key(transaction_version, event_index))] -#[diesel(table_name = delegated_staking_activities)] -pub struct DelegatedStakingActivity { - pub transaction_version: i64, - pub event_index: i64, - pub delegator_address: String, - pub pool_address: String, - pub event_type: String, - pub amount: BigDecimal, -} - -impl DelegatedStakingActivity { - /// Pretty straightforward parsing from known delegated staking events - pub fn from_transaction(transaction: &Transaction) -> anyhow::Result> { - let mut delegator_activities = vec![]; - let txn_data = transaction - .txn_data - .as_ref() - .expect("Txn Data doesn't exit!"); - let txn_version = transaction.version as i64; - let events = match txn_data { - TxnData::User(txn) => &txn.events, - TxnData::BlockMetadata(txn) => &txn.events, - _ => return Ok(delegator_activities), - }; - for (index, event) in events.iter().enumerate() { - let event_index = index as i64; - if let Some(staking_event) = - StakeEvent::from_event(event.type_str.as_str(), &event.data, txn_version)? 
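
The property-map helpers deleted above flatten the nested on-chain JSON into a plain key-to-string object, BCS-decoding the hex value where possible. A minimal sketch of just the flattening step, without the BCS decoding and with the error handling collapsed into Option:

    use serde_json::{Map, Value};

    fn flatten_property_map(val: &Value) -> Option<Value> {
        let mut flat = Map::new();
        for entry in val.get("map")?.get("data")?.as_array()? {
            let key = entry.get("key")?.as_str()?;
            let value = entry.get("value")?.get("value")?.as_str()?;
            flat.insert(key.to_string(), Value::String(value.to_string()));
        }
        Some(Value::Object(flat))
    }

    // flatten_property_map(&serde_json::json!({"map": {"data": [
    //     {"key": "creation_time_sec", "value": {"type": "u64", "value": "1666125588"}}
    // ]}})) == Some(serde_json::json!({"creation_time_sec": "1666125588"}))
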
- { - let activity = match staking_event { - StakeEvent::AddStakeEvent(inner) => DelegatedStakingActivity { - transaction_version: txn_version, - event_index, - delegator_address: standardize_address(&inner.delegator_address), - pool_address: standardize_address(&inner.pool_address), - event_type: event.type_str.clone(), - amount: u64_to_bigdecimal(inner.amount_added), - }, - StakeEvent::UnlockStakeEvent(inner) => DelegatedStakingActivity { - transaction_version: txn_version, - event_index, - delegator_address: standardize_address(&inner.delegator_address), - pool_address: standardize_address(&inner.pool_address), - event_type: event.type_str.clone(), - amount: u64_to_bigdecimal(inner.amount_unlocked), - }, - StakeEvent::WithdrawStakeEvent(inner) => DelegatedStakingActivity { - transaction_version: txn_version, - event_index, - delegator_address: standardize_address(&inner.delegator_address), - pool_address: standardize_address(&inner.pool_address), - event_type: event.type_str.clone(), - amount: u64_to_bigdecimal(inner.amount_withdrawn), - }, - StakeEvent::ReactivateStakeEvent(inner) => DelegatedStakingActivity { - transaction_version: txn_version, - event_index, - delegator_address: standardize_address(&inner.delegator_address), - pool_address: standardize_address(&inner.pool_address), - event_type: event.type_str.clone(), - amount: u64_to_bigdecimal(inner.amount_reactivated), - }, - StakeEvent::DistributeRewardsEvent(inner) => DelegatedStakingActivity { - transaction_version: txn_version, - event_index, - delegator_address: "".to_string(), - pool_address: standardize_address(&inner.pool_address), - event_type: event.type_str.clone(), - amount: u64_to_bigdecimal(inner.rewards_amount), - }, - _ => continue, - }; - delegator_activities.push(activity); - } - } - Ok(delegator_activities) - } -} diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/stake_models/delegator_balances.rs b/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/stake_models/delegator_balances.rs deleted file mode 100644 index e174fa3fa1971..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/stake_models/delegator_balances.rs +++ /dev/null @@ -1,417 +0,0 @@ -// Copyright © Aptos Foundation - -// This is required because a diesel macro makes clippy sad -#![allow(clippy::extra_unused_lifetimes)] - -use super::delegator_pools::{DelegatorPool, DelegatorPoolBalanceMetadata, PoolBalanceMetadata}; -use crate::{ - models::{ - default_models::move_tables::TableItem, - token_models::collection_datas::{QUERY_RETRIES, QUERY_RETRY_DELAY_MS}, - }, - schema::current_delegator_balances, - utils::{database::PgPoolConnection, util::standardize_address}, -}; -use anyhow::Context; -use aptos_protos::transaction::v1::{ - write_set_change::Change, DeleteTableItem, Transaction, WriteResource, WriteTableItem, -}; -use bigdecimal::{BigDecimal, Zero}; -use diesel::{prelude::*, ExpressionMethods}; -use field_count::FieldCount; -use serde::{Deserialize, Serialize}; -use std::collections::HashMap; - -pub type TableHandle = String; -pub type Address = String; -pub type ShareToStakingPoolMapping = HashMap; -pub type ShareToPoolMapping = HashMap; -pub type CurrentDelegatorBalancePK = (Address, Address, String); -pub type CurrentDelegatorBalanceMap = HashMap; - -#[derive(Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] -#[diesel(primary_key(delegator_address, pool_address, pool_type))] -#[diesel(table_name = current_delegator_balances)] -pub struct CurrentDelegatorBalance { - pub 
delegator_address: String, - pub pool_address: String, - pub pool_type: String, - pub table_handle: String, - pub last_transaction_version: i64, - pub shares: BigDecimal, - pub parent_table_handle: String, -} - -#[derive(Debug, Identifiable, Queryable)] -#[diesel(primary_key(delegator_address, pool_address, pool_type))] -#[diesel(table_name = current_delegator_balances)] -pub struct CurrentDelegatorBalanceQuery { - pub delegator_address: String, - pub pool_address: String, - pub pool_type: String, - pub table_handle: String, - pub last_transaction_version: i64, - pub inserted_at: chrono::NaiveDateTime, - pub shares: BigDecimal, - pub parent_table_handle: String, -} - -impl CurrentDelegatorBalance { - /// Getting active share balances. Only 1 active pool per staking pool tracked in a single table - pub fn get_active_share_from_write_table_item( - write_table_item: &WriteTableItem, - txn_version: i64, - active_pool_to_staking_pool: &ShareToStakingPoolMapping, - ) -> anyhow::Result> { - let table_handle = standardize_address(&write_table_item.handle.to_string()); - // The mapping will tell us if the table item is an active share table - if let Some(pool_balance) = active_pool_to_staking_pool.get(&table_handle) { - let pool_address = pool_balance.staking_pool_address.clone(); - let delegator_address = standardize_address(&write_table_item.key.to_string()); - - // Convert to TableItem model. Some fields are just placeholders - let (table_item_model, _) = - TableItem::from_write_table_item(write_table_item, 0, txn_version, 0); - - let shares: BigDecimal = table_item_model - .decoded_value - .as_ref() - .unwrap() - .as_str() - .unwrap() - .parse::() - .context(format!( - "cannot parse string as u128: {:?}, version {}", - table_item_model.decoded_value.as_ref(), - txn_version - ))?; - let shares = shares / &pool_balance.scaling_factor; - Ok(Some(Self { - delegator_address, - pool_address, - pool_type: "active_shares".to_string(), - table_handle: table_handle.clone(), - last_transaction_version: txn_version, - shares, - parent_table_handle: table_handle, - })) - } else { - Ok(None) - } - } - - /// Getting inactive share balances. There could be multiple inactive pool per staking pool so we have - /// 2 layers of mapping (table w/ all inactive pools -> staking pool, table w/ delegator inactive shares -> each inactive pool) - pub fn get_inactive_share_from_write_table_item( - write_table_item: &WriteTableItem, - txn_version: i64, - inactive_pool_to_staking_pool: &ShareToStakingPoolMapping, - inactive_share_to_pool: &ShareToPoolMapping, - conn: &mut PgPoolConnection, - ) -> anyhow::Result> { - let table_handle = standardize_address(&write_table_item.handle.to_string()); - // The mapping will tell us if the table item belongs to an inactive pool - if let Some(pool_balance) = inactive_share_to_pool.get(&table_handle) { - // If it is, we need to get the inactive staking pool handle and use it to look up the staking pool - let inactive_pool_handle = pool_balance.parent_table_handle.clone(); - - let pool_address = match inactive_pool_to_staking_pool - .get(&inactive_pool_handle) - .map(|metadata| metadata.staking_pool_address.clone()) - { - Some(pool_address) => pool_address, - None => { - match Self::get_staking_pool_from_inactive_share_handle( - conn, - &inactive_pool_handle, - ) { - Ok(pool) => pool, - Err(_) => { - tracing::error!( - transaction_version = txn_version, - lookup_key = &inactive_pool_handle, - "Failed to get staking pool address from inactive share handle. 
You probably should backfill db.", - ); - return Ok(None); - }, - } - }, - }; - let delegator_address = standardize_address(&write_table_item.key.to_string()); - // Convert to TableItem model. Some fields are just placeholders - let (table_item_model, _) = - TableItem::from_write_table_item(write_table_item, 0, txn_version, 0); - - let shares: BigDecimal = table_item_model - .decoded_value - .as_ref() - .unwrap() - .as_str() - .unwrap() - .parse::() - .context(format!( - "cannot parse string as u128: {:?}, version {}", - table_item_model.decoded_value.as_ref(), - txn_version - ))?; - let shares = shares / &pool_balance.scaling_factor; - Ok(Some(Self { - delegator_address, - pool_address, - pool_type: "inactive_shares".to_string(), - table_handle: table_handle.clone(), - last_transaction_version: txn_version, - shares, - parent_table_handle: inactive_pool_handle, - })) - } else { - Ok(None) - } - } - - // Setting amount to 0 if table item is deleted - pub fn get_active_share_from_delete_table_item( - delete_table_item: &DeleteTableItem, - txn_version: i64, - active_pool_to_staking_pool: &ShareToStakingPoolMapping, - ) -> anyhow::Result> { - let table_handle = standardize_address(&delete_table_item.handle.to_string()); - // The mapping will tell us if the table item is an active share table - if let Some(pool_balance) = active_pool_to_staking_pool.get(&table_handle) { - let delegator_address = standardize_address(&delete_table_item.key.to_string()); - - return Ok(Some(Self { - delegator_address, - pool_address: pool_balance.staking_pool_address.clone(), - pool_type: "active_shares".to_string(), - table_handle: table_handle.clone(), - last_transaction_version: txn_version, - shares: BigDecimal::zero(), - parent_table_handle: table_handle, - })); - } - Ok(None) - } - - // Setting amount to 0 if table item is deleted - pub fn get_inactive_share_from_delete_table_item( - delete_table_item: &DeleteTableItem, - txn_version: i64, - inactive_pool_to_staking_pool: &ShareToStakingPoolMapping, - inactive_share_to_pool: &ShareToPoolMapping, - conn: &mut PgPoolConnection, - ) -> anyhow::Result> { - let table_handle = standardize_address(&delete_table_item.handle.to_string()); - // The mapping will tell us if the table item belongs to an inactive pool - if let Some(pool_balance) = inactive_share_to_pool.get(&table_handle) { - // If it is, we need to get the inactive staking pool handle and use it to look up the staking pool - let inactive_pool_handle = pool_balance.parent_table_handle.clone(); - - let pool_address = match inactive_pool_to_staking_pool - .get(&inactive_pool_handle) - .map(|metadata| metadata.staking_pool_address.clone()) - { - Some(pool_address) => pool_address, - None => { - Self::get_staking_pool_from_inactive_share_handle(conn, &inactive_pool_handle) - .context(format!("Failed to get staking pool address from inactive share handle {}, txn version {}", - inactive_pool_handle, txn_version - ))? 
- }, - }; - let delegator_address = standardize_address(&delete_table_item.key.to_string()); - - return Ok(Some(Self { - delegator_address, - pool_address, - pool_type: "inactive_shares".to_string(), - table_handle: table_handle.clone(), - last_transaction_version: txn_version, - shares: BigDecimal::zero(), - parent_table_handle: table_handle, - })); - } - Ok(None) - } - - /// Key is the inactive share table handle obtained from 0x1::delegation_pool::DelegationPool - /// Value is the same metadata although it's not really used - pub fn get_active_pool_to_staking_pool_mapping( - write_resource: &WriteResource, - txn_version: i64, - ) -> anyhow::Result> { - if let Some(balance) = DelegatorPool::get_delegated_pool_metadata_from_write_resource( - write_resource, - txn_version, - )? { - Ok(Some(HashMap::from([( - balance.active_share_table_handle.clone(), - balance, - )]))) - } else { - Ok(None) - } - } - - /// Key is the inactive share table handle obtained from 0x1::delegation_pool::DelegationPool - /// Value is the same metadata although it's not really used - pub fn get_inactive_pool_to_staking_pool_mapping( - write_resource: &WriteResource, - txn_version: i64, - ) -> anyhow::Result> { - if let Some(balance) = DelegatorPool::get_delegated_pool_metadata_from_write_resource( - write_resource, - txn_version, - )? { - Ok(Some(HashMap::from([( - balance.inactive_share_table_handle.clone(), - balance, - )]))) - } else { - Ok(None) - } - } - - /// Key is the inactive share table handle obtained from 0x1::pool_u64_unbound::Pool - /// Value is the 0x1::pool_u64_unbound::Pool metadata that will be used to populate a user's inactive balance - pub fn get_inactive_share_to_pool_mapping( - write_table_item: &WriteTableItem, - txn_version: i64, - ) -> anyhow::Result> { - if let Some(balance) = DelegatorPool::get_inactive_pool_metadata_from_write_table_item( - write_table_item, - txn_version, - )? 
{ - Ok(Some(HashMap::from([( - balance.shares_table_handle.clone(), - balance, - )]))) - } else { - Ok(None) - } - } - - pub fn get_staking_pool_from_inactive_share_handle( - conn: &mut PgPoolConnection, - table_handle: &str, - ) -> anyhow::Result { - let mut retried = 0; - while retried < QUERY_RETRIES { - retried += 1; - match CurrentDelegatorBalanceQuery::get_by_inactive_share_handle(conn, table_handle) { - Ok(current_delegator_balance) => return Ok(current_delegator_balance.pool_address), - Err(_) => { - std::thread::sleep(std::time::Duration::from_millis(QUERY_RETRY_DELAY_MS)); - }, - } - } - Err(anyhow::anyhow!( - "Failed to get staking pool address from inactive share handle" - )) - } - - pub fn from_transaction( - transaction: &Transaction, - conn: &mut PgPoolConnection, - ) -> anyhow::Result { - let mut active_pool_to_staking_pool: ShareToStakingPoolMapping = HashMap::new(); - let mut inactive_pool_to_staking_pool: ShareToStakingPoolMapping = HashMap::new(); - let mut inactive_share_to_pool: ShareToPoolMapping = HashMap::new(); - let mut current_delegator_balances: CurrentDelegatorBalanceMap = HashMap::new(); - let txn_version = transaction.version as i64; - - // Do a first pass to get the mapping of active_share table handles to staking pool resource let txn_version = transaction.version as i64; - for wsc in &transaction.info.as_ref().unwrap().changes { - if let Change::WriteResource(write_resource) = wsc.change.as_ref().unwrap() { - if let Some(map) = - Self::get_active_pool_to_staking_pool_mapping(write_resource, txn_version) - .unwrap() - { - active_pool_to_staking_pool.extend(map); - } - if let Some(map) = - Self::get_inactive_pool_to_staking_pool_mapping(write_resource, txn_version) - .unwrap() - { - inactive_pool_to_staking_pool.extend(map); - } - } - - if let Change::WriteTableItem(table_item) = wsc.change.as_ref().unwrap() { - if let Some(map) = - Self::get_inactive_share_to_pool_mapping(table_item, txn_version).unwrap() - { - inactive_share_to_pool.extend(map); - } - } - } - // Now make a pass through table items to get the actual delegator balances - for wsc in &transaction.info.as_ref().unwrap().changes { - let maybe_delegator_balance = match wsc.change.as_ref().unwrap() { - Change::DeleteTableItem(table_item) => { - if let Some(balance) = Self::get_active_share_from_delete_table_item( - table_item, - txn_version, - &active_pool_to_staking_pool, - ) - .unwrap() - { - Some(balance) - } else { - Self::get_inactive_share_from_delete_table_item( - table_item, - txn_version, - &inactive_pool_to_staking_pool, - &inactive_share_to_pool, - conn, - ) - .unwrap() - } - }, - Change::WriteTableItem(table_item) => { - if let Some(balance) = Self::get_active_share_from_write_table_item( - table_item, - txn_version, - &active_pool_to_staking_pool, - ) - .unwrap() - { - Some(balance) - } else { - Self::get_inactive_share_from_write_table_item( - table_item, - txn_version, - &inactive_pool_to_staking_pool, - &inactive_share_to_pool, - conn, - ) - .unwrap() - } - }, - _ => None, - }; - if let Some(delegator_balance) = maybe_delegator_balance { - current_delegator_balances.insert( - ( - delegator_balance.delegator_address.clone(), - delegator_balance.pool_address.clone(), - delegator_balance.pool_type.clone(), - ), - delegator_balance, - ); - } - } - Ok(current_delegator_balances) - } -} - -impl CurrentDelegatorBalanceQuery { - pub fn get_by_inactive_share_handle( - conn: &mut PgPoolConnection, - table_handle: &str, - ) -> diesel::QueryResult { - current_delegator_balances::table - 
.filter(current_delegator_balances::parent_table_handle.eq(table_handle)) - .first::(conn) - } -} diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/stake_models/delegator_pools.rs b/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/stake_models/delegator_pools.rs deleted file mode 100644 index 915c1e9a14285..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/stake_models/delegator_pools.rs +++ /dev/null @@ -1,225 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -// This is required because a diesel macro makes clippy sad -#![allow(clippy::extra_unused_lifetimes)] - -use super::stake_utils::{StakeResource, StakeTableItem}; -use crate::{ - schema::{ - current_delegated_staking_pool_balances, delegated_staking_pool_balances, - delegated_staking_pools, - }, - utils::util::standardize_address, -}; -use aptos_protos::transaction::v1::{ - transaction::TxnData, write_set_change::Change, Transaction, WriteResource, WriteTableItem, -}; -use bigdecimal::BigDecimal; -use field_count::FieldCount; -use serde::{Deserialize, Serialize}; -use std::collections::HashMap; - -type StakingPoolAddress = String; -pub type DelegatorPoolMap = HashMap; -pub type DelegatorPoolBalanceMap = HashMap; - -// All pools -#[derive(Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] -#[diesel(primary_key(staking_pool_address))] -#[diesel(table_name = delegated_staking_pools)] -pub struct DelegatorPool { - pub staking_pool_address: String, - pub first_transaction_version: i64, -} - -// Metadata to fill pool balances and delegator balance -#[derive(Debug, Deserialize, Serialize)] -pub struct DelegatorPoolBalanceMetadata { - pub transaction_version: i64, - pub staking_pool_address: String, - pub total_coins: BigDecimal, - pub total_shares: BigDecimal, - pub scaling_factor: BigDecimal, - pub operator_commission_percentage: BigDecimal, - pub active_share_table_handle: String, - pub inactive_share_table_handle: String, -} - -// Similar metadata but specifically for 0x1::pool_u64_unbound::Pool -#[derive(Debug, Deserialize, Serialize)] -pub struct PoolBalanceMetadata { - pub transaction_version: i64, - pub total_coins: BigDecimal, - pub total_shares: BigDecimal, - pub scaling_factor: BigDecimal, - pub shares_table_handle: String, - pub parent_table_handle: String, -} - -// Pools balances -#[derive(Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] -#[diesel(primary_key(transaction_version, staking_pool_address))] -#[diesel(table_name = delegated_staking_pool_balances)] -pub struct DelegatorPoolBalance { - pub transaction_version: i64, - pub staking_pool_address: String, - pub total_coins: BigDecimal, - pub total_shares: BigDecimal, - pub operator_commission_percentage: BigDecimal, - pub inactive_table_handle: String, - pub active_table_handle: String, -} - -// All pools w latest balances (really a more comprehensive version than DelegatorPool) -#[derive(Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] -#[diesel(primary_key(staking_pool_address))] -#[diesel(table_name = current_delegated_staking_pool_balances)] -pub struct CurrentDelegatorPoolBalance { - pub staking_pool_address: String, - pub total_coins: BigDecimal, - pub total_shares: BigDecimal, - pub last_transaction_version: i64, - pub operator_commission_percentage: BigDecimal, - pub inactive_table_handle: String, - pub active_table_handle: String, -} - -impl DelegatorPool { - pub fn from_transaction( - transaction: &Transaction, - ) -> 
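
The delegator balance tracking deleted above works in two passes over a transaction's write-set changes: the first pass reads pool resources to learn which table handles belong to which staking pool, and the second pass turns raw share table items into per-delegator rows, keeping only items whose handle was recognized. A compact stand-in sketch of that shape, with simplified types and no scaling-factor math or database fallback:

    use std::collections::HashMap;

    struct DelegatorShareRow {
        pool_address: String,
        delegator_address: String,
        raw_shares: u128,
    }

    fn balances_from_items(
        handle_to_pool: &HashMap<String, String>, // built in pass 1 from pool resources
        table_items: &[(String, String, u128)],   // (table_handle, delegator, raw shares)
    ) -> Vec<DelegatorShareRow> {
        table_items
            .iter()
            .filter_map(|(handle, delegator, shares)| {
                handle_to_pool.get(handle).map(|pool| DelegatorShareRow {
                    pool_address: pool.clone(),
                    delegator_address: delegator.clone(),
                    raw_shares: *shares,
                })
            })
            .collect()
    }
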
anyhow::Result<( - DelegatorPoolMap, - Vec, - DelegatorPoolBalanceMap, - )> { - let mut delegator_pool_map = HashMap::new(); - let mut delegator_pool_balances = vec![]; - let mut delegator_pool_balances_map = HashMap::new(); - let txn_data = transaction - .txn_data - .as_ref() - .expect("Txn Data doesn't exit!"); - let txn_version = transaction.version as i64; - - // Do a first pass to get the mapping of active_share table handles to staking pool addresses - if let TxnData::User(_) = txn_data { - let changes = &transaction - .info - .as_ref() - .expect("Transaction info doesn't exist!") - .changes; - for wsc in changes { - if let Change::WriteResource(write_resource) = wsc.change.as_ref().unwrap() { - let maybe_write_resource = - Self::from_write_resource(write_resource, txn_version)?; - if let Some((pool, pool_balances, current_pool_balances)) = maybe_write_resource - { - let staking_pool_address = pool.staking_pool_address.clone(); - delegator_pool_map.insert(staking_pool_address.clone(), pool); - delegator_pool_balances.push(pool_balances); - delegator_pool_balances_map - .insert(staking_pool_address.clone(), current_pool_balances); - } - } - } - } - Ok(( - delegator_pool_map, - delegator_pool_balances, - delegator_pool_balances_map, - )) - } - - pub fn get_delegated_pool_metadata_from_write_resource( - write_resource: &WriteResource, - txn_version: i64, - ) -> anyhow::Result> { - if let Some(StakeResource::DelegationPool(inner)) = - StakeResource::from_write_resource(write_resource, txn_version)? - { - let staking_pool_address = standardize_address(&write_resource.address.to_string()); - let total_coins = inner.active_shares.total_coins; - let total_shares = - &inner.active_shares.total_shares / &inner.active_shares.scaling_factor; - Ok(Some(DelegatorPoolBalanceMetadata { - transaction_version: txn_version, - staking_pool_address, - total_coins, - total_shares, - scaling_factor: inner.active_shares.scaling_factor, - operator_commission_percentage: inner.operator_commission_percentage.clone(), - active_share_table_handle: inner.active_shares.shares.inner.get_handle(), - inactive_share_table_handle: inner.inactive_shares.get_handle(), - })) - } else { - Ok(None) - } - } - - pub fn get_inactive_pool_metadata_from_write_table_item( - write_table_item: &WriteTableItem, - txn_version: i64, - ) -> anyhow::Result> { - let table_item_data = write_table_item.data.as_ref().unwrap(); - - if let Some(StakeTableItem::Pool(inner)) = &StakeTableItem::from_table_item_type( - table_item_data.value_type.as_str(), - &table_item_data.value, - txn_version, - )? { - let total_coins = inner.total_coins.clone(); - let total_shares = &inner.total_shares / &inner.scaling_factor; - Ok(Some(PoolBalanceMetadata { - transaction_version: txn_version, - total_coins, - total_shares, - scaling_factor: inner.scaling_factor.clone(), - shares_table_handle: inner.shares.inner.get_handle(), - parent_table_handle: standardize_address(&write_table_item.handle.to_string()), - })) - } else { - Ok(None) - } - } - - pub fn from_write_resource( - write_resource: &WriteResource, - txn_version: i64, - ) -> anyhow::Result> { - if let Some(balance) = - &Self::get_delegated_pool_metadata_from_write_resource(write_resource, txn_version)? 
- { - let staking_pool_address = balance.staking_pool_address.clone(); - let total_coins = balance.total_coins.clone(); - let total_shares = balance.total_shares.clone(); - let transaction_version = balance.transaction_version; - Ok(Some(( - Self { - staking_pool_address: staking_pool_address.clone(), - first_transaction_version: transaction_version, - }, - DelegatorPoolBalance { - transaction_version, - staking_pool_address: staking_pool_address.clone(), - total_coins: total_coins.clone(), - total_shares: total_shares.clone(), - operator_commission_percentage: balance.operator_commission_percentage.clone(), - inactive_table_handle: balance.inactive_share_table_handle.clone(), - active_table_handle: balance.active_share_table_handle.clone(), - }, - CurrentDelegatorPoolBalance { - staking_pool_address, - total_coins, - total_shares, - last_transaction_version: transaction_version, - operator_commission_percentage: balance.operator_commission_percentage.clone(), - inactive_table_handle: balance.inactive_share_table_handle.clone(), - active_table_handle: balance.active_share_table_handle.clone(), - }, - ))) - } else { - Ok(None) - } - } -} diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/stake_models/mod.rs b/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/stake_models/mod.rs deleted file mode 100644 index 0f790d4f6bcae..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/stake_models/mod.rs +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -pub mod delegator_activities; -pub mod delegator_balances; -pub mod delegator_pools; -pub mod proposal_votes; -pub mod stake_utils; -pub mod staking_pool_voter; diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/stake_models/proposal_votes.rs b/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/stake_models/proposal_votes.rs deleted file mode 100644 index a8a77580c0276..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/stake_models/proposal_votes.rs +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -// This is required because a diesel macro makes clippy sad -#![allow(clippy::extra_unused_lifetimes)] - -use super::stake_utils::StakeEvent; -use crate::{ - schema::proposal_votes, - utils::util::{parse_timestamp, standardize_address}, -}; -use aptos_protos::transaction::v1::{transaction::TxnData, Transaction}; -use bigdecimal::BigDecimal; -use field_count::FieldCount; -use serde::{Deserialize, Serialize}; - -#[derive(Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] -#[diesel(primary_key(transaction_version, proposal_id, voter_address))] -#[diesel(table_name = proposal_votes)] -pub struct ProposalVote { - pub transaction_version: i64, - pub proposal_id: i64, - pub voter_address: String, - pub staking_pool_address: String, - pub num_votes: BigDecimal, - pub should_pass: bool, - pub transaction_timestamp: chrono::NaiveDateTime, -} - -impl ProposalVote { - pub fn from_transaction(transaction: &Transaction) -> anyhow::Result> { - let mut proposal_votes = vec![]; - let txn_data = transaction - .txn_data - .as_ref() - .expect("Txn Data doesn't exit!"); - let txn_version = transaction.version as i64; - - if let TxnData::User(user_txn) = txn_data { - for event in &user_txn.events { - if let Some(StakeEvent::GovernanceVoteEvent(ev)) = - StakeEvent::from_event(event.type_str.as_str(), &event.data, txn_version)? 
- { - proposal_votes.push(Self { - transaction_version: txn_version, - proposal_id: ev.proposal_id as i64, - voter_address: standardize_address(&ev.voter), - staking_pool_address: standardize_address(&ev.stake_pool), - num_votes: ev.num_votes.clone(), - should_pass: ev.should_pass, - transaction_timestamp: parse_timestamp( - transaction.timestamp.as_ref().unwrap(), - txn_version, - ), - }); - } - } - } - Ok(proposal_votes) - } -} diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/stake_models/stake_utils.rs b/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/stake_models/stake_utils.rs deleted file mode 100644 index cb64ecc83a580..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/stake_models/stake_utils.rs +++ /dev/null @@ -1,223 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -use crate::{ - models::{default_models::move_resources::MoveResource, token_models::token_utils::Table}, - utils::util::{deserialize_from_string, standardize_address}, -}; -use anyhow::{Context, Result}; -use aptos_protos::transaction::v1::WriteResource; -use bigdecimal::BigDecimal; -use serde::{Deserialize, Serialize}; - -const STAKE_ADDR: &str = "0x0000000000000000000000000000000000000000000000000000000000000001"; -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct StakePoolResource { - delegated_voter: String, - operator_address: String, -} - -impl StakePoolResource { - pub fn get_delegated_voter(&self) -> String { - standardize_address(&self.delegated_voter) - } - - pub fn get_operator_address(&self) -> String { - standardize_address(&self.operator_address) - } -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct DelegationPoolResource { - pub active_shares: PoolResource, - pub inactive_shares: Table, - #[serde(deserialize_with = "deserialize_from_string")] - pub operator_commission_percentage: BigDecimal, -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct PoolResource { - pub shares: SharesInnerResource, - #[serde(deserialize_with = "deserialize_from_string")] - pub total_coins: BigDecimal, - #[serde(deserialize_with = "deserialize_from_string")] - pub total_shares: BigDecimal, - #[serde(deserialize_with = "deserialize_from_string")] - pub scaling_factor: BigDecimal, -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct SharesInnerResource { - pub inner: Table, -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct GovernanceVoteEvent { - #[serde(deserialize_with = "deserialize_from_string")] - pub proposal_id: u64, - pub voter: String, - pub stake_pool: String, - #[serde(deserialize_with = "deserialize_from_string")] - pub num_votes: BigDecimal, - pub should_pass: bool, -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct DistributeRewardsEvent { - pub pool_address: String, - #[serde(deserialize_with = "deserialize_from_string")] - pub rewards_amount: u64, -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct AddStakeEvent { - #[serde(deserialize_with = "deserialize_from_string")] - pub amount_added: u64, - pub delegator_address: String, - pub pool_address: String, -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct UnlockStakeEvent { - #[serde(deserialize_with = "deserialize_from_string")] - pub amount_unlocked: u64, - pub delegator_address: String, - pub pool_address: String, -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct WithdrawStakeEvent { - #[serde(deserialize_with = "deserialize_from_string")] - pub 
amount_withdrawn: u64, - pub delegator_address: String, - pub pool_address: String, -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct ReactivateStakeEvent { - #[serde(deserialize_with = "deserialize_from_string")] - pub amount_reactivated: u64, - pub delegator_address: String, - pub pool_address: String, -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub enum StakeTableItem { - Pool(PoolResource), -} - -impl StakeTableItem { - pub fn from_table_item_type( - data_type: &str, - data: &str, - txn_version: i64, - ) -> Result> { - match data_type { - "0x1::pool_u64_unbound::Pool" => { - serde_json::from_str(data).map(|inner| Some(StakeTableItem::Pool(inner))) - }, - _ => Ok(None), - } - .context(format!( - "version {} failed! failed to parse type {}, data {:?}", - txn_version, data_type, data - )) - } -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub enum StakeResource { - StakePool(StakePoolResource), - DelegationPool(DelegationPoolResource), -} - -impl StakeResource { - fn is_resource_supported(data_type: &str) -> bool { - [ - format!("{}::stake::StakePool", STAKE_ADDR), - format!("{}::delegation_pool::DelegationPool", STAKE_ADDR), - ] - .contains(&data_type.to_string()) - } - - fn from_resource(data_type: &str, data: &serde_json::Value, txn_version: i64) -> Result { - match data_type { - x if x == format!("{}::stake::StakePool", STAKE_ADDR) => { - serde_json::from_value(data.clone()) - .map(|inner| Some(StakeResource::StakePool(inner))) - }, - x if x == format!("{}::delegation_pool::DelegationPool", STAKE_ADDR) => { - serde_json::from_value(data.clone()) - .map(|inner| Some(StakeResource::DelegationPool(inner))) - }, - _ => Ok(None), - } - .context(format!( - "version {} failed! failed to parse type {}, data {:?}", - txn_version, data_type, data - ))? - .context(format!( - "Resource unsupported! Call is_resource_supported first. 
version {} type {}", - txn_version, data_type - )) - } - - pub fn from_write_resource( - write_resource: &WriteResource, - txn_version: i64, - ) -> Result> { - let type_str = MoveResource::get_outer_type_from_resource(write_resource); - if !Self::is_resource_supported(type_str.as_str()) { - return Ok(None); - } - let resource = MoveResource::from_write_resource( - write_resource, - 0, // Placeholder, this isn't used anyway - txn_version, - 0, // Placeholder, this isn't used anyway - ); - Ok(Some(Self::from_resource( - &type_str, - resource.data.as_ref().unwrap(), - txn_version, - )?)) - } -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub enum StakeEvent { - GovernanceVoteEvent(GovernanceVoteEvent), - DistributeRewardsEvent(DistributeRewardsEvent), - AddStakeEvent(AddStakeEvent), - UnlockStakeEvent(UnlockStakeEvent), - WithdrawStakeEvent(WithdrawStakeEvent), - ReactivateStakeEvent(ReactivateStakeEvent), -} - -impl StakeEvent { - pub fn from_event(data_type: &str, data: &str, txn_version: i64) -> Result> { - match data_type { - "0x1::aptos_governance::VoteEvent" => { - serde_json::from_str(data).map(|inner| Some(StakeEvent::GovernanceVoteEvent(inner))) - }, - "0x1::stake::DistributeRewardsEvent" => serde_json::from_str(data) - .map(|inner| Some(StakeEvent::DistributeRewardsEvent(inner))), - "0x1::delegation_pool::AddStakeEvent" => { - serde_json::from_str(data).map(|inner| Some(StakeEvent::AddStakeEvent(inner))) - }, - "0x1::delegation_pool::UnlockStakeEvent" => { - serde_json::from_str(data).map(|inner| Some(StakeEvent::UnlockStakeEvent(inner))) - }, - "0x1::delegation_pool::WithdrawStakeEvent" => { - serde_json::from_str(data).map(|inner| Some(StakeEvent::WithdrawStakeEvent(inner))) - }, - "0x1::delegation_pool::ReactivateStakeEvent" => serde_json::from_str(data) - .map(|inner| Some(StakeEvent::ReactivateStakeEvent(inner))), - _ => Ok(None), - } - .context(format!( - "version {} failed! 
failed to parse type {}, data {:?}", - txn_version, data_type, data - )) - } -} diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/stake_models/staking_pool_voter.rs b/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/stake_models/staking_pool_voter.rs deleted file mode 100644 index b5472d42fe337..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/stake_models/staking_pool_voter.rs +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -// This is required because a diesel macro makes clippy sad -#![allow(clippy::extra_unused_lifetimes)] - -use super::stake_utils::StakeResource; -use crate::{schema::current_staking_pool_voter, utils::util::standardize_address}; -use aptos_protos::transaction::v1::{write_set_change::Change, Transaction}; -use field_count::FieldCount; -use serde::{Deserialize, Serialize}; -use std::collections::HashMap; - -type StakingPoolAddress = String; -pub type StakingPoolVoterMap = HashMap; - -#[derive(Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] -#[diesel(primary_key(staking_pool_address))] -#[diesel(table_name = current_staking_pool_voter)] -pub struct CurrentStakingPoolVoter { - pub staking_pool_address: String, - pub voter_address: String, - pub last_transaction_version: i64, - pub operator_address: String, -} - -impl CurrentStakingPoolVoter { - pub fn from_transaction(transaction: &Transaction) -> anyhow::Result { - let mut staking_pool_voters = HashMap::new(); - - let txn_version = transaction.version as i64; - for wsc in &transaction.info.as_ref().unwrap().changes { - if let Change::WriteResource(write_resource) = wsc.change.as_ref().unwrap() { - if let Some(StakeResource::StakePool(inner)) = - StakeResource::from_write_resource(write_resource, txn_version)? - { - let staking_pool_address = - standardize_address(&write_resource.address.to_string()); - staking_pool_voters.insert(staking_pool_address.clone(), Self { - staking_pool_address, - voter_address: inner.get_delegated_voter(), - last_transaction_version: txn_version, - operator_address: inner.get_operator_address(), - }); - } - } - } - - Ok(staking_pool_voters) - } -} diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/ans_lookup.rs b/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/ans_lookup.rs deleted file mode 100644 index 9898a928a6288..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/ans_lookup.rs +++ /dev/null @@ -1,180 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -// This is required because a diesel macro makes clippy sad -#![allow(clippy::extra_unused_lifetimes)] -#![allow(clippy::unused_unit)] - -use crate::{ - schema::current_ans_lookup, - utils::util::{ - bigdecimal_to_u64, deserialize_from_string, parse_timestamp_secs, standardize_address, - }, -}; -use aptos_protos::transaction::v1::{ - move_type::Content, transaction::TxnData, Transaction as TransactionPB, -}; -use bigdecimal::BigDecimal; -use field_count::FieldCount; -use serde::{Deserialize, Serialize}; -use std::collections::HashMap; - -type Domain = String; -type Subdomain = String; -// PK of current_ans_lookup, i.e. 
domain and subdomain name -pub type CurrentAnsLookupPK = (Domain, Subdomain); - -#[derive(Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] -#[diesel(primary_key(domain, subdomain))] -#[diesel(table_name = current_ans_lookup)] -#[diesel(treat_none_as_null = true)] -pub struct CurrentAnsLookup { - pub domain: String, - pub subdomain: String, - pub registered_address: Option, - pub last_transaction_version: i64, - pub expiration_timestamp: chrono::NaiveDateTime, - pub token_name: String, -} - -pub enum ANSEvent { - SetNameAddressEventV1(SetNameAddressEventV1), - RegisterNameEventV1(RegisterNameEventV1), -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct SetNameAddressEventV1 { - subdomain_name: OptionalString, - domain_name: String, - new_address: OptionalString, - #[serde(deserialize_with = "deserialize_from_string")] - expiration_time_secs: BigDecimal, -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct RegisterNameEventV1 { - subdomain_name: OptionalString, - domain_name: String, - #[serde(deserialize_with = "deserialize_from_string")] - expiration_time_secs: BigDecimal, -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -struct OptionalString { - vec: Vec, -} - -impl OptionalString { - fn get_string(&self) -> Option { - if self.vec.is_empty() { - None - } else { - Some(self.vec[0].clone()) - } - } -} - -impl CurrentAnsLookup { - pub fn from_transaction( - transaction: &TransactionPB, - ans_contract_address: Option, - ) -> HashMap { - let mut current_ans_lookups: HashMap = HashMap::new(); - if let Some(addr) = ans_contract_address { - // Extracts events and user request from genesis and user transactions. Other transactions won't have coin events - let txn_data = transaction - .txn_data - .as_ref() - .expect("Txn Data doesn't exit!"); - if let TxnData::User(user_txn) = txn_data { - for event in &user_txn.events { - let (event_addr, event_type) = if let Content::Struct(inner) = - event.r#type.as_ref().unwrap().content.as_ref().unwrap() - { - ( - inner.address.to_string(), - format!("{}::{}", inner.module, inner.name), - ) - } else { - continue; - }; - if event_addr != addr { - continue; - } - let txn_version = transaction.version as i64; - let maybe_ans_event = match event_type.as_str() { - "domains::SetNameAddressEventV1" => { - serde_json::from_str(event.data.as_str()) - .map(|inner| Some(ANSEvent::SetNameAddressEventV1(inner))) - }, - "domains::RegisterNameEventV1" => serde_json::from_str(event.data.as_str()) - .map(|inner| Some(ANSEvent::RegisterNameEventV1(inner))), - _ => Ok(None), - } - .unwrap_or_else(|e| { - panic!( - "version {} failed! failed to parse type {}, data {:?}. 
Error: {:?}", - txn_version, event_type, event.data, e - ) - }); - if let Some(ans_event) = maybe_ans_event { - let current_ans_lookup = match ans_event { - ANSEvent::SetNameAddressEventV1(inner) => { - let expiration_timestamp = parse_timestamp_secs( - bigdecimal_to_u64(&inner.expiration_time_secs), - txn_version, - ); - let subdomain = - inner.subdomain_name.get_string().unwrap_or_default(); - let mut token_name = format!("{}.apt", &inner.domain_name); - if !subdomain.is_empty() { - token_name = format!("{}.{}", &subdomain, token_name); - } - Self { - domain: inner.domain_name, - subdomain, - registered_address: inner - .new_address - .get_string() - .map(|s| standardize_address(&s)), - last_transaction_version: txn_version, - expiration_timestamp, - token_name, - } - }, - ANSEvent::RegisterNameEventV1(inner) => { - let expiration_timestamp = parse_timestamp_secs( - bigdecimal_to_u64(&inner.expiration_time_secs), - txn_version, - ); - let subdomain = - inner.subdomain_name.get_string().unwrap_or_default(); - let mut token_name = format!("{}.apt", &inner.domain_name); - if !subdomain.is_empty() { - token_name = format!("{}.{}", &subdomain, token_name); - } - Self { - domain: inner.domain_name, - subdomain, - registered_address: None, - last_transaction_version: txn_version, - expiration_timestamp, - token_name, - } - }, - }; - - current_ans_lookups.insert( - ( - current_ans_lookup.domain.clone(), - current_ans_lookup.subdomain.clone(), - ), - current_ans_lookup, - ); - } - } - } - } - current_ans_lookups - } -} diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/collection_datas.rs b/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/collection_datas.rs deleted file mode 100644 index 0d170e1c874f3..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/collection_datas.rs +++ /dev/null @@ -1,194 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -// This is required because a diesel macro makes clippy sad -#![allow(clippy::extra_unused_lifetimes)] -#![allow(clippy::unused_unit)] - -use super::{ - token_utils::{CollectionDataIdType, TokenWriteSet}, - tokens::TableHandleToOwner, -}; -use crate::{ - schema::{collection_datas, current_collection_datas}, - utils::{database::PgPoolConnection, util::standardize_address}, -}; -use aptos_protos::transaction::v1::WriteTableItem; -use bigdecimal::BigDecimal; -use diesel::{prelude::*, ExpressionMethods}; -use field_count::FieldCount; -use serde::{Deserialize, Serialize}; - -pub const QUERY_RETRIES: u32 = 5; -pub const QUERY_RETRY_DELAY_MS: u64 = 500; -#[derive(Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] -#[diesel(primary_key(collection_data_id_hash, transaction_version))] -#[diesel(table_name = collection_datas)] -pub struct CollectionData { - pub collection_data_id_hash: String, - pub transaction_version: i64, - pub creator_address: String, - pub collection_name: String, - pub description: String, - pub metadata_uri: String, - pub supply: BigDecimal, - pub maximum: BigDecimal, - pub maximum_mutable: bool, - pub uri_mutable: bool, - pub description_mutable: bool, - pub table_handle: String, - pub transaction_timestamp: chrono::NaiveDateTime, -} - -#[derive(Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] -#[diesel(primary_key(collection_data_id_hash))] -#[diesel(table_name = current_collection_datas)] -pub struct CurrentCollectionData { - pub collection_data_id_hash: String, - pub creator_address: 
String, - pub collection_name: String, - pub description: String, - pub metadata_uri: String, - pub supply: BigDecimal, - pub maximum: BigDecimal, - pub maximum_mutable: bool, - pub uri_mutable: bool, - pub description_mutable: bool, - pub last_transaction_version: i64, - pub table_handle: String, - pub last_transaction_timestamp: chrono::NaiveDateTime, -} - -/// Need a separate struct for queryable because we don't want to define the inserted_at column (letting DB fill) -#[derive(Debug, Identifiable, Queryable)] -#[diesel(primary_key(collection_data_id_hash))] -#[diesel(table_name = current_collection_datas)] -pub struct CurrentCollectionDataQuery { - pub collection_data_id_hash: String, - pub creator_address: String, - pub collection_name: String, - pub description: String, - pub metadata_uri: String, - pub supply: BigDecimal, - pub maximum: BigDecimal, - pub maximum_mutable: bool, - pub uri_mutable: bool, - pub description_mutable: bool, - pub last_transaction_version: i64, - pub inserted_at: chrono::NaiveDateTime, - pub table_handle: String, - pub last_transaction_timestamp: chrono::NaiveDateTime, -} - -impl CollectionData { - pub fn from_write_table_item( - table_item: &WriteTableItem, - txn_version: i64, - txn_timestamp: chrono::NaiveDateTime, - table_handle_to_owner: &TableHandleToOwner, - conn: &mut PgPoolConnection, - ) -> anyhow::Result> { - let table_item_data = table_item.data.as_ref().unwrap(); - - let maybe_collection_data = match TokenWriteSet::from_table_item_type( - table_item_data.value_type.as_str(), - &table_item_data.value, - txn_version, - )? { - Some(TokenWriteSet::CollectionData(inner)) => Some(inner), - _ => None, - }; - if let Some(collection_data) = maybe_collection_data { - let table_handle = table_item.handle.to_string(); - let maybe_creator_address = table_handle_to_owner - .get(&standardize_address(&table_handle)) - .map(|table_metadata| table_metadata.get_owner_address()); - let mut creator_address = match maybe_creator_address { - Some(ca) => ca, - None => match Self::get_collection_creator(conn, &table_handle) { - Ok(creator) => creator, - Err(_) => { - tracing::error!( - transaction_version = txn_version, - lookup_key = &table_handle, - "Failed to get collection creator for table handle. You probably should backfill db." 
- ); - return Ok(None); - }, - }, - }; - creator_address = standardize_address(&creator_address); - let collection_data_id = - CollectionDataIdType::new(creator_address, collection_data.get_name().to_string()); - let collection_data_id_hash = collection_data_id.to_hash(); - let collection_name = collection_data.get_name_trunc(); - let metadata_uri = collection_data.get_uri_trunc(); - - Ok(Some(( - Self { - collection_data_id_hash: collection_data_id_hash.clone(), - collection_name: collection_name.clone(), - creator_address: collection_data_id.creator.clone(), - description: collection_data.description.clone(), - transaction_version: txn_version, - metadata_uri: metadata_uri.clone(), - supply: collection_data.supply.clone(), - maximum: collection_data.maximum.clone(), - maximum_mutable: collection_data.mutability_config.maximum, - uri_mutable: collection_data.mutability_config.uri, - description_mutable: collection_data.mutability_config.description, - table_handle: table_handle.clone(), - transaction_timestamp: txn_timestamp, - }, - CurrentCollectionData { - collection_data_id_hash, - collection_name, - creator_address: collection_data_id.creator, - description: collection_data.description, - metadata_uri, - supply: collection_data.supply, - maximum: collection_data.maximum, - maximum_mutable: collection_data.mutability_config.maximum, - uri_mutable: collection_data.mutability_config.uri, - description_mutable: collection_data.mutability_config.description, - last_transaction_version: txn_version, - table_handle, - last_transaction_timestamp: txn_timestamp, - }, - ))) - } else { - Ok(None) - } - } - - /// If collection data is not in resources of the same transaction, then try looking for it in the database. Since collection owner - /// cannot change, we can just look in the current_collection_datas table. - /// Retrying a few times since this collection could've been written in a separate thread. 
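A rough, self-contained sketch of the retry-and-sleep lookup described in the comment above, with a generic closure standing in for the diesel query; the names retry_lookup, MAX_RETRIES, and RETRY_DELAY_MS are illustrative stand-ins for the module's QUERY_RETRIES / QUERY_RETRY_DELAY_MS constants and are not part of this codebase:

use std::{thread, time::Duration};

// Illustrative constants; the surrounding module uses QUERY_RETRIES / QUERY_RETRY_DELAY_MS.
const MAX_RETRIES: u32 = 5;
const RETRY_DELAY_MS: u64 = 500;

/// Retry a fallible lookup a few times, sleeping between attempts, because the row
/// being read may be written by a concurrent worker and only become visible later.
fn retry_lookup<T, E>(mut lookup: impl FnMut() -> Result<T, E>) -> Option<T> {
    for _ in 0..MAX_RETRIES {
        match lookup() {
            Ok(value) => return Some(value),
            Err(_) => thread::sleep(Duration::from_millis(RETRY_DELAY_MS)),
        }
    }
    None
}

fn main() {
    // Toy usage: the first two attempts fail, the third succeeds.
    let mut attempts = 0;
    let creator = retry_lookup(|| {
        attempts += 1;
        if attempts < 3 { Err("not yet visible") } else { Ok("0xabc".to_string()) }
    });
    assert_eq!(creator.as_deref(), Some("0xabc"));
}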
- pub fn get_collection_creator( - conn: &mut PgPoolConnection, - table_handle: &str, - ) -> anyhow::Result { - let mut retried = 0; - while retried < QUERY_RETRIES { - retried += 1; - match CurrentCollectionDataQuery::get_by_table_handle(conn, table_handle) { - Ok(current_collection_data) => return Ok(current_collection_data.creator_address), - Err(_) => { - std::thread::sleep(std::time::Duration::from_millis(QUERY_RETRY_DELAY_MS)); - }, - } - } - Err(anyhow::anyhow!("Failed to get collection creator")) - } -} - -impl CurrentCollectionDataQuery { - pub fn get_by_table_handle( - conn: &mut PgPoolConnection, - table_handle: &str, - ) -> diesel::QueryResult { - current_collection_datas::table - .filter(current_collection_datas::table_handle.eq(table_handle)) - .first::(conn) - } -} diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/mod.rs b/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/mod.rs deleted file mode 100644 index c6c6d17f7b951..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/mod.rs +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -pub mod ans_lookup; -pub mod collection_datas; -pub mod nft_points; -pub mod token_activities; -pub mod token_claims; -pub mod token_datas; -pub mod token_ownerships; -pub mod token_utils; -pub mod tokens; -pub mod v2_collections; -pub mod v2_token_activities; -pub mod v2_token_datas; -pub mod v2_token_metadata; -pub mod v2_token_ownerships; -pub mod v2_token_utils; diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/nft_points.rs b/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/nft_points.rs deleted file mode 100644 index 54039ab5e56a5..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/nft_points.rs +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -// This is required because a diesel macro makes clippy sad -#![allow(clippy::extra_unused_lifetimes)] -#![allow(clippy::unused_unit)] - -use crate::{ - schema::nft_points, - utils::util::{get_entry_function_from_user_request, parse_timestamp, standardize_address}, -}; -use aptos_protos::transaction::v1::{ - transaction::TxnData, transaction_payload::Payload, Transaction, -}; -use bigdecimal::BigDecimal; -use diesel::prelude::*; -use field_count::FieldCount; -use serde::{Deserialize, Serialize}; - -#[derive(Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] -#[diesel(primary_key(transaction_version))] -#[diesel(table_name = nft_points)] -pub struct NftPoints { - pub transaction_version: i64, - pub owner_address: String, - pub token_name: String, - pub point_type: String, - pub amount: BigDecimal, - pub transaction_timestamp: chrono::NaiveDateTime, -} - -impl NftPoints { - pub fn from_transaction( - transaction: &Transaction, - nft_points_contract: Option, - ) -> Option { - let txn_data = transaction - .txn_data - .as_ref() - .expect("Txn Data doesn't exit!"); - let version = transaction.version as i64; - let timestamp = transaction - .timestamp - .as_ref() - .expect("Transaction timestamp doesn't exist!"); - let transaction_info = transaction - .info - .as_ref() - .expect("Transaction info doesn't exist!"); - if let Some(contract) = nft_points_contract { - if let TxnData::User(user_txn) = txn_data { - let user_request = user_txn - .request - .as_ref() - .expect("Sends is not present in user txn"); - let 
payload = user_txn - .request - .as_ref() - .expect("Getting user request failed.") - .payload - .as_ref() - .expect("Getting payload failed."); - let entry_function_id_str = - get_entry_function_from_user_request(user_request).unwrap_or_default(); - - // If failed transaction, end - if !transaction_info.success { - return None; - } - if entry_function_id_str == contract { - if let Payload::EntryFunctionPayload(inner) = payload.payload.as_ref().unwrap() - { - let owner_address = standardize_address(&inner.arguments[0]); - let amount = inner.arguments[2].parse().unwrap(); - let transaction_timestamp = parse_timestamp(timestamp, version); - return Some(Self { - transaction_version: version, - owner_address, - token_name: inner.arguments[1].clone(), - point_type: inner.arguments[3].clone(), - amount, - transaction_timestamp, - }); - } - } - } - } - None - } -} diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/token_activities.rs b/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/token_activities.rs deleted file mode 100644 index c18614e7b10bb..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/token_activities.rs +++ /dev/null @@ -1,197 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -// This is required because a diesel macro makes clippy sad -#![allow(clippy::extra_unused_lifetimes)] -#![allow(clippy::unused_unit)] - -use super::token_utils::{TokenDataIdType, TokenEvent}; -use crate::{ - schema::token_activities, - utils::util::{parse_timestamp, standardize_address}, -}; -use aptos_protos::transaction::v1::{transaction::TxnData, Event, Transaction}; -use bigdecimal::{BigDecimal, Zero}; -use field_count::FieldCount; -use serde::{Deserialize, Serialize}; - -#[derive(Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] -#[diesel(primary_key( - transaction_version, - event_account_address, - event_creation_number, - event_sequence_number -))] -#[diesel(table_name = token_activities)] -pub struct TokenActivity { - pub transaction_version: i64, - pub event_account_address: String, - pub event_creation_number: i64, - pub event_sequence_number: i64, - pub token_data_id_hash: String, - pub property_version: BigDecimal, - pub creator_address: String, - pub collection_name: String, - pub name: String, - pub transfer_type: String, - pub from_address: Option, - pub to_address: Option, - pub token_amount: BigDecimal, - pub coin_type: Option, - pub coin_amount: Option, - pub collection_data_id_hash: String, - pub transaction_timestamp: chrono::NaiveDateTime, - pub event_index: Option, -} - -/// A simplified TokenActivity (excluded common fields) to reduce code duplication -struct TokenActivityHelper<'a> { - pub token_data_id: &'a TokenDataIdType, - pub property_version: BigDecimal, - pub from_address: Option, - pub to_address: Option, - pub token_amount: BigDecimal, - pub coin_type: Option, - pub coin_amount: Option, -} - -impl TokenActivity { - pub fn from_transaction(transaction: &Transaction) -> Vec { - let mut token_activities = vec![]; - let txn_data = transaction - .txn_data - .as_ref() - .expect("Txn Data doesn't exit!"); - if let TxnData::User(user_txn) = txn_data { - for (index, event) in user_txn.events.iter().enumerate() { - let txn_version = transaction.version as i64; - if let Some(token_event) = TokenEvent::from_event( - event.type_str.as_str(), - event.data.as_str(), - txn_version, - ) - .unwrap() - { - token_activities.push(Self::from_parsed_event( - 
event.type_str.as_str(), - event, - &token_event, - txn_version, - parse_timestamp(transaction.timestamp.as_ref().unwrap(), txn_version), - index as i64, - )) - } - } - } - token_activities - } - - pub fn from_parsed_event( - event_type: &str, - event: &Event, - token_event: &TokenEvent, - txn_version: i64, - txn_timestamp: chrono::NaiveDateTime, - event_index: i64, - ) -> Self { - let event_account_address = - standardize_address(event.key.as_ref().unwrap().account_address.as_str()); - let event_creation_number = event.key.as_ref().unwrap().creation_number as i64; - let event_sequence_number = event.sequence_number as i64; - let token_activity_helper = match token_event { - TokenEvent::MintTokenEvent(inner) => TokenActivityHelper { - token_data_id: &inner.id, - property_version: BigDecimal::zero(), - from_address: Some(event_account_address.clone()), - to_address: None, - token_amount: inner.amount.clone(), - coin_type: None, - coin_amount: None, - }, - TokenEvent::BurnTokenEvent(inner) => TokenActivityHelper { - token_data_id: &inner.id.token_data_id, - property_version: inner.id.property_version.clone(), - from_address: Some(event_account_address.clone()), - to_address: None, - token_amount: inner.amount.clone(), - coin_type: None, - coin_amount: None, - }, - TokenEvent::MutateTokenPropertyMapEvent(inner) => TokenActivityHelper { - token_data_id: &inner.new_id.token_data_id, - property_version: inner.new_id.property_version.clone(), - from_address: Some(event_account_address.clone()), - to_address: None, - token_amount: BigDecimal::zero(), - coin_type: None, - coin_amount: None, - }, - TokenEvent::WithdrawTokenEvent(inner) => TokenActivityHelper { - token_data_id: &inner.id.token_data_id, - property_version: inner.id.property_version.clone(), - from_address: Some(event_account_address.clone()), - to_address: None, - token_amount: inner.amount.clone(), - coin_type: None, - coin_amount: None, - }, - TokenEvent::DepositTokenEvent(inner) => TokenActivityHelper { - token_data_id: &inner.id.token_data_id, - property_version: inner.id.property_version.clone(), - from_address: None, - to_address: Some(standardize_address(&event_account_address)), - token_amount: inner.amount.clone(), - coin_type: None, - coin_amount: None, - }, - TokenEvent::OfferTokenEvent(inner) => TokenActivityHelper { - token_data_id: &inner.token_id.token_data_id, - property_version: inner.token_id.property_version.clone(), - from_address: Some(event_account_address.clone()), - to_address: Some(inner.get_to_address()), - token_amount: inner.amount.clone(), - coin_type: None, - coin_amount: None, - }, - TokenEvent::CancelTokenOfferEvent(inner) => TokenActivityHelper { - token_data_id: &inner.token_id.token_data_id, - property_version: inner.token_id.property_version.clone(), - from_address: Some(event_account_address.clone()), - to_address: Some(inner.get_to_address()), - token_amount: inner.amount.clone(), - coin_type: None, - coin_amount: None, - }, - TokenEvent::ClaimTokenEvent(inner) => TokenActivityHelper { - token_data_id: &inner.token_id.token_data_id, - property_version: inner.token_id.property_version.clone(), - from_address: Some(event_account_address.clone()), - to_address: Some(inner.get_to_address()), - token_amount: inner.amount.clone(), - coin_type: None, - coin_amount: None, - }, - }; - let token_data_id = token_activity_helper.token_data_id; - Self { - event_account_address, - event_creation_number, - event_sequence_number, - token_data_id_hash: token_data_id.to_hash(), - property_version: 
token_activity_helper.property_version, - collection_data_id_hash: token_data_id.get_collection_data_id_hash(), - creator_address: token_data_id.get_creator_address(), - collection_name: token_data_id.get_collection_trunc(), - name: token_data_id.get_name_trunc(), - transaction_version: txn_version, - transfer_type: event_type.to_string(), - from_address: token_activity_helper.from_address, - to_address: token_activity_helper.to_address, - token_amount: token_activity_helper.token_amount, - coin_type: token_activity_helper.coin_type, - coin_amount: token_activity_helper.coin_amount, - transaction_timestamp: txn_timestamp, - event_index: Some(event_index), - } - } -} diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/token_claims.rs b/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/token_claims.rs deleted file mode 100644 index f24a7bbf0af6e..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/token_claims.rs +++ /dev/null @@ -1,172 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -// This is required because a diesel macro makes clippy sad -#![allow(clippy::extra_unused_lifetimes)] -#![allow(clippy::unused_unit)] - -use super::{token_utils::TokenWriteSet, tokens::TableHandleToOwner}; -use crate::{schema::current_token_pending_claims, utils::util::standardize_address}; -use aptos_protos::transaction::v1::{DeleteTableItem, WriteTableItem}; -use bigdecimal::{BigDecimal, Zero}; -use field_count::FieldCount; -use serde::{Deserialize, Serialize}; - -#[derive(Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] -#[diesel(primary_key(token_data_id_hash, property_version, from_address, to_address))] -#[diesel(table_name = current_token_pending_claims)] -pub struct CurrentTokenPendingClaim { - pub token_data_id_hash: String, - pub property_version: BigDecimal, - pub from_address: String, - pub to_address: String, - pub collection_data_id_hash: String, - pub creator_address: String, - pub collection_name: String, - pub name: String, - pub amount: BigDecimal, - pub table_handle: String, - pub last_transaction_version: i64, - pub last_transaction_timestamp: chrono::NaiveDateTime, - pub token_data_id: String, - pub collection_id: String, -} - -impl CurrentTokenPendingClaim { - /// Token claim is stored in a table in the offerer's account. The key is token_offer_id (token_id + to address) - /// and value is token (token_id + amount) - pub fn from_write_table_item( - table_item: &WriteTableItem, - txn_version: i64, - txn_timestamp: chrono::NaiveDateTime, - table_handle_to_owner: &TableHandleToOwner, - ) -> anyhow::Result> { - let table_item_data = table_item.data.as_ref().unwrap(); - - let maybe_offer = match TokenWriteSet::from_table_item_type( - table_item_data.key_type.as_str(), - &table_item_data.key, - txn_version, - )? { - Some(TokenWriteSet::TokenOfferId(inner)) => Some(inner), - _ => None, - }; - if let Some(offer) = &maybe_offer { - let maybe_token = match TokenWriteSet::from_table_item_type( - table_item_data.value_type.as_str(), - &table_item_data.value, - txn_version, - )? 
{ - Some(TokenWriteSet::Token(inner)) => Some(inner), - _ => None, - }; - if let Some(token) = &maybe_token { - let table_handle = standardize_address(&table_item.handle.to_string()); - - let maybe_table_metadata = table_handle_to_owner.get(&table_handle); - - if let Some(table_metadata) = maybe_table_metadata { - let token_id = offer.token_id.clone(); - let token_data_id_struct = token_id.token_data_id; - let collection_data_id_hash = - token_data_id_struct.get_collection_data_id_hash(); - let token_data_id_hash = token_data_id_struct.to_hash(); - // Basically adding 0x prefix to the previous 2 lines. This is to be consistent with Token V2 - let collection_id = token_data_id_struct.get_collection_id(); - let token_data_id = token_data_id_struct.to_id(); - let collection_name = token_data_id_struct.get_collection_trunc(); - let name = token_data_id_struct.get_name_trunc(); - - return Ok(Some(Self { - token_data_id_hash, - property_version: token_id.property_version, - from_address: table_metadata.get_owner_address(), - to_address: offer.get_to_address(), - collection_data_id_hash, - creator_address: token_data_id_struct.get_creator_address(), - collection_name, - name, - amount: token.amount.clone(), - table_handle, - last_transaction_version: txn_version, - last_transaction_timestamp: txn_timestamp, - token_data_id, - collection_id, - })); - } else { - tracing::warn!( - transaction_version = txn_version, - table_handle = table_handle, - "Missing table handle metadata for TokenClaim. {:?}", - table_handle_to_owner - ); - } - } else { - tracing::warn!( - transaction_version = txn_version, - value_type = table_item_data.value_type, - value = table_item_data.value, - "Expecting token as value for key = token_offer_id", - ); - } - } - Ok(None) - } - - pub fn from_delete_table_item( - table_item: &DeleteTableItem, - txn_version: i64, - txn_timestamp: chrono::NaiveDateTime, - table_handle_to_owner: &TableHandleToOwner, - ) -> anyhow::Result> { - let table_item_data = table_item.data.as_ref().unwrap(); - - let maybe_offer = match TokenWriteSet::from_table_item_type( - table_item_data.key_type.as_str(), - &table_item_data.key, - txn_version, - )? { - Some(TokenWriteSet::TokenOfferId(inner)) => Some(inner), - _ => None, - }; - if let Some(offer) = &maybe_offer { - let table_handle = standardize_address(&table_item.handle.to_string()); - - let table_metadata = table_handle_to_owner.get(&table_handle).unwrap_or_else(|| { - panic!( - "Missing table handle metadata for claim. \ - Version: {}, table handle for PendingClaims: {}, all metadata: {:?}", - txn_version, table_handle, table_handle_to_owner - ) - }); - - let token_id = offer.token_id.clone(); - let token_data_id_struct = token_id.token_data_id; - let collection_data_id_hash = token_data_id_struct.get_collection_data_id_hash(); - let token_data_id_hash = token_data_id_struct.to_hash(); - // Basically adding 0x prefix to the previous 2 lines. 
This is to be consistent with Token V2 - let collection_id = token_data_id_struct.get_collection_id(); - let token_data_id = token_data_id_struct.to_id(); - let collection_name = token_data_id_struct.get_collection_trunc(); - let name = token_data_id_struct.get_name_trunc(); - - return Ok(Some(Self { - token_data_id_hash, - property_version: token_id.property_version, - from_address: table_metadata.get_owner_address(), - to_address: offer.get_to_address(), - collection_data_id_hash, - creator_address: token_data_id_struct.get_creator_address(), - collection_name, - name, - amount: BigDecimal::zero(), - table_handle, - last_transaction_version: txn_version, - last_transaction_timestamp: txn_timestamp, - token_data_id, - collection_id, - })); - } - Ok(None) - } -} diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/token_datas.rs b/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/token_datas.rs deleted file mode 100644 index 1c27a6a25a837..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/token_datas.rs +++ /dev/null @@ -1,167 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -// This is required because a diesel macro makes clippy sad -#![allow(clippy::extra_unused_lifetimes)] -#![allow(clippy::unused_unit)] - -use super::token_utils::TokenWriteSet; -use crate::schema::{current_token_datas, token_datas}; -use aptos_protos::transaction::v1::WriteTableItem; -use bigdecimal::BigDecimal; -use field_count::FieldCount; -use serde::{Deserialize, Serialize}; - -#[derive(Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] -#[diesel(primary_key(token_data_id_hash, transaction_version))] -#[diesel(table_name = token_datas)] -pub struct TokenData { - pub token_data_id_hash: String, - pub transaction_version: i64, - pub creator_address: String, - pub collection_name: String, - pub name: String, - pub maximum: BigDecimal, - pub supply: BigDecimal, - pub largest_property_version: BigDecimal, - pub metadata_uri: String, - pub payee_address: String, - pub royalty_points_numerator: BigDecimal, - pub royalty_points_denominator: BigDecimal, - pub maximum_mutable: bool, - pub uri_mutable: bool, - pub description_mutable: bool, - pub properties_mutable: bool, - pub royalty_mutable: bool, - pub default_properties: serde_json::Value, - pub collection_data_id_hash: String, - pub transaction_timestamp: chrono::NaiveDateTime, - pub description: String, -} - -#[derive(Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] -#[diesel(primary_key(token_data_id_hash))] -#[diesel(table_name = current_token_datas)] -pub struct CurrentTokenData { - pub token_data_id_hash: String, - pub creator_address: String, - pub collection_name: String, - pub name: String, - pub maximum: bigdecimal::BigDecimal, - pub supply: bigdecimal::BigDecimal, - pub largest_property_version: bigdecimal::BigDecimal, - pub metadata_uri: String, - pub payee_address: String, - pub royalty_points_numerator: bigdecimal::BigDecimal, - pub royalty_points_denominator: bigdecimal::BigDecimal, - pub maximum_mutable: bool, - pub uri_mutable: bool, - pub description_mutable: bool, - pub properties_mutable: bool, - pub royalty_mutable: bool, - pub default_properties: serde_json::Value, - pub last_transaction_version: i64, - pub collection_data_id_hash: String, - pub last_transaction_timestamp: chrono::NaiveDateTime, - pub description: String, -} - -impl TokenData { - pub fn from_write_table_item( - table_item: 
&WriteTableItem, - txn_version: i64, - txn_timestamp: chrono::NaiveDateTime, - ) -> anyhow::Result> { - let table_item_data = table_item.data.as_ref().unwrap(); - - let maybe_token_data = match TokenWriteSet::from_table_item_type( - table_item_data.value_type.as_str(), - &table_item_data.value, - txn_version, - )? { - Some(TokenWriteSet::TokenData(inner)) => Some(inner), - _ => None, - }; - - if let Some(token_data) = maybe_token_data { - let maybe_token_data_id = match TokenWriteSet::from_table_item_type( - table_item_data.key_type.as_str(), - &table_item_data.key, - txn_version, - )? { - Some(TokenWriteSet::TokenDataId(inner)) => Some(inner), - _ => None, - }; - if let Some(token_data_id) = maybe_token_data_id { - let collection_data_id_hash = token_data_id.get_collection_data_id_hash(); - let token_data_id_hash = token_data_id.to_hash(); - let collection_name = token_data_id.get_collection_trunc(); - let name = token_data_id.get_name_trunc(); - let metadata_uri = token_data.get_uri_trunc(); - - return Ok(Some(( - Self { - collection_data_id_hash: collection_data_id_hash.clone(), - token_data_id_hash: token_data_id_hash.clone(), - creator_address: token_data_id.get_creator_address(), - collection_name: collection_name.clone(), - name: name.clone(), - transaction_version: txn_version, - maximum: token_data.maximum.clone(), - supply: token_data.supply.clone(), - largest_property_version: token_data.largest_property_version.clone(), - metadata_uri: metadata_uri.clone(), - payee_address: token_data.royalty.get_payee_address(), - royalty_points_numerator: token_data - .royalty - .royalty_points_numerator - .clone(), - royalty_points_denominator: token_data - .royalty - .royalty_points_denominator - .clone(), - maximum_mutable: token_data.mutability_config.maximum, - uri_mutable: token_data.mutability_config.uri, - description_mutable: token_data.mutability_config.description, - properties_mutable: token_data.mutability_config.properties, - royalty_mutable: token_data.mutability_config.royalty, - default_properties: token_data.default_properties.clone(), - transaction_timestamp: txn_timestamp, - description: token_data.description.clone(), - }, - CurrentTokenData { - collection_data_id_hash, - token_data_id_hash, - creator_address: token_data_id.get_creator_address(), - collection_name, - name, - maximum: token_data.maximum, - supply: token_data.supply, - largest_property_version: token_data.largest_property_version, - metadata_uri, - payee_address: token_data.royalty.get_payee_address(), - royalty_points_numerator: token_data.royalty.royalty_points_numerator, - royalty_points_denominator: token_data.royalty.royalty_points_denominator, - maximum_mutable: token_data.mutability_config.maximum, - uri_mutable: token_data.mutability_config.uri, - description_mutable: token_data.mutability_config.description, - properties_mutable: token_data.mutability_config.properties, - royalty_mutable: token_data.mutability_config.royalty, - default_properties: token_data.default_properties, - last_transaction_version: txn_version, - last_transaction_timestamp: txn_timestamp, - description: token_data.description, - }, - ))); - } else { - tracing::warn!( - transaction_version = txn_version, - key_type = table_item_data.key_type, - key = table_item_data.key, - "Expecting token_data_id as key for value = token_data" - ); - } - } - Ok(None) - } -} diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/token_ownerships.rs 
b/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/token_ownerships.rs deleted file mode 100644 index b087db6f12aa7..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/token_ownerships.rs +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -// This is required because a diesel macro makes clippy sad -#![allow(clippy::extra_unused_lifetimes)] -#![allow(clippy::unused_unit)] - -use super::{ - token_utils::TokenWriteSet, - tokens::{TableHandleToOwner, Token}, -}; -use crate::{ - schema::{current_token_ownerships, token_ownerships}, - utils::util::standardize_address, -}; -use bigdecimal::BigDecimal; -use field_count::FieldCount; -use serde::{Deserialize, Serialize}; - -#[derive(Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] -#[diesel(primary_key( - token_data_id_hash, - property_version, - transaction_version, - table_handle -))] -#[diesel(table_name = token_ownerships)] -pub struct TokenOwnership { - pub token_data_id_hash: String, - pub property_version: BigDecimal, - pub transaction_version: i64, - pub table_handle: String, - pub creator_address: String, - pub collection_name: String, - pub name: String, - pub owner_address: Option, - pub amount: BigDecimal, - pub table_type: Option, - pub collection_data_id_hash: String, - pub transaction_timestamp: chrono::NaiveDateTime, -} - -#[derive(Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] -#[diesel(primary_key(token_data_id_hash, property_version, owner_address))] -#[diesel(table_name = current_token_ownerships)] -pub struct CurrentTokenOwnership { - pub token_data_id_hash: String, - pub property_version: BigDecimal, - pub owner_address: String, - pub creator_address: String, - pub collection_name: String, - pub name: String, - pub amount: BigDecimal, - pub token_properties: serde_json::Value, - pub last_transaction_version: i64, - pub collection_data_id_hash: String, - pub table_type: String, - pub last_transaction_timestamp: chrono::NaiveDateTime, -} - -impl TokenOwnership { - /// We only want to track tokens in 0x1::token::TokenStore for now. This is because the table - /// schema doesn't have table type (i.e. token container) as primary key. TokenStore has token_id - /// as key and token as value. - pub fn from_token( - token: &Token, - table_item_key_type: &str, - table_item_key: &str, - amount: BigDecimal, - table_handle: String, - table_handle_to_owner: &TableHandleToOwner, - ) -> anyhow::Result)>> { - let txn_version = token.transaction_version; - let maybe_token_id = match TokenWriteSet::from_table_item_type( - table_item_key_type, - table_item_key, - txn_version, - )? 
{ - Some(TokenWriteSet::TokenId(inner)) => Some(inner), - _ => None, - }; - // Return early if table key is not token id - if maybe_token_id.is_none() { - return Ok(None); - } - let table_handle = standardize_address(&table_handle); - let maybe_table_metadata = table_handle_to_owner.get(&table_handle); - // Return early if table type is not tokenstore - if let Some(tm) = maybe_table_metadata { - if tm.table_type != "0x3::token::TokenStore" { - return Ok(None); - } - } - let (curr_token_ownership, owner_address, table_type) = match maybe_table_metadata { - Some(tm) => ( - Some(CurrentTokenOwnership { - collection_data_id_hash: token.collection_data_id_hash.clone(), - token_data_id_hash: token.token_data_id_hash.clone(), - property_version: token.property_version.clone(), - owner_address: tm.get_owner_address(), - creator_address: standardize_address(&token.creator_address.clone()), - collection_name: token.collection_name.clone(), - name: token.name.clone(), - amount: amount.clone(), - token_properties: token.token_properties.clone(), - last_transaction_version: txn_version, - table_type: tm.table_type.clone(), - last_transaction_timestamp: token.transaction_timestamp, - }), - Some(tm.get_owner_address()), - Some(tm.table_type.clone()), - ), - None => { - tracing::warn!( - transaction_version = txn_version, - table_handle = table_handle, - "Missing table handle metadata for TokenStore. {:?}", - table_handle_to_owner - ); - (None, None, None) - }, - }; - - Ok(Some(( - Self { - collection_data_id_hash: token.collection_data_id_hash.clone(), - token_data_id_hash: token.token_data_id_hash.clone(), - property_version: token.property_version.clone(), - owner_address: owner_address.map(|s| standardize_address(&s)), - creator_address: standardize_address(&token.creator_address), - collection_name: token.collection_name.clone(), - name: token.name.clone(), - amount, - table_type, - transaction_version: token.transaction_version, - table_handle, - transaction_timestamp: token.transaction_timestamp, - }, - curr_token_ownership, - ))) - } -} diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/token_utils.rs b/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/token_utils.rs deleted file mode 100644 index 1b62f669dd643..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/token_utils.rs +++ /dev/null @@ -1,481 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -// This is required because a diesel macro makes clippy sad -#![allow(clippy::extra_unused_lifetimes)] - -use crate::utils::util::{ - deserialize_from_string, deserialize_property_map_from_bcs_hexstring, - deserialize_string_from_hexstring, hash_str, standardize_address, truncate_str, -}; -use anyhow::{Context, Result}; -use bigdecimal::BigDecimal; -use serde::{Deserialize, Serialize}; -use std::fmt::{self, Formatter}; - -pub const TOKEN_ADDR: &str = "0x0000000000000000000000000000000000000000000000000000000000000003"; -pub const NAME_LENGTH: usize = 128; -pub const URI_LENGTH: usize = 512; -/** - * This file defines deserialized move types as defined in our 0x3 contracts. 
- */ - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct Table { - handle: String, -} - -impl Table { - pub fn get_handle(&self) -> String { - standardize_address(&self.handle) - } -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct TokenDataIdType { - creator: String, - collection: String, - name: String, -} - -impl TokenDataIdType { - pub fn to_id(&self) -> String { - format!("0x{}", self.to_hash()) - } - - pub fn to_hash(&self) -> String { - hash_str(&self.to_string()) - } - - pub fn get_collection_trunc(&self) -> String { - truncate_str(&self.collection, NAME_LENGTH) - } - - pub fn get_name_trunc(&self) -> String { - truncate_str(&self.name, NAME_LENGTH) - } - - pub fn get_collection_data_id_hash(&self) -> String { - CollectionDataIdType::new(self.creator.clone(), self.collection.clone()).to_hash() - } - - pub fn get_collection_id(&self) -> String { - CollectionDataIdType::new(self.creator.clone(), self.collection.clone()).to_id() - } - - pub fn get_creator_address(&self) -> String { - standardize_address(&self.creator) - } -} - -impl fmt::Display for TokenDataIdType { - fn fmt(&self, f: &mut Formatter) -> std::fmt::Result { - write!( - f, - "{}::{}::{}", - standardize_address(self.creator.as_str()), - self.collection, - self.name - ) - } -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct CollectionDataIdType { - pub creator: String, - pub name: String, -} - -impl CollectionDataIdType { - pub fn new(creator: String, name: String) -> Self { - Self { creator, name } - } - - pub fn to_hash(&self) -> String { - hash_str(&self.to_string()) - } - - pub fn get_name_trunc(&self) -> String { - truncate_str(&self.name, NAME_LENGTH) - } - - pub fn to_id(&self) -> String { - format!("0x{}", self.to_hash()) - } -} - -impl fmt::Display for CollectionDataIdType { - fn fmt(&self, f: &mut Formatter) -> std::fmt::Result { - write!( - f, - "{}::{}", - standardize_address(self.creator.as_str()), - self.name - ) - } -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct TokenIdType { - pub token_data_id: TokenDataIdType, - #[serde(deserialize_with = "deserialize_from_string")] - pub property_version: BigDecimal, -} - -impl fmt::Display for TokenIdType { - fn fmt(&self, f: &mut Formatter) -> std::fmt::Result { - write!(f, "{}::{}", self.token_data_id, self.property_version) - } -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct TokenDataType { - #[serde(deserialize_with = "deserialize_property_map_from_bcs_hexstring")] - pub default_properties: serde_json::Value, - pub description: String, - #[serde(deserialize_with = "deserialize_from_string")] - pub largest_property_version: BigDecimal, - #[serde(deserialize_with = "deserialize_from_string")] - pub maximum: BigDecimal, - pub mutability_config: TokenDataMutabilityConfigType, - name: String, - pub royalty: RoyaltyType, - #[serde(deserialize_with = "deserialize_from_string")] - pub supply: BigDecimal, - uri: String, -} - -impl TokenDataType { - pub fn get_uri_trunc(&self) -> String { - truncate_str(&self.uri, URI_LENGTH) - } - - pub fn get_name_trunc(&self) -> String { - truncate_str(&self.name, NAME_LENGTH) - } -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct TokenDataMutabilityConfigType { - pub description: bool, - pub maximum: bool, - pub properties: bool, - pub royalty: bool, - pub uri: bool, -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct RoyaltyType { - payee_address: String, - #[serde(deserialize_with = "deserialize_from_string")] - pub 
royalty_points_denominator: BigDecimal, - #[serde(deserialize_with = "deserialize_from_string")] - pub royalty_points_numerator: BigDecimal, -} - -impl RoyaltyType { - pub fn get_payee_address(&self) -> String { - standardize_address(&self.payee_address) - } -} -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct TokenType { - #[serde(deserialize_with = "deserialize_from_string")] - pub amount: BigDecimal, - pub id: TokenIdType, - #[serde(deserialize_with = "deserialize_property_map_from_bcs_hexstring")] - pub token_properties: serde_json::Value, -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct CollectionDataType { - pub description: String, - #[serde(deserialize_with = "deserialize_from_string")] - pub maximum: BigDecimal, - pub mutability_config: CollectionDataMutabilityConfigType, - name: String, - #[serde(deserialize_with = "deserialize_from_string")] - pub supply: BigDecimal, - uri: String, -} - -impl CollectionDataType { - pub fn get_name(&self) -> &str { - &self.name - } - - pub fn get_uri_trunc(&self) -> String { - truncate_str(&self.uri, URI_LENGTH) - } - - pub fn get_name_trunc(&self) -> String { - truncate_str(&self.name, NAME_LENGTH) - } -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct TokenOfferIdType { - to_addr: String, - pub token_id: TokenIdType, -} - -impl TokenOfferIdType { - pub fn get_to_address(&self) -> String { - standardize_address(&self.to_addr) - } -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct CollectionResourceType { - pub collection_data: Table, - pub token_data: Table, -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct TokenStoreResourceType { - pub tokens: Table, -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct PendingClaimsResourceType { - pub pending_claims: Table, -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct CollectionDataMutabilityConfigType { - pub description: bool, - pub maximum: bool, - pub uri: bool, -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct WithdrawTokenEventType { - #[serde(deserialize_with = "deserialize_from_string")] - pub amount: BigDecimal, - pub id: TokenIdType, -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct DepositTokenEventType { - #[serde(deserialize_with = "deserialize_from_string")] - pub amount: BigDecimal, - pub id: TokenIdType, -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct MintTokenEventType { - #[serde(deserialize_with = "deserialize_from_string")] - pub amount: BigDecimal, - pub id: TokenDataIdType, -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct BurnTokenEventType { - #[serde(deserialize_with = "deserialize_from_string")] - pub amount: BigDecimal, - pub id: TokenIdType, -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct MutateTokenPropertyMapEventType { - pub old_id: TokenIdType, - pub new_id: TokenIdType, -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct OfferTokenEventType { - #[serde(deserialize_with = "deserialize_from_string")] - pub amount: BigDecimal, - to_address: String, - pub token_id: TokenIdType, -} - -impl OfferTokenEventType { - pub fn get_to_address(&self) -> String { - standardize_address(&self.to_address) - } -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct CancelTokenOfferEventType { - #[serde(deserialize_with = "deserialize_from_string")] - pub amount: BigDecimal, - to_address: String, - pub token_id: TokenIdType, -} - -impl CancelTokenOfferEventType { - pub fn 
get_to_address(&self) -> String { - standardize_address(&self.to_address) - } -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct ClaimTokenEventType { - #[serde(deserialize_with = "deserialize_from_string")] - pub amount: BigDecimal, - to_address: String, - pub token_id: TokenIdType, -} - -impl ClaimTokenEventType { - pub fn get_to_address(&self) -> String { - standardize_address(&self.to_address) - } -} -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct TypeInfo { - pub account_address: String, - #[serde(deserialize_with = "deserialize_string_from_hexstring")] - pub module_name: String, - #[serde(deserialize_with = "deserialize_string_from_hexstring")] - pub struct_name: String, -} - -impl fmt::Display for TypeInfo { - fn fmt(&self, f: &mut Formatter) -> std::fmt::Result { - write!( - f, - "{}::{}::{}", - self.account_address, self.module_name, self.struct_name - ) - } -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub enum TokenWriteSet { - TokenDataId(TokenDataIdType), - TokenId(TokenIdType), - TokenData(TokenDataType), - Token(TokenType), - CollectionData(CollectionDataType), - TokenOfferId(TokenOfferIdType), -} - -impl TokenWriteSet { - pub fn from_table_item_type( - data_type: &str, - data: &str, - txn_version: i64, - ) -> Result> { - match data_type { - "0x3::token::TokenDataId" => { - serde_json::from_str(data).map(|inner| Some(TokenWriteSet::TokenDataId(inner))) - }, - "0x3::token::TokenId" => { - serde_json::from_str(data).map(|inner| Some(TokenWriteSet::TokenId(inner))) - }, - "0x3::token::TokenData" => { - serde_json::from_str(data).map(|inner| Some(TokenWriteSet::TokenData(inner))) - }, - "0x3::token::Token" => { - serde_json::from_str(data).map(|inner| Some(TokenWriteSet::Token(inner))) - }, - "0x3::token::CollectionData" => { - serde_json::from_str(data).map(|inner| Some(TokenWriteSet::CollectionData(inner))) - }, - "0x3::token_transfers::TokenOfferId" => { - serde_json::from_str(data).map(|inner| Some(TokenWriteSet::TokenOfferId(inner))) - }, - _ => Ok(None), - } - .context(format!( - "version {} failed! 
failed to parse type {}, data {:?}", - txn_version, data_type, data - )) - } -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub enum TokenEvent { - MintTokenEvent(MintTokenEventType), - BurnTokenEvent(BurnTokenEventType), - MutateTokenPropertyMapEvent(MutateTokenPropertyMapEventType), - WithdrawTokenEvent(WithdrawTokenEventType), - DepositTokenEvent(DepositTokenEventType), - OfferTokenEvent(OfferTokenEventType), - CancelTokenOfferEvent(CancelTokenOfferEventType), - ClaimTokenEvent(ClaimTokenEventType), -} - -impl TokenEvent { - pub fn from_event(data_type: &str, data: &str, txn_version: i64) -> Result> { - match data_type { - "0x3::token::MintTokenEvent" => { - serde_json::from_str(data).map(|inner| Some(TokenEvent::MintTokenEvent(inner))) - }, - "0x3::token::BurnTokenEvent" => { - serde_json::from_str(data).map(|inner| Some(TokenEvent::BurnTokenEvent(inner))) - }, - "0x3::token::MutateTokenPropertyMapEvent" => serde_json::from_str(data) - .map(|inner| Some(TokenEvent::MutateTokenPropertyMapEvent(inner))), - "0x3::token::WithdrawEvent" => { - serde_json::from_str(data).map(|inner| Some(TokenEvent::WithdrawTokenEvent(inner))) - }, - "0x3::token::DepositEvent" => { - serde_json::from_str(data).map(|inner| Some(TokenEvent::DepositTokenEvent(inner))) - }, - "0x3::token_transfers::TokenOfferEvent" => { - serde_json::from_str(data).map(|inner| Some(TokenEvent::OfferTokenEvent(inner))) - }, - "0x3::token_transfers::TokenCancelOfferEvent" => serde_json::from_str(data) - .map(|inner| Some(TokenEvent::CancelTokenOfferEvent(inner))), - "0x3::token_transfers::TokenClaimEvent" => { - serde_json::from_str(data).map(|inner| Some(TokenEvent::ClaimTokenEvent(inner))) - }, - _ => Ok(None), - } - .context(format!( - "version {} failed! failed to parse type {}, data {:?}", - txn_version, data_type, data - )) - } -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub enum TokenResource { - CollectionResource(CollectionResourceType), - TokenStoreResource(TokenStoreResourceType), - PendingClaimsResource(PendingClaimsResourceType), -} - -impl TokenResource { - pub fn is_resource_supported(data_type: &str) -> bool { - [ - format!("{}::token::Collections", TOKEN_ADDR), - format!("{}::token::TokenStore", TOKEN_ADDR), - format!("{}::token_transfers::PendingClaims", TOKEN_ADDR), - ] - .contains(&data_type.to_string()) - } - - pub fn from_resource( - data_type: &str, - data: &serde_json::Value, - txn_version: i64, - ) -> Result { - match data_type { - x if x == format!("{}::token::Collections", TOKEN_ADDR) => { - serde_json::from_value(data.clone()) - .map(|inner| Some(TokenResource::CollectionResource(inner))) - }, - x if x == format!("{}::token::TokenStore", TOKEN_ADDR) => { - serde_json::from_value(data.clone()) - .map(|inner| Some(TokenResource::TokenStoreResource(inner))) - }, - x if x == format!("{}::token_transfers::PendingClaims", TOKEN_ADDR) => { - serde_json::from_value(data.clone()) - .map(|inner| Some(TokenResource::PendingClaimsResource(inner))) - }, - _ => Ok(None), - } - .context(format!( - "version {} failed! failed to parse type {}, data {:?}", - txn_version, data_type, data - ))? - .context(format!( - "Resource unsupported! Call is_resource_supported first. 
version {} type {}", - txn_version, data_type - )) - } -} diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/tokens.rs b/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/tokens.rs deleted file mode 100644 index 760aa15e748e3..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/tokens.rs +++ /dev/null @@ -1,433 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -// This is required because a diesel macro makes clippy sad -#![allow(clippy::extra_unused_lifetimes)] -#![allow(clippy::unused_unit)] - -use super::{ - collection_datas::{CollectionData, CurrentCollectionData}, - token_claims::CurrentTokenPendingClaim, - token_datas::{CurrentTokenData, TokenData}, - token_ownerships::{CurrentTokenOwnership, TokenOwnership}, - token_utils::{TokenResource, TokenWriteSet}, -}; -use crate::{ - models::default_models::move_resources::MoveResource, - schema::tokens, - utils::{ - database::PgPoolConnection, - util::{ensure_not_negative, parse_timestamp, standardize_address}, - }, -}; -use aptos_protos::transaction::v1::{ - transaction::TxnData, write_set_change::Change as WriteSetChangeEnum, DeleteTableItem, - Transaction, WriteResource, WriteTableItem, -}; -use bigdecimal::{BigDecimal, Zero}; -use field_count::FieldCount; -use serde::{Deserialize, Serialize}; -use std::collections::HashMap; - -type TableHandle = String; -type Address = String; -type TableType = String; -pub type TableHandleToOwner = HashMap; -pub type TokenDataIdHash = String; -// PK of current_token_ownerships, i.e. token_data_id_hash + property_version + owner_address, used to dedupe -pub type CurrentTokenOwnershipPK = (TokenDataIdHash, BigDecimal, Address); -// PK of current_token_pending_claims, i.e. token_data_id_hash + property_version + to/from_address, used to dedupe -pub type CurrentTokenPendingClaimPK = (TokenDataIdHash, BigDecimal, Address, Address); -// PK of tokens table, used to dedupe tokens -pub type TokenPK = (TokenDataIdHash, BigDecimal); - -#[derive(Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] -#[diesel(primary_key(token_data_id_hash, property_version, transaction_version))] -#[diesel(table_name = tokens)] -pub struct Token { - pub token_data_id_hash: String, - pub property_version: BigDecimal, - pub transaction_version: i64, - pub creator_address: String, - pub collection_name: String, - pub name: String, - pub token_properties: serde_json::Value, - pub collection_data_id_hash: String, - pub transaction_timestamp: chrono::NaiveDateTime, -} - -#[derive(Debug)] -pub struct TableMetadataForToken { - owner_address: Address, - pub table_type: TableType, -} - -impl Token { - /// We can find token data from write sets in user transactions. Table items will contain metadata for collections - /// and tokens. 
To find ownership, we have to look in write resource write sets for who owns those table handles - /// - /// We also will compute current versions of the token tables which are at a higher granularity than the transactional tables (only - /// state at the last transaction will be tracked, hence using hashmap to dedupe) - pub fn from_transaction( - transaction: &Transaction, - table_handle_to_owner: &TableHandleToOwner, - conn: &mut PgPoolConnection, - ) -> ( - Vec, - Vec, - Vec, - Vec, - HashMap, - HashMap, - HashMap, - HashMap, - ) { - let txn_data = transaction - .txn_data - .as_ref() - .expect("Txn Data doesn't exit!"); - if let TxnData::User(_) = txn_data { - let mut token_ownerships = vec![]; - let mut token_datas = vec![]; - let mut collection_datas = vec![]; - - let mut tokens: HashMap = HashMap::new(); - let mut current_token_ownerships: HashMap< - CurrentTokenOwnershipPK, - CurrentTokenOwnership, - > = HashMap::new(); - let mut current_token_datas: HashMap = - HashMap::new(); - let mut current_collection_datas: HashMap = - HashMap::new(); - let mut current_token_claims: HashMap< - CurrentTokenPendingClaimPK, - CurrentTokenPendingClaim, - > = HashMap::new(); - - let txn_version = transaction.version as i64; - let txn_timestamp = - parse_timestamp(transaction.timestamp.as_ref().unwrap(), txn_version); - let transaction_info = transaction - .info - .as_ref() - .expect("Transaction info doesn't exist!"); - - for wsc in &transaction_info.changes { - // Basic token and ownership data - let (maybe_token_w_ownership, maybe_token_data, maybe_collection_data) = - match wsc.change.as_ref().unwrap() { - WriteSetChangeEnum::WriteTableItem(write_table_item) => ( - Self::from_write_table_item( - write_table_item, - txn_version, - txn_timestamp, - table_handle_to_owner, - ) - .unwrap(), - TokenData::from_write_table_item( - write_table_item, - txn_version, - txn_timestamp, - ) - .unwrap(), - CollectionData::from_write_table_item( - write_table_item, - txn_version, - txn_timestamp, - table_handle_to_owner, - conn, - ) - .unwrap(), - ), - WriteSetChangeEnum::DeleteTableItem(delete_table_item) => ( - Self::from_delete_table_item( - delete_table_item, - txn_version, - txn_timestamp, - table_handle_to_owner, - ) - .unwrap(), - None, - None, - ), - _ => (None, None, None), - }; - // More advanced token contracts - let maybe_current_token_claim = match wsc.change.as_ref().unwrap() { - WriteSetChangeEnum::WriteTableItem(write_table_item) => { - CurrentTokenPendingClaim::from_write_table_item( - write_table_item, - txn_version, - txn_timestamp, - table_handle_to_owner, - ) - .unwrap() - }, - WriteSetChangeEnum::DeleteTableItem(delete_table_item) => { - CurrentTokenPendingClaim::from_delete_table_item( - delete_table_item, - txn_version, - txn_timestamp, - table_handle_to_owner, - ) - .unwrap() - }, - _ => None, - }; - - if let Some((token, maybe_token_ownership, maybe_current_token_ownership)) = - maybe_token_w_ownership - { - tokens.insert( - ( - token.token_data_id_hash.clone(), - token.property_version.clone(), - ), - token, - ); - if let Some(token_ownership) = maybe_token_ownership { - token_ownerships.push(token_ownership); - } - if let Some(current_token_ownership) = maybe_current_token_ownership { - current_token_ownerships.insert( - ( - current_token_ownership.token_data_id_hash.clone(), - current_token_ownership.property_version.clone(), - current_token_ownership.owner_address.clone(), - ), - current_token_ownership, - ); - } - } - if let Some((token_data, current_token_data)) = maybe_token_data 
{ - token_datas.push(token_data); - current_token_datas.insert( - current_token_data.token_data_id_hash.clone(), - current_token_data, - ); - } - if let Some((collection_data, current_collection_data)) = maybe_collection_data { - collection_datas.push(collection_data); - current_collection_datas.insert( - current_collection_data.collection_data_id_hash.clone(), - current_collection_data, - ); - } - if let Some(claim) = maybe_current_token_claim { - current_token_claims.insert( - ( - claim.token_data_id_hash.clone(), - claim.property_version.clone(), - claim.from_address.clone(), - claim.to_address.clone(), - ), - claim, - ); - } - } - return ( - tokens.into_values().collect(), - token_ownerships, - token_datas, - collection_datas, - current_token_ownerships, - current_token_datas, - current_collection_datas, - current_token_claims, - ); - } - Default::default() - } - - /// Get token from write table item. Table items don't have address of the table so we need to look it up in the table_handle_to_owner mapping - /// We get the mapping from resource. - /// If the mapping is missing we'll just leave owner address as blank. This isn't great but at least helps us account for the token - pub fn from_write_table_item( - table_item: &WriteTableItem, - txn_version: i64, - txn_timestamp: chrono::NaiveDateTime, - table_handle_to_owner: &TableHandleToOwner, - ) -> anyhow::Result, Option)>> { - let table_item_data = table_item.data.as_ref().unwrap(); - - let maybe_token = match TokenWriteSet::from_table_item_type( - table_item_data.value_type.as_str(), - &table_item_data.value, - txn_version, - )? { - Some(TokenWriteSet::Token(inner)) => Some(inner), - _ => None, - }; - - if let Some(token) = maybe_token { - let token_id = token.id; - let token_data_id = token_id.token_data_id; - let collection_data_id_hash = token_data_id.get_collection_data_id_hash(); - let token_data_id_hash = token_data_id.to_hash(); - let collection_name = token_data_id.get_collection_trunc(); - let name = token_data_id.get_name_trunc(); - - let token_pg = Self { - collection_data_id_hash, - token_data_id_hash, - creator_address: token_data_id.get_creator_address(), - collection_name, - name, - property_version: token_id.property_version, - transaction_version: txn_version, - token_properties: token.token_properties, - transaction_timestamp: txn_timestamp, - }; - - let (token_ownership, current_token_ownership) = TokenOwnership::from_token( - &token_pg, - table_item_data.key_type.as_str(), - &table_item_data.key, - ensure_not_negative(token.amount), - table_item.handle.to_string(), - table_handle_to_owner, - )? - .map(|(token_ownership, current_token_ownership)| { - (Some(token_ownership), current_token_ownership) - }) - .unwrap_or((None, None)); - - Ok(Some((token_pg, token_ownership, current_token_ownership))) - } else { - Ok(None) - } - } - - /// Get token from delete table item. The difference from write table item is that value isn't there so - /// we'll set amount to 0 and token property to blank. - pub fn from_delete_table_item( - table_item: &DeleteTableItem, - txn_version: i64, - txn_timestamp: chrono::NaiveDateTime, - table_handle_to_owner: &TableHandleToOwner, - ) -> anyhow::Result, Option)>> { - let table_item_data = table_item.data.as_ref().unwrap(); - - let maybe_token_id = match TokenWriteSet::from_table_item_type( - table_item_data.key_type.as_str(), - &table_item_data.key, - txn_version, - )? 
{ - Some(TokenWriteSet::TokenId(inner)) => Some(inner), - _ => None, - }; - - if let Some(token_id) = maybe_token_id { - let token_data_id = token_id.token_data_id; - let collection_data_id_hash = token_data_id.get_collection_data_id_hash(); - let token_data_id_hash = token_data_id.to_hash(); - let collection_name = token_data_id.get_collection_trunc(); - let name = token_data_id.get_name_trunc(); - - let token = Self { - collection_data_id_hash, - token_data_id_hash, - creator_address: token_data_id.get_creator_address(), - collection_name, - name, - property_version: token_id.property_version, - transaction_version: txn_version, - token_properties: serde_json::Value::Null, - transaction_timestamp: txn_timestamp, - }; - let (token_ownership, current_token_ownership) = TokenOwnership::from_token( - &token, - table_item_data.key_type.as_str(), - &table_item_data.key, - BigDecimal::zero(), - table_item.handle.to_string(), - table_handle_to_owner, - )? - .map(|(token_ownership, current_token_ownership)| { - (Some(token_ownership), current_token_ownership) - }) - .unwrap_or((None, None)); - Ok(Some((token, token_ownership, current_token_ownership))) - } else { - Ok(None) - } - } -} - -impl TableMetadataForToken { - /// Mapping from table handle to owner type, including type of the table (AKA resource type) - /// from user transactions in a batch of transactions - pub fn get_table_handle_to_owner_from_transactions( - transactions: &[Transaction], - ) -> TableHandleToOwner { - let mut table_handle_to_owner: TableHandleToOwner = HashMap::new(); - // Do a first pass to get all the table metadata in the batch. - for transaction in transactions { - if let TxnData::User(_) = transaction.txn_data.as_ref().unwrap() { - let txn_version = transaction.version as i64; - - let transaction_info = transaction - .info - .as_ref() - .expect("Transaction info doesn't exist!"); - for wsc in &transaction_info.changes { - if let WriteSetChangeEnum::WriteResource(write_resource) = - wsc.change.as_ref().unwrap() - { - let maybe_map = TableMetadataForToken::get_table_handle_to_owner( - write_resource, - txn_version, - ) - .unwrap(); - if let Some(map) = maybe_map { - table_handle_to_owner.extend(map); - } - } - } - } - } - table_handle_to_owner - } - - /// Mapping from table handle to owner type, including type of the table (AKA resource type) - fn get_table_handle_to_owner( - write_resource: &WriteResource, - txn_version: i64, - ) -> anyhow::Result> { - let type_str = MoveResource::get_outer_type_from_resource(write_resource); - if !TokenResource::is_resource_supported(type_str.as_str()) { - return Ok(None); - } - let resource = MoveResource::from_write_resource( - write_resource, - 0, // Placeholder, this isn't used anyway - txn_version, - 0, // Placeholder, this isn't used anyway - ); - - let value = TableMetadataForToken { - owner_address: resource.address.clone(), - table_type: write_resource.type_str.clone(), - }; - let table_handle: TableHandle = match TokenResource::from_resource( - &type_str, - resource.data.as_ref().unwrap(), - txn_version, - )? 
{ - TokenResource::CollectionResource(collection_resource) => { - collection_resource.collection_data.get_handle() - }, - TokenResource::TokenStoreResource(inner) => inner.tokens.get_handle(), - TokenResource::PendingClaimsResource(inner) => inner.pending_claims.get_handle(), - }; - Ok(Some(HashMap::from([( - standardize_address(&table_handle), - value, - )]))) - } - - pub fn get_owner_address(&self) -> String { - standardize_address(&self.owner_address) - } -} diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/v2_collections.rs b/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/v2_collections.rs deleted file mode 100644 index bb3c784d2058d..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/v2_collections.rs +++ /dev/null @@ -1,310 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -// This is required because a diesel macro makes clippy sad -#![allow(clippy::extra_unused_lifetimes)] -#![allow(clippy::unused_unit)] - -use super::{ - collection_datas::{CollectionData, QUERY_RETRIES, QUERY_RETRY_DELAY_MS}, - token_utils::{CollectionDataIdType, TokenWriteSet}, - tokens::TableHandleToOwner, - v2_token_utils::{TokenStandard, TokenV2AggregatedDataMapping, V2TokenResource}, -}; -use crate::{ - models::default_models::move_resources::MoveResource, - schema::{collections_v2, current_collections_v2}, - utils::{database::PgPoolConnection, util::standardize_address}, -}; -use anyhow::Context; -use aptos_protos::transaction::v1::{WriteResource, WriteTableItem}; -use bigdecimal::{BigDecimal, Zero}; -use diesel::{prelude::*, sql_query, sql_types::Text}; -use field_count::FieldCount; -use serde::{Deserialize, Serialize}; - -// PK of current_collections_v2, i.e. 
collection_id -pub type CurrentCollectionV2PK = String; - -#[derive(Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] -#[diesel(primary_key(transaction_version, write_set_change_index))] -#[diesel(table_name = collections_v2)] -pub struct CollectionV2 { - pub transaction_version: i64, - pub write_set_change_index: i64, - pub collection_id: String, - pub creator_address: String, - pub collection_name: String, - pub description: String, - pub uri: String, - pub current_supply: BigDecimal, - pub max_supply: Option, - pub total_minted_v2: Option, - pub mutable_description: Option, - pub mutable_uri: Option, - pub table_handle_v1: Option, - pub token_standard: String, - pub transaction_timestamp: chrono::NaiveDateTime, -} - -#[derive(Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] -#[diesel(primary_key(collection_id))] -#[diesel(table_name = current_collections_v2)] -pub struct CurrentCollectionV2 { - pub collection_id: String, - pub creator_address: String, - pub collection_name: String, - pub description: String, - pub uri: String, - pub current_supply: BigDecimal, - pub max_supply: Option, - pub total_minted_v2: Option, - pub mutable_description: Option, - pub mutable_uri: Option, - pub table_handle_v1: Option, - pub token_standard: String, - pub last_transaction_version: i64, - pub last_transaction_timestamp: chrono::NaiveDateTime, -} - -#[derive(Debug, QueryableByName)] -pub struct CreatorFromCollectionTableV1 { - #[diesel(sql_type = Text)] - pub creator_address: String, -} - -impl CollectionV2 { - pub fn get_v2_from_write_resource( - write_resource: &WriteResource, - txn_version: i64, - write_set_change_index: i64, - txn_timestamp: chrono::NaiveDateTime, - token_v2_metadata: &TokenV2AggregatedDataMapping, - ) -> anyhow::Result> { - let type_str = MoveResource::get_outer_type_from_resource(write_resource); - if !V2TokenResource::is_resource_supported(type_str.as_str()) { - return Ok(None); - } - let resource = MoveResource::from_write_resource( - write_resource, - 0, // Placeholder, this isn't used anyway - txn_version, - 0, // Placeholder, this isn't used anyway - ); - - if let V2TokenResource::Collection(inner) = &V2TokenResource::from_resource( - &type_str, - resource.data.as_ref().unwrap(), - txn_version, - )? 
{ - let (mut current_supply, mut max_supply, mut total_minted_v2) = - (BigDecimal::zero(), None, None); - let (mut mutable_description, mut mutable_uri) = (None, None); - if let Some(metadata) = token_v2_metadata.get(&resource.address) { - // Getting supply data (prefer fixed supply over unlimited supply although they should never appear at the same time anyway) - let fixed_supply = metadata.fixed_supply.as_ref(); - let unlimited_supply = metadata.unlimited_supply.as_ref(); - if let Some(supply) = unlimited_supply { - (current_supply, max_supply, total_minted_v2) = ( - supply.current_supply.clone(), - None, - Some(supply.total_minted.clone()), - ); - } - if let Some(supply) = fixed_supply { - (current_supply, max_supply, total_minted_v2) = ( - supply.current_supply.clone(), - Some(supply.max_supply.clone()), - Some(supply.total_minted.clone()), - ); - } - - // Getting collection mutability config from AptosCollection - let collection = metadata.aptos_collection.as_ref(); - if let Some(collection) = collection { - mutable_description = Some(collection.mutable_description); - mutable_uri = Some(collection.mutable_uri); - } - } else { - // ObjectCore should not be missing, returning from entire function early - return Ok(None); - } - - let collection_id = resource.address.clone(); - let creator_address = inner.get_creator_address(); - let collection_name = inner.get_name_trunc(); - let description = inner.description.clone(); - let uri = inner.get_uri_trunc(); - - Ok(Some(( - Self { - transaction_version: txn_version, - write_set_change_index, - collection_id: collection_id.clone(), - creator_address: creator_address.clone(), - collection_name: collection_name.clone(), - description: description.clone(), - uri: uri.clone(), - current_supply: current_supply.clone(), - max_supply: max_supply.clone(), - total_minted_v2: total_minted_v2.clone(), - mutable_description, - mutable_uri, - table_handle_v1: None, - token_standard: TokenStandard::V2.to_string(), - transaction_timestamp: txn_timestamp, - }, - CurrentCollectionV2 { - collection_id, - creator_address, - collection_name, - description, - uri, - current_supply, - max_supply, - total_minted_v2, - mutable_description, - mutable_uri, - table_handle_v1: None, - token_standard: TokenStandard::V2.to_string(), - last_transaction_version: txn_version, - last_transaction_timestamp: txn_timestamp, - }, - ))) - } else { - Ok(None) - } - } - - pub fn get_v1_from_write_table_item( - table_item: &WriteTableItem, - txn_version: i64, - write_set_change_index: i64, - txn_timestamp: chrono::NaiveDateTime, - table_handle_to_owner: &TableHandleToOwner, - conn: &mut PgPoolConnection, - ) -> anyhow::Result> { - let table_item_data = table_item.data.as_ref().unwrap(); - - let maybe_collection_data = match TokenWriteSet::from_table_item_type( - table_item_data.value_type.as_str(), - &table_item_data.value, - txn_version, - )? 
{ - Some(TokenWriteSet::CollectionData(inner)) => Some(inner), - _ => None, - }; - if let Some(collection_data) = maybe_collection_data { - let table_handle = table_item.handle.to_string(); - let maybe_creator_address = table_handle_to_owner - .get(&standardize_address(&table_handle)) - .map(|table_metadata| table_metadata.get_owner_address()); - let mut creator_address = match maybe_creator_address { - Some(ca) => ca, - None => { - match Self::get_collection_creator_for_v1(conn, &table_handle).context(format!( - "Failed to get collection creator for table handle {}, txn version {}", - table_handle, txn_version - )) { - Ok(ca) => ca, - Err(_) => { - // Try our best by getting from the older collection data - match CollectionData::get_collection_creator(conn, &table_handle) { - Ok(creator) => creator, - Err(_) => { - tracing::error!( - transaction_version = txn_version, - lookup_key = &table_handle, - "Failed to get collection v2 creator for table handle. You probably should backfill db." - ); - return Ok(None); - }, - } - }, - } - }, - }; - creator_address = standardize_address(&creator_address); - let collection_id_struct = - CollectionDataIdType::new(creator_address, collection_data.get_name().to_string()); - let collection_id = collection_id_struct.to_id(); - let collection_name = collection_data.get_name_trunc(); - let uri = collection_data.get_uri_trunc(); - - Ok(Some(( - Self { - transaction_version: txn_version, - write_set_change_index, - collection_id: collection_id.clone(), - creator_address: collection_id_struct.creator.clone(), - collection_name: collection_name.clone(), - description: collection_data.description.clone(), - uri: uri.clone(), - current_supply: collection_data.supply.clone(), - max_supply: Some(collection_data.maximum.clone()), - total_minted_v2: None, - mutable_uri: Some(collection_data.mutability_config.uri), - mutable_description: Some(collection_data.mutability_config.description), - table_handle_v1: Some(table_handle.clone()), - token_standard: TokenStandard::V1.to_string(), - transaction_timestamp: txn_timestamp, - }, - CurrentCollectionV2 { - collection_id, - creator_address: collection_id_struct.creator, - collection_name, - description: collection_data.description, - uri, - current_supply: collection_data.supply, - max_supply: Some(collection_data.maximum.clone()), - total_minted_v2: None, - mutable_uri: Some(collection_data.mutability_config.uri), - mutable_description: Some(collection_data.mutability_config.description), - table_handle_v1: Some(table_handle), - token_standard: TokenStandard::V1.to_string(), - last_transaction_version: txn_version, - last_transaction_timestamp: txn_timestamp, - }, - ))) - } else { - Ok(None) - } - } - - /// If collection data is not in resources of the same transaction, then try looking for it in the database. Since collection owner - /// cannot change, we can just look in the current_collection_datas table. - /// Retrying a few times since this collection could've been written in a separate thread. 
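The bounded-retry idea described above reduces to a few lines. The sketch below uses placeholder constants and a hypothetical lookup closure rather than the real Diesel query against `current_collections_v2`:

```rust
use std::{thread, time::Duration};

// Placeholder values; the deleted processor defines its own QUERY_RETRIES / QUERY_RETRY_DELAY_MS.
const QUERY_RETRIES: u32 = 5;
const QUERY_RETRY_DELAY_MS: u64 = 500;

fn with_retries<T>(mut lookup: impl FnMut() -> anyhow::Result<T>) -> anyhow::Result<T> {
    let mut last_err = None;
    for _ in 0..QUERY_RETRIES {
        match lookup() {
            Ok(value) => return Ok(value),
            Err(e) => {
                // The row may be written by another thread in the same batch; wait and retry.
                last_err = Some(e);
                thread::sleep(Duration::from_millis(QUERY_RETRY_DELAY_MS));
            },
        }
    }
    Err(last_err.unwrap_or_else(|| anyhow::anyhow!("lookup failed")))
}

fn main() {
    // Example: a lookup that only succeeds on the third attempt.
    let mut calls = 0;
    let creator = with_retries(|| {
        calls += 1;
        if calls < 3 {
            Err(anyhow::anyhow!("collection not written yet"))
        } else {
            Ok("0xcafe".to_string())
        }
    })
    .expect("should eventually succeed");
    println!("creator = {creator}");
}
```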
- fn get_collection_creator_for_v1( - conn: &mut PgPoolConnection, - table_handle: &str, - ) -> anyhow::Result { - let mut retried = 0; - while retried < QUERY_RETRIES { - retried += 1; - match Self::get_by_table_handle(conn, table_handle) { - Ok(creator) => return Ok(creator), - Err(_) => { - std::thread::sleep(std::time::Duration::from_millis(QUERY_RETRY_DELAY_MS)); - }, - } - } - Err(anyhow::anyhow!("Failed to get collection creator")) - } - - /// TODO: Change this to a KV store - fn get_by_table_handle( - conn: &mut PgPoolConnection, - table_handle: &str, - ) -> anyhow::Result { - let mut res: Vec> = sql_query( - "SELECT creator_address FROM current_collections_v2 WHERE table_handle_v1 = $1", - ) - .bind::(table_handle) - .get_results(conn)?; - Ok(res - .pop() - .context("collection result empty")? - .context("collection result null")? - .creator_address) - } -} diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/v2_token_activities.rs b/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/v2_token_activities.rs deleted file mode 100644 index 204d93f4b9d9d..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/v2_token_activities.rs +++ /dev/null @@ -1,307 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -// This is required because a diesel macro makes clippy sad -#![allow(clippy::extra_unused_lifetimes)] -#![allow(clippy::unused_unit)] - -use super::{ - token_utils::{TokenDataIdType, TokenEvent}, - v2_token_datas::TokenDataV2, - v2_token_utils::{TokenStandard, TokenV2AggregatedDataMapping, V2TokenEvent}, -}; -use crate::{ - models::coin_models::v2_fungible_asset_utils::FungibleAssetEvent, - schema::token_activities_v2, - utils::{database::PgPoolConnection, util::standardize_address}, -}; -use aptos_protos::transaction::v1::Event; -use bigdecimal::{BigDecimal, One, Zero}; -use field_count::FieldCount; -use serde::{Deserialize, Serialize}; - -#[derive(Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] -#[diesel(primary_key(transaction_version, event_index))] -#[diesel(table_name = token_activities_v2)] -pub struct TokenActivityV2 { - pub transaction_version: i64, - pub event_index: i64, - pub event_account_address: String, - pub token_data_id: String, - pub property_version_v1: BigDecimal, - pub type_: String, - pub from_address: Option, - pub to_address: Option, - pub token_amount: BigDecimal, - pub before_value: Option, - pub after_value: Option, - pub entry_function_id_str: Option, - pub token_standard: String, - pub is_fungible_v2: Option, - pub transaction_timestamp: chrono::NaiveDateTime, -} - -/// A simplified TokenActivity (excluded common fields) to reduce code duplication -struct TokenActivityHelperV1 { - pub token_data_id_struct: TokenDataIdType, - pub property_version: BigDecimal, - pub from_address: Option, - pub to_address: Option, - pub token_amount: BigDecimal, -} - -/// A simplified TokenActivity (excluded common fields) to reduce code duplication -struct TokenActivityHelperV2 { - pub from_address: Option, - pub to_address: Option, - pub token_amount: BigDecimal, - pub before_value: Option, - pub after_value: Option, -} - -impl TokenActivityV2 { - /// We'll go from 0x1::fungible_asset::withdraw/deposit events. - /// We're guaranteed to find a 0x1::fungible_asset::FungibleStore which has a pointer to the - /// fungible asset metadata which could be a token. 
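Deciding whether the fungible asset behind a store is actually a token is a two-step check: look in the aggregated metadata built from the same batch first, and only fall back to the database if it is missing. A hypothetical sketch, with an in-memory map standing in for the aggregated metadata and a stubbed DB lookup:

```rust
use std::collections::HashMap;

// Stand-ins: the real code checks TokenV2AggregatedDataMapping first and only then
// queries current_token_datas_v2 through Diesel.
fn is_token(
    address: &str,
    in_batch_metadata: &HashMap<String, ()>,
    db_lookup: impl Fn(&str) -> bool,
) -> bool {
    in_batch_metadata.contains_key(address) || db_lookup(address)
}

fn main() {
    let mut metadata = HashMap::new();
    metadata.insert("0xtoken_in_this_batch".to_string(), ());

    // Found in the same batch of write sets: no DB round trip needed.
    assert!(is_token("0xtoken_in_this_batch", &metadata, |_| false));
    // Not in the batch: fall back to the database lookup (stubbed to "not found" here).
    assert!(!is_token("0xsome_coin", &metadata, |_| false));
}
```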
We'll either find that token in token_v2_metadata - /// or by looking up the postgres table. - /// TODO: Create artificial events for mint and burn. There are no mint and burn events so we'll have to - /// add all the deposits/withdrawals and if it's positive/negative it's a mint/burn. - pub fn get_ft_v2_from_parsed_event( - event: &Event, - txn_version: i64, - txn_timestamp: chrono::NaiveDateTime, - event_index: i64, - entry_function_id_str: &Option, - token_v2_metadata: &TokenV2AggregatedDataMapping, - conn: &mut PgPoolConnection, - ) -> anyhow::Result> { - let event_type = event.type_str.clone(); - if let Some(fa_event) = - &FungibleAssetEvent::from_event(event_type.as_str(), &event.data, txn_version)? - { - let event_account_address = - standardize_address(&event.key.as_ref().unwrap().account_address); - - // The event account address will also help us find fungible store which tells us where to find - // the metadata - if let Some(metadata) = token_v2_metadata.get(&event_account_address) { - let object_core = &metadata.object.object_core; - let fungible_asset = metadata.fungible_asset_store.as_ref().unwrap(); - let maybe_token_data_id = fungible_asset.metadata.get_reference_address(); - // Now we try to see if the fungible asset is actually a token. If it's not token, return early - let is_token = if token_v2_metadata.get(&maybe_token_data_id).is_some() { - true - } else { - // Look up in the db - TokenDataV2::is_address_token(conn, &maybe_token_data_id) - }; - if !is_token { - return Ok(None); - } - - let token_activity_helper = match fa_event { - FungibleAssetEvent::WithdrawEvent(inner) => TokenActivityHelperV2 { - from_address: Some(object_core.get_owner_address()), - to_address: None, - token_amount: inner.amount.clone(), - before_value: None, - after_value: None, - }, - FungibleAssetEvent::DepositEvent(inner) => TokenActivityHelperV2 { - from_address: None, - to_address: Some(object_core.get_owner_address()), - token_amount: inner.amount.clone(), - before_value: None, - after_value: None, - }, - }; - - return Ok(Some(Self { - transaction_version: txn_version, - event_index, - event_account_address, - token_data_id: maybe_token_data_id.clone(), - property_version_v1: BigDecimal::zero(), - type_: event_type.to_string(), - from_address: token_activity_helper.from_address, - to_address: token_activity_helper.to_address, - token_amount: token_activity_helper.token_amount, - before_value: token_activity_helper.before_value, - after_value: token_activity_helper.after_value, - entry_function_id_str: entry_function_id_str.clone(), - token_standard: TokenStandard::V2.to_string(), - is_fungible_v2: Some(true), - transaction_timestamp: txn_timestamp, - })); - } - } - Ok(None) - } - - pub fn get_nft_v2_from_parsed_event( - event: &Event, - txn_version: i64, - txn_timestamp: chrono::NaiveDateTime, - event_index: i64, - entry_function_id_str: &Option, - token_v2_metadata: &TokenV2AggregatedDataMapping, - ) -> anyhow::Result> { - let event_type = event.type_str.clone(); - if let Some(token_event) = - &V2TokenEvent::from_event(&event_type, event.data.as_str(), txn_version)? - { - let event_account_address = - standardize_address(&event.key.as_ref().unwrap().account_address); - // burn and mint events are attached to the collection. 
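The activity builders in this file all follow the same dispatch-and-normalize pattern: match on the fully qualified event type, deserialize the payload, then fold it into a small helper carrying from/to/amount. A compressed, hypothetical sketch using only the v1 withdraw/deposit variants, the same flow that `get_v1_from_parsed_event` further down implements in full:

```rust
use serde::Deserialize;

// Only the field needed for the sketch; the real payloads also carry a token id.
#[derive(Debug, Deserialize)]
struct WithdrawEvent {
    amount: String,
}

#[derive(Debug, Deserialize)]
struct DepositEvent {
    amount: String,
}

// Normalized shape, analogous to the TokenActivityHelper structs above.
#[derive(Debug)]
struct ActivityHelper {
    from_address: Option<String>,
    to_address: Option<String>,
    amount: String,
}

fn parse_activity(
    type_str: &str,
    data: &str,
    event_account_address: &str,
) -> anyhow::Result<Option<ActivityHelper>> {
    let helper = match type_str {
        "0x3::token::WithdrawEvent" => {
            let ev: WithdrawEvent = serde_json::from_str(data)?;
            ActivityHelper {
                from_address: Some(event_account_address.to_string()),
                to_address: None,
                amount: ev.amount,
            }
        },
        "0x3::token::DepositEvent" => {
            let ev: DepositEvent = serde_json::from_str(data)?;
            ActivityHelper {
                from_address: None,
                to_address: Some(event_account_address.to_string()),
                amount: ev.amount,
            }
        },
        // Unknown event types are skipped rather than treated as errors.
        _ => return Ok(None),
    };
    Ok(Some(helper))
}

fn main() -> anyhow::Result<()> {
    let data = r#"{"amount":"1","id":{"token_data_id":{"creator":"0x3","collection":"c","name":"n"},"property_version":"0"}}"#;
    let helper = parse_activity("0x3::token::DepositEvent", data, "0xrecipient")?;
    println!("{helper:?}");
    Ok(())
}
```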
The rest should be attached to the token - let token_data_id = match token_event { - V2TokenEvent::MintEvent(inner) => inner.get_token_address(), - V2TokenEvent::BurnEvent(inner) => inner.get_token_address(), - V2TokenEvent::TransferEvent(inner) => inner.get_object_address(), - _ => event_account_address.clone(), - }; - - if let Some(metadata) = token_v2_metadata.get(&token_data_id) { - let object_core = &metadata.object.object_core; - let token_activity_helper = match token_event { - V2TokenEvent::MintEvent(_) => TokenActivityHelperV2 { - from_address: Some(object_core.get_owner_address()), - to_address: None, - token_amount: BigDecimal::one(), - before_value: None, - after_value: None, - }, - V2TokenEvent::TokenMutationEvent(inner) => TokenActivityHelperV2 { - from_address: Some(object_core.get_owner_address()), - to_address: None, - token_amount: BigDecimal::zero(), - before_value: Some(inner.old_value.clone()), - after_value: Some(inner.new_value.clone()), - }, - V2TokenEvent::BurnEvent(_) => TokenActivityHelperV2 { - from_address: Some(object_core.get_owner_address()), - to_address: None, - token_amount: BigDecimal::one(), - before_value: None, - after_value: None, - }, - V2TokenEvent::TransferEvent(inner) => TokenActivityHelperV2 { - from_address: Some(inner.get_from_address()), - to_address: Some(inner.get_to_address()), - token_amount: BigDecimal::one(), - before_value: None, - after_value: None, - }, - }; - return Ok(Some(Self { - transaction_version: txn_version, - event_index, - event_account_address, - token_data_id, - property_version_v1: BigDecimal::zero(), - type_: event_type, - from_address: token_activity_helper.from_address, - to_address: token_activity_helper.to_address, - token_amount: token_activity_helper.token_amount, - before_value: token_activity_helper.before_value, - after_value: token_activity_helper.after_value, - entry_function_id_str: entry_function_id_str.clone(), - token_standard: TokenStandard::V2.to_string(), - is_fungible_v2: Some(false), - transaction_timestamp: txn_timestamp, - })); - } - } - Ok(None) - } - - pub fn get_v1_from_parsed_event( - event: &Event, - txn_version: i64, - txn_timestamp: chrono::NaiveDateTime, - event_index: i64, - entry_function_id_str: &Option, - ) -> anyhow::Result> { - let event_type = event.type_str.clone(); - if let Some(token_event) = &TokenEvent::from_event(&event_type, &event.data, txn_version)? 
{ - let event_account_address = - standardize_address(&event.key.as_ref().unwrap().account_address); - let token_activity_helper = match token_event { - TokenEvent::MintTokenEvent(inner) => TokenActivityHelperV1 { - token_data_id_struct: inner.id.clone(), - property_version: BigDecimal::zero(), - from_address: Some(event_account_address.clone()), - to_address: None, - token_amount: inner.amount.clone(), - }, - TokenEvent::BurnTokenEvent(inner) => TokenActivityHelperV1 { - token_data_id_struct: inner.id.token_data_id.clone(), - property_version: inner.id.property_version.clone(), - from_address: Some(event_account_address.clone()), - to_address: None, - token_amount: inner.amount.clone(), - }, - TokenEvent::MutateTokenPropertyMapEvent(inner) => TokenActivityHelperV1 { - token_data_id_struct: inner.new_id.token_data_id.clone(), - property_version: inner.new_id.property_version.clone(), - from_address: Some(event_account_address.clone()), - to_address: None, - token_amount: BigDecimal::zero(), - }, - TokenEvent::WithdrawTokenEvent(inner) => TokenActivityHelperV1 { - token_data_id_struct: inner.id.token_data_id.clone(), - property_version: inner.id.property_version.clone(), - from_address: Some(event_account_address.clone()), - to_address: None, - token_amount: inner.amount.clone(), - }, - TokenEvent::DepositTokenEvent(inner) => TokenActivityHelperV1 { - token_data_id_struct: inner.id.token_data_id.clone(), - property_version: inner.id.property_version.clone(), - from_address: None, - to_address: Some(standardize_address(&event_account_address)), - token_amount: inner.amount.clone(), - }, - TokenEvent::OfferTokenEvent(inner) => TokenActivityHelperV1 { - token_data_id_struct: inner.token_id.token_data_id.clone(), - property_version: inner.token_id.property_version.clone(), - from_address: Some(event_account_address.clone()), - to_address: Some(inner.get_to_address()), - token_amount: inner.amount.clone(), - }, - TokenEvent::CancelTokenOfferEvent(inner) => TokenActivityHelperV1 { - token_data_id_struct: inner.token_id.token_data_id.clone(), - property_version: inner.token_id.property_version.clone(), - from_address: Some(event_account_address.clone()), - to_address: Some(inner.get_to_address()), - token_amount: inner.amount.clone(), - }, - TokenEvent::ClaimTokenEvent(inner) => TokenActivityHelperV1 { - token_data_id_struct: inner.token_id.token_data_id.clone(), - property_version: inner.token_id.property_version.clone(), - from_address: Some(event_account_address.clone()), - to_address: Some(inner.get_to_address()), - token_amount: inner.amount.clone(), - }, - }; - let token_data_id_struct = token_activity_helper.token_data_id_struct; - return Ok(Some(Self { - transaction_version: txn_version, - event_index, - event_account_address, - token_data_id: token_data_id_struct.to_id(), - property_version_v1: token_activity_helper.property_version, - type_: event_type, - from_address: token_activity_helper.from_address, - to_address: token_activity_helper.to_address, - token_amount: token_activity_helper.token_amount, - before_value: None, - after_value: None, - entry_function_id_str: entry_function_id_str.clone(), - token_standard: TokenStandard::V1.to_string(), - is_fungible_v2: None, - transaction_timestamp: txn_timestamp, - })); - } - Ok(None) - } -} diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/v2_token_datas.rs b/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/v2_token_datas.rs deleted file mode 100644 index cbef5e72ba18b..0000000000000 --- 
a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/v2_token_datas.rs +++ /dev/null @@ -1,264 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -// This is required because a diesel macro makes clippy sad -#![allow(clippy::extra_unused_lifetimes)] -#![allow(clippy::unused_unit)] - -use super::{ - collection_datas::{QUERY_RETRIES, QUERY_RETRY_DELAY_MS}, - token_utils::TokenWriteSet, - v2_token_utils::{TokenStandard, TokenV2, TokenV2AggregatedDataMapping}, -}; -use crate::{ - schema::{current_token_datas_v2, token_datas_v2}, - utils::{database::PgPoolConnection, util::standardize_address}, -}; -use anyhow::Context; -use aptos_protos::transaction::v1::{WriteResource, WriteTableItem}; -use bigdecimal::{BigDecimal, Zero}; -use diesel::{prelude::*, sql_query, sql_types::Text}; -use field_count::FieldCount; -use serde::{Deserialize, Serialize}; - -// PK of current_token_datas_v2, i.e. token_data_id -pub type CurrentTokenDataV2PK = String; - -#[derive(Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] -#[diesel(primary_key(transaction_version, write_set_change_index))] -#[diesel(table_name = token_datas_v2)] -pub struct TokenDataV2 { - pub transaction_version: i64, - pub write_set_change_index: i64, - pub token_data_id: String, - pub collection_id: String, - pub token_name: String, - pub maximum: Option, - pub supply: BigDecimal, - pub largest_property_version_v1: Option, - pub token_uri: String, - pub token_properties: serde_json::Value, - pub description: String, - pub token_standard: String, - pub is_fungible_v2: Option, - pub transaction_timestamp: chrono::NaiveDateTime, - pub decimals: i64, -} - -#[derive(Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] -#[diesel(primary_key(token_data_id))] -#[diesel(table_name = current_token_datas_v2)] -pub struct CurrentTokenDataV2 { - pub token_data_id: String, - pub collection_id: String, - pub token_name: String, - pub maximum: Option, - pub supply: BigDecimal, - pub largest_property_version_v1: Option, - pub token_uri: String, - pub token_properties: serde_json::Value, - pub description: String, - pub token_standard: String, - pub is_fungible_v2: Option, - pub last_transaction_version: i64, - pub last_transaction_timestamp: chrono::NaiveDateTime, - pub decimals: i64, -} - -#[derive(Debug, QueryableByName)] -pub struct TokenDataIdFromTable { - #[diesel(sql_type = Text)] - pub token_data_id: String, -} - -impl TokenDataV2 { - pub fn get_v2_from_write_resource( - write_resource: &WriteResource, - txn_version: i64, - write_set_change_index: i64, - txn_timestamp: chrono::NaiveDateTime, - token_v2_metadata: &TokenV2AggregatedDataMapping, - ) -> anyhow::Result> { - if let Some(inner) = &TokenV2::from_write_resource(write_resource, txn_version)? 
{ - let token_data_id = standardize_address(&write_resource.address.to_string()); - // Get maximum, supply, and is fungible from fungible asset if this is a fungible token - let (mut maximum, mut supply, mut decimals, mut is_fungible_v2) = - (None, BigDecimal::zero(), 0, Some(false)); - // Get token properties from 0x4::property_map::PropertyMap - let mut token_properties = serde_json::Value::Null; - if let Some(metadata) = token_v2_metadata.get(&token_data_id) { - let fungible_asset_metadata = metadata.fungible_asset_metadata.as_ref(); - let fungible_asset_supply = metadata.fungible_asset_supply.as_ref(); - if let Some(metadata) = fungible_asset_metadata { - if let Some(fa_supply) = fungible_asset_supply { - maximum = fa_supply.get_maximum(); - supply = fa_supply.current.clone(); - decimals = metadata.decimals as i64; - is_fungible_v2 = Some(true); - } - } - token_properties = metadata - .property_map - .as_ref() - .map(|m| m.inner.clone()) - .unwrap_or(token_properties); - } else { - // ObjectCore should not be missing, returning from entire function early - return Ok(None); - } - - let collection_id = inner.get_collection_address(); - let token_name = inner.get_name_trunc(); - let token_uri = inner.get_uri_trunc(); - - Ok(Some(( - Self { - transaction_version: txn_version, - write_set_change_index, - token_data_id: token_data_id.clone(), - collection_id: collection_id.clone(), - token_name: token_name.clone(), - maximum: maximum.clone(), - supply: supply.clone(), - largest_property_version_v1: None, - token_uri: token_uri.clone(), - token_properties: token_properties.clone(), - description: inner.description.clone(), - token_standard: TokenStandard::V2.to_string(), - is_fungible_v2, - transaction_timestamp: txn_timestamp, - decimals, - }, - CurrentTokenDataV2 { - token_data_id, - collection_id, - token_name, - maximum, - supply, - largest_property_version_v1: None, - token_uri, - token_properties, - description: inner.description.clone(), - token_standard: TokenStandard::V2.to_string(), - is_fungible_v2, - last_transaction_version: txn_version, - last_transaction_timestamp: txn_timestamp, - decimals, - }, - ))) - } else { - Ok(None) - } - } - - pub fn get_v1_from_write_table_item( - table_item: &WriteTableItem, - txn_version: i64, - write_set_change_index: i64, - txn_timestamp: chrono::NaiveDateTime, - ) -> anyhow::Result> { - let table_item_data = table_item.data.as_ref().unwrap(); - - let maybe_token_data = match TokenWriteSet::from_table_item_type( - table_item_data.value_type.as_str(), - &table_item_data.value, - txn_version, - )? { - Some(TokenWriteSet::TokenData(inner)) => Some(inner), - _ => None, - }; - - if let Some(token_data) = maybe_token_data { - let maybe_token_data_id = match TokenWriteSet::from_table_item_type( - table_item_data.key_type.as_str(), - &table_item_data.key, - txn_version, - )? 
{ - Some(TokenWriteSet::TokenDataId(inner)) => Some(inner), - _ => None, - }; - if let Some(token_data_id_struct) = maybe_token_data_id { - let collection_id = token_data_id_struct.get_collection_id(); - let token_data_id = token_data_id_struct.to_id(); - let token_name = token_data_id_struct.get_name_trunc(); - let token_uri = token_data.get_uri_trunc(); - - return Ok(Some(( - Self { - transaction_version: txn_version, - write_set_change_index, - token_data_id: token_data_id.clone(), - collection_id: collection_id.clone(), - token_name: token_name.clone(), - maximum: Some(token_data.maximum.clone()), - supply: token_data.supply.clone(), - largest_property_version_v1: Some( - token_data.largest_property_version.clone(), - ), - token_uri: token_uri.clone(), - token_properties: token_data.default_properties.clone(), - description: token_data.description.clone(), - token_standard: TokenStandard::V1.to_string(), - is_fungible_v2: None, - transaction_timestamp: txn_timestamp, - decimals: 0, - }, - CurrentTokenDataV2 { - token_data_id, - collection_id, - token_name, - maximum: Some(token_data.maximum), - supply: token_data.supply, - largest_property_version_v1: Some(token_data.largest_property_version), - token_uri, - token_properties: token_data.default_properties, - description: token_data.description, - token_standard: TokenStandard::V1.to_string(), - is_fungible_v2: None, - last_transaction_version: txn_version, - last_transaction_timestamp: txn_timestamp, - decimals: 0, - }, - ))); - } else { - tracing::warn!( - transaction_version = txn_version, - key_type = table_item_data.key_type, - key = table_item_data.key, - "Expecting token_data_id as key for value = token_data" - ); - } - } - Ok(None) - } - - /// Try to see if an address is a token. We'll try a few times in case there is a race condition, - /// and if we can't find after 3 times, we'll assume that it's not a token. - /// TODO: An improvement is that we'll make another query to see if address is a coin. - pub fn is_address_token(conn: &mut PgPoolConnection, address: &str) -> bool { - let mut retried = 0; - while retried < QUERY_RETRIES { - retried += 1; - match Self::get_by_token_data_id(conn, address) { - Ok(_) => return true, - Err(_) => { - std::thread::sleep(std::time::Duration::from_millis(QUERY_RETRY_DELAY_MS)); - }, - } - } - false - } - - /// TODO: Change this to a KV store - fn get_by_token_data_id(conn: &mut PgPoolConnection, address: &str) -> anyhow::Result { - let mut res: Vec> = - sql_query("SELECT token_data_id FROM current_token_datas_v2 WHERE token_data_id = $1") - .bind::(address) - .get_results(conn)?; - Ok(res - .pop() - .context("token data result empty")? - .context("token data result null")? 
- .token_data_id) - } -} diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/v2_token_metadata.rs b/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/v2_token_metadata.rs deleted file mode 100644 index d2a7b775de8c6..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/v2_token_metadata.rs +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -// This is required because a diesel macro makes clippy sad -#![allow(clippy::extra_unused_lifetimes)] -#![allow(clippy::unused_unit)] - -use super::{ - token_utils::{NAME_LENGTH, TOKEN_ADDR}, - v2_token_utils::{TokenV2AggregatedDataMapping, TOKEN_V2_ADDR}, -}; -use crate::{ - models::{coin_models::coin_utils::COIN_ADDR, default_models::move_resources::MoveResource}, - schema::current_token_v2_metadata, - utils::util::{standardize_address, truncate_str}, -}; -use anyhow::Context; -use aptos_protos::transaction::v1::WriteResource; -use field_count::FieldCount; -use serde::{Deserialize, Serialize}; -use serde_json::Value; - -// PK of current_objects, i.e. object_address, resource_type -pub type CurrentTokenV2MetadataPK = (String, String); - -#[derive(Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] -#[diesel(primary_key(object_address, resource_type))] -#[diesel(table_name = current_token_v2_metadata)] -pub struct CurrentTokenV2Metadata { - pub object_address: String, - pub resource_type: String, - pub data: Value, - pub state_key_hash: String, - pub last_transaction_version: i64, -} - -impl CurrentTokenV2Metadata { - /// Parsing unknown resources with 0x4::token::Token - pub fn from_write_resource( - write_resource: &WriteResource, - txn_version: i64, - token_v2_metadata: &TokenV2AggregatedDataMapping, - ) -> anyhow::Result> { - let object_address = standardize_address(&write_resource.address.to_string()); - if let Some(metadata) = token_v2_metadata.get(&object_address) { - // checking if token_v2 - if metadata.token.is_some() { - let move_tag = - MoveResource::convert_move_struct_tag(write_resource.r#type.as_ref().unwrap()); - let resource_type_addr = move_tag.get_address(); - if matches!( - resource_type_addr.as_str(), - COIN_ADDR | TOKEN_ADDR | TOKEN_V2_ADDR - ) { - return Ok(None); - } - - let resource = MoveResource::from_write_resource(write_resource, 0, txn_version, 0); - - let state_key_hash = metadata.object.get_state_key_hash(); - if state_key_hash != resource.state_key_hash { - return Ok(None); - } - - let resource_type = truncate_str(&resource.type_, NAME_LENGTH); - return Ok(Some(CurrentTokenV2Metadata { - object_address, - resource_type, - data: resource - .data - .context("data must be present in write resource")?, - state_key_hash: resource.state_key_hash, - last_transaction_version: txn_version, - })); - } - } - Ok(None) - } -} diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/v2_token_ownerships.rs b/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/v2_token_ownerships.rs deleted file mode 100644 index 04e906969e8d1..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/v2_token_ownerships.rs +++ /dev/null @@ -1,630 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -// This is required because a diesel macro makes clippy sad -#![allow(clippy::extra_unused_lifetimes)] -#![allow(clippy::unused_unit)] - -use super::{ - collection_datas::{QUERY_RETRIES, QUERY_RETRY_DELAY_MS}, - 
token_utils::TokenWriteSet, - tokens::TableHandleToOwner, - v2_token_datas::TokenDataV2, - v2_token_utils::{ - ObjectWithMetadata, TokenStandard, TokenV2AggregatedDataMapping, TokenV2Burned, - }, -}; -use crate::{ - models::{ - coin_models::v2_fungible_asset_utils::V2FungibleAssetResource, - default_models::move_resources::MoveResource, - }, - schema::{current_token_ownerships_v2, token_ownerships_v2}, - utils::{ - database::PgPoolConnection, - util::{ensure_not_negative, standardize_address}, - }, -}; -use anyhow::Context; -use aptos_protos::transaction::v1::{ - DeleteResource, DeleteTableItem, WriteResource, WriteTableItem, -}; -use bigdecimal::{BigDecimal, One, Zero}; -use diesel::{prelude::*, ExpressionMethods}; -use field_count::FieldCount; -use serde::{Deserialize, Serialize}; -use std::collections::HashMap; - -// PK of current_token_ownerships_v2, i.e. token_data_id, property_version_v1, owner_address, storage_id -pub type CurrentTokenOwnershipV2PK = (String, BigDecimal, String, String); - -#[derive(Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] -#[diesel(primary_key(transaction_version, write_set_change_index))] -#[diesel(table_name = token_ownerships_v2)] -pub struct TokenOwnershipV2 { - pub transaction_version: i64, - pub write_set_change_index: i64, - pub token_data_id: String, - pub property_version_v1: BigDecimal, - pub owner_address: Option, - pub storage_id: String, - pub amount: BigDecimal, - pub table_type_v1: Option, - pub token_properties_mutated_v1: Option, - pub is_soulbound_v2: Option, - pub token_standard: String, - pub is_fungible_v2: Option, - pub transaction_timestamp: chrono::NaiveDateTime, - pub non_transferrable_by_owner: Option, -} - -#[derive(Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] -#[diesel(primary_key(token_data_id, property_version_v1, owner_address, storage_id))] -#[diesel(table_name = current_token_ownerships_v2)] -pub struct CurrentTokenOwnershipV2 { - pub token_data_id: String, - pub property_version_v1: BigDecimal, - pub owner_address: String, - pub storage_id: String, - pub amount: BigDecimal, - pub table_type_v1: Option, - pub token_properties_mutated_v1: Option, - pub is_soulbound_v2: Option, - pub token_standard: String, - pub is_fungible_v2: Option, - pub last_transaction_version: i64, - pub last_transaction_timestamp: chrono::NaiveDateTime, - pub non_transferrable_by_owner: Option, -} - -// Facilitate tracking when a token is burned -#[derive(Clone, Debug)] -pub struct NFTOwnershipV2 { - pub token_data_id: String, - pub owner_address: String, - pub is_soulbound: Option, -} - -/// Need a separate struct for queryable because we don't want to define the inserted_at column (letting DB fill) -#[derive(Debug, Identifiable, Queryable)] -#[diesel(primary_key(token_data_id, property_version_v1, owner_address, storage_id))] -#[diesel(table_name = current_token_ownerships_v2)] -pub struct CurrentTokenOwnershipV2Query { - pub token_data_id: String, - pub property_version_v1: BigDecimal, - pub owner_address: String, - pub storage_id: String, - pub amount: BigDecimal, - pub table_type_v1: Option, - pub token_properties_mutated_v1: Option, - pub is_soulbound_v2: Option, - pub token_standard: String, - pub is_fungible_v2: Option, - pub last_transaction_version: i64, - pub last_transaction_timestamp: chrono::NaiveDateTime, - pub inserted_at: chrono::NaiveDateTime, - pub non_transferrable_by_owner: Option, -} - -impl TokenOwnershipV2 { - /// For nfts it's the same resources that we parse tokendatas from so we 
leverage the work done in there to get ownership data - pub fn get_nft_v2_from_token_data( - token_data: &TokenDataV2, - token_v2_metadata: &TokenV2AggregatedDataMapping, - ) -> anyhow::Result< - Option<( - Self, - CurrentTokenOwnershipV2, - Option, // If token was transferred, the previous ownership record - Option, // If token was transferred, the previous ownership record - )>, - > { - // We should be indexing v1 token or v2 fungible token here - if token_data.is_fungible_v2 != Some(false) { - return Ok(None); - } - let metadata = token_v2_metadata - .get(&token_data.token_data_id) - .context("If token data exists objectcore must exist")?; - let object_core = metadata.object.object_core.clone(); - let token_data_id = token_data.token_data_id.clone(); - let owner_address = object_core.get_owner_address(); - let storage_id = token_data_id.clone(); - let is_soulbound = !object_core.allow_ungated_transfer; - - let ownership = Self { - transaction_version: token_data.transaction_version, - write_set_change_index: token_data.write_set_change_index, - token_data_id: token_data_id.clone(), - property_version_v1: BigDecimal::zero(), - owner_address: Some(owner_address.clone()), - storage_id: storage_id.clone(), - amount: BigDecimal::one(), - table_type_v1: None, - token_properties_mutated_v1: None, - is_soulbound_v2: Some(is_soulbound), - token_standard: TokenStandard::V2.to_string(), - is_fungible_v2: token_data.is_fungible_v2, - transaction_timestamp: token_data.transaction_timestamp, - non_transferrable_by_owner: Some(is_soulbound), - }; - let current_ownership = CurrentTokenOwnershipV2 { - token_data_id: token_data_id.clone(), - property_version_v1: BigDecimal::zero(), - owner_address, - storage_id: storage_id.clone(), - amount: BigDecimal::one(), - table_type_v1: None, - token_properties_mutated_v1: None, - is_soulbound_v2: Some(is_soulbound), - token_standard: TokenStandard::V2.to_string(), - is_fungible_v2: token_data.is_fungible_v2, - last_transaction_version: token_data.transaction_version, - last_transaction_timestamp: token_data.transaction_timestamp, - non_transferrable_by_owner: Some(is_soulbound), - }; - - // check if token was transferred - if let Some((event_index, transfer_event)) = &metadata.transfer_event { - Ok(Some(( - ownership, - current_ownership, - Some(Self { - transaction_version: token_data.transaction_version, - // set to negative of event index to avoid collison with write set index - write_set_change_index: -1 * event_index, - token_data_id: token_data_id.clone(), - property_version_v1: BigDecimal::zero(), - // previous owner - owner_address: Some(transfer_event.get_from_address()), - storage_id: storage_id.clone(), - // soft delete - amount: BigDecimal::zero(), - table_type_v1: None, - token_properties_mutated_v1: None, - is_soulbound_v2: Some(is_soulbound), - token_standard: TokenStandard::V2.to_string(), - is_fungible_v2: token_data.is_fungible_v2, - transaction_timestamp: token_data.transaction_timestamp, - non_transferrable_by_owner: Some(is_soulbound), - }), - Some(CurrentTokenOwnershipV2 { - token_data_id, - property_version_v1: BigDecimal::zero(), - // previous owner - owner_address: transfer_event.get_from_address(), - storage_id, - // soft delete - amount: BigDecimal::zero(), - table_type_v1: None, - token_properties_mutated_v1: None, - is_soulbound_v2: Some(is_soulbound), - token_standard: TokenStandard::V2.to_string(), - is_fungible_v2: token_data.is_fungible_v2, - last_transaction_version: token_data.transaction_version, - last_transaction_timestamp: 
token_data.transaction_timestamp, - non_transferrable_by_owner: Some(is_soulbound), - }), - ))) - } else { - Ok(Some((ownership, current_ownership, None, None))) - } - } - - /// This handles the case where token is burned but objectCore is still there - pub fn get_burned_nft_v2_from_write_resource( - write_resource: &WriteResource, - txn_version: i64, - write_set_change_index: i64, - txn_timestamp: chrono::NaiveDateTime, - tokens_burned: &TokenV2Burned, - ) -> anyhow::Result> { - if let Some(token_address) = - tokens_burned.get(&standardize_address(&write_resource.address.to_string())) - { - if let Some(object) = - &ObjectWithMetadata::from_write_resource(write_resource, txn_version)? - { - let object_core = &object.object_core; - let token_data_id = token_address.clone(); - let owner_address = object_core.get_owner_address(); - let storage_id = token_data_id.clone(); - let is_soulbound = !object_core.allow_ungated_transfer; - - return Ok(Some(( - Self { - transaction_version: txn_version, - write_set_change_index, - token_data_id: token_data_id.clone(), - property_version_v1: BigDecimal::zero(), - owner_address: Some(owner_address.clone()), - storage_id: storage_id.clone(), - amount: BigDecimal::zero(), - table_type_v1: None, - token_properties_mutated_v1: None, - is_soulbound_v2: Some(is_soulbound), - token_standard: TokenStandard::V2.to_string(), - is_fungible_v2: Some(false), - transaction_timestamp: txn_timestamp, - non_transferrable_by_owner: Some(is_soulbound), - }, - CurrentTokenOwnershipV2 { - token_data_id, - property_version_v1: BigDecimal::zero(), - owner_address, - storage_id, - amount: BigDecimal::zero(), - table_type_v1: None, - token_properties_mutated_v1: None, - is_soulbound_v2: Some(is_soulbound), - token_standard: TokenStandard::V2.to_string(), - is_fungible_v2: Some(false), - last_transaction_version: txn_version, - last_transaction_timestamp: txn_timestamp, - non_transferrable_by_owner: Some(is_soulbound), - }, - ))); - } - } - Ok(None) - } - - /// This handles the case where token is burned and objectCore is deleted - pub fn get_burned_nft_v2_from_delete_resource( - write_resource: &DeleteResource, - txn_version: i64, - write_set_change_index: i64, - txn_timestamp: chrono::NaiveDateTime, - prior_nft_ownership: &HashMap, - tokens_burned: &TokenV2Burned, - conn: &mut PgPoolConnection, - ) -> anyhow::Result> { - if let Some(token_address) = - tokens_burned.get(&standardize_address(&write_resource.address.to_string())) - { - let latest_nft_ownership: NFTOwnershipV2 = match prior_nft_ownership.get(token_address) - { - Some(inner) => inner.clone(), - None => { - match CurrentTokenOwnershipV2Query::get_nft_by_token_data_id( - conn, - token_address, - ) { - Ok(nft) => nft, - Err(_) => { - tracing::error!( - transaction_version = txn_version, - lookup_key = &token_address, - "Failed to find NFT for burned token. You probably should backfill db." 
- ); - return Ok(None); - }, - } - }, - }; - - let token_data_id = token_address.clone(); - let owner_address = latest_nft_ownership.owner_address.clone(); - let storage_id = token_data_id.clone(); - let is_soulbound = latest_nft_ownership.is_soulbound; - - return Ok(Some(( - Self { - transaction_version: txn_version, - write_set_change_index, - token_data_id: token_data_id.clone(), - property_version_v1: BigDecimal::zero(), - owner_address: Some(owner_address.clone()), - storage_id: storage_id.clone(), - amount: BigDecimal::zero(), - table_type_v1: None, - token_properties_mutated_v1: None, - is_soulbound_v2: is_soulbound, - token_standard: TokenStandard::V2.to_string(), - is_fungible_v2: Some(false), - transaction_timestamp: txn_timestamp, - non_transferrable_by_owner: is_soulbound, - }, - CurrentTokenOwnershipV2 { - token_data_id, - property_version_v1: BigDecimal::zero(), - owner_address, - storage_id, - amount: BigDecimal::zero(), - table_type_v1: None, - token_properties_mutated_v1: None, - is_soulbound_v2: is_soulbound, - token_standard: TokenStandard::V2.to_string(), - is_fungible_v2: Some(false), - last_transaction_version: txn_version, - last_transaction_timestamp: txn_timestamp, - non_transferrable_by_owner: is_soulbound, - }, - ))); - } - Ok(None) - } - - // Getting this from 0x1::fungible_asset::FungibleStore - pub fn get_ft_v2_from_write_resource( - write_resource: &WriteResource, - txn_version: i64, - write_set_change_index: i64, - txn_timestamp: chrono::NaiveDateTime, - token_v2_metadata: &TokenV2AggregatedDataMapping, - ) -> anyhow::Result> { - let type_str = MoveResource::get_outer_type_from_resource(write_resource); - if !V2FungibleAssetResource::is_resource_supported(type_str.as_str()) { - return Ok(None); - } - let resource = MoveResource::from_write_resource( - write_resource, - 0, // Placeholder, this isn't used anyway - txn_version, - 0, // Placeholder, this isn't used anyway - ); - - if let V2FungibleAssetResource::FungibleAssetStore(inner) = - V2FungibleAssetResource::from_resource( - &type_str, - resource.data.as_ref().unwrap(), - txn_version, - )? 
- { - if let Some(metadata) = token_v2_metadata.get(&resource.address) { - let object_core = &metadata.object.object_core; - let token_data_id = inner.metadata.get_reference_address(); - let storage_id = token_data_id.clone(); - let is_soulbound = inner.frozen; - let amount = inner.balance; - let owner_address = object_core.get_owner_address(); - - return Ok(Some(( - Self { - transaction_version: txn_version, - write_set_change_index, - token_data_id: token_data_id.clone(), - property_version_v1: BigDecimal::zero(), - owner_address: Some(owner_address.clone()), - storage_id: storage_id.clone(), - amount: amount.clone(), - table_type_v1: None, - token_properties_mutated_v1: None, - is_soulbound_v2: Some(is_soulbound), - token_standard: TokenStandard::V2.to_string(), - is_fungible_v2: Some(true), - transaction_timestamp: txn_timestamp, - non_transferrable_by_owner: Some(is_soulbound), - }, - CurrentTokenOwnershipV2 { - token_data_id, - property_version_v1: BigDecimal::zero(), - owner_address, - storage_id, - amount, - table_type_v1: None, - token_properties_mutated_v1: None, - is_soulbound_v2: Some(is_soulbound), - token_standard: TokenStandard::V2.to_string(), - is_fungible_v2: Some(true), - last_transaction_version: txn_version, - last_transaction_timestamp: txn_timestamp, - non_transferrable_by_owner: Some(is_soulbound), - }, - ))); - } - } - Ok(None) - } - - /// We want to track tokens in any offer/claims and tokenstore - pub fn get_v1_from_write_table_item( - table_item: &WriteTableItem, - txn_version: i64, - write_set_change_index: i64, - txn_timestamp: chrono::NaiveDateTime, - table_handle_to_owner: &TableHandleToOwner, - ) -> anyhow::Result)>> { - let table_item_data = table_item.data.as_ref().unwrap(); - - let maybe_token = match TokenWriteSet::from_table_item_type( - table_item_data.value_type.as_str(), - &table_item_data.value, - txn_version, - )? { - Some(TokenWriteSet::Token(inner)) => Some(inner), - _ => None, - }; - - if let Some(token) = maybe_token { - let table_handle = standardize_address(&table_item.handle.to_string()); - let amount = ensure_not_negative(token.amount); - let token_id_struct = token.id; - let token_data_id_struct = token_id_struct.token_data_id; - let token_data_id = token_data_id_struct.to_id(); - - let maybe_table_metadata = table_handle_to_owner.get(&table_handle); - let (curr_token_ownership, owner_address, table_type) = match maybe_table_metadata { - Some(tm) => { - if tm.table_type != "0x3::token::TokenStore" { - return Ok(None); - } - let owner_address = tm.get_owner_address(); - ( - Some(CurrentTokenOwnershipV2 { - token_data_id: token_data_id.clone(), - property_version_v1: token_id_struct.property_version.clone(), - owner_address: owner_address.clone(), - storage_id: table_handle.clone(), - amount: amount.clone(), - table_type_v1: Some(tm.table_type.clone()), - token_properties_mutated_v1: Some(token.token_properties.clone()), - is_soulbound_v2: None, - token_standard: TokenStandard::V1.to_string(), - is_fungible_v2: None, - last_transaction_version: txn_version, - last_transaction_timestamp: txn_timestamp, - non_transferrable_by_owner: None, - }), - Some(owner_address), - Some(tm.table_type.clone()), - ) - }, - None => { - tracing::warn!( - transaction_version = txn_version, - table_handle = table_handle, - "Missing table handle metadata for TokenStore. 
{:?}", - table_handle_to_owner - ); - (None, None, None) - }, - }; - - Ok(Some(( - Self { - transaction_version: txn_version, - write_set_change_index, - token_data_id, - property_version_v1: token_id_struct.property_version, - owner_address, - storage_id: table_handle, - amount, - table_type_v1: table_type, - token_properties_mutated_v1: Some(token.token_properties), - is_soulbound_v2: None, - token_standard: TokenStandard::V1.to_string(), - is_fungible_v2: None, - transaction_timestamp: txn_timestamp, - non_transferrable_by_owner: None, - }, - curr_token_ownership, - ))) - } else { - Ok(None) - } - } - - /// We want to track tokens in any offer/claims and tokenstore - pub fn get_v1_from_delete_table_item( - table_item: &DeleteTableItem, - txn_version: i64, - write_set_change_index: i64, - txn_timestamp: chrono::NaiveDateTime, - table_handle_to_owner: &TableHandleToOwner, - ) -> anyhow::Result)>> { - let table_item_data = table_item.data.as_ref().unwrap(); - - let maybe_token_id = match TokenWriteSet::from_table_item_type( - table_item_data.key_type.as_str(), - &table_item_data.key, - txn_version, - )? { - Some(TokenWriteSet::TokenId(inner)) => Some(inner), - _ => None, - }; - - if let Some(token_id_struct) = maybe_token_id { - let table_handle = standardize_address(&table_item.handle.to_string()); - let token_data_id_struct = token_id_struct.token_data_id; - let token_data_id = token_data_id_struct.to_id(); - - let maybe_table_metadata = table_handle_to_owner.get(&table_handle); - let (curr_token_ownership, owner_address, table_type) = match maybe_table_metadata { - Some(tm) => { - if tm.table_type != "0x3::token::TokenStore" { - return Ok(None); - } - let owner_address = tm.get_owner_address(); - ( - Some(CurrentTokenOwnershipV2 { - token_data_id: token_data_id.clone(), - property_version_v1: token_id_struct.property_version.clone(), - owner_address: owner_address.clone(), - storage_id: table_handle.clone(), - amount: BigDecimal::zero(), - table_type_v1: Some(tm.table_type.clone()), - token_properties_mutated_v1: None, - is_soulbound_v2: None, - token_standard: TokenStandard::V1.to_string(), - is_fungible_v2: None, - last_transaction_version: txn_version, - last_transaction_timestamp: txn_timestamp, - non_transferrable_by_owner: None, - }), - Some(owner_address), - Some(tm.table_type.clone()), - ) - }, - None => { - tracing::warn!( - transaction_version = txn_version, - table_handle = table_handle, - "Missing table handle metadata for TokenStore. 
{:?}", - table_handle_to_owner - ); - (None, None, None) - }, - }; - - Ok(Some(( - Self { - transaction_version: txn_version, - write_set_change_index, - token_data_id, - property_version_v1: token_id_struct.property_version, - owner_address, - storage_id: table_handle, - amount: BigDecimal::zero(), - table_type_v1: table_type, - token_properties_mutated_v1: None, - is_soulbound_v2: None, - token_standard: TokenStandard::V1.to_string(), - is_fungible_v2: None, - transaction_timestamp: txn_timestamp, - non_transferrable_by_owner: None, - }, - curr_token_ownership, - ))) - } else { - Ok(None) - } - } -} - -impl CurrentTokenOwnershipV2Query { - pub fn get_nft_by_token_data_id( - conn: &mut PgPoolConnection, - token_data_id: &str, - ) -> anyhow::Result { - let mut retried = 0; - while retried < QUERY_RETRIES { - retried += 1; - match Self::get_nft_by_token_data_id_impl(conn, token_data_id) { - Ok(inner) => { - return Ok(NFTOwnershipV2 { - token_data_id: inner.token_data_id.clone(), - owner_address: inner.owner_address.clone(), - is_soulbound: inner.is_soulbound_v2, - }) - }, - Err(_) => { - std::thread::sleep(std::time::Duration::from_millis(QUERY_RETRY_DELAY_MS)); - }, - } - } - Err(anyhow::anyhow!( - "Failed to get nft by token data id: {}", - token_data_id - )) - } - - fn get_nft_by_token_data_id_impl( - conn: &mut PgPoolConnection, - token_data_id: &str, - ) -> diesel::QueryResult { - current_token_ownerships_v2::table - .filter(current_token_ownerships_v2::token_data_id.eq(token_data_id)) - .first::(conn) - } -} diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/v2_token_utils.rs b/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/v2_token_utils.rs deleted file mode 100644 index f0aae5198b349..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/src/models/token_models/v2_token_utils.rs +++ /dev/null @@ -1,513 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -// This is required because a diesel macro makes clippy sad -#![allow(clippy::extra_unused_lifetimes)] - -use super::token_utils::{NAME_LENGTH, URI_LENGTH}; -use crate::{ - models::{ - coin_models::{ - coin_utils::COIN_ADDR, - v2_fungible_asset_utils::{ - FungibleAssetMetadata, FungibleAssetStore, FungibleAssetSupply, - }, - }, - default_models::{move_resources::MoveResource, v2_objects::CurrentObjectPK}, - }, - utils::util::{ - deserialize_from_string, deserialize_token_object_property_map_from_bcs_hexstring, - standardize_address, truncate_str, - }, -}; -use anyhow::{Context, Result}; -use aptos_protos::transaction::v1::{Event, WriteResource}; -use bigdecimal::BigDecimal; -use serde::{Deserialize, Serialize}; -use std::{ - collections::{HashMap, HashSet}, - fmt::{self, Formatter}, -}; - -pub const TOKEN_V2_ADDR: &str = - "0x0000000000000000000000000000000000000000000000000000000000000004"; - -/// Tracks all token related data in a hashmap for quick access (keyed on address of the object core) -pub type TokenV2AggregatedDataMapping = HashMap; -/// Tracks all token related data in a hashmap for quick access (keyed on address of the object core) -pub type TokenV2Burned = HashSet; -/// Index of the event so that we can write its inverse to the db as primary key (to avoid collisiona) -pub type EventIndex = i64; - -/// This contains both metadata for fungible assets and fungible tokens -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct TokenV2AggregatedData { - pub aptos_collection: Option, - pub fixed_supply: Option, - pub 
fungible_asset_metadata: Option, - pub fungible_asset_supply: Option, - pub fungible_asset_store: Option, - pub object: ObjectWithMetadata, - pub property_map: Option, - pub token: Option, - pub transfer_event: Option<(EventIndex, TransferEvent)>, - pub unlimited_supply: Option, -} - -/// Tracks which token standard a token / collection is built upon -#[derive(Serialize)] -pub enum TokenStandard { - V1, - V2, -} - -impl fmt::Display for TokenStandard { - fn fmt(&self, f: &mut Formatter) -> std::fmt::Result { - let res = match self { - TokenStandard::V1 => "v1", - TokenStandard::V2 => "v2", - }; - write!(f, "{}", res) - } -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct ObjectCore { - pub allow_ungated_transfer: bool, - #[serde(deserialize_with = "deserialize_from_string")] - pub guid_creation_num: BigDecimal, - owner: String, -} - -impl ObjectCore { - pub fn get_owner_address(&self) -> String { - standardize_address(&self.owner) - } -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct ObjectWithMetadata { - pub object_core: ObjectCore, - state_key_hash: String, -} - -impl ObjectWithMetadata { - pub fn from_write_resource( - write_resource: &WriteResource, - txn_version: i64, - ) -> anyhow::Result> { - let type_str = MoveResource::get_outer_type_from_resource(write_resource); - if !V2TokenResource::is_resource_supported(type_str.as_str()) { - return Ok(None); - } - if let V2TokenResource::ObjectCore(inner) = V2TokenResource::from_resource( - &type_str, - &serde_json::from_str(write_resource.data.as_str()).unwrap(), - txn_version, - )? { - Ok(Some(Self { - object_core: inner, - state_key_hash: standardize_address( - hex::encode(write_resource.state_key_hash.as_slice()).as_str(), - ), - })) - } else { - Ok(None) - } - } - - pub fn get_state_key_hash(&self) -> String { - standardize_address(&self.state_key_hash) - } -} - -/* Section on Collection / Token */ -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct Collection { - creator: String, - pub description: String, - // These are set to private because we should never get name or uri directly - name: String, - uri: String, -} - -impl Collection { - pub fn get_creator_address(&self) -> String { - standardize_address(&self.creator) - } - - pub fn get_uri_trunc(&self) -> String { - truncate_str(&self.uri, URI_LENGTH) - } - - pub fn get_name_trunc(&self) -> String { - truncate_str(&self.name, NAME_LENGTH) - } -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct AptosCollection { - pub mutable_description: bool, - pub mutable_uri: bool, -} - -impl AptosCollection { - pub fn from_write_resource( - write_resource: &WriteResource, - txn_version: i64, - ) -> anyhow::Result> { - let type_str = MoveResource::get_outer_type_from_resource(write_resource); - if !V2TokenResource::is_resource_supported(type_str.as_str()) { - return Ok(None); - } - let resource = MoveResource::from_write_resource( - write_resource, - 0, // Placeholder, this isn't used anyway - txn_version, - 0, // Placeholder, this isn't used anyway - ); - - if let V2TokenResource::AptosCollection(inner) = - V2TokenResource::from_resource(&type_str, resource.data.as_ref().unwrap(), txn_version)? 
- { - Ok(Some(inner)) - } else { - Ok(None) - } - } -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct TokenV2 { - collection: ResourceReference, - pub description: String, - // These are set to private because we should never get name or uri directly - name: String, - uri: String, -} - -impl TokenV2 { - pub fn get_collection_address(&self) -> String { - self.collection.get_reference_address() - } - - pub fn get_uri_trunc(&self) -> String { - truncate_str(&self.uri, URI_LENGTH) - } - - pub fn get_name_trunc(&self) -> String { - truncate_str(&self.name, NAME_LENGTH) - } - - pub fn from_write_resource( - write_resource: &WriteResource, - txn_version: i64, - ) -> anyhow::Result> { - let type_str = MoveResource::get_outer_type_from_resource(write_resource); - if !V2TokenResource::is_resource_supported(type_str.as_str()) { - return Ok(None); - } - let resource = MoveResource::from_write_resource( - write_resource, - 0, // Placeholder, this isn't used anyway - txn_version, - 0, // Placeholder, this isn't used anyway - ); - - if let V2TokenResource::TokenV2(inner) = - V2TokenResource::from_resource(&type_str, resource.data.as_ref().unwrap(), txn_version)? - { - Ok(Some(inner)) - } else { - Ok(None) - } - } -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct ResourceReference { - inner: String, -} - -impl ResourceReference { - pub fn get_reference_address(&self) -> String { - standardize_address(&self.inner) - } -} - -/* Section on Supply */ -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct FixedSupply { - #[serde(deserialize_with = "deserialize_from_string")] - pub current_supply: BigDecimal, - #[serde(deserialize_with = "deserialize_from_string")] - pub max_supply: BigDecimal, - #[serde(deserialize_with = "deserialize_from_string")] - pub total_minted: BigDecimal, -} - -impl FixedSupply { - pub fn from_write_resource( - write_resource: &WriteResource, - txn_version: i64, - ) -> anyhow::Result> { - let type_str = MoveResource::get_outer_type_from_resource(write_resource); - if !V2TokenResource::is_resource_supported(type_str.as_str()) { - return Ok(None); - } - let resource = MoveResource::from_write_resource( - write_resource, - 0, // Placeholder, this isn't used anyway - txn_version, - 0, // Placeholder, this isn't used anyway - ); - - if let V2TokenResource::FixedSupply(inner) = - V2TokenResource::from_resource(&type_str, resource.data.as_ref().unwrap(), txn_version)? - { - Ok(Some(inner)) - } else { - Ok(None) - } - } -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct UnlimitedSupply { - #[serde(deserialize_with = "deserialize_from_string")] - pub current_supply: BigDecimal, - #[serde(deserialize_with = "deserialize_from_string")] - pub total_minted: BigDecimal, -} - -impl UnlimitedSupply { - pub fn from_write_resource( - write_resource: &WriteResource, - txn_version: i64, - ) -> anyhow::Result> { - let type_str = MoveResource::get_outer_type_from_resource(write_resource); - if !V2TokenResource::is_resource_supported(type_str.as_str()) { - return Ok(None); - } - let resource = MoveResource::from_write_resource( - write_resource, - 0, // Placeholder, this isn't used anyway - txn_version, - 0, // Placeholder, this isn't used anyway - ); - - if let V2TokenResource::UnlimitedSupply(inner) = - V2TokenResource::from_resource(&type_str, resource.data.as_ref().unwrap(), txn_version)? 
- { - Ok(Some(inner)) - } else { - Ok(None) - } - } -} - -/* Section on Events */ -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct MintEvent { - #[serde(deserialize_with = "deserialize_from_string")] - pub index: BigDecimal, - token: String, -} - -impl MintEvent { - pub fn get_token_address(&self) -> String { - standardize_address(&self.token) - } -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct TokenMutationEvent { - pub mutated_field_name: String, - pub old_value: String, - pub new_value: String, -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct BurnEvent { - #[serde(deserialize_with = "deserialize_from_string")] - pub index: BigDecimal, - token: String, -} - -impl BurnEvent { - pub fn from_event(event: &Event, txn_version: i64) -> anyhow::Result> { - if let Some(V2TokenEvent::BurnEvent(inner)) = - V2TokenEvent::from_event(event.type_str.as_str(), &event.data, txn_version).unwrap() - { - Ok(Some(inner)) - } else { - Ok(None) - } - } - - pub fn get_token_address(&self) -> String { - standardize_address(&self.token) - } -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct TransferEvent { - from: String, - to: String, - object: String, -} - -impl TransferEvent { - pub fn from_event(event: &Event, txn_version: i64) -> anyhow::Result> { - if let Some(V2TokenEvent::TransferEvent(inner)) = - V2TokenEvent::from_event(event.type_str.as_str(), &event.data, txn_version).unwrap() - { - Ok(Some(inner)) - } else { - Ok(None) - } - } - - pub fn get_from_address(&self) -> String { - standardize_address(&self.from) - } - - pub fn get_to_address(&self) -> String { - standardize_address(&self.to) - } - - pub fn get_object_address(&self) -> String { - standardize_address(&self.object) - } -} - -/* Section on Property Maps */ -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct PropertyMapModel { - #[serde(deserialize_with = "deserialize_token_object_property_map_from_bcs_hexstring")] - pub inner: serde_json::Value, -} - -impl PropertyMapModel { - pub fn from_write_resource( - write_resource: &WriteResource, - txn_version: i64, - ) -> anyhow::Result> { - let type_str = MoveResource::get_outer_type_from_resource(write_resource); - if !V2TokenResource::is_resource_supported(type_str.as_str()) { - return Ok(None); - } - let resource = MoveResource::from_write_resource( - write_resource, - 0, // Placeholder, this isn't used anyway - txn_version, - 0, // Placeholder, this isn't used anyway - ); - - if let V2TokenResource::PropertyMapModel(inner) = - V2TokenResource::from_resource(&type_str, resource.data.as_ref().unwrap(), txn_version)? 
- { - Ok(Some(inner)) - } else { - Ok(None) - } - } -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub enum V2TokenResource { - AptosCollection(AptosCollection), - Collection(Collection), - FixedSupply(FixedSupply), - ObjectCore(ObjectCore), - UnlimitedSupply(UnlimitedSupply), - TokenV2(TokenV2), - PropertyMapModel(PropertyMapModel), -} - -impl V2TokenResource { - pub fn is_resource_supported(data_type: &str) -> bool { - [ - format!("{}::object::ObjectCore", COIN_ADDR), - format!("{}::collection::Collection", TOKEN_V2_ADDR), - format!("{}::collection::FixedSupply", TOKEN_V2_ADDR), - format!("{}::collection::UnlimitedSupply", TOKEN_V2_ADDR), - format!("{}::aptos_token::AptosCollection", TOKEN_V2_ADDR), - format!("{}::token::Token", TOKEN_V2_ADDR), - format!("{}::property_map::PropertyMap", TOKEN_V2_ADDR), - ] - .contains(&data_type.to_string()) - } - - pub fn from_resource( - data_type: &str, - data: &serde_json::Value, - txn_version: i64, - ) -> Result { - match data_type { - x if x == format!("{}::object::ObjectCore", COIN_ADDR) => { - serde_json::from_value(data.clone()).map(|inner| Some(Self::ObjectCore(inner))) - }, - x if x == format!("{}::collection::Collection", TOKEN_V2_ADDR) => { - serde_json::from_value(data.clone()).map(|inner| Some(Self::Collection(inner))) - }, - x if x == format!("{}::collection::FixedSupply", TOKEN_V2_ADDR) => { - serde_json::from_value(data.clone()).map(|inner| Some(Self::FixedSupply(inner))) - }, - x if x == format!("{}::collection::UnlimitedSupply", TOKEN_V2_ADDR) => { - serde_json::from_value(data.clone()).map(|inner| Some(Self::UnlimitedSupply(inner))) - }, - x if x == format!("{}::aptos_token::AptosCollection", TOKEN_V2_ADDR) => { - serde_json::from_value(data.clone()).map(|inner| Some(Self::AptosCollection(inner))) - }, - x if x == format!("{}::token::Token", TOKEN_V2_ADDR) => { - serde_json::from_value(data.clone()).map(|inner| Some(Self::TokenV2(inner))) - }, - x if x == format!("{}::property_map::PropertyMap", TOKEN_V2_ADDR) => { - serde_json::from_value(data.clone()) - .map(|inner| Some(Self::PropertyMapModel(inner))) - }, - _ => Ok(None), - } - .context(format!( - "version {} failed! failed to parse type {}, data {:?}", - txn_version, data_type, data - ))? - .context(format!( - "Resource unsupported! Call is_resource_supported first. version {} type {}", - txn_version, data_type - )) - } -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub enum V2TokenEvent { - MintEvent(MintEvent), - TokenMutationEvent(TokenMutationEvent), - BurnEvent(BurnEvent), - TransferEvent(TransferEvent), -} - -impl V2TokenEvent { - pub fn from_event(data_type: &str, data: &str, txn_version: i64) -> Result> { - match data_type { - "0x4::collection::MintEvent" => { - serde_json::from_str(data).map(|inner| Some(Self::MintEvent(inner))) - }, - "0x4::token::MutationEvent" => { - serde_json::from_str(data).map(|inner| Some(Self::TokenMutationEvent(inner))) - }, - "0x4::collection::BurnEvent" => { - serde_json::from_str(data).map(|inner| Some(Self::BurnEvent(inner))) - }, - "0x1::object::TransferEvent" => { - serde_json::from_str(data).map(|inner| Some(Self::TransferEvent(inner))) - }, - _ => Ok(None), - } - .context(format!( - "version {} failed! 
failed to parse type {}, data {:?}", - txn_version, data_type, data - )) - } -} diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/src/processors/coin_processor.rs b/ecosystem/indexer-grpc/indexer-grpc-parser/src/processors/coin_processor.rs deleted file mode 100644 index f07a717f2ce01..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/src/processors/coin_processor.rs +++ /dev/null @@ -1,360 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -use super::processor_trait::{ProcessingResult, ProcessorTrait}; -use crate::{ - models::coin_models::{ - account_transactions::AccountTransaction, - coin_activities::{CoinActivity, CurrentCoinBalancePK}, - coin_balances::{CoinBalance, CurrentCoinBalance}, - coin_infos::{CoinInfo, CoinInfoQuery}, - coin_supply::CoinSupply, - }, - schema, - utils::database::{ - clean_data_for_db, execute_with_better_error, get_chunks, PgDbPool, PgPoolConnection, - }, -}; -use anyhow::bail; -use aptos_protos::transaction::v1::Transaction; -use async_trait::async_trait; -use diesel::{pg::upsert::excluded, result::Error, ExpressionMethods, PgConnection}; -use field_count::FieldCount; -use std::{collections::HashMap, fmt::Debug}; -use tracing::error; - -pub const NAME: &str = "coin_processor"; -const APTOS_COIN_TYPE_STR: &str = "0x1::aptos_coin::AptosCoin"; -pub struct CoinTransactionProcessor { - connection_pool: PgDbPool, -} - -impl CoinTransactionProcessor { - pub fn new(connection_pool: PgDbPool) -> Self { - Self { connection_pool } - } -} - -impl Debug for CoinTransactionProcessor { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let state = &self.connection_pool.state(); - write!( - f, - "CoinTransactionProcessor {{ connections: {:?} idle_connections: {:?} }}", - state.connections, state.idle_connections - ) - } -} - -fn insert_to_db_impl( - conn: &mut PgConnection, - coin_activities: &[CoinActivity], - coin_infos: &[CoinInfo], - coin_balances: &[CoinBalance], - current_coin_balances: &[CurrentCoinBalance], - coin_supply: &[CoinSupply], - account_transactions: &[AccountTransaction], -) -> Result<(), diesel::result::Error> { - insert_coin_activities(conn, coin_activities)?; - insert_coin_infos(conn, coin_infos)?; - insert_coin_balances(conn, coin_balances)?; - insert_current_coin_balances(conn, current_coin_balances)?; - insert_coin_supply(conn, coin_supply)?; - insert_account_transactions(conn, account_transactions)?; - Ok(()) -} - -fn insert_to_db( - conn: &mut PgPoolConnection, - name: &'static str, - start_version: u64, - end_version: u64, - coin_activities: Vec, - coin_infos: Vec, - coin_balances: Vec, - current_coin_balances: Vec, - coin_supply: Vec, - account_transactions: Vec, -) -> Result<(), diesel::result::Error> { - tracing::trace!( - name = name, - start_version = start_version, - end_version = end_version, - "Inserting to db", - ); - match conn - .build_transaction() - .read_write() - .run::<_, Error, _>(|pg_conn| { - insert_to_db_impl( - pg_conn, - &coin_activities, - &coin_infos, - &coin_balances, - ¤t_coin_balances, - &coin_supply, - &account_transactions, - ) - }) { - Ok(_) => Ok(()), - Err(_) => conn - .build_transaction() - .read_write() - .run::<_, Error, _>(|pg_conn| { - let coin_activities = clean_data_for_db(coin_activities, true); - let coin_infos = clean_data_for_db(coin_infos, true); - let coin_balances = clean_data_for_db(coin_balances, true); - let current_coin_balances = clean_data_for_db(current_coin_balances, true); - let coin_supply = 
clean_data_for_db(coin_supply, true); - let account_transactions = clean_data_for_db(account_transactions, true); - - insert_to_db_impl( - pg_conn, - &coin_activities, - &coin_infos, - &coin_balances, - ¤t_coin_balances, - &coin_supply, - &account_transactions, - ) - }), - } -} - -fn insert_coin_activities( - conn: &mut PgConnection, - item_to_insert: &[CoinActivity], -) -> Result<(), diesel::result::Error> { - use schema::coin_activities::dsl::*; - - let chunks = get_chunks(item_to_insert.len(), CoinActivity::field_count()); - for (start_ind, end_ind) in chunks { - execute_with_better_error( - conn, - diesel::insert_into(schema::coin_activities::table) - .values(&item_to_insert[start_ind..end_ind]) - .on_conflict(( - transaction_version, - event_account_address, - event_creation_number, - event_sequence_number, - )) - .do_nothing(), - None, - )?; - } - Ok(()) -} - -fn insert_coin_infos( - conn: &mut PgConnection, - item_to_insert: &[CoinInfo], -) -> Result<(), diesel::result::Error> { - use schema::coin_infos::dsl::*; - - let chunks = get_chunks(item_to_insert.len(), CoinInfo::field_count()); - for (start_ind, end_ind) in chunks { - execute_with_better_error( - conn, - diesel::insert_into(schema::coin_infos::table) - .values(&item_to_insert[start_ind..end_ind]) - .on_conflict(coin_type_hash) - .do_update() - .set(( - transaction_version_created.eq(excluded(transaction_version_created)), - creator_address.eq(excluded(creator_address)), - name.eq(excluded(name)), - symbol.eq(excluded(symbol)), - decimals.eq(excluded(decimals)), - transaction_created_timestamp.eq(excluded(transaction_created_timestamp)), - supply_aggregator_table_handle.eq(excluded(supply_aggregator_table_handle)), - supply_aggregator_table_key.eq(excluded(supply_aggregator_table_key)), - inserted_at.eq(excluded(inserted_at)), - )), - Some(" WHERE coin_infos.transaction_version_created >= EXCLUDED.transaction_version_created "), - )?; - } - Ok(()) -} - -fn insert_coin_balances( - conn: &mut PgConnection, - item_to_insert: &[CoinBalance], -) -> Result<(), diesel::result::Error> { - use schema::coin_balances::dsl::*; - - let chunks = get_chunks(item_to_insert.len(), CoinBalance::field_count()); - for (start_ind, end_ind) in chunks { - execute_with_better_error( - conn, - diesel::insert_into(schema::coin_balances::table) - .values(&item_to_insert[start_ind..end_ind]) - .on_conflict((transaction_version, owner_address, coin_type_hash)) - .do_nothing(), - None, - )?; - } - Ok(()) -} - -fn insert_current_coin_balances( - conn: &mut PgConnection, - item_to_insert: &[CurrentCoinBalance], -) -> Result<(), diesel::result::Error> { - use schema::current_coin_balances::dsl::*; - - let chunks = get_chunks(item_to_insert.len(), CurrentCoinBalance::field_count()); - for (start_ind, end_ind) in chunks { - execute_with_better_error( - conn, - diesel::insert_into(schema::current_coin_balances::table) - .values(&item_to_insert[start_ind..end_ind]) - .on_conflict((owner_address, coin_type_hash)) - .do_update() - .set(( - amount.eq(excluded(amount)), - last_transaction_version.eq(excluded(last_transaction_version)), - last_transaction_timestamp.eq(excluded(last_transaction_timestamp)), - inserted_at.eq(excluded(inserted_at)), - )), - Some(" WHERE current_coin_balances.last_transaction_version <= excluded.last_transaction_version "), - )?; - } - Ok(()) -} - -fn insert_coin_supply( - conn: &mut PgConnection, - item_to_insert: &[CoinSupply], -) -> Result<(), diesel::result::Error> { - use schema::coin_supply::dsl::*; - - let chunks = 
get_chunks(item_to_insert.len(), CoinSupply::field_count()); - for (start_ind, end_ind) in chunks { - execute_with_better_error( - conn, - diesel::insert_into(schema::coin_supply::table) - .values(&item_to_insert[start_ind..end_ind]) - .on_conflict((transaction_version, coin_type_hash)) - .do_nothing(), - None, - )?; - } - Ok(()) -} - -fn insert_account_transactions( - conn: &mut PgConnection, - item_to_insert: &[AccountTransaction], -) -> Result<(), diesel::result::Error> { - use schema::account_transactions::dsl::*; - - let chunks = get_chunks(item_to_insert.len(), AccountTransaction::field_count()); - for (start_ind, end_ind) in chunks { - execute_with_better_error( - conn, - diesel::insert_into(schema::account_transactions::table) - .values(&item_to_insert[start_ind..end_ind]) - .on_conflict((transaction_version, account_address)) - .do_nothing(), - None, - )?; - } - Ok(()) -} - -#[async_trait] -impl ProcessorTrait for CoinTransactionProcessor { - fn name(&self) -> &'static str { - NAME - } - - async fn process_transactions( - &self, - transactions: Vec, - start_version: u64, - end_version: u64, - ) -> anyhow::Result { - let mut conn = self.get_conn(); - // get aptos_coin info for supply tracking - // TODO: This only needs to be fetched once. Need to persist somehow - let maybe_aptos_coin_info = - &CoinInfoQuery::get_by_coin_type(APTOS_COIN_TYPE_STR.to_string(), &mut conn).unwrap(); - - let mut all_coin_activities = vec![]; - let mut all_coin_balances = vec![]; - let mut all_coin_infos: HashMap = HashMap::new(); - let mut all_current_coin_balances: HashMap = - HashMap::new(); - let mut all_coin_supply = vec![]; - - let mut account_transactions = HashMap::new(); - - for txn in &transactions { - let ( - mut coin_activities, - mut coin_balances, - coin_infos, - current_coin_balances, - mut coin_supply, - ) = CoinActivity::from_transaction(txn, maybe_aptos_coin_info); - all_coin_activities.append(&mut coin_activities); - all_coin_balances.append(&mut coin_balances); - all_coin_supply.append(&mut coin_supply); - // For coin infos, we only want to keep the first version, so insert only if key is not present already - for (key, value) in coin_infos { - all_coin_infos.entry(key).or_insert(value); - } - all_current_coin_balances.extend(current_coin_balances); - - account_transactions.extend(AccountTransaction::from_transaction(txn)); - } - let mut all_coin_infos = all_coin_infos.into_values().collect::>(); - let mut all_current_coin_balances = all_current_coin_balances - .into_values() - .collect::>(); - let mut account_transactions = account_transactions - .into_values() - .collect::>(); - - // Sort by PK - all_coin_infos.sort_by(|a, b| a.coin_type.cmp(&b.coin_type)); - all_current_coin_balances.sort_by(|a, b| { - (&a.owner_address, &a.coin_type).cmp(&(&b.owner_address, &b.coin_type)) - }); - account_transactions.sort_by(|a, b| { - (&a.transaction_version, &a.account_address) - .cmp(&(&b.transaction_version, &b.account_address)) - }); - - let tx_result = insert_to_db( - &mut conn, - self.name(), - start_version, - end_version, - all_coin_activities, - all_coin_infos, - all_coin_balances, - all_current_coin_balances, - all_coin_supply, - account_transactions, - ); - match tx_result { - Ok(_) => Ok((start_version, end_version)), - Err(err) => { - error!( - start_version = start_version, - end_version = end_version, - processor_name = self.name(), - "[Parser] Error inserting transactions to db: {:?}", - err - ); - bail!(format!("Error inserting transactions to db. Processor {}. Start {}. End {}. 
Error {:?}", self.name(), start_version, end_version, err)) - }, - } - } - - fn connection_pool(&self) -> &PgDbPool { - &self.connection_pool - } -} diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/src/processors/default_processor.rs b/ecosystem/indexer-grpc/indexer-grpc-parser/src/processors/default_processor.rs deleted file mode 100644 index 2ffafb8bc43db..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/src/processors/default_processor.rs +++ /dev/null @@ -1,618 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -use super::processor_trait::{ProcessingResult, ProcessorTrait}; -use crate::{ - models::default_models::{ - block_metadata_transactions::BlockMetadataTransactionModel, - events::EventModel, - move_modules::MoveModule, - move_resources::MoveResource, - move_tables::{CurrentTableItem, TableItem, TableMetadata}, - signatures::Signature, - transactions::{TransactionDetail, TransactionModel}, - user_transactions::UserTransactionModel, - v2_objects::{CurrentObject, Object}, - write_set_changes::{WriteSetChangeDetail, WriteSetChangeModel}, - }, - schema, - utils::database::{ - clean_data_for_db, execute_with_better_error, get_chunks, PgDbPool, PgPoolConnection, - }, -}; -use anyhow::bail; -use aptos_protos::transaction::v1::{write_set_change::Change, Transaction}; -use async_trait::async_trait; -use diesel::{pg::upsert::excluded, result::Error, ExpressionMethods, PgConnection}; -use field_count::FieldCount; -use std::{collections::HashMap, fmt::Debug}; -use tracing::error; - -pub const NAME: &str = "default_processor"; -pub struct DefaultTransactionProcessor { - connection_pool: PgDbPool, -} - -impl DefaultTransactionProcessor { - pub fn new(connection_pool: PgDbPool) -> Self { - Self { connection_pool } - } -} - -impl Debug for DefaultTransactionProcessor { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let state = &self.connection_pool.state(); - write!( - f, - "DefaultTransactionProcessor {{ connections: {:?} idle_connections: {:?} }}", - state.connections, state.idle_connections - ) - } -} - -fn insert_to_db_impl( - conn: &mut PgConnection, - txns: &[TransactionModel], - (user_transactions, signatures, block_metadata_transactions): ( - &[UserTransactionModel], - &[Signature], - &[BlockMetadataTransactionModel], - ), - events: &[EventModel], - wscs: &[WriteSetChangeModel], - (move_modules, move_resources, table_items, current_table_items, table_metadata): ( - &[MoveModule], - &[MoveResource], - &[TableItem], - &[CurrentTableItem], - &[TableMetadata], - ), - (objects, current_objects): (&[Object], &[CurrentObject]), -) -> Result<(), diesel::result::Error> { - insert_transactions(conn, txns)?; - insert_user_transactions(conn, user_transactions)?; - insert_signatures(conn, signatures)?; - insert_block_metadata_transactions(conn, block_metadata_transactions)?; - insert_events(conn, events)?; - insert_write_set_changes(conn, wscs)?; - insert_move_modules(conn, move_modules)?; - insert_move_resources(conn, move_resources)?; - insert_table_items(conn, table_items)?; - insert_current_table_items(conn, current_table_items)?; - insert_table_metadata(conn, table_metadata)?; - insert_objects(conn, objects)?; - insert_current_objects(conn, current_objects)?; - Ok(()) -} - -fn insert_to_db( - conn: &mut PgPoolConnection, - name: &'static str, - start_version: u64, - end_version: u64, - txns: Vec, - (user_transactions, signatures, block_metadata_transactions): ( - Vec, - Vec, - Vec, - ), - events: Vec, - wscs: Vec, - 
(move_modules, move_resources, table_items, current_table_items, table_metadata): ( - Vec, - Vec, - Vec, - Vec, - Vec, - ), - (objects, current_objects): (Vec, Vec), -) -> Result<(), diesel::result::Error> { - tracing::trace!( - name = name, - start_version = start_version, - end_version = end_version, - "Inserting to db", - ); - match conn - .build_transaction() - .read_write() - .run::<_, Error, _>(|pg_conn| { - insert_to_db_impl( - pg_conn, - &txns, - ( - &user_transactions, - &signatures, - &block_metadata_transactions, - ), - &events, - &wscs, - ( - &move_modules, - &move_resources, - &table_items, - ¤t_table_items, - &table_metadata, - ), - (&objects, ¤t_objects), - ) - }) { - Ok(_) => Ok(()), - Err(_) => { - let txns = clean_data_for_db(txns, true); - let user_transactions = clean_data_for_db(user_transactions, true); - let signatures = clean_data_for_db(signatures, true); - let block_metadata_transactions = clean_data_for_db(block_metadata_transactions, true); - let events = clean_data_for_db(events, true); - let wscs = clean_data_for_db(wscs, true); - let move_modules = clean_data_for_db(move_modules, true); - let move_resources = clean_data_for_db(move_resources, true); - let table_items = clean_data_for_db(table_items, true); - let current_table_items = clean_data_for_db(current_table_items, true); - let table_metadata = clean_data_for_db(table_metadata, true); - let objects = clean_data_for_db(objects, true); - let current_objects = clean_data_for_db(current_objects, true); - - conn.build_transaction() - .read_write() - .run::<_, Error, _>(|pg_conn| { - insert_to_db_impl( - pg_conn, - &txns, - ( - &user_transactions, - &signatures, - &block_metadata_transactions, - ), - &events, - &wscs, - ( - &move_modules, - &move_resources, - &table_items, - ¤t_table_items, - &table_metadata, - ), - (&objects, ¤t_objects), - ) - }) - }, - } -} - -fn insert_transactions( - conn: &mut PgConnection, - items_to_insert: &[TransactionModel], -) -> Result<(), diesel::result::Error> { - use schema::transactions::dsl::*; - let chunks = get_chunks(items_to_insert.len(), TransactionModel::field_count()); - for (start_ind, end_ind) in chunks { - execute_with_better_error( - conn, - diesel::insert_into(schema::transactions::table) - .values(&items_to_insert[start_ind..end_ind]) - .on_conflict(version) - .do_update() - .set(( - num_events.eq(excluded(num_events)), - inserted_at.eq(excluded(inserted_at)), - )), - None, - )?; - } - Ok(()) -} - -fn insert_user_transactions( - conn: &mut PgConnection, - items_to_insert: &[UserTransactionModel], -) -> Result<(), diesel::result::Error> { - use schema::user_transactions::dsl::*; - let chunks = get_chunks(items_to_insert.len(), UserTransactionModel::field_count()); - for (start_ind, end_ind) in chunks { - execute_with_better_error( - conn, - diesel::insert_into(schema::user_transactions::table) - .values(&items_to_insert[start_ind..end_ind]) - .on_conflict(version) - .do_nothing(), - None, - )?; - } - Ok(()) -} - -fn insert_signatures( - conn: &mut PgConnection, - items_to_insert: &[Signature], -) -> Result<(), diesel::result::Error> { - use schema::signatures::dsl::*; - let chunks = get_chunks(items_to_insert.len(), Signature::field_count()); - for (start_ind, end_ind) in chunks { - execute_with_better_error( - conn, - diesel::insert_into(schema::signatures::table) - .values(&items_to_insert[start_ind..end_ind]) - .on_conflict(( - transaction_version, - multi_agent_index, - multi_sig_index, - is_sender_primary, - )) - .do_nothing(), - None, - )?; - } - Ok(()) -} 
- -fn insert_block_metadata_transactions( - conn: &mut PgConnection, - items_to_insert: &[BlockMetadataTransactionModel], -) -> Result<(), diesel::result::Error> { - use schema::block_metadata_transactions::dsl::*; - let chunks = get_chunks( - items_to_insert.len(), - BlockMetadataTransactionModel::field_count(), - ); - for (start_ind, end_ind) in chunks { - execute_with_better_error( - conn, - diesel::insert_into(schema::block_metadata_transactions::table) - .values(&items_to_insert[start_ind..end_ind]) - .on_conflict(version) - .do_nothing(), - None, - )?; - } - Ok(()) -} - -fn insert_events( - conn: &mut PgConnection, - items_to_insert: &[EventModel], -) -> Result<(), diesel::result::Error> { - use schema::events::dsl::*; - let chunks = get_chunks(items_to_insert.len(), EventModel::field_count()); - for (start_ind, end_ind) in chunks { - execute_with_better_error( - conn, - diesel::insert_into(schema::events::table) - .values(&items_to_insert[start_ind..end_ind]) - .on_conflict((account_address, creation_number, sequence_number)) - .do_nothing(), - None, - )?; - } - Ok(()) -} - -fn insert_write_set_changes( - conn: &mut PgConnection, - items_to_insert: &[WriteSetChangeModel], -) -> Result<(), diesel::result::Error> { - use schema::write_set_changes::dsl::*; - let chunks = get_chunks(items_to_insert.len(), WriteSetChangeModel::field_count()); - for (start_ind, end_ind) in chunks { - execute_with_better_error( - conn, - diesel::insert_into(schema::write_set_changes::table) - .values(&items_to_insert[start_ind..end_ind]) - .on_conflict((transaction_version, index)) - .do_nothing(), - None, - )?; - } - Ok(()) -} - -fn insert_move_modules( - conn: &mut PgConnection, - items_to_insert: &[MoveModule], -) -> Result<(), diesel::result::Error> { - use schema::move_modules::dsl::*; - let chunks = get_chunks(items_to_insert.len(), MoveModule::field_count()); - for (start_ind, end_ind) in chunks { - execute_with_better_error( - conn, - diesel::insert_into(schema::move_modules::table) - .values(&items_to_insert[start_ind..end_ind]) - .on_conflict((transaction_version, write_set_change_index)) - .do_nothing(), - None, - )?; - } - Ok(()) -} - -fn insert_move_resources( - conn: &mut PgConnection, - items_to_insert: &[MoveResource], -) -> Result<(), diesel::result::Error> { - use schema::move_resources::dsl::*; - let chunks = get_chunks(items_to_insert.len(), MoveResource::field_count()); - for (start_ind, end_ind) in chunks { - execute_with_better_error( - conn, - diesel::insert_into(schema::move_resources::table) - .values(&items_to_insert[start_ind..end_ind]) - .on_conflict((transaction_version, write_set_change_index)) - .do_update() - .set(( - inserted_at.eq(excluded(inserted_at)), - state_key_hash.eq(excluded(state_key_hash)), - )), - None, - )?; - } - Ok(()) -} - -fn insert_table_items( - conn: &mut PgConnection, - items_to_insert: &[TableItem], -) -> Result<(), diesel::result::Error> { - use schema::table_items::dsl::*; - let chunks = get_chunks(items_to_insert.len(), TableItem::field_count()); - for (start_ind, end_ind) in chunks { - execute_with_better_error( - conn, - diesel::insert_into(schema::table_items::table) - .values(&items_to_insert[start_ind..end_ind]) - .on_conflict((transaction_version, write_set_change_index)) - .do_nothing(), - None, - )?; - } - Ok(()) -} - -fn insert_current_table_items( - conn: &mut PgConnection, - items_to_insert: &[CurrentTableItem], -) -> Result<(), diesel::result::Error> { - use schema::current_table_items::dsl::*; - let chunks = 
get_chunks(items_to_insert.len(), CurrentTableItem::field_count()); - for (start_ind, end_ind) in chunks { - execute_with_better_error( - conn, - diesel::insert_into(schema::current_table_items::table) - .values(&items_to_insert[start_ind..end_ind]) - .on_conflict((table_handle, key_hash)) - .do_update() - .set(( - key.eq(excluded(key)), - decoded_key.eq(excluded(decoded_key)), - decoded_value.eq(excluded(decoded_value)), - is_deleted.eq(excluded(is_deleted)), - last_transaction_version.eq(excluded(last_transaction_version)), - inserted_at.eq(excluded(inserted_at)), - )), - Some(" WHERE current_table_items.last_transaction_version <= excluded.last_transaction_version "), - )?; - } - Ok(()) -} - -fn insert_table_metadata( - conn: &mut PgConnection, - items_to_insert: &[TableMetadata], -) -> Result<(), diesel::result::Error> { - use schema::table_metadatas::dsl::*; - let chunks = get_chunks(items_to_insert.len(), TableMetadata::field_count()); - for (start_ind, end_ind) in chunks { - execute_with_better_error( - conn, - diesel::insert_into(schema::table_metadatas::table) - .values(&items_to_insert[start_ind..end_ind]) - .on_conflict(handle) - .do_nothing(), - None, - )?; - } - Ok(()) -} - -fn insert_objects( - conn: &mut PgConnection, - items_to_insert: &[Object], -) -> Result<(), diesel::result::Error> { - use schema::objects::dsl::*; - let chunks = get_chunks(items_to_insert.len(), Object::field_count()); - for (start_ind, end_ind) in chunks { - execute_with_better_error( - conn, - diesel::insert_into(schema::objects::table) - .values(&items_to_insert[start_ind..end_ind]) - .on_conflict((transaction_version, write_set_change_index)) - .do_nothing(), - None, - )?; - } - Ok(()) -} - -fn insert_current_objects( - conn: &mut PgConnection, - items_to_insert: &[CurrentObject], -) -> Result<(), diesel::result::Error> { - use schema::current_objects::dsl::*; - let chunks = get_chunks(items_to_insert.len(), CurrentObject::field_count()); - for (start_ind, end_ind) in chunks { - execute_with_better_error( - conn, - diesel::insert_into(schema::current_objects::table) - .values(&items_to_insert[start_ind..end_ind]) - .on_conflict(object_address) - .do_update() - .set(( - owner_address.eq(excluded(owner_address)), - state_key_hash.eq(excluded(state_key_hash)), - allow_ungated_transfer.eq(excluded(allow_ungated_transfer)), - last_guid_creation_num.eq(excluded(last_guid_creation_num)), - last_transaction_version.eq(excluded(last_transaction_version)), - is_deleted.eq(excluded(is_deleted)), - inserted_at.eq(excluded(inserted_at)), - )), - Some(" WHERE current_objects.last_transaction_version <= excluded.last_transaction_version "), - )?; - } - Ok(()) -} - -#[async_trait] -impl ProcessorTrait for DefaultTransactionProcessor { - fn name(&self) -> &'static str { - NAME - } - - async fn process_transactions( - &self, - transactions: Vec, - start_version: u64, - end_version: u64, - ) -> anyhow::Result { - let mut conn = self.get_conn(); - let (txns, txn_details, events, write_set_changes, wsc_details) = - TransactionModel::from_transactions(&transactions); - - let mut signatures = vec![]; - let mut user_transactions = vec![]; - let mut block_metadata_transactions = vec![]; - for detail in txn_details { - match detail { - TransactionDetail::User(user_txn, sigs) => { - signatures.append(&mut sigs.clone()); - user_transactions.push(user_txn.clone()); - }, - TransactionDetail::BlockMetadata(bmt) => { - block_metadata_transactions.push(bmt.clone()) - }, - } - } - let mut move_modules = vec![]; - let mut 
move_resources = vec![]; - let mut table_items = vec![]; - let mut current_table_items = HashMap::new(); - let mut table_metadata = HashMap::new(); - for detail in wsc_details { - match detail { - WriteSetChangeDetail::Module(module) => move_modules.push(module.clone()), - WriteSetChangeDetail::Resource(resource) => move_resources.push(resource.clone()), - WriteSetChangeDetail::Table(item, current_item, metadata) => { - table_items.push(item.clone()); - current_table_items.insert( - ( - current_item.table_handle.clone(), - current_item.key_hash.clone(), - ), - current_item.clone(), - ); - if let Some(meta) = metadata { - table_metadata.insert(meta.handle.clone(), meta.clone()); - } - }, - } - } - - // TODO, merge this loop with above - // Moving object handling here because we need a single object - // map through transactions for lookups - let mut all_objects = vec![]; - let mut all_current_objects = HashMap::new(); - for txn in &transactions { - let txn_version = txn.version as i64; - let changes = &txn - .info - .as_ref() - .unwrap_or_else(|| { - panic!( - "Transaction info doesn't exist! Transaction {}", - txn_version - ) - }) - .changes; - for (index, wsc) in changes.iter().enumerate() { - let index: i64 = index as i64; - match wsc.change.as_ref().unwrap() { - Change::WriteResource(inner) => { - if let Some((object, current_object)) = - &Object::from_write_resource(inner, txn_version, index).unwrap() - { - all_objects.push(object.clone()); - all_current_objects - .insert(object.object_address.clone(), current_object.clone()); - } - }, - Change::DeleteResource(inner) => { - // Passing all_current_objects into the function so that we can get the owner of the deleted - // resource if it was handled in the same batch - if let Some((object, current_object)) = Object::from_delete_resource( - inner, - txn_version, - index, - &all_current_objects, - &mut conn, - ) - .unwrap() - { - all_objects.push(object.clone()); - all_current_objects - .insert(object.object_address.clone(), current_object.clone()); - } - }, - _ => {}, - }; - } - } - // Getting list of values and sorting by pk in order to avoid postgres deadlock since we're doing multi threaded db writes - let mut current_table_items = current_table_items - .into_values() - .collect::>(); - let mut table_metadata = table_metadata.into_values().collect::>(); - // Sort by PK - let mut all_current_objects = all_current_objects - .into_values() - .collect::>(); - current_table_items - .sort_by(|a, b| (&a.table_handle, &a.key_hash).cmp(&(&b.table_handle, &b.key_hash))); - table_metadata.sort_by(|a, b| a.handle.cmp(&b.handle)); - all_current_objects.sort_by(|a, b| a.object_address.cmp(&b.object_address)); - - let tx_result = insert_to_db( - &mut conn, - self.name(), - start_version, - end_version, - txns, - (user_transactions, signatures, block_metadata_transactions), - events, - write_set_changes, - ( - move_modules, - move_resources, - table_items, - current_table_items, - table_metadata, - ), - (all_objects, all_current_objects), - ); - match tx_result { - Ok(_) => Ok((start_version, end_version)), - Err(e) => { - error!( - start_version = start_version, - end_version = end_version, - processor_name = self.name(), - error = ?e, - "[Parser] Error inserting transactions to db", - ); - bail!(e) - }, - } - } - - fn connection_pool(&self) -> &PgDbPool { - &self.connection_pool - } -} diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/src/processors/mod.rs b/ecosystem/indexer-grpc/indexer-grpc-parser/src/processors/mod.rs deleted file mode 
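// A minimal sketch (hypothetical row type) of the "sort by primary key before writing" step
// used above: every worker upserts its batch in the same key order, so concurrent transactions
// acquire row locks in a consistent order and the Postgres deadlocks the comment mentions are
// avoided. The original applies the same idea to CurrentTableItem, TableMetadata and
// CurrentObject.
struct CurrentTableItemRow {
    table_handle: String,
    key_hash: String,
}

fn sort_by_pk(rows: &mut [CurrentTableItemRow]) {
    rows.sort_by(|a, b| (&a.table_handle, &a.key_hash).cmp(&(&b.table_handle, &b.key_hash)));
}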
100644 index 5fc7acfc571c8..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/src/processors/mod.rs +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -pub mod coin_processor; -pub mod default_processor; -pub mod processor_trait; -pub mod stake_processor; -pub mod token_processor; - -use self::{ - coin_processor::NAME as COIN_PROCESSOR_NAME, default_processor::NAME as DEFAULT_PROCESSOR_NAME, - stake_processor::NAME as STAKE_PROCESSOR_NAME, token_processor::NAME as TOKEN_PROCESSOR_NAME, -}; - -pub enum Processor { - CoinProcessor, - DefaultProcessor, - StakeProcessor, - TokenProcessor, -} - -impl Processor { - pub fn from_string(input_str: &String) -> Self { - match input_str.as_str() { - DEFAULT_PROCESSOR_NAME => Self::DefaultProcessor, - COIN_PROCESSOR_NAME => Self::CoinProcessor, - STAKE_PROCESSOR_NAME => Self::StakeProcessor, - TOKEN_PROCESSOR_NAME => Self::TokenProcessor, - _ => panic!("Processor unsupported {}", input_str), - } - } -} diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/src/processors/processor_trait.rs b/ecosystem/indexer-grpc/indexer-grpc-parser/src/processors/processor_trait.rs deleted file mode 100644 index fa4ddb31b62c7..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/src/processors/processor_trait.rs +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -use crate::{ - models::processor_status::ProcessorStatus, - schema::processor_status, - utils::{ - counters::{GOT_CONNECTION_COUNT, UNABLE_TO_GET_CONNECTION_COUNT}, - database::{execute_with_better_error, PgDbPool, PgPoolConnection}, - }, -}; -use aptos_protos::transaction::v1::Transaction as ProtoTransaction; -use async_trait::async_trait; -use diesel::{pg::upsert::excluded, prelude::*}; -use std::fmt::Debug; - -type StartVersion = u64; -type EndVersion = u64; -pub type ProcessingResult = (StartVersion, EndVersion); - -/// Base trait for all processors -#[async_trait] -pub trait ProcessorTrait: Send + Sync + Debug { - fn name(&self) -> &'static str; - - /// Process all transactions including writing to the database - async fn process_transactions( - &self, - transactions: Vec, - start_version: u64, - end_version: u64, - ) -> anyhow::Result; - - /// Gets a reference to the connection pool - /// This is used by the `get_conn()` helper below - fn connection_pool(&self) -> &PgDbPool; - - //* Below are helper methods that don't need to be implemented *// - - /// Gets the connection. - /// If it was unable to do so (default timeout: 30s), it will keep retrying until it can. - fn get_conn(&self) -> PgPoolConnection { - let pool = self.connection_pool(); - loop { - match pool.get() { - Ok(conn) => { - GOT_CONNECTION_COUNT.inc(); - return conn; - }, - Err(err) => { - UNABLE_TO_GET_CONNECTION_COUNT.inc(); - tracing::error!( - "Could not get DB connection from pool, will retry in {:?}. Err: {:?}", - pool.connection_timeout(), - err - ); - }, - }; - } - } - - /// Store last processed version from database. 
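// A minimal, pool-agnostic sketch of the retry-forever behaviour get_conn() implements above:
// keep asking the pool for a connection until one is handed out, logging every failure.
// `acquire` stands in for pool.get(); the explicit sleep is an addition in this sketch, whereas
// the original relies on the pool's own connection timeout (default 30s) to pace the retries.
use std::{thread, time::Duration};

fn get_with_retry<T, E: std::fmt::Debug>(mut acquire: impl FnMut() -> Result<T, E>) -> T {
    loop {
        match acquire() {
            Ok(conn) => return conn,
            Err(err) => {
                eprintln!("Could not get DB connection from pool, will retry. Err: {:?}", err);
                thread::sleep(Duration::from_secs(1));
            }
        }
    }
}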
We can assume that all previously processed - /// versions are successful because any gap would cause the processor to panic - async fn update_last_processed_version(&self, version: u64) -> anyhow::Result<()> { - let mut conn = self.get_conn(); - let status = ProcessorStatus { - processor: self.name().to_string(), - last_success_version: version as i64, - }; - execute_with_better_error( - &mut conn, - diesel::insert_into(processor_status::table) - .values(&status) - .on_conflict(processor_status::processor) - .do_update() - .set(( - processor_status::last_success_version - .eq(excluded(processor_status::last_success_version)), - processor_status::last_updated.eq(excluded(processor_status::last_updated)), - )), - Some(" WHERE processor_status.last_success_version <= EXCLUDED.last_success_version "), - )?; - Ok(()) - } -} diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/src/processors/stake_processor.rs b/ecosystem/indexer-grpc/indexer-grpc-parser/src/processors/stake_processor.rs deleted file mode 100644 index 2140097852fd2..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/src/processors/stake_processor.rs +++ /dev/null @@ -1,418 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -use super::processor_trait::{ProcessingResult, ProcessorTrait}; -use crate::{ - models::stake_models::{ - delegator_activities::DelegatedStakingActivity, - delegator_balances::{CurrentDelegatorBalance, CurrentDelegatorBalanceMap}, - delegator_pools::{ - CurrentDelegatorPoolBalance, DelegatorPool, DelegatorPoolBalance, DelegatorPoolMap, - }, - proposal_votes::ProposalVote, - staking_pool_voter::{CurrentStakingPoolVoter, StakingPoolVoterMap}, - }, - schema, - utils::database::{ - clean_data_for_db, execute_with_better_error, get_chunks, PgDbPool, PgPoolConnection, - }, -}; -use anyhow::bail; -use aptos_protos::transaction::v1::Transaction; -use async_trait::async_trait; -use diesel::{pg::upsert::excluded, result::Error, ExpressionMethods, PgConnection}; -use field_count::FieldCount; -use std::{collections::HashMap, fmt::Debug}; -use tracing::error; - -pub const NAME: &str = "stake_processor"; -pub struct StakeTransactionProcessor { - connection_pool: PgDbPool, -} - -impl StakeTransactionProcessor { - pub fn new(connection_pool: PgDbPool) -> Self { - Self { connection_pool } - } -} - -impl Debug for StakeTransactionProcessor { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let state = &self.connection_pool.state(); - write!( - f, - "StakeTransactionProcessor {{ connections: {:?} idle_connections: {:?} }}", - state.connections, state.idle_connections - ) - } -} - -fn insert_to_db_impl( - conn: &mut PgConnection, - current_stake_pool_voters: &[CurrentStakingPoolVoter], - proposal_votes: &[ProposalVote], - delegator_actvities: &[DelegatedStakingActivity], - delegator_balances: &[CurrentDelegatorBalance], - delegator_pools: &[DelegatorPool], - delegator_pool_balances: &[DelegatorPoolBalance], - current_delegator_pool_balances: &[CurrentDelegatorPoolBalance], -) -> Result<(), diesel::result::Error> { - insert_current_stake_pool_voter(conn, current_stake_pool_voters)?; - insert_proposal_votes(conn, proposal_votes)?; - insert_delegator_activities(conn, delegator_actvities)?; - insert_delegator_balances(conn, delegator_balances)?; - insert_delegator_pools(conn, delegator_pools)?; - insert_delegator_pool_balances(conn, delegator_pool_balances)?; - insert_current_delegator_pool_balances(conn, current_delegator_pool_balances)?; - Ok(()) -} - -fn 
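// A minimal in-memory sketch (not the Diesel code above) of what the
// " WHERE processor_status.last_success_version <= EXCLUDED.last_success_version " guard
// enforces: an upsert only replaces the stored value when the incoming version is at least as
// new, so a lagging worker can never move the checkpoint backwards.
use std::collections::HashMap;

fn record_last_success(statuses: &mut HashMap<String, i64>, processor: &str, version: i64) {
    statuses
        .entry(processor.to_string())
        .and_modify(|stored| {
            if *stored <= version {
                *stored = version;
            }
        })
        .or_insert(version);
}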
insert_to_db( - conn: &mut PgPoolConnection, - name: &'static str, - start_version: u64, - end_version: u64, - current_stake_pool_voters: Vec, - proposal_votes: Vec, - delegator_actvities: Vec, - delegator_balances: Vec, - delegator_pools: Vec, - delegator_pool_balances: Vec, - current_delegator_pool_balances: Vec, -) -> Result<(), diesel::result::Error> { - tracing::trace!( - name = name, - start_version = start_version, - end_version = end_version, - "Inserting to db", - ); - match conn - .build_transaction() - .read_write() - .run::<_, Error, _>(|pg_conn| { - insert_to_db_impl( - pg_conn, - ¤t_stake_pool_voters, - &proposal_votes, - &delegator_actvities, - &delegator_balances, - &delegator_pools, - &delegator_pool_balances, - ¤t_delegator_pool_balances, - ) - }) { - Ok(_) => Ok(()), - Err(_) => conn - .build_transaction() - .read_write() - .run::<_, Error, _>(|pg_conn| { - let current_stake_pool_voters = clean_data_for_db(current_stake_pool_voters, true); - let proposal_votes = clean_data_for_db(proposal_votes, true); - let delegator_actvities = clean_data_for_db(delegator_actvities, true); - let delegator_balances = clean_data_for_db(delegator_balances, true); - let delegator_pools = clean_data_for_db(delegator_pools, true); - let delegator_pool_balances = clean_data_for_db(delegator_pool_balances, true); - let current_delegator_pool_balances = - clean_data_for_db(current_delegator_pool_balances, true); - - insert_to_db_impl( - pg_conn, - ¤t_stake_pool_voters, - &proposal_votes, - &delegator_actvities, - &delegator_balances, - &delegator_pools, - &delegator_pool_balances, - ¤t_delegator_pool_balances, - ) - }), - } -} - -fn insert_current_stake_pool_voter( - conn: &mut PgConnection, - item_to_insert: &[CurrentStakingPoolVoter], -) -> Result<(), diesel::result::Error> { - use schema::current_staking_pool_voter::dsl::*; - - let chunks = get_chunks(item_to_insert.len(), CurrentStakingPoolVoter::field_count()); - for (start_ind, end_ind) in chunks { - execute_with_better_error( - conn, - diesel::insert_into(schema::current_staking_pool_voter::table) - .values(&item_to_insert[start_ind..end_ind]) - .on_conflict(staking_pool_address) - .do_update() - .set(( - staking_pool_address.eq(excluded(staking_pool_address)), - voter_address.eq(excluded(voter_address)), - last_transaction_version.eq(excluded(last_transaction_version)), - inserted_at.eq(excluded(inserted_at)), - operator_address.eq(excluded(operator_address)), - )), - Some( - " WHERE current_staking_pool_voter.last_transaction_version <= EXCLUDED.last_transaction_version ", - ), - )?; - } - Ok(()) -} - -fn insert_proposal_votes( - conn: &mut PgConnection, - item_to_insert: &[ProposalVote], -) -> Result<(), diesel::result::Error> { - use schema::proposal_votes::dsl::*; - - let chunks = get_chunks(item_to_insert.len(), ProposalVote::field_count()); - for (start_ind, end_ind) in chunks { - execute_with_better_error( - conn, - diesel::insert_into(schema::proposal_votes::table) - .values(&item_to_insert[start_ind..end_ind]) - .on_conflict((transaction_version, proposal_id, voter_address)) - .do_nothing(), - None, - )?; - } - Ok(()) -} - -fn insert_delegator_activities( - conn: &mut PgConnection, - item_to_insert: &[DelegatedStakingActivity], -) -> Result<(), diesel::result::Error> { - use schema::delegated_staking_activities::dsl::*; - - let chunks = get_chunks( - item_to_insert.len(), - DelegatedStakingActivity::field_count(), - ); - for (start_ind, end_ind) in chunks { - execute_with_better_error( - conn, - 
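// A minimal sketch of the two-attempt pattern insert_to_db uses above: write the batch as-is
// inside one transaction, and only if that fails, run every row through a cleaning pass
// (clean_data_for_db in the original) and try exactly once more in a fresh transaction.
// `sanitize` and `write` are stand-ins introduced for illustration.
fn insert_with_cleanup_retry<T, E>(
    rows: Vec<T>,
    sanitize: impl FnOnce(Vec<T>) -> Vec<T>,
    mut write: impl FnMut(&[T]) -> Result<(), E>,
) -> Result<(), E> {
    match write(&rows) {
        Ok(()) => Ok(()),
        Err(_) => write(&sanitize(rows)),
    }
}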
diesel::insert_into(schema::delegated_staking_activities::table) - .values(&item_to_insert[start_ind..end_ind]) - .on_conflict((transaction_version, event_index)) - .do_nothing(), - None, - )?; - } - Ok(()) -} - -fn insert_delegator_balances( - conn: &mut PgConnection, - item_to_insert: &[CurrentDelegatorBalance], -) -> Result<(), diesel::result::Error> { - use schema::current_delegator_balances::dsl::*; - - let chunks = get_chunks(item_to_insert.len(), CurrentDelegatorBalance::field_count()); - for (start_ind, end_ind) in chunks { - execute_with_better_error( - conn, - diesel::insert_into(schema::current_delegator_balances::table) - .values(&item_to_insert[start_ind..end_ind]) - .on_conflict((delegator_address, pool_address, pool_type, table_handle)) - .do_update() - .set(( - last_transaction_version.eq(excluded(last_transaction_version)), - inserted_at.eq(excluded(inserted_at)), - shares.eq(excluded(shares)), - parent_table_handle.eq(excluded(parent_table_handle)), - )), - Some( - " WHERE current_delegator_balances.last_transaction_version <= EXCLUDED.last_transaction_version ", - ), - )?; - } - Ok(()) -} - -fn insert_delegator_pools( - conn: &mut PgConnection, - item_to_insert: &[DelegatorPool], -) -> Result<(), diesel::result::Error> { - use schema::delegated_staking_pools::dsl::*; - - let chunks = get_chunks(item_to_insert.len(), DelegatorPool::field_count()); - for (start_ind, end_ind) in chunks { - execute_with_better_error( - conn, - diesel::insert_into(schema::delegated_staking_pools::table) - .values(&item_to_insert[start_ind..end_ind]) - .on_conflict(staking_pool_address) - .do_update() - .set(( - first_transaction_version.eq(excluded(first_transaction_version)), - inserted_at.eq(excluded(inserted_at)), - )), - Some( - " WHERE delegated_staking_pools.first_transaction_version >= EXCLUDED.first_transaction_version ", - ), - )?; - } - Ok(()) -} - -fn insert_delegator_pool_balances( - conn: &mut PgConnection, - item_to_insert: &[DelegatorPoolBalance], -) -> Result<(), diesel::result::Error> { - use schema::delegated_staking_pool_balances::dsl::*; - - let chunks = get_chunks(item_to_insert.len(), DelegatorPoolBalance::field_count()); - for (start_ind, end_ind) in chunks { - execute_with_better_error( - conn, - diesel::insert_into(schema::delegated_staking_pool_balances::table) - .values(&item_to_insert[start_ind..end_ind]) - .on_conflict((transaction_version, staking_pool_address)) - .do_nothing(), - None, - )?; - } - Ok(()) -} - -fn insert_current_delegator_pool_balances( - conn: &mut PgConnection, - item_to_insert: &[CurrentDelegatorPoolBalance], -) -> Result<(), diesel::result::Error> { - use schema::current_delegated_staking_pool_balances::dsl::*; - - let chunks = get_chunks( - item_to_insert.len(), - CurrentDelegatorPoolBalance::field_count(), - ); - for (start_ind, end_ind) in chunks { - execute_with_better_error( - conn, - diesel::insert_into(schema::current_delegated_staking_pool_balances::table) - .values(&item_to_insert[start_ind..end_ind]) - .on_conflict(staking_pool_address) - .do_update() - .set(( - total_coins.eq(excluded(total_coins)), - total_shares.eq(excluded(total_shares)), - last_transaction_version.eq(excluded(last_transaction_version)), - inserted_at.eq(excluded(inserted_at)), - operator_commission_percentage.eq(excluded(operator_commission_percentage)), - inactive_table_handle.eq(excluded(inactive_table_handle)), - active_table_handle.eq(excluded(active_table_handle)), - )), - Some( - " WHERE current_delegated_staking_pool_balances.last_transaction_version <= 
EXCLUDED.last_transaction_version ", - ), - )?; - } - Ok(()) -} - -#[async_trait] -impl ProcessorTrait for StakeTransactionProcessor { - fn name(&self) -> &'static str { - NAME - } - - async fn process_transactions( - &self, - transactions: Vec, - start_version: u64, - end_version: u64, - ) -> anyhow::Result { - let mut conn = self.get_conn(); - - let mut all_current_stake_pool_voters: StakingPoolVoterMap = HashMap::new(); - let mut all_proposal_votes = vec![]; - let mut all_delegator_activities = vec![]; - let mut all_delegator_balances: CurrentDelegatorBalanceMap = HashMap::new(); - let mut all_delegator_pools: DelegatorPoolMap = HashMap::new(); - let mut all_delegator_pool_balances = vec![]; - let mut all_current_delegator_pool_balances = HashMap::new(); - - for txn in &transactions { - // Add votes data - let current_stake_pool_voter = CurrentStakingPoolVoter::from_transaction(txn).unwrap(); - all_current_stake_pool_voters.extend(current_stake_pool_voter); - let mut proposal_votes = ProposalVote::from_transaction(txn).unwrap(); - all_proposal_votes.append(&mut proposal_votes); - - // Add delegator activities - let mut delegator_activities = DelegatedStakingActivity::from_transaction(txn).unwrap(); - all_delegator_activities.append(&mut delegator_activities); - - // Add delegator balances - let delegator_balances = - CurrentDelegatorBalance::from_transaction(txn, &mut conn).unwrap(); - all_delegator_balances.extend(delegator_balances); - - // Add delegator pools - let (delegator_pools, mut delegator_pool_balances, current_delegator_pool_balances) = - DelegatorPool::from_transaction(txn).unwrap(); - all_delegator_pools.extend(delegator_pools); - all_delegator_pool_balances.append(&mut delegator_pool_balances); - all_current_delegator_pool_balances.extend(current_delegator_pool_balances); - } - - // Getting list of values and sorting by pk in order to avoid postgres deadlock since we're doing multi threaded db writes - let mut all_current_stake_pool_voters = all_current_stake_pool_voters - .into_values() - .collect::>(); - let mut all_delegator_balances = all_delegator_balances - .into_values() - .collect::>(); - let mut all_delegator_pools = all_delegator_pools - .into_values() - .collect::>(); - let mut all_current_delegator_pool_balances = all_current_delegator_pool_balances - .into_values() - .collect::>(); - - // Sort by PK - all_current_stake_pool_voters - .sort_by(|a, b| a.staking_pool_address.cmp(&b.staking_pool_address)); - all_delegator_balances.sort_by(|a, b| { - (&a.delegator_address, &a.pool_address, &a.pool_type).cmp(&( - &b.delegator_address, - &b.pool_address, - &b.pool_type, - )) - }); - all_delegator_pools.sort_by(|a, b| a.staking_pool_address.cmp(&b.staking_pool_address)); - all_current_delegator_pool_balances - .sort_by(|a, b| a.staking_pool_address.cmp(&b.staking_pool_address)); - - let tx_result = insert_to_db( - &mut conn, - self.name(), - start_version, - end_version, - all_current_stake_pool_voters, - all_proposal_votes, - all_delegator_activities, - all_delegator_balances, - all_delegator_pools, - all_delegator_pool_balances, - all_current_delegator_pool_balances, - ); - - match tx_result { - Ok(_) => Ok((start_version, end_version)), - Err(e) => { - error!( - start_version = start_version, - end_version = end_version, - processor_name = self.name(), - error = ?e, - "[Parser] Error inserting transactions to db", - ); - bail!(e) - }, - } - } - - fn connection_pool(&self) -> &PgDbPool { - &self.connection_pool - } -} diff --git 
a/ecosystem/indexer-grpc/indexer-grpc-parser/src/processors/token_processor.rs b/ecosystem/indexer-grpc/indexer-grpc-parser/src/processors/token_processor.rs deleted file mode 100644 index 672aa4d038463..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/src/processors/token_processor.rs +++ /dev/null @@ -1,1565 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -use super::processor_trait::{ProcessingResult, ProcessorTrait}; -use crate::{ - models::{ - coin_models::v2_fungible_asset_utils::{ - FungibleAssetMetadata, FungibleAssetStore, FungibleAssetSupply, - }, - token_models::{ - ans_lookup::{CurrentAnsLookup, CurrentAnsLookupPK}, - collection_datas::{CollectionData, CurrentCollectionData}, - nft_points::NftPoints, - token_activities::TokenActivity, - token_claims::CurrentTokenPendingClaim, - token_datas::{CurrentTokenData, TokenData}, - token_ownerships::{CurrentTokenOwnership, TokenOwnership}, - tokens::{ - CurrentTokenOwnershipPK, CurrentTokenPendingClaimPK, TableHandleToOwner, - TableMetadataForToken, Token, TokenDataIdHash, - }, - v2_collections::{CollectionV2, CurrentCollectionV2, CurrentCollectionV2PK}, - v2_token_activities::TokenActivityV2, - v2_token_datas::{CurrentTokenDataV2, CurrentTokenDataV2PK, TokenDataV2}, - v2_token_metadata::{CurrentTokenV2Metadata, CurrentTokenV2MetadataPK}, - v2_token_ownerships::{ - CurrentTokenOwnershipV2, CurrentTokenOwnershipV2PK, NFTOwnershipV2, - TokenOwnershipV2, - }, - v2_token_utils::{ - AptosCollection, BurnEvent, FixedSupply, ObjectWithMetadata, PropertyMapModel, - TokenV2, TokenV2AggregatedData, TokenV2AggregatedDataMapping, TokenV2Burned, - TransferEvent, UnlimitedSupply, - }, - }, - }, - schema, - utils::{ - database::{ - clean_data_for_db, execute_with_better_error, get_chunks, PgDbPool, PgPoolConnection, - }, - util::{get_entry_function_from_user_request, parse_timestamp, standardize_address}, - }, -}; -use anyhow::bail; -use aptos_protos::transaction::v1::{transaction::TxnData, write_set_change::Change, Transaction}; -use async_trait::async_trait; -use diesel::{pg::upsert::excluded, result::Error, ExpressionMethods, PgConnection}; -use field_count::FieldCount; -use std::{ - collections::{HashMap, HashSet}, - fmt::Debug, -}; -use tracing::error; - -pub const NAME: &str = "token_processor"; -pub struct TokenTransactionProcessor { - connection_pool: PgDbPool, - ans_contract_address: Option, - nft_points_contract: Option, -} - -impl TokenTransactionProcessor { - pub fn new( - connection_pool: PgDbPool, - ans_contract_address: Option, - nft_points_contract: Option, - ) -> Self { - tracing::info!( - ans_contract_address = ans_contract_address, - "init TokenTransactionProcessor" - ); - Self { - connection_pool, - ans_contract_address, - nft_points_contract, - } - } -} - -impl Debug for TokenTransactionProcessor { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let state = &self.connection_pool.state(); - write!( - f, - "TokenTransactionProcessor {{ connections: {:?} idle_connections: {:?} }}", - state.connections, state.idle_connections - ) - } -} - -fn insert_to_db_impl( - conn: &mut PgConnection, - basic_token_transaction_lists: (&[Token], &[TokenOwnership], &[TokenData], &[CollectionData]), - basic_token_current_lists: ( - &[CurrentTokenOwnership], - &[CurrentTokenData], - &[CurrentCollectionData], - ), - token_activities: &[TokenActivity], - current_token_claims: &[CurrentTokenPendingClaim], - current_ans_lookups: &[CurrentAnsLookup], - nft_points: &[NftPoints], - ( - 
collections_v2, - token_datas_v2, - token_ownerships_v2, - current_collections_v2, - current_token_datas_v2, - current_token_ownerships_v2, - token_activities_v2, - current_token_v2_metadata, - ): ( - &[CollectionV2], - &[TokenDataV2], - &[TokenOwnershipV2], - &[CurrentCollectionV2], - &[CurrentTokenDataV2], - &[CurrentTokenOwnershipV2], - &[TokenActivityV2], - &[CurrentTokenV2Metadata], - ), -) -> Result<(), diesel::result::Error> { - let (tokens, token_ownerships, token_datas, collection_datas) = basic_token_transaction_lists; - let (current_token_ownerships, current_token_datas, current_collection_datas) = - basic_token_current_lists; - insert_tokens(conn, tokens)?; - insert_token_datas(conn, token_datas)?; - insert_token_ownerships(conn, token_ownerships)?; - insert_collection_datas(conn, collection_datas)?; - insert_current_token_ownerships(conn, current_token_ownerships)?; - insert_current_token_datas(conn, current_token_datas)?; - insert_current_collection_datas(conn, current_collection_datas)?; - insert_token_activities(conn, token_activities)?; - insert_current_token_claims(conn, current_token_claims)?; - insert_current_ans_lookups(conn, current_ans_lookups)?; - insert_nft_points(conn, nft_points)?; - insert_collections_v2(conn, collections_v2)?; - insert_token_datas_v2(conn, token_datas_v2)?; - insert_token_ownerships_v2(conn, token_ownerships_v2)?; - insert_current_collections_v2(conn, current_collections_v2)?; - insert_current_token_datas_v2(conn, current_token_datas_v2)?; - insert_current_token_ownerships_v2(conn, current_token_ownerships_v2)?; - insert_token_activities_v2(conn, token_activities_v2)?; - insert_current_token_v2_metadatas(conn, current_token_v2_metadata)?; - Ok(()) -} - -fn insert_to_db( - conn: &mut PgPoolConnection, - name: &'static str, - start_version: u64, - end_version: u64, - basic_token_transaction_lists: ( - Vec, - Vec, - Vec, - Vec, - ), - basic_token_current_lists: ( - Vec, - Vec, - Vec, - ), - token_activities: Vec, - current_token_claims: Vec, - current_ans_lookups: Vec, - nft_points: Vec, - ( - collections_v2, - token_datas_v2, - token_ownerships_v2, - current_collections_v2, - current_token_datas_v2, - current_token_ownerships_v2, - token_activities_v2, - current_token_v2_metadata, - ): ( - Vec, - Vec, - Vec, - Vec, - Vec, - Vec, - Vec, - Vec, - ), -) -> Result<(), diesel::result::Error> { - tracing::trace!( - name = name, - start_version = start_version, - end_version = end_version, - "Inserting to db", - ); - let (tokens, token_ownerships, token_datas, collection_datas) = basic_token_transaction_lists; - let (current_token_ownerships, current_token_datas, current_collection_datas) = - basic_token_current_lists; - match conn - .build_transaction() - .read_write() - .run::<_, Error, _>(|pg_conn| { - insert_to_db_impl( - pg_conn, - (&tokens, &token_ownerships, &token_datas, &collection_datas), - ( - ¤t_token_ownerships, - ¤t_token_datas, - ¤t_collection_datas, - ), - &token_activities, - ¤t_token_claims, - ¤t_ans_lookups, - &nft_points, - ( - &collections_v2, - &token_datas_v2, - &token_ownerships_v2, - ¤t_collections_v2, - ¤t_token_datas_v2, - ¤t_token_ownerships_v2, - &token_activities_v2, - ¤t_token_v2_metadata, - ), - ) - }) { - Ok(_) => Ok(()), - Err(_) => conn - .build_transaction() - .read_write() - .run::<_, Error, _>(|pg_conn| { - let tokens = clean_data_for_db(tokens, true); - let token_datas = clean_data_for_db(token_datas, true); - let token_ownerships = clean_data_for_db(token_ownerships, true); - let collection_datas = 
clean_data_for_db(collection_datas, true); - let current_token_ownerships = clean_data_for_db(current_token_ownerships, true); - let current_token_datas = clean_data_for_db(current_token_datas, true); - let current_collection_datas = clean_data_for_db(current_collection_datas, true); - let token_activities = clean_data_for_db(token_activities, true); - let current_token_claims = clean_data_for_db(current_token_claims, true); - let current_ans_lookups = clean_data_for_db(current_ans_lookups, true); - let nft_points = clean_data_for_db(nft_points, true); - let collections_v2 = clean_data_for_db(collections_v2, true); - let token_datas_v2 = clean_data_for_db(token_datas_v2, true); - let token_ownerships_v2 = clean_data_for_db(token_ownerships_v2, true); - let current_collections_v2 = clean_data_for_db(current_collections_v2, true); - let current_token_datas_v2 = clean_data_for_db(current_token_datas_v2, true); - let current_token_ownerships_v2 = - clean_data_for_db(current_token_ownerships_v2, true); - let token_activities_v2 = clean_data_for_db(token_activities_v2, true); - let current_token_v2_metadata = clean_data_for_db(current_token_v2_metadata, true); - - insert_to_db_impl( - pg_conn, - (&tokens, &token_ownerships, &token_datas, &collection_datas), - ( - ¤t_token_ownerships, - ¤t_token_datas, - ¤t_collection_datas, - ), - &token_activities, - ¤t_token_claims, - ¤t_ans_lookups, - &nft_points, - ( - &collections_v2, - &token_datas_v2, - &token_ownerships_v2, - ¤t_collections_v2, - ¤t_token_datas_v2, - ¤t_token_ownerships_v2, - &token_activities_v2, - ¤t_token_v2_metadata, - ), - ) - }), - } -} - -fn insert_tokens( - conn: &mut PgConnection, - tokens_to_insert: &[Token], -) -> Result<(), diesel::result::Error> { - use schema::tokens::dsl::*; - - let chunks = get_chunks(tokens_to_insert.len(), Token::field_count()); - for (start_ind, end_ind) in chunks { - execute_with_better_error( - conn, - diesel::insert_into(schema::tokens::table) - .values(&tokens_to_insert[start_ind..end_ind]) - .on_conflict((token_data_id_hash, property_version, transaction_version)) - .do_update() - .set(( - token_properties.eq(excluded(token_properties)), - inserted_at.eq(excluded(inserted_at)), - )), - None, - )?; - } - Ok(()) -} - -fn insert_token_ownerships( - conn: &mut PgConnection, - token_ownerships_to_insert: &[TokenOwnership], -) -> Result<(), diesel::result::Error> { - use schema::token_ownerships::dsl::*; - - let chunks = get_chunks( - token_ownerships_to_insert.len(), - TokenOwnership::field_count(), - ); - for (start_ind, end_ind) in chunks { - execute_with_better_error( - conn, - diesel::insert_into(schema::token_ownerships::table) - .values(&token_ownerships_to_insert[start_ind..end_ind]) - .on_conflict(( - token_data_id_hash, - property_version, - transaction_version, - table_handle, - )) - .do_nothing(), - None, - )?; - } - Ok(()) -} - -fn insert_token_datas( - conn: &mut PgConnection, - token_datas_to_insert: &[TokenData], -) -> Result<(), diesel::result::Error> { - use schema::token_datas::dsl::*; - - let chunks = get_chunks(token_datas_to_insert.len(), TokenData::field_count()); - for (start_ind, end_ind) in chunks { - execute_with_better_error( - conn, - diesel::insert_into(schema::token_datas::table) - .values(&token_datas_to_insert[start_ind..end_ind]) - .on_conflict((token_data_id_hash, transaction_version)) - .do_update() - .set(( - default_properties.eq(excluded(default_properties)), - inserted_at.eq(excluded(inserted_at)), - )), - None, - )?; - } - Ok(()) -} - -fn insert_collection_datas( 
- conn: &mut PgConnection, - collection_datas_to_insert: &[CollectionData], -) -> Result<(), diesel::result::Error> { - use schema::collection_datas::dsl::*; - - let chunks = get_chunks( - collection_datas_to_insert.len(), - CollectionData::field_count(), - ); - for (start_ind, end_ind) in chunks { - execute_with_better_error( - conn, - diesel::insert_into(schema::collection_datas::table) - .values(&collection_datas_to_insert[start_ind..end_ind]) - .on_conflict((collection_data_id_hash, transaction_version)) - .do_nothing(), - None, - )?; - } - Ok(()) -} - -fn insert_current_token_ownerships( - conn: &mut PgConnection, - items_to_insert: &[CurrentTokenOwnership], -) -> Result<(), diesel::result::Error> { - use schema::current_token_ownerships::dsl::*; - - let chunks = get_chunks(items_to_insert.len(), CurrentTokenOwnership::field_count()); - - for (start_ind, end_ind) in chunks { - execute_with_better_error( - conn, - diesel::insert_into(schema::current_token_ownerships::table) - .values(&items_to_insert[start_ind..end_ind]) - .on_conflict((token_data_id_hash, property_version, owner_address)) - .do_update() - .set(( - creator_address.eq(excluded(creator_address)), - collection_name.eq(excluded(collection_name)), - name.eq(excluded(name)), - amount.eq(excluded(amount)), - token_properties.eq(excluded(token_properties)), - last_transaction_version.eq(excluded(last_transaction_version)), - collection_data_id_hash.eq(excluded(collection_data_id_hash)), - table_type.eq(excluded(table_type)), - inserted_at.eq(excluded(inserted_at)), - )), - Some(" WHERE current_token_ownerships.last_transaction_version <= excluded.last_transaction_version "), - )?; - } - Ok(()) -} - -fn insert_current_token_datas( - conn: &mut PgConnection, - items_to_insert: &[CurrentTokenData], -) -> Result<(), diesel::result::Error> { - use schema::current_token_datas::dsl::*; - - let chunks = get_chunks(items_to_insert.len(), CurrentTokenData::field_count()); - - for (start_ind, end_ind) in chunks { - execute_with_better_error( - conn, - diesel::insert_into(schema::current_token_datas::table) - .values(&items_to_insert[start_ind..end_ind]) - .on_conflict(token_data_id_hash) - .do_update() - .set(( - creator_address.eq(excluded(creator_address)), - collection_name.eq(excluded(collection_name)), - name.eq(excluded(name)), - maximum.eq(excluded(maximum)), - supply.eq(excluded(supply)), - largest_property_version.eq(excluded(largest_property_version)), - metadata_uri.eq(excluded(metadata_uri)), - payee_address.eq(excluded(payee_address)), - royalty_points_numerator.eq(excluded(royalty_points_numerator)), - royalty_points_denominator.eq(excluded(royalty_points_denominator)), - maximum_mutable.eq(excluded(maximum_mutable)), - uri_mutable.eq(excluded(uri_mutable)), - description_mutable.eq(excluded(description_mutable)), - properties_mutable.eq(excluded(properties_mutable)), - royalty_mutable.eq(excluded(royalty_mutable)), - default_properties.eq(excluded(default_properties)), - last_transaction_version.eq(excluded(last_transaction_version)), - collection_data_id_hash.eq(excluded(collection_data_id_hash)), - description.eq(excluded(description)), - inserted_at.eq(excluded(inserted_at)), - )), - Some(" WHERE current_token_datas.last_transaction_version <= excluded.last_transaction_version "), - )?; - } - Ok(()) -} - -fn insert_current_collection_datas( - conn: &mut PgConnection, - items_to_insert: &[CurrentCollectionData], -) -> Result<(), diesel::result::Error> { - use schema::current_collection_datas::dsl::*; - - let chunks = 
get_chunks(items_to_insert.len(), CurrentCollectionData::field_count()); - - for (start_ind, end_ind) in chunks { - execute_with_better_error( - conn, - diesel::insert_into(schema::current_collection_datas::table) - .values(&items_to_insert[start_ind..end_ind]) - .on_conflict(collection_data_id_hash) - .do_update() - .set(( - creator_address.eq(excluded(creator_address)), - collection_name.eq(excluded(collection_name)), - description.eq(excluded(description)), - metadata_uri.eq(excluded(metadata_uri)), - supply.eq(excluded(supply)), - maximum.eq(excluded(maximum)), - maximum_mutable.eq(excluded(maximum_mutable)), - uri_mutable.eq(excluded(uri_mutable)), - description_mutable.eq(excluded(description_mutable)), - last_transaction_version.eq(excluded(last_transaction_version)), - table_handle.eq(excluded(table_handle)), - inserted_at.eq(excluded(inserted_at)), - )), - Some(" WHERE current_collection_datas.last_transaction_version <= excluded.last_transaction_version "), - )?; - } - Ok(()) -} - -fn insert_token_activities( - conn: &mut PgConnection, - items_to_insert: &[TokenActivity], -) -> Result<(), diesel::result::Error> { - use schema::token_activities::dsl::*; - - let chunks = get_chunks(items_to_insert.len(), TokenActivity::field_count()); - - for (start_ind, end_ind) in chunks { - execute_with_better_error( - conn, - diesel::insert_into(schema::token_activities::table) - .values(&items_to_insert[start_ind..end_ind]) - .on_conflict(( - transaction_version, - event_account_address, - event_creation_number, - event_sequence_number, - )) - .do_update() - .set(( - inserted_at.eq(excluded(inserted_at)), - event_index.eq(excluded(event_index)), - )), - None, - )?; - } - Ok(()) -} -fn insert_current_token_claims( - conn: &mut PgConnection, - items_to_insert: &[CurrentTokenPendingClaim], -) -> Result<(), diesel::result::Error> { - use schema::current_token_pending_claims::dsl::*; - - let chunks = get_chunks( - items_to_insert.len(), - CurrentTokenPendingClaim::field_count(), - ); - - for (start_ind, end_ind) in chunks { - execute_with_better_error( - conn, - diesel::insert_into(schema::current_token_pending_claims::table) - .values(&items_to_insert[start_ind..end_ind]) - .on_conflict(( - token_data_id_hash, property_version, from_address, to_address - )) - .do_update() - .set(( - collection_data_id_hash.eq(excluded(collection_data_id_hash)), - creator_address.eq(excluded(creator_address)), - collection_name.eq(excluded(collection_name)), - name.eq(excluded(name)), - amount.eq(excluded(amount)), - table_handle.eq(excluded(table_handle)), - last_transaction_version.eq(excluded(last_transaction_version)), - inserted_at.eq(excluded(inserted_at)), - token_data_id.eq(excluded(token_data_id)), - collection_id.eq(excluded(collection_id)), - )), - Some(" WHERE current_token_pending_claims.last_transaction_version <= excluded.last_transaction_version "), - )?; - } - Ok(()) -} - -fn insert_current_ans_lookups( - conn: &mut PgConnection, - items_to_insert: &[CurrentAnsLookup], -) -> Result<(), diesel::result::Error> { - use schema::current_ans_lookup::dsl::*; - - let chunks = get_chunks(items_to_insert.len(), CurrentAnsLookup::field_count()); - - for (start_ind, end_ind) in chunks { - execute_with_better_error( - conn, - diesel::insert_into(schema::current_ans_lookup::table) - .values(&items_to_insert[start_ind..end_ind]) - .on_conflict((domain, subdomain)) - .do_update() - .set(( - registered_address.eq(excluded(registered_address)), - expiration_timestamp.eq(excluded(expiration_timestamp)), - 
last_transaction_version.eq(excluded(last_transaction_version)), - inserted_at.eq(excluded(inserted_at)), - token_name.eq(excluded(token_name)), - )), - Some(" WHERE current_ans_lookup.last_transaction_version <= excluded.last_transaction_version "), - )?; - } - Ok(()) -} - -fn insert_nft_points( - conn: &mut PgConnection, - items_to_insert: &[NftPoints], -) -> Result<(), diesel::result::Error> { - use schema::nft_points::dsl::*; - - let chunks = get_chunks(items_to_insert.len(), NftPoints::field_count()); - - for (start_ind, end_ind) in chunks { - execute_with_better_error( - conn, - diesel::insert_into(schema::nft_points::table) - .values(&items_to_insert[start_ind..end_ind]) - .on_conflict(transaction_version) - .do_nothing(), - None, - )?; - } - Ok(()) -} - -fn insert_collections_v2( - conn: &mut PgConnection, - items_to_insert: &[CollectionV2], -) -> Result<(), diesel::result::Error> { - use schema::collections_v2::dsl::*; - - let chunks = get_chunks(items_to_insert.len(), CollectionV2::field_count()); - - for (start_ind, end_ind) in chunks { - execute_with_better_error( - conn, - diesel::insert_into(schema::collections_v2::table) - .values(&items_to_insert[start_ind..end_ind]) - .on_conflict((transaction_version, write_set_change_index)) - .do_nothing(), - None, - )?; - } - Ok(()) -} - -fn insert_token_datas_v2( - conn: &mut PgConnection, - items_to_insert: &[TokenDataV2], -) -> Result<(), diesel::result::Error> { - use schema::token_datas_v2::dsl::*; - - let chunks = get_chunks(items_to_insert.len(), TokenDataV2::field_count()); - - for (start_ind, end_ind) in chunks { - execute_with_better_error( - conn, - diesel::insert_into(schema::token_datas_v2::table) - .values(&items_to_insert[start_ind..end_ind]) - .on_conflict((transaction_version, write_set_change_index)) - .do_update() - .set(( - inserted_at.eq(excluded(inserted_at)), - decimals.eq(excluded(decimals)), - )), - None, - )?; - } - Ok(()) -} - -fn insert_token_ownerships_v2( - conn: &mut PgConnection, - items_to_insert: &[TokenOwnershipV2], -) -> Result<(), diesel::result::Error> { - use schema::token_ownerships_v2::dsl::*; - - let chunks = get_chunks(items_to_insert.len(), TokenOwnershipV2::field_count()); - - for (start_ind, end_ind) in chunks { - execute_with_better_error( - conn, - diesel::insert_into(schema::token_ownerships_v2::table) - .values(&items_to_insert[start_ind..end_ind]) - .on_conflict((transaction_version, write_set_change_index)) - .do_update() - .set(( - token_data_id.eq(excluded(token_data_id)), - property_version_v1.eq(excluded(property_version_v1)), - owner_address.eq(excluded(owner_address)), - storage_id.eq(excluded(storage_id)), - amount.eq(excluded(amount)), - table_type_v1.eq(excluded(table_type_v1)), - token_properties_mutated_v1.eq(excluded(token_properties_mutated_v1)), - is_soulbound_v2.eq(excluded(is_soulbound_v2)), - token_standard.eq(excluded(token_standard)), - is_fungible_v2.eq(excluded(is_fungible_v2)), - transaction_timestamp.eq(excluded(transaction_timestamp)), - inserted_at.eq(excluded(inserted_at)), - non_transferrable_by_owner.eq(excluded(non_transferrable_by_owner)), - )), - None, - )?; - } - Ok(()) -} - -fn insert_current_collections_v2( - conn: &mut PgConnection, - items_to_insert: &[CurrentCollectionV2], -) -> Result<(), diesel::result::Error> { - use schema::current_collections_v2::dsl::*; - - let chunks = get_chunks(items_to_insert.len(), CurrentCollectionV2::field_count()); - - for (start_ind, end_ind) in chunks { - execute_with_better_error( - conn, - 
diesel::insert_into(schema::current_collections_v2::table) - .values(&items_to_insert[start_ind..end_ind]) - .on_conflict(collection_id) - .do_update() - .set(( - creator_address.eq(excluded(creator_address)), - collection_name.eq(excluded(collection_name)), - description.eq(excluded(description)), - uri.eq(excluded(uri)), - current_supply.eq(excluded(current_supply)), - max_supply.eq(excluded(max_supply)), - total_minted_v2.eq(excluded(total_minted_v2)), - mutable_description.eq(excluded(mutable_description)), - mutable_uri.eq(excluded(mutable_uri)), - table_handle_v1.eq(excluded(table_handle_v1)), - token_standard.eq(excluded(token_standard)), - last_transaction_version.eq(excluded(last_transaction_version)), - last_transaction_timestamp.eq(excluded(last_transaction_timestamp)), - inserted_at.eq(excluded(inserted_at)), - )), - Some(" WHERE current_collections_v2.last_transaction_version <= excluded.last_transaction_version "), - )?; - } - Ok(()) -} - -fn insert_current_token_datas_v2( - conn: &mut PgConnection, - items_to_insert: &[CurrentTokenDataV2], -) -> Result<(), diesel::result::Error> { - use schema::current_token_datas_v2::dsl::*; - - let chunks = get_chunks(items_to_insert.len(), CurrentTokenDataV2::field_count()); - - for (start_ind, end_ind) in chunks { - execute_with_better_error( - conn, - diesel::insert_into(schema::current_token_datas_v2::table) - .values(&items_to_insert[start_ind..end_ind]) - .on_conflict(token_data_id) - .do_update() - .set(( - collection_id.eq(excluded(collection_id)), - token_name.eq(excluded(token_name)), - maximum.eq(excluded(maximum)), - supply.eq(excluded(supply)), - largest_property_version_v1.eq(excluded(largest_property_version_v1)), - token_uri.eq(excluded(token_uri)), - description.eq(excluded(description)), - token_properties.eq(excluded(token_properties)), - token_standard.eq(excluded(token_standard)), - is_fungible_v2.eq(excluded(is_fungible_v2)), - last_transaction_version.eq(excluded(last_transaction_version)), - last_transaction_timestamp.eq(excluded(last_transaction_timestamp)), - inserted_at.eq(excluded(inserted_at)), - decimals.eq(excluded(decimals)), - )), - Some(" WHERE current_token_datas_v2.last_transaction_version <= excluded.last_transaction_version "), - )?; - } - Ok(()) -} - -fn insert_current_token_ownerships_v2( - conn: &mut PgConnection, - items_to_insert: &[CurrentTokenOwnershipV2], -) -> Result<(), diesel::result::Error> { - use schema::current_token_ownerships_v2::dsl::*; - - let chunks = get_chunks( - items_to_insert.len(), - CurrentTokenOwnershipV2::field_count(), - ); - - for (start_ind, end_ind) in chunks { - execute_with_better_error( - conn, - diesel::insert_into(schema::current_token_ownerships_v2::table) - .values(&items_to_insert[start_ind..end_ind]) - .on_conflict((token_data_id, property_version_v1, owner_address, storage_id)) - .do_update() - .set(( - amount.eq(excluded(amount)), - table_type_v1.eq(excluded(table_type_v1)), - token_properties_mutated_v1.eq(excluded(token_properties_mutated_v1)), - is_soulbound_v2.eq(excluded(is_soulbound_v2)), - token_standard.eq(excluded(token_standard)), - is_fungible_v2.eq(excluded(is_fungible_v2)), - last_transaction_version.eq(excluded(last_transaction_version)), - last_transaction_timestamp.eq(excluded(last_transaction_timestamp)), - inserted_at.eq(excluded(inserted_at)), - non_transferrable_by_owner.eq(excluded(non_transferrable_by_owner)), - )), - Some(" WHERE current_token_ownerships_v2.last_transaction_version <= excluded.last_transaction_version "), - )?; - } - 
Ok(()) -} - -fn insert_token_activities_v2( - conn: &mut PgConnection, - items_to_insert: &[TokenActivityV2], -) -> Result<(), diesel::result::Error> { - use schema::token_activities_v2::dsl::*; - - let chunks = get_chunks(items_to_insert.len(), TokenActivityV2::field_count()); - - for (start_ind, end_ind) in chunks { - execute_with_better_error( - conn, - diesel::insert_into(schema::token_activities_v2::table) - .values(&items_to_insert[start_ind..end_ind]) - .on_conflict((transaction_version, event_index)) - .do_nothing(), - None, - )?; - } - Ok(()) -} - -fn insert_current_token_v2_metadatas( - conn: &mut PgConnection, - items_to_insert: &[CurrentTokenV2Metadata], -) -> Result<(), diesel::result::Error> { - use schema::current_token_v2_metadata::dsl::*; - - let chunks = get_chunks(items_to_insert.len(), CurrentTokenV2Metadata::field_count()); - - for (start_ind, end_ind) in chunks { - execute_with_better_error( - conn, - diesel::insert_into(schema::current_token_v2_metadata::table) - .values(&items_to_insert[start_ind..end_ind]) - .on_conflict((object_address, resource_type)) - .do_update() - .set(( - data.eq(excluded(data)), - state_key_hash.eq(excluded(state_key_hash)), - last_transaction_version.eq(excluded(last_transaction_version)), - inserted_at.eq(excluded(inserted_at)), - )), - Some(" WHERE current_token_v2_metadata.last_transaction_version <= excluded.last_transaction_version "), - )?; - } - Ok(()) -} - -#[async_trait] -impl ProcessorTrait for TokenTransactionProcessor { - fn name(&self) -> &'static str { - NAME - } - - async fn process_transactions( - &self, - transactions: Vec, - start_version: u64, - end_version: u64, - ) -> anyhow::Result { - let mut conn = self.get_conn(); - - // First get all token related table metadata from the batch of transactions. This is in case - // an earlier transaction has metadata (in resources) that's missing from a later transaction. 
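// A minimal sketch (hypothetical types) of why the table-handle-to-owner lookup below is built
// over the whole batch before any per-transaction parsing: a later transaction can touch a
// token table whose owning resource was only written by an earlier transaction in the same
// batch, so the map must already be populated when that later transaction is processed.
use std::collections::HashMap;

struct TokenResourceWrite {
    owner_address: String,
    table_handle: String,
}

/// One inner Vec per transaction, in batch order.
fn table_handle_to_owner(batch: &[Vec<TokenResourceWrite>]) -> HashMap<String, String> {
    let mut map = HashMap::new();
    for txn_writes in batch {
        for w in txn_writes {
            map.insert(w.table_handle.clone(), w.owner_address.clone());
        }
    }
    map
}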
- let table_handle_to_owner = - TableMetadataForToken::get_table_handle_to_owner_from_transactions(&transactions); - - // Token V1 only, this section will be deprecated soon - let mut all_tokens = vec![]; - let mut all_token_ownerships = vec![]; - let mut all_token_datas = vec![]; - let mut all_collection_datas = vec![]; - let mut all_token_activities = vec![]; - - // Hashmap key will be the PK of the table, we do not want to send duplicates writes to the db within a batch - let mut all_current_token_ownerships: HashMap< - CurrentTokenOwnershipPK, - CurrentTokenOwnership, - > = HashMap::new(); - let mut all_current_token_datas: HashMap = - HashMap::new(); - let mut all_current_collection_datas: HashMap = - HashMap::new(); - let mut all_current_token_claims: HashMap< - CurrentTokenPendingClaimPK, - CurrentTokenPendingClaim, - > = HashMap::new(); - let mut all_current_ans_lookups: HashMap = - HashMap::new(); - - // This is likely temporary - let mut all_nft_points = vec![]; - - for txn in &transactions { - let ( - mut tokens, - mut token_ownerships, - mut token_datas, - mut collection_datas, - current_token_ownerships, - current_token_datas, - current_collection_datas, - current_token_claims, - ) = Token::from_transaction(txn, &table_handle_to_owner, &mut conn); - all_tokens.append(&mut tokens); - all_token_ownerships.append(&mut token_ownerships); - all_token_datas.append(&mut token_datas); - all_collection_datas.append(&mut collection_datas); - // Given versions will always be increasing here (within a single batch), we can just override current values - all_current_token_ownerships.extend(current_token_ownerships); - all_current_token_datas.extend(current_token_datas); - all_current_collection_datas.extend(current_collection_datas); - - // Track token activities - let mut activities = TokenActivity::from_transaction(txn); - all_token_activities.append(&mut activities); - - // claims - all_current_token_claims.extend(current_token_claims); - - // ANS lookups - let current_ans_lookups = - CurrentAnsLookup::from_transaction(txn, self.ans_contract_address.clone()); - all_current_ans_lookups.extend(current_ans_lookups); - - // NFT points - let nft_points_txn = NftPoints::from_transaction(txn, self.nft_points_contract.clone()); - if let Some(nft_points) = nft_points_txn { - all_nft_points.push(nft_points); - } - } - - // Getting list of values and sorting by pk in order to avoid postgres deadlock since we're doing multi threaded db writes - let mut all_current_token_ownerships = all_current_token_ownerships - .into_values() - .collect::>(); - let mut all_current_token_datas = all_current_token_datas - .into_values() - .collect::>(); - let mut all_current_collection_datas = all_current_collection_datas - .into_values() - .collect::>(); - let mut all_current_token_claims = all_current_token_claims - .into_values() - .collect::>(); - - // Sort by PK - all_current_token_ownerships.sort_by(|a, b| { - (&a.token_data_id_hash, &a.property_version, &a.owner_address).cmp(&( - &b.token_data_id_hash, - &b.property_version, - &b.owner_address, - )) - }); - all_current_token_datas.sort_by(|a, b| a.token_data_id_hash.cmp(&b.token_data_id_hash)); - all_current_collection_datas - .sort_by(|a, b| a.collection_data_id_hash.cmp(&b.collection_data_id_hash)); - all_current_token_claims.sort_by(|a, b| { - ( - &a.token_data_id_hash, - &a.property_version, - &a.from_address, - &a.to_address, - ) - .cmp(&( - &b.token_data_id_hash, - &b.property_version, - &b.from_address, - &a.to_address, - )) - }); - // Sort ans lookup 
values for postgres insert - let mut all_current_ans_lookups = all_current_ans_lookups - .into_values() - .collect::>(); - // Token V2 processing which includes token v1 - let ( - collections_v2, - token_datas_v2, - token_ownerships_v2, - current_collections_v2, - current_token_ownerships_v2, - current_token_datas_v2, - token_activities_v2, - current_token_v2_metadata, - ) = parse_v2_token(&transactions, &table_handle_to_owner, &mut conn); - all_current_ans_lookups - .sort_by(|a, b| a.domain.cmp(&b.domain).then(a.subdomain.cmp(&b.subdomain))); - - let tx_result = insert_to_db( - &mut conn, - self.name(), - start_version, - end_version, - ( - all_tokens, - all_token_ownerships, - all_token_datas, - all_collection_datas, - ), - ( - all_current_token_ownerships, - all_current_token_datas, - all_current_collection_datas, - ), - all_token_activities, - all_current_token_claims, - all_current_ans_lookups, - all_nft_points, - // Token V2 stuff which will token v1 tables above - ( - collections_v2, - token_datas_v2, - token_ownerships_v2, - current_collections_v2, - current_token_ownerships_v2, - current_token_datas_v2, - token_activities_v2, - current_token_v2_metadata, - ), - ); - match tx_result { - Ok(_) => Ok((start_version, end_version)), - Err(e) => { - error!( - start_version = start_version, - end_version = end_version, - processor_name = self.name(), - error = ?e, - "[Parser] Error inserting transactions to db", - ); - bail!(e) - }, - } - } - - fn connection_pool(&self) -> &PgDbPool { - &self.connection_pool - } -} - -fn parse_v2_token( - transactions: &[Transaction], - table_handle_to_owner: &TableHandleToOwner, - conn: &mut PgPoolConnection, -) -> ( - Vec, - Vec, - Vec, - Vec, - Vec, - Vec, - Vec, - Vec, -) { - // Token V2 and V1 combined - let mut collections_v2 = vec![]; - let mut token_datas_v2 = vec![]; - let mut token_ownerships_v2 = vec![]; - let mut token_activities_v2 = vec![]; - let mut current_collections_v2: HashMap = - HashMap::new(); - let mut current_token_datas_v2: HashMap = - HashMap::new(); - let mut current_token_ownerships_v2: HashMap< - CurrentTokenOwnershipV2PK, - CurrentTokenOwnershipV2, - > = HashMap::new(); - // Tracks prior ownership in case a token gets burned - let mut prior_nft_ownership: HashMap = HashMap::new(); - // Get Metadata for token v2 by object - // We want to persist this through the entire batch so that even if a token is burned, - // we can still get the object core metadata for it - let mut token_v2_metadata_helper: TokenV2AggregatedDataMapping = HashMap::new(); - // Basically token properties - let mut current_token_v2_metadata: HashMap = - HashMap::new(); - - // Code above is inefficient (multiple passthroughs) so I'm approaching TokenV2 with a cleaner code structure - for txn in transactions { - let txn_data = txn.txn_data.as_ref().expect("Txn Data doesn't exit!"); - let txn_version = txn.version as i64; - let txn_timestamp = parse_timestamp(txn.timestamp.as_ref().unwrap(), txn_version); - let transaction_info = txn.info.as_ref().expect("Transaction info doesn't exist!"); - - if let TxnData::User(user_txn) = txn_data { - let user_request = user_txn - .request - .as_ref() - .expect("Sends is not present in user txn"); - let entry_function_id_str = get_entry_function_from_user_request(user_request); - - // Get burn events for token v2 by object - let mut tokens_burned: TokenV2Burned = HashSet::new(); - - // Need to do a first pass to get all the objects - for (_, wsc) in transaction_info.changes.iter().enumerate() { - if let 
Change::WriteResource(wr) = wsc.change.as_ref().unwrap() { - if let Some(object) = - ObjectWithMetadata::from_write_resource(wr, txn_version).unwrap() - { - token_v2_metadata_helper.insert( - standardize_address(&wr.address.to_string()), - TokenV2AggregatedData { - aptos_collection: None, - fixed_supply: None, - object, - unlimited_supply: None, - property_map: None, - transfer_event: None, - token: None, - fungible_asset_metadata: None, - fungible_asset_supply: None, - fungible_asset_store: None, - }, - ); - } - } - } - - // Need to do a second pass to get all the structs related to the object - for (_, wsc) in transaction_info.changes.iter().enumerate() { - if let Change::WriteResource(wr) = wsc.change.as_ref().unwrap() { - let address = standardize_address(&wr.address.to_string()); - if let Some(aggregated_data) = token_v2_metadata_helper.get_mut(&address) { - if let Some(fixed_supply) = - FixedSupply::from_write_resource(wr, txn_version).unwrap() - { - aggregated_data.fixed_supply = Some(fixed_supply); - } - if let Some(unlimited_supply) = - UnlimitedSupply::from_write_resource(wr, txn_version).unwrap() - { - aggregated_data.unlimited_supply = Some(unlimited_supply); - } - if let Some(aptos_collection) = - AptosCollection::from_write_resource(wr, txn_version).unwrap() - { - aggregated_data.aptos_collection = Some(aptos_collection); - } - if let Some(property_map) = - PropertyMapModel::from_write_resource(wr, txn_version).unwrap() - { - aggregated_data.property_map = Some(property_map); - } - if let Some(token) = TokenV2::from_write_resource(wr, txn_version).unwrap() - { - aggregated_data.token = Some(token); - } - if let Some(fungible_asset_metadata) = - FungibleAssetMetadata::from_write_resource(wr, txn_version).unwrap() - { - aggregated_data.fungible_asset_metadata = Some(fungible_asset_metadata); - } - if let Some(fungible_asset_supply) = - FungibleAssetSupply::from_write_resource(wr, txn_version).unwrap() - { - aggregated_data.fungible_asset_supply = Some(fungible_asset_supply); - } - if let Some(fungible_asset_store) = - FungibleAssetStore::from_write_resource(wr, txn_version).unwrap() - { - aggregated_data.fungible_asset_store = Some(fungible_asset_store); - } - } - } - } - - // Pass through events to get the burn events and token activities v2 - // This needs to be here because we need the metadata above for token activities - // and burn / transfer events need to come before the next section - for (index, event) in user_txn.events.iter().enumerate() { - if let Some(burn_event) = BurnEvent::from_event(event, txn_version).unwrap() { - tokens_burned.insert(burn_event.get_token_address()); - } - if let Some(transfer_event) = TransferEvent::from_event(event, txn_version).unwrap() - { - if let Some(aggregated_data) = - token_v2_metadata_helper.get_mut(&transfer_event.get_object_address()) - { - // we don't want index to be 0 otherwise we might have collision with write set change index - let index = if index == 0 { - user_txn.events.len() - } else { - index - }; - aggregated_data.transfer_event = Some((index as i64, transfer_event)); - } - } - // handling all the token v1 events - if let Some(event) = TokenActivityV2::get_v1_from_parsed_event( - event, - txn_version, - txn_timestamp, - index as i64, - &entry_function_id_str, - ) - .unwrap() - { - token_activities_v2.push(event); - } - // handling all the token v2 events - if let Some(event) = TokenActivityV2::get_nft_v2_from_parsed_event( - event, - txn_version, - txn_timestamp, - index as i64, - &entry_function_id_str, - 
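// A minimal sketch (hypothetical types; the object-core type name is illustrative) of the
// two-pass scan above over a transaction's write set changes: pass 1 registers every address
// that writes an object-core resource, pass 2 attaches the other resources (supplies, token,
// property map, fungible asset data, ...) to the object they live on. Two passes are needed
// because those resources can appear earlier in the change list than the object resource itself.
use std::collections::HashMap;

#[derive(Default)]
struct AggregatedObjectData {
    has_object_core: bool,
    extra_resources: Vec<String>,
}

/// `changes` is a flat list of (object address, resource type name) pairs for one transaction.
fn aggregate_object_metadata(changes: &[(String, String)]) -> HashMap<String, AggregatedObjectData> {
    const OBJECT_CORE: &str = "0x1::object::ObjectCore";
    let mut by_address: HashMap<String, AggregatedObjectData> = HashMap::new();
    // Pass 1: only object-core writes create an entry.
    for (address, resource_type) in changes {
        if resource_type == OBJECT_CORE {
            by_address.entry(address.clone()).or_default().has_object_core = true;
        }
    }
    // Pass 2: everything else is attached only to objects registered in pass 1.
    for (address, resource_type) in changes {
        if resource_type != OBJECT_CORE {
            if let Some(entry) = by_address.get_mut(address) {
                entry.extra_resources.push(resource_type.clone());
            }
        }
    }
    by_address
}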
&token_v2_metadata_helper, - ) - .unwrap() - { - token_activities_v2.push(event); - } - // handling all the token v2 events - if let Some(event) = TokenActivityV2::get_ft_v2_from_parsed_event( - event, - txn_version, - txn_timestamp, - index as i64, - &entry_function_id_str, - &token_v2_metadata_helper, - conn, - ) - .unwrap() - { - token_activities_v2.push(event); - } - } - - for (index, wsc) in transaction_info.changes.iter().enumerate() { - let wsc_index = index as i64; - match wsc.change.as_ref().unwrap() { - Change::WriteTableItem(table_item) => { - if let Some((collection, current_collection)) = - CollectionV2::get_v1_from_write_table_item( - table_item, - txn_version, - wsc_index, - txn_timestamp, - table_handle_to_owner, - conn, - ) - .unwrap() - { - collections_v2.push(collection); - current_collections_v2.insert( - current_collection.collection_id.clone(), - current_collection, - ); - } - if let Some((token_data, current_token_data)) = - TokenDataV2::get_v1_from_write_table_item( - table_item, - txn_version, - wsc_index, - txn_timestamp, - ) - .unwrap() - { - token_datas_v2.push(token_data); - current_token_datas_v2.insert( - current_token_data.token_data_id.clone(), - current_token_data, - ); - } - if let Some((token_ownership, current_token_ownership)) = - TokenOwnershipV2::get_v1_from_write_table_item( - table_item, - txn_version, - wsc_index, - txn_timestamp, - table_handle_to_owner, - ) - .unwrap() - { - token_ownerships_v2.push(token_ownership); - if let Some(cto) = current_token_ownership { - prior_nft_ownership.insert( - cto.token_data_id.clone(), - NFTOwnershipV2 { - token_data_id: cto.token_data_id.clone(), - owner_address: cto.owner_address.clone(), - is_soulbound: cto.is_soulbound_v2, - }, - ); - current_token_ownerships_v2.insert( - ( - cto.token_data_id.clone(), - cto.property_version_v1.clone(), - cto.owner_address.clone(), - cto.storage_id.clone(), - ), - cto, - ); - } - } - }, - Change::DeleteTableItem(table_item) => { - if let Some((token_ownership, current_token_ownership)) = - TokenOwnershipV2::get_v1_from_delete_table_item( - table_item, - txn_version, - wsc_index, - txn_timestamp, - table_handle_to_owner, - ) - .unwrap() - { - token_ownerships_v2.push(token_ownership); - if let Some(cto) = current_token_ownership { - prior_nft_ownership.insert( - cto.token_data_id.clone(), - NFTOwnershipV2 { - token_data_id: cto.token_data_id.clone(), - owner_address: cto.owner_address.clone(), - is_soulbound: cto.is_soulbound_v2, - }, - ); - current_token_ownerships_v2.insert( - ( - cto.token_data_id.clone(), - cto.property_version_v1.clone(), - cto.owner_address.clone(), - cto.storage_id.clone(), - ), - cto, - ); - } - } - }, - Change::WriteResource(resource) => { - if let Some((collection, current_collection)) = - CollectionV2::get_v2_from_write_resource( - resource, - txn_version, - wsc_index, - txn_timestamp, - &token_v2_metadata_helper, - ) - .unwrap() - { - collections_v2.push(collection); - current_collections_v2.insert( - current_collection.collection_id.clone(), - current_collection, - ); - } - if let Some((token_data, current_token_data)) = - TokenDataV2::get_v2_from_write_resource( - resource, - txn_version, - wsc_index, - txn_timestamp, - &token_v2_metadata_helper, - ) - .unwrap() - { - // Add NFT ownership - if let Some(inner) = TokenOwnershipV2::get_nft_v2_from_token_data( - &token_data, - &token_v2_metadata_helper, - ) - .unwrap() - { - let ( - nft_ownership, - current_nft_ownership, - from_nft_ownership, - from_current_nft_ownership, - ) = inner; - 
token_ownerships_v2.push(nft_ownership); - // this is used to persist latest owner for burn event handling - prior_nft_ownership.insert( - current_nft_ownership.token_data_id.clone(), - NFTOwnershipV2 { - token_data_id: current_nft_ownership.token_data_id.clone(), - owner_address: current_nft_ownership.owner_address.clone(), - is_soulbound: current_nft_ownership.is_soulbound_v2, - }, - ); - current_token_ownerships_v2.insert( - ( - current_nft_ownership.token_data_id.clone(), - current_nft_ownership.property_version_v1.clone(), - current_nft_ownership.owner_address.clone(), - current_nft_ownership.storage_id.clone(), - ), - current_nft_ownership, - ); - // Add the previous owner of the token transfer - if let Some(from_nft_ownership) = from_nft_ownership { - let from_current_nft_ownership = - from_current_nft_ownership.unwrap(); - token_ownerships_v2.push(from_nft_ownership); - current_token_ownerships_v2.insert( - ( - from_current_nft_ownership.token_data_id.clone(), - from_current_nft_ownership.property_version_v1.clone(), - from_current_nft_ownership.owner_address.clone(), - from_current_nft_ownership.storage_id.clone(), - ), - from_current_nft_ownership, - ); - } - } - token_datas_v2.push(token_data); - current_token_datas_v2.insert( - current_token_data.token_data_id.clone(), - current_token_data, - ); - } - - // Add burned NFT handling - if let Some((nft_ownership, current_nft_ownership)) = - TokenOwnershipV2::get_burned_nft_v2_from_write_resource( - resource, - txn_version, - wsc_index, - txn_timestamp, - &tokens_burned, - ) - .unwrap() - { - token_ownerships_v2.push(nft_ownership); - prior_nft_ownership.insert( - current_nft_ownership.token_data_id.clone(), - NFTOwnershipV2 { - token_data_id: current_nft_ownership.token_data_id.clone(), - owner_address: current_nft_ownership.owner_address.clone(), - is_soulbound: current_nft_ownership.is_soulbound_v2, - }, - ); - current_token_ownerships_v2.insert( - ( - current_nft_ownership.token_data_id.clone(), - current_nft_ownership.property_version_v1.clone(), - current_nft_ownership.owner_address.clone(), - current_nft_ownership.storage_id.clone(), - ), - current_nft_ownership, - ); - } - - // Add fungible token handling - if let Some((ft_ownership, current_ft_ownership)) = - TokenOwnershipV2::get_ft_v2_from_write_resource( - resource, - txn_version, - wsc_index, - txn_timestamp, - &token_v2_metadata_helper, - ) - .unwrap() - { - token_ownerships_v2.push(ft_ownership); - current_token_ownerships_v2.insert( - ( - current_ft_ownership.token_data_id.clone(), - current_ft_ownership.property_version_v1.clone(), - current_ft_ownership.owner_address.clone(), - current_ft_ownership.storage_id.clone(), - ), - current_ft_ownership, - ); - } - - // Track token properties - if let Some(token_metadata) = CurrentTokenV2Metadata::from_write_resource( - resource, - txn_version, - &token_v2_metadata_helper, - ) - .unwrap() - { - current_token_v2_metadata.insert( - ( - token_metadata.object_address.clone(), - token_metadata.resource_type.clone(), - ), - token_metadata, - ); - } - }, - Change::DeleteResource(resource) => { - // Add burned NFT handling - if let Some((nft_ownership, current_nft_ownership)) = - TokenOwnershipV2::get_burned_nft_v2_from_delete_resource( - resource, - txn_version, - wsc_index, - txn_timestamp, - &prior_nft_ownership, - &tokens_burned, - conn, - ) - .unwrap() - { - token_ownerships_v2.push(nft_ownership); - prior_nft_ownership.insert( - current_nft_ownership.token_data_id.clone(), - NFTOwnershipV2 { - token_data_id: 
current_nft_ownership.token_data_id.clone(), - owner_address: current_nft_ownership.owner_address.clone(), - is_soulbound: current_nft_ownership.is_soulbound_v2, - }, - ); - current_token_ownerships_v2.insert( - ( - current_nft_ownership.token_data_id.clone(), - current_nft_ownership.property_version_v1.clone(), - current_nft_ownership.owner_address.clone(), - current_nft_ownership.storage_id.clone(), - ), - current_nft_ownership, - ); - } - }, - _ => {}, - } - } - } - } - - // Getting list of values and sorting by pk in order to avoid postgres deadlock since we're doing multi threaded db writes - let mut current_collections_v2 = current_collections_v2 - .into_values() - .collect::>(); - let mut current_token_datas_v2 = current_token_datas_v2 - .into_values() - .collect::>(); - let mut current_token_ownerships_v2 = current_token_ownerships_v2 - .into_values() - .collect::>(); - let mut current_token_v2_metadata = current_token_v2_metadata - .into_values() - .collect::>(); - - // Sort by PK - current_collections_v2.sort_by(|a, b| a.collection_id.cmp(&b.collection_id)); - current_token_datas_v2.sort_by(|a, b| a.token_data_id.cmp(&b.token_data_id)); - current_token_ownerships_v2.sort_by(|a, b| { - ( - &a.token_data_id, - &a.property_version_v1, - &a.owner_address, - &a.storage_id, - ) - .cmp(&( - &b.token_data_id, - &b.property_version_v1, - &b.owner_address, - &b.storage_id, - )) - }); - current_token_v2_metadata.sort_by(|a, b| { - (&a.object_address, &a.resource_type).cmp(&(&b.object_address, &b.resource_type)) - }); - - ( - collections_v2, - token_datas_v2, - token_ownerships_v2, - current_collections_v2, - current_token_datas_v2, - current_token_ownerships_v2, - token_activities_v2, - current_token_v2_metadata, - ) -} diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/src/schema.rs b/ecosystem/indexer-grpc/indexer-grpc-parser/src/schema.rs deleted file mode 100644 index 7fde42d0b9637..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/src/schema.rs +++ /dev/null @@ -1,976 +0,0 @@ -// Copyright © Aptos Foundation - -// @generated automatically by Diesel CLI. - -diesel::table! { - account_transactions (account_address, transaction_version) { - transaction_version -> Int8, - #[max_length = 66] - account_address -> Varchar, - inserted_at -> Timestamp, - } -} - -diesel::table! { - block_metadata_transactions (version) { - version -> Int8, - block_height -> Int8, - #[max_length = 66] - id -> Varchar, - round -> Int8, - epoch -> Int8, - previous_block_votes_bitvec -> Jsonb, - #[max_length = 66] - proposer -> Varchar, - failed_proposer_indices -> Jsonb, - timestamp -> Timestamp, - inserted_at -> Timestamp, - } -} - -diesel::table! { - coin_activities (transaction_version, event_account_address, event_creation_number, event_sequence_number) { - transaction_version -> Int8, - #[max_length = 66] - event_account_address -> Varchar, - event_creation_number -> Int8, - event_sequence_number -> Int8, - #[max_length = 66] - owner_address -> Varchar, - #[max_length = 5000] - coin_type -> Varchar, - amount -> Numeric, - #[max_length = 200] - activity_type -> Varchar, - is_gas_fee -> Bool, - is_transaction_success -> Bool, - #[max_length = 100] - entry_function_id_str -> Nullable, - block_height -> Int8, - transaction_timestamp -> Timestamp, - inserted_at -> Timestamp, - event_index -> Nullable, - } -} - -diesel::table! 
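Collecting each current_* map into a vector and sorting it by primary key before the upsert is what lets several worker threads write batches concurrently without deadlocking Postgres: every writer then acquires row locks in the same order. A minimal sketch of the pattern, with a hypothetical simplified row type:

use std::collections::HashMap;

// Hypothetical simplified row; the real tables key on composites such as
// (token_data_id, property_version_v1, owner_address, storage_id).
struct CurrentRow {
    token_data_id: String,
    owner_address: String,
}

fn to_sorted_rows(map: HashMap<(String, String), CurrentRow>) -> Vec<CurrentRow> {
    let mut rows: Vec<CurrentRow> = map.into_values().collect();
    // Sorting by the table's primary key keeps lock acquisition deterministic
    // across concurrent batch writers.
    rows.sort_by(|a, b| {
        (&a.token_data_id, &a.owner_address).cmp(&(&b.token_data_id, &b.owner_address))
    });
    rows
}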
{ - coin_balances (transaction_version, owner_address, coin_type_hash) { - transaction_version -> Int8, - #[max_length = 66] - owner_address -> Varchar, - #[max_length = 64] - coin_type_hash -> Varchar, - #[max_length = 5000] - coin_type -> Varchar, - amount -> Numeric, - transaction_timestamp -> Timestamp, - inserted_at -> Timestamp, - } -} - -diesel::table! { - coin_infos (coin_type_hash) { - #[max_length = 64] - coin_type_hash -> Varchar, - #[max_length = 5000] - coin_type -> Varchar, - transaction_version_created -> Int8, - #[max_length = 66] - creator_address -> Varchar, - #[max_length = 32] - name -> Varchar, - #[max_length = 10] - symbol -> Varchar, - decimals -> Int4, - transaction_created_timestamp -> Timestamp, - inserted_at -> Timestamp, - #[max_length = 66] - supply_aggregator_table_handle -> Nullable, - supply_aggregator_table_key -> Nullable, - } -} - -diesel::table! { - coin_supply (transaction_version, coin_type_hash) { - transaction_version -> Int8, - #[max_length = 64] - coin_type_hash -> Varchar, - #[max_length = 5000] - coin_type -> Varchar, - supply -> Numeric, - transaction_timestamp -> Timestamp, - transaction_epoch -> Int8, - inserted_at -> Timestamp, - } -} - -diesel::table! { - collection_datas (collection_data_id_hash, transaction_version) { - #[max_length = 64] - collection_data_id_hash -> Varchar, - transaction_version -> Int8, - #[max_length = 66] - creator_address -> Varchar, - #[max_length = 128] - collection_name -> Varchar, - description -> Text, - #[max_length = 512] - metadata_uri -> Varchar, - supply -> Numeric, - maximum -> Numeric, - maximum_mutable -> Bool, - uri_mutable -> Bool, - description_mutable -> Bool, - inserted_at -> Timestamp, - #[max_length = 66] - table_handle -> Varchar, - transaction_timestamp -> Timestamp, - } -} - -diesel::table! { - collections_v2 (transaction_version, write_set_change_index) { - transaction_version -> Int8, - write_set_change_index -> Int8, - #[max_length = 66] - collection_id -> Varchar, - #[max_length = 66] - creator_address -> Varchar, - #[max_length = 128] - collection_name -> Varchar, - description -> Text, - #[max_length = 512] - uri -> Varchar, - current_supply -> Numeric, - max_supply -> Nullable, - total_minted_v2 -> Nullable, - mutable_description -> Nullable, - mutable_uri -> Nullable, - #[max_length = 66] - table_handle_v1 -> Nullable, - #[max_length = 10] - token_standard -> Varchar, - transaction_timestamp -> Timestamp, - inserted_at -> Timestamp, - } -} - -diesel::table! { - current_ans_lookup (domain, subdomain) { - #[max_length = 64] - domain -> Varchar, - #[max_length = 64] - subdomain -> Varchar, - #[max_length = 66] - registered_address -> Nullable, - expiration_timestamp -> Timestamp, - last_transaction_version -> Int8, - inserted_at -> Timestamp, - #[max_length = 140] - token_name -> Varchar, - } -} - -diesel::table! { - current_coin_balances (owner_address, coin_type_hash) { - #[max_length = 66] - owner_address -> Varchar, - #[max_length = 64] - coin_type_hash -> Varchar, - #[max_length = 5000] - coin_type -> Varchar, - amount -> Numeric, - last_transaction_version -> Int8, - last_transaction_timestamp -> Timestamp, - inserted_at -> Timestamp, - } -} - -diesel::table! 
{ - current_collection_datas (collection_data_id_hash) { - #[max_length = 64] - collection_data_id_hash -> Varchar, - #[max_length = 66] - creator_address -> Varchar, - #[max_length = 128] - collection_name -> Varchar, - description -> Text, - #[max_length = 512] - metadata_uri -> Varchar, - supply -> Numeric, - maximum -> Numeric, - maximum_mutable -> Bool, - uri_mutable -> Bool, - description_mutable -> Bool, - last_transaction_version -> Int8, - inserted_at -> Timestamp, - #[max_length = 66] - table_handle -> Varchar, - last_transaction_timestamp -> Timestamp, - } -} - -diesel::table! { - current_collections_v2 (collection_id) { - #[max_length = 66] - collection_id -> Varchar, - #[max_length = 66] - creator_address -> Varchar, - #[max_length = 128] - collection_name -> Varchar, - description -> Text, - #[max_length = 512] - uri -> Varchar, - current_supply -> Numeric, - max_supply -> Nullable, - total_minted_v2 -> Nullable, - mutable_description -> Nullable, - mutable_uri -> Nullable, - #[max_length = 66] - table_handle_v1 -> Nullable, - #[max_length = 10] - token_standard -> Varchar, - last_transaction_version -> Int8, - last_transaction_timestamp -> Timestamp, - inserted_at -> Timestamp, - } -} - -diesel::table! { - current_delegated_staking_pool_balances (staking_pool_address) { - #[max_length = 66] - staking_pool_address -> Varchar, - total_coins -> Numeric, - total_shares -> Numeric, - last_transaction_version -> Int8, - inserted_at -> Timestamp, - operator_commission_percentage -> Numeric, - #[max_length = 66] - inactive_table_handle -> Varchar, - #[max_length = 66] - active_table_handle -> Varchar, - } -} - -diesel::table! { - current_delegator_balances (delegator_address, pool_address, pool_type, table_handle) { - #[max_length = 66] - delegator_address -> Varchar, - #[max_length = 66] - pool_address -> Varchar, - #[max_length = 100] - pool_type -> Varchar, - #[max_length = 66] - table_handle -> Varchar, - last_transaction_version -> Int8, - inserted_at -> Timestamp, - shares -> Numeric, - #[max_length = 66] - parent_table_handle -> Varchar, - } -} - -diesel::table! { - current_objects (object_address) { - #[max_length = 66] - object_address -> Varchar, - #[max_length = 66] - owner_address -> Varchar, - #[max_length = 66] - state_key_hash -> Varchar, - allow_ungated_transfer -> Bool, - last_guid_creation_num -> Numeric, - last_transaction_version -> Int8, - is_deleted -> Bool, - inserted_at -> Timestamp, - } -} - -diesel::table! { - current_staking_pool_voter (staking_pool_address) { - #[max_length = 66] - staking_pool_address -> Varchar, - #[max_length = 66] - voter_address -> Varchar, - last_transaction_version -> Int8, - inserted_at -> Timestamp, - #[max_length = 66] - operator_address -> Varchar, - } -} - -diesel::table! { - current_table_items (table_handle, key_hash) { - #[max_length = 66] - table_handle -> Varchar, - #[max_length = 64] - key_hash -> Varchar, - key -> Text, - decoded_key -> Jsonb, - decoded_value -> Nullable, - is_deleted -> Bool, - last_transaction_version -> Int8, - inserted_at -> Timestamp, - } -} - -diesel::table! 
{ - current_token_datas (token_data_id_hash) { - #[max_length = 64] - token_data_id_hash -> Varchar, - #[max_length = 66] - creator_address -> Varchar, - #[max_length = 128] - collection_name -> Varchar, - #[max_length = 128] - name -> Varchar, - maximum -> Numeric, - supply -> Numeric, - largest_property_version -> Numeric, - #[max_length = 512] - metadata_uri -> Varchar, - #[max_length = 66] - payee_address -> Varchar, - royalty_points_numerator -> Numeric, - royalty_points_denominator -> Numeric, - maximum_mutable -> Bool, - uri_mutable -> Bool, - description_mutable -> Bool, - properties_mutable -> Bool, - royalty_mutable -> Bool, - default_properties -> Jsonb, - last_transaction_version -> Int8, - inserted_at -> Timestamp, - #[max_length = 64] - collection_data_id_hash -> Varchar, - last_transaction_timestamp -> Timestamp, - description -> Text, - } -} - -diesel::table! { - current_token_datas_v2 (token_data_id) { - #[max_length = 66] - token_data_id -> Varchar, - #[max_length = 66] - collection_id -> Varchar, - #[max_length = 128] - token_name -> Varchar, - maximum -> Nullable, - supply -> Numeric, - largest_property_version_v1 -> Nullable, - #[max_length = 512] - token_uri -> Varchar, - description -> Text, - token_properties -> Jsonb, - #[max_length = 10] - token_standard -> Varchar, - is_fungible_v2 -> Nullable, - last_transaction_version -> Int8, - last_transaction_timestamp -> Timestamp, - inserted_at -> Timestamp, - decimals -> Int8, - } -} - -diesel::table! { - current_token_ownerships (token_data_id_hash, property_version, owner_address) { - #[max_length = 64] - token_data_id_hash -> Varchar, - property_version -> Numeric, - #[max_length = 66] - owner_address -> Varchar, - #[max_length = 66] - creator_address -> Varchar, - #[max_length = 128] - collection_name -> Varchar, - #[max_length = 128] - name -> Varchar, - amount -> Numeric, - token_properties -> Jsonb, - last_transaction_version -> Int8, - inserted_at -> Timestamp, - #[max_length = 64] - collection_data_id_hash -> Varchar, - table_type -> Text, - last_transaction_timestamp -> Timestamp, - } -} - -diesel::table! { - current_token_ownerships_v2 (token_data_id, property_version_v1, owner_address, storage_id) { - #[max_length = 66] - token_data_id -> Varchar, - property_version_v1 -> Numeric, - #[max_length = 66] - owner_address -> Varchar, - #[max_length = 66] - storage_id -> Varchar, - amount -> Numeric, - #[max_length = 66] - table_type_v1 -> Nullable, - token_properties_mutated_v1 -> Nullable, - is_soulbound_v2 -> Nullable, - #[max_length = 10] - token_standard -> Varchar, - is_fungible_v2 -> Nullable, - last_transaction_version -> Int8, - last_transaction_timestamp -> Timestamp, - inserted_at -> Timestamp, - non_transferrable_by_owner -> Nullable, - } -} - -diesel::table! 
{ - current_token_pending_claims (token_data_id_hash, property_version, from_address, to_address) { - #[max_length = 64] - token_data_id_hash -> Varchar, - property_version -> Numeric, - #[max_length = 66] - from_address -> Varchar, - #[max_length = 66] - to_address -> Varchar, - #[max_length = 64] - collection_data_id_hash -> Varchar, - #[max_length = 66] - creator_address -> Varchar, - #[max_length = 128] - collection_name -> Varchar, - #[max_length = 128] - name -> Varchar, - amount -> Numeric, - #[max_length = 66] - table_handle -> Varchar, - last_transaction_version -> Int8, - inserted_at -> Timestamp, - last_transaction_timestamp -> Timestamp, - #[max_length = 66] - token_data_id -> Varchar, - #[max_length = 66] - collection_id -> Varchar, - } -} - -diesel::table! { - current_token_v2_metadata (object_address, resource_type) { - #[max_length = 66] - object_address -> Varchar, - #[max_length = 128] - resource_type -> Varchar, - data -> Jsonb, - #[max_length = 66] - state_key_hash -> Varchar, - last_transaction_version -> Int8, - inserted_at -> Timestamp, - } -} - -diesel::table! { - delegated_staking_activities (transaction_version, event_index) { - transaction_version -> Int8, - event_index -> Int8, - #[max_length = 66] - delegator_address -> Varchar, - #[max_length = 66] - pool_address -> Varchar, - event_type -> Text, - amount -> Numeric, - inserted_at -> Timestamp, - } -} - -diesel::table! { - delegated_staking_pool_balances (transaction_version, staking_pool_address) { - transaction_version -> Int8, - #[max_length = 66] - staking_pool_address -> Varchar, - total_coins -> Numeric, - total_shares -> Numeric, - inserted_at -> Timestamp, - operator_commission_percentage -> Numeric, - #[max_length = 66] - inactive_table_handle -> Varchar, - #[max_length = 66] - active_table_handle -> Varchar, - } -} - -diesel::table! { - delegated_staking_pools (staking_pool_address) { - #[max_length = 66] - staking_pool_address -> Varchar, - first_transaction_version -> Int8, - inserted_at -> Timestamp, - } -} - -diesel::table! { - events (account_address, creation_number, sequence_number) { - sequence_number -> Int8, - creation_number -> Int8, - #[max_length = 66] - account_address -> Varchar, - transaction_version -> Int8, - transaction_block_height -> Int8, - #[sql_name = "type"] - type_ -> Text, - data -> Jsonb, - inserted_at -> Timestamp, - event_index -> Nullable, - } -} - -diesel::table! { - indexer_status (db) { - #[max_length = 50] - db -> Varchar, - is_indexer_up -> Bool, - inserted_at -> Timestamp, - } -} - -diesel::table! { - ledger_infos (chain_id) { - chain_id -> Int8, - } -} - -diesel::table! { - move_modules (transaction_version, write_set_change_index) { - transaction_version -> Int8, - write_set_change_index -> Int8, - transaction_block_height -> Int8, - name -> Text, - #[max_length = 66] - address -> Varchar, - bytecode -> Nullable, - friends -> Nullable, - exposed_functions -> Nullable, - structs -> Nullable, - is_deleted -> Bool, - inserted_at -> Timestamp, - } -} - -diesel::table! { - move_resources (transaction_version, write_set_change_index) { - transaction_version -> Int8, - write_set_change_index -> Int8, - transaction_block_height -> Int8, - name -> Text, - #[max_length = 66] - address -> Varchar, - #[sql_name = "type"] - type_ -> Text, - module -> Text, - generic_type_params -> Nullable, - data -> Nullable, - is_deleted -> Bool, - inserted_at -> Timestamp, - #[max_length = 66] - state_key_hash -> Varchar, - } -} - -diesel::table! 
{ - nft_points (transaction_version) { - transaction_version -> Int8, - #[max_length = 66] - owner_address -> Varchar, - token_name -> Text, - point_type -> Text, - amount -> Numeric, - transaction_timestamp -> Timestamp, - inserted_at -> Timestamp, - } -} - -diesel::table! { - objects (transaction_version, write_set_change_index) { - transaction_version -> Int8, - write_set_change_index -> Int8, - #[max_length = 66] - object_address -> Varchar, - #[max_length = 66] - owner_address -> Varchar, - #[max_length = 66] - state_key_hash -> Varchar, - guid_creation_num -> Numeric, - allow_ungated_transfer -> Bool, - is_deleted -> Bool, - inserted_at -> Timestamp, - } -} - -diesel::table! { - processor_status (processor) { - #[max_length = 50] - processor -> Varchar, - last_success_version -> Int8, - last_updated -> Timestamp, - } -} - -diesel::table! { - proposal_votes (transaction_version, proposal_id, voter_address) { - transaction_version -> Int8, - proposal_id -> Int8, - #[max_length = 66] - voter_address -> Varchar, - #[max_length = 66] - staking_pool_address -> Varchar, - num_votes -> Numeric, - should_pass -> Bool, - transaction_timestamp -> Timestamp, - inserted_at -> Timestamp, - } -} - -diesel::table! { - signatures (transaction_version, multi_agent_index, multi_sig_index, is_sender_primary) { - transaction_version -> Int8, - multi_agent_index -> Int8, - multi_sig_index -> Int8, - transaction_block_height -> Int8, - #[max_length = 66] - signer -> Varchar, - is_sender_primary -> Bool, - #[sql_name = "type"] - type_ -> Varchar, - #[max_length = 66] - public_key -> Varchar, - #[max_length = 200] - signature -> Varchar, - threshold -> Int8, - public_key_indices -> Jsonb, - inserted_at -> Timestamp, - } -} - -diesel::table! { - table_items (transaction_version, write_set_change_index) { - key -> Text, - transaction_version -> Int8, - write_set_change_index -> Int8, - transaction_block_height -> Int8, - #[max_length = 66] - table_handle -> Varchar, - decoded_key -> Jsonb, - decoded_value -> Nullable, - is_deleted -> Bool, - inserted_at -> Timestamp, - } -} - -diesel::table! { - table_metadatas (handle) { - #[max_length = 66] - handle -> Varchar, - key_type -> Text, - value_type -> Text, - inserted_at -> Timestamp, - } -} - -diesel::table! { - token_activities (transaction_version, event_account_address, event_creation_number, event_sequence_number) { - transaction_version -> Int8, - #[max_length = 66] - event_account_address -> Varchar, - event_creation_number -> Int8, - event_sequence_number -> Int8, - #[max_length = 64] - collection_data_id_hash -> Varchar, - #[max_length = 64] - token_data_id_hash -> Varchar, - property_version -> Numeric, - #[max_length = 66] - creator_address -> Varchar, - #[max_length = 128] - collection_name -> Varchar, - #[max_length = 128] - name -> Varchar, - #[max_length = 50] - transfer_type -> Varchar, - #[max_length = 66] - from_address -> Nullable, - #[max_length = 66] - to_address -> Nullable, - token_amount -> Numeric, - coin_type -> Nullable, - coin_amount -> Nullable, - inserted_at -> Timestamp, - transaction_timestamp -> Timestamp, - event_index -> Nullable, - } -} - -diesel::table! 
{ - token_activities_v2 (transaction_version, event_index) { - transaction_version -> Int8, - event_index -> Int8, - #[max_length = 66] - event_account_address -> Varchar, - #[max_length = 66] - token_data_id -> Varchar, - property_version_v1 -> Numeric, - #[sql_name = "type"] - type_ -> Varchar, - #[max_length = 66] - from_address -> Nullable, - #[max_length = 66] - to_address -> Nullable, - token_amount -> Numeric, - before_value -> Nullable, - after_value -> Nullable, - #[max_length = 100] - entry_function_id_str -> Nullable, - #[max_length = 10] - token_standard -> Varchar, - is_fungible_v2 -> Nullable, - transaction_timestamp -> Timestamp, - inserted_at -> Timestamp, - } -} - -diesel::table! { - token_datas (token_data_id_hash, transaction_version) { - #[max_length = 64] - token_data_id_hash -> Varchar, - transaction_version -> Int8, - #[max_length = 66] - creator_address -> Varchar, - #[max_length = 128] - collection_name -> Varchar, - #[max_length = 128] - name -> Varchar, - maximum -> Numeric, - supply -> Numeric, - largest_property_version -> Numeric, - #[max_length = 512] - metadata_uri -> Varchar, - #[max_length = 66] - payee_address -> Varchar, - royalty_points_numerator -> Numeric, - royalty_points_denominator -> Numeric, - maximum_mutable -> Bool, - uri_mutable -> Bool, - description_mutable -> Bool, - properties_mutable -> Bool, - royalty_mutable -> Bool, - default_properties -> Jsonb, - inserted_at -> Timestamp, - #[max_length = 64] - collection_data_id_hash -> Varchar, - transaction_timestamp -> Timestamp, - description -> Text, - } -} - -diesel::table! { - token_datas_v2 (transaction_version, write_set_change_index) { - transaction_version -> Int8, - write_set_change_index -> Int8, - #[max_length = 66] - token_data_id -> Varchar, - #[max_length = 66] - collection_id -> Varchar, - #[max_length = 128] - token_name -> Varchar, - maximum -> Nullable, - supply -> Numeric, - largest_property_version_v1 -> Nullable, - #[max_length = 512] - token_uri -> Varchar, - token_properties -> Jsonb, - description -> Text, - #[max_length = 10] - token_standard -> Varchar, - is_fungible_v2 -> Nullable, - transaction_timestamp -> Timestamp, - inserted_at -> Timestamp, - decimals -> Int8, - } -} - -diesel::table! { - token_ownerships (token_data_id_hash, property_version, transaction_version, table_handle) { - #[max_length = 64] - token_data_id_hash -> Varchar, - property_version -> Numeric, - transaction_version -> Int8, - #[max_length = 66] - table_handle -> Varchar, - #[max_length = 66] - creator_address -> Varchar, - #[max_length = 128] - collection_name -> Varchar, - #[max_length = 128] - name -> Varchar, - #[max_length = 66] - owner_address -> Nullable, - amount -> Numeric, - table_type -> Nullable, - inserted_at -> Timestamp, - #[max_length = 64] - collection_data_id_hash -> Varchar, - transaction_timestamp -> Timestamp, - } -} - -diesel::table! 
{ - token_ownerships_v2 (transaction_version, write_set_change_index) { - transaction_version -> Int8, - write_set_change_index -> Int8, - #[max_length = 66] - token_data_id -> Varchar, - property_version_v1 -> Numeric, - #[max_length = 66] - owner_address -> Nullable, - #[max_length = 66] - storage_id -> Varchar, - amount -> Numeric, - #[max_length = 66] - table_type_v1 -> Nullable, - token_properties_mutated_v1 -> Nullable, - is_soulbound_v2 -> Nullable, - #[max_length = 10] - token_standard -> Varchar, - is_fungible_v2 -> Nullable, - transaction_timestamp -> Timestamp, - inserted_at -> Timestamp, - non_transferrable_by_owner -> Nullable, - } -} - -diesel::table! { - tokens (token_data_id_hash, property_version, transaction_version) { - #[max_length = 64] - token_data_id_hash -> Varchar, - property_version -> Numeric, - transaction_version -> Int8, - #[max_length = 66] - creator_address -> Varchar, - #[max_length = 128] - collection_name -> Varchar, - #[max_length = 128] - name -> Varchar, - token_properties -> Jsonb, - inserted_at -> Timestamp, - #[max_length = 64] - collection_data_id_hash -> Varchar, - transaction_timestamp -> Timestamp, - } -} - -diesel::table! { - transactions (version) { - version -> Int8, - block_height -> Int8, - #[max_length = 66] - hash -> Varchar, - #[sql_name = "type"] - type_ -> Varchar, - payload -> Nullable, - #[max_length = 66] - state_change_hash -> Varchar, - #[max_length = 66] - event_root_hash -> Varchar, - #[max_length = 66] - state_checkpoint_hash -> Nullable, - gas_used -> Numeric, - success -> Bool, - vm_status -> Text, - #[max_length = 66] - accumulator_root_hash -> Varchar, - num_events -> Int8, - num_write_set_changes -> Int8, - inserted_at -> Timestamp, - epoch -> Int8, - } -} - -diesel::table! { - user_transactions (version) { - version -> Int8, - block_height -> Int8, - #[max_length = 50] - parent_signature_type -> Varchar, - #[max_length = 66] - sender -> Varchar, - sequence_number -> Int8, - max_gas_amount -> Numeric, - expiration_timestamp_secs -> Timestamp, - gas_unit_price -> Numeric, - timestamp -> Timestamp, - entry_function_id_str -> Text, - inserted_at -> Timestamp, - epoch -> Int8, - } -} - -diesel::table! 
{ - write_set_changes (transaction_version, index) { - transaction_version -> Int8, - index -> Int8, - #[max_length = 66] - hash -> Varchar, - transaction_block_height -> Int8, - #[sql_name = "type"] - type_ -> Text, - #[max_length = 66] - address -> Varchar, - inserted_at -> Timestamp, - } -} - -diesel::allow_tables_to_appear_in_same_query!( - account_transactions, - block_metadata_transactions, - coin_activities, - coin_balances, - coin_infos, - coin_supply, - collection_datas, - collections_v2, - current_ans_lookup, - current_coin_balances, - current_collection_datas, - current_collections_v2, - current_delegated_staking_pool_balances, - current_delegator_balances, - current_objects, - current_staking_pool_voter, - current_table_items, - current_token_datas, - current_token_datas_v2, - current_token_ownerships, - current_token_ownerships_v2, - current_token_pending_claims, - current_token_v2_metadata, - delegated_staking_activities, - delegated_staking_pool_balances, - delegated_staking_pools, - events, - indexer_status, - ledger_infos, - move_modules, - move_resources, - nft_points, - objects, - processor_status, - proposal_votes, - signatures, - table_items, - table_metadatas, - token_activities, - token_activities_v2, - token_datas, - token_datas_v2, - token_ownerships, - token_ownerships_v2, - tokens, - transactions, - user_transactions, - write_set_changes, -); diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/src/utils/counters.rs b/ecosystem/indexer-grpc/indexer-grpc-parser/src/utils/counters.rs deleted file mode 100644 index 663423f0337b8..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/src/utils/counters.rs +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -use aptos_metrics_core::{ - register_gauge_vec, register_int_counter, register_int_counter_vec, register_int_gauge_vec, - GaugeVec, IntCounter, IntCounterVec, IntGaugeVec, -}; -use once_cell::sync::Lazy; - -/// Data latency when processor receives transactions. -pub static PROCESSOR_DATA_RECEIVED_LATENCY_IN_SECS: Lazy = Lazy::new(|| { - register_gauge_vec!( - "indexer_processor_data_receive_latency_in_secs", - "Data latency when processor receives transactions", - &["request_token", "processor_name"] - ) - .unwrap() -}); - -/// Data latency when processor finishes processing transactions. 
-pub static PROCESSOR_DATA_PROCESSED_LATENCY_IN_SECS: Lazy = Lazy::new(|| { - register_gauge_vec!( - "indexer_processor_data_processed_latency_in_secs", - "Data latency when processor finishes processing transactions", - &["request_token", "processor_name"] - ) - .unwrap() -}); - -/// Number of times a given processor has been invoked -pub static PROCESSOR_INVOCATIONS_COUNT: Lazy = Lazy::new(|| { - register_int_counter_vec!( - "indexer_processor_invocation_count", - "Number of times a given processor has been invoked", - &["processor_name"] - ) - .unwrap() -}); - -/// Number of times any given processor has raised an error -pub static PROCESSOR_ERRORS_COUNT: Lazy = Lazy::new(|| { - register_int_counter_vec!( - "indexer_processor_errors", - "Number of times any given processor has raised an error", - &["processor_name"] - ) - .unwrap() -}); - -/// Number of times any given processor has completed successfully -pub static PROCESSOR_SUCCESSES_COUNT: Lazy = Lazy::new(|| { - register_int_counter_vec!( - "indexer_processor_success_count", - "Number of times a given processor has completed successfully", - &["processor_name"] - ) - .unwrap() -}); - -/// Number of times the connection pool has timed out when trying to get a connection -pub static UNABLE_TO_GET_CONNECTION_COUNT: Lazy = Lazy::new(|| { - register_int_counter!( - "indexer_connection_pool_err", - "Number of times the connection pool has timed out when trying to get a connection" - ) - .unwrap() -}); - -/// Number of times the connection pool got a connection -pub static GOT_CONNECTION_COUNT: Lazy = Lazy::new(|| { - register_int_counter!( - "indexer_connection_pool_ok", - "Number of times the connection pool got a connection" - ) - .unwrap() -}); - -#[allow(dead_code)] -/// Number of times the indexer has been unable to fetch a transaction. Ideally zero. -pub static UNABLE_TO_FETCH_TRANSACTION: Lazy = Lazy::new(|| { - register_int_counter!( - "indexer_unable_to_fetch_transaction_count", - "Number of times the indexer has been unable to fetch a transaction" - ) - .unwrap() -}); - -#[allow(dead_code)] -/// Number of times the indexer has been able to fetch a transaction -pub static FETCHED_TRANSACTION: Lazy = Lazy::new(|| { - register_int_counter!( - "indexer_fetched_transaction_count", - "Number of times the indexer has been able to fetch a transaction" - ) - .unwrap() -}); - -/// Max version processed -pub static LATEST_PROCESSED_VERSION: Lazy = Lazy::new(|| { - register_int_gauge_vec!( - "indexer_processor_latest_version", - "Latest version a processor has fully consumed", - &["processor_name"] - ) - .unwrap() -}); diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/src/utils/database.rs b/ecosystem/indexer-grpc/indexer-grpc-parser/src/utils/database.rs deleted file mode 100644 index ece3a98d8e3ba..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/src/utils/database.rs +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -//! 
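The statics above are lazily registered Prometheus collectors; a processor would typically bump them once per processed batch. A hypothetical usage sketch (the function name and arguments are illustrative, not part of the original module):

// Assumes the statics above (PROCESSOR_SUCCESSES_COUNT, LATEST_PROCESSED_VERSION)
// are in scope.
fn record_batch_success(processor_name: &str, end_version: i64) {
    PROCESSOR_SUCCESSES_COUNT
        .with_label_values(&[processor_name])
        .inc();
    LATEST_PROCESSED_VERSION
        .with_label_values(&[processor_name])
        .set(end_version);
}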
Database-related functions -#![allow(clippy::extra_unused_lifetimes)] -use crate::utils::util::remove_null_bytes; -use diesel::{ - pg::{Pg, PgConnection}, - query_builder::{AstPass, Query, QueryFragment}, - r2d2::{ConnectionManager, PoolError, PooledConnection}, - QueryResult, RunQueryDsl, -}; -use std::{cmp::min, sync::Arc}; - -pub type PgPool = diesel::r2d2::Pool>; -pub type PgDbPool = Arc; -pub type PgPoolConnection = PooledConnection>; - -#[derive(QueryId)] -/// Using this will append a where clause at the end of the string upsert function, e.g. -/// INSERT INTO ... ON CONFLICT DO UPDATE SET ... WHERE "transaction_version" = excluded."transaction_version" -/// This is needed when we want to maintain a table with only the latest state -pub struct UpsertFilterLatestTransactionQuery { - query: T, - where_clause: Option<&'static str>, -} - -pub const MAX_DIESEL_PARAM_SIZE: u16 = u16::MAX; - -/// Given diesel has a limit of how many parameters can be inserted in a single operation (u16::MAX) -/// we may need to chunk an array of items based on how many columns are in the table. -/// This function returns boundaries of chunks in the form of (start_index, end_index) -pub fn get_chunks(num_items_to_insert: usize, column_count: usize) -> Vec<(usize, usize)> { - let max_item_size = MAX_DIESEL_PARAM_SIZE as usize / column_count; - let mut chunk: (usize, usize) = (0, min(num_items_to_insert, max_item_size)); - let mut chunks = vec![chunk]; - while chunk.1 != num_items_to_insert { - chunk = ( - chunk.0 + max_item_size, - min(num_items_to_insert, chunk.1 + max_item_size), - ); - chunks.push(chunk); - } - chunks -} - -/// This function will clean the data for postgres. Currently it has support for removing -/// null bytes from strings but in the future we will add more functionality. -pub fn clean_data_for_db serde::Deserialize<'de>>( - items: Vec, - should_remove_null_bytes: bool, -) -> Vec { - if should_remove_null_bytes { - items.iter().map(remove_null_bytes).collect() - } else { - items - } -} - -pub fn new_db_pool(database_url: &str) -> Result { - let manager = ConnectionManager::::new(database_url); - PgPool::builder().build(manager).map(Arc::new) -} - -pub fn execute_with_better_error( - conn: &mut PgConnection, - query: U, - mut additional_where_clause: Option<&'static str>, -) -> QueryResult -where - U: QueryFragment + diesel::query_builder::QueryId, -{ - let original_query = diesel::debug_query::(&query).to_string(); - // This is needed because if we don't insert any row, then diesel makes a call like this - // SELECT 1 FROM TABLE WHERE 1=0 - if original_query.to_lowercase().contains("where") { - additional_where_clause = None; - } - let final_query = UpsertFilterLatestTransactionQuery { - query, - where_clause: additional_where_clause, - }; - let debug_string = diesel::debug_query::(&final_query).to_string(); - tracing::debug!("Executing query: {:?}", debug_string); - let res = final_query.execute(conn); - if let Err(ref e) = res { - tracing::warn!("Error running query: {:?}\n{:?}", e, debug_string); - } - res -} - -/// Section below is required to modify the query. 
-impl Query for UpsertFilterLatestTransactionQuery { - type SqlType = T::SqlType; -} - -impl RunQueryDsl for UpsertFilterLatestTransactionQuery {} - -impl QueryFragment for UpsertFilterLatestTransactionQuery -where - T: QueryFragment, -{ - fn walk_ast<'b>(&'b self, mut out: AstPass<'_, 'b, Pg>) -> QueryResult<()> { - self.query.walk_ast(out.reborrow())?; - if let Some(w) = self.where_clause { - out.push_sql(w); - } - Ok(()) - } -} - -#[cfg(test)] -mod test { - use super::*; - - #[tokio::test] - async fn test_get_chunks_logic() { - assert_eq!(get_chunks(10, 5), vec![(0, 10)]); - assert_eq!(get_chunks(65535, 1), vec![(0, 65535)]); - // 200,000 total items will take 6 buckets. Each bucket can only be 3276 size. - assert_eq!(get_chunks(10000, 20), vec![ - (0, 3276), - (3276, 6552), - (6552, 9828), - (9828, 10000) - ]); - assert_eq!(get_chunks(65535, 2), vec![ - (0, 32767), - (32767, 65534), - (65534, 65535) - ]); - assert_eq!(get_chunks(65535, 3), vec![ - (0, 21845), - (21845, 43690), - (43690, 65535) - ]); - } -} diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/src/utils/mod.rs b/ecosystem/indexer-grpc/indexer-grpc-parser/src/utils/mod.rs deleted file mode 100644 index 4f13167fed4fa..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/src/utils/mod.rs +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -pub mod counters; -pub mod database; -pub mod util; diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/src/utils/util.rs b/ecosystem/indexer-grpc/indexer-grpc-parser/src/utils/util.rs deleted file mode 100644 index d69a7d1dad281..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/src/utils/util.rs +++ /dev/null @@ -1,580 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -use crate::models::property_map::{PropertyMap, TokenObjectPropertyMap}; -use aptos_protos::{ - transaction::v1::{ - multisig_transaction_payload::Payload as MultisigPayloadType, - transaction_payload::Payload as PayloadType, write_set::WriteSet as WriteSetType, - EntryFunctionId, EntryFunctionPayload, MoveScriptBytecode, MoveType, ScriptPayload, - TransactionPayload, UserTransactionRequest, WriteSet, - }, - util::timestamp::Timestamp, -}; -use bigdecimal::{BigDecimal, Signed, ToPrimitive, Zero}; -use serde::{Deserialize, Deserializer, Serialize}; -use serde_json::Value; -use sha2::Digest; -use std::str::FromStr; - -// 9999-12-31 23:59:59, this is the max supported by Google BigQuery -pub const MAX_TIMESTAMP_SECS: i64 = 253_402_300_799; -// Max length of entry function id string to ensure that db doesn't explode -pub const MAX_ENTRY_FUNCTION_LENGTH: usize = 100; - -// Supporting structs to get clean payload without escaped strings -#[derive(Debug, Deserialize, Serialize)] -pub struct EntryFunctionPayloadClean { - pub function: Option, - pub type_arguments: Vec, - pub arguments: Vec, -} - -#[derive(Debug, Deserialize, Serialize)] -pub struct ScriptPayloadClean { - pub code: Option, - pub type_arguments: Vec, - pub arguments: Vec, -} - -#[derive(Debug, Deserialize, Serialize)] -pub struct ScriptWriteSetClean { - pub execute_as: String, - pub script: ScriptPayloadClean, -} - -#[derive(Debug, Deserialize, Serialize)] -pub struct MultisigPayloadClean { - pub multisig_address: String, - pub transaction_payload: Option, -} - -/// Standardizes all addresses and table handles to be length 66 (0x-64 length hash) -pub fn standardize_address(handle: &str) -> String { - if let Some(handle) = handle.strip_prefix("0x") { - 
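get_chunks exists because a single Diesel statement cannot carry more than u16::MAX bind parameters, so a batch is split into at most 65535 / column_count rows per insert. A hypothetical caller might drive chunked inserts like this (insert_in_chunks and insert_rows are illustrative names, not part of the original module):

// Assumes `get_chunks` from above is in scope; `insert_rows` stands in for the
// real Diesel insert call.
fn insert_in_chunks<T>(rows: &[T], column_count: usize, insert_rows: impl Fn(&[T])) {
    for (start, end) in get_chunks(rows.len(), column_count) {
        // Each slice holds at most u16::MAX / column_count rows, keeping the
        // generated statement under Postgres' bind-parameter limit.
        insert_rows(&rows[start..end]);
    }
}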
format!("0x{:0>64}", handle) - } else { - format!("0x{:0>64}", handle) - } -} - -pub fn hash_str(val: &str) -> String { - hex::encode(sha2::Sha256::digest(val.as_bytes())) -} - -pub fn truncate_str(val: &str, max_chars: usize) -> String { - let mut trunc = val.to_string(); - trunc.truncate(max_chars); - trunc -} - -pub fn u64_to_bigdecimal(val: u64) -> BigDecimal { - BigDecimal::from(val) -} - -pub fn bigdecimal_to_u64(val: &BigDecimal) -> u64 { - val.to_u64().expect("Unable to convert big decimal to u64") -} - -pub fn ensure_not_negative(val: BigDecimal) -> BigDecimal { - if val.is_negative() { - return BigDecimal::zero(); - } - val -} - -pub fn get_entry_function_from_user_request( - user_request: &UserTransactionRequest, -) -> Option { - let entry_function_id_str: String = match &user_request.payload.as_ref().unwrap().payload { - Some(PayloadType::EntryFunctionPayload(payload)) => payload.entry_function_id_str.clone(), - Some(PayloadType::MultisigPayload(payload)) => { - if let Some(payload) = payload.transaction_payload.as_ref() { - match payload.payload.as_ref().unwrap() { - MultisigPayloadType::EntryFunctionPayload(payload) => { - Some(payload.entry_function_id_str.clone()) - }, - }; - } - return None; - }, - _ => return None, - }; - Some(truncate_str( - &entry_function_id_str, - MAX_ENTRY_FUNCTION_LENGTH, - )) -} - -/// Part of the json comes escaped from the protobuf so we need to unescape in a safe way -pub fn get_clean_payload(payload: &TransactionPayload, version: i64) -> Option { - match payload.payload.as_ref().unwrap() { - PayloadType::EntryFunctionPayload(inner) => { - let clean = get_clean_entry_function_payload(inner, version); - Some(serde_json::to_value(clean).unwrap_or_else(|_| { - tracing::error!(version = version, "Unable to serialize payload into value"); - panic!() - })) - }, - PayloadType::ScriptPayload(inner) => { - let clean = get_clean_script_payload(inner, version); - Some(serde_json::to_value(clean).unwrap_or_else(|_| { - tracing::error!(version = version, "Unable to serialize payload into value"); - panic!() - })) - }, - PayloadType::ModuleBundlePayload(inner) => { - Some(serde_json::to_value(inner).unwrap_or_else(|_| { - tracing::error!(version = version, "Unable to serialize payload into value"); - panic!() - })) - }, - PayloadType::WriteSetPayload(inner) => { - if let Some(writeset) = inner.write_set.as_ref() { - get_clean_writeset(writeset, version) - } else { - None - } - }, - PayloadType::MultisigPayload(inner) => { - let clean = if let Some(payload) = inner.transaction_payload.as_ref() { - let payload_clean = match payload.payload.as_ref().unwrap() { - MultisigPayloadType::EntryFunctionPayload(payload) => { - let clean = get_clean_entry_function_payload(payload, version); - Some(serde_json::to_value(clean).unwrap_or_else(|_| { - tracing::error!( - version = version, - "Unable to serialize payload into value" - ); - panic!() - })) - }, - }; - MultisigPayloadClean { - multisig_address: inner.multisig_address.clone(), - transaction_payload: payload_clean, - } - } else { - MultisigPayloadClean { - multisig_address: inner.multisig_address.clone(), - transaction_payload: None, - } - }; - Some(serde_json::to_value(clean).unwrap_or_else(|_| { - tracing::error!(version = version, "Unable to serialize payload into value"); - panic!() - })) - }, - } -} - -/// Part of the json comes escaped from the protobuf so we need to unescape in a safe way -/// Note that DirectWriteSet is just events + writeset which is already represented separately -pub fn 
get_clean_writeset(writeset: &WriteSet, version: i64) -> Option { - match writeset.write_set.as_ref().unwrap() { - WriteSetType::ScriptWriteSet(inner) => { - let payload = inner.script.as_ref().unwrap(); - Some( - serde_json::to_value(get_clean_script_payload(payload, version)).unwrap_or_else( - |_| { - tracing::error!( - version = version, - "Unable to serialize payload into value" - ); - panic!() - }, - ), - ) - }, - WriteSetType::DirectWriteSet(_) => None, - } -} - -/// Part of the json comes escaped from the protobuf so we need to unescape in a safe way -fn get_clean_entry_function_payload( - payload: &EntryFunctionPayload, - version: i64, -) -> EntryFunctionPayloadClean { - EntryFunctionPayloadClean { - function: payload.function.clone(), - type_arguments: payload.type_arguments.clone(), - arguments: payload - .arguments - .iter() - .map(|arg| { - serde_json::from_str(arg).unwrap_or_else(|_| { - tracing::error!(version = version, "Unable to serialize payload into value"); - panic!() - }) - }) - .collect(), - } -} - -/// Part of the json comes escaped from the protobuf so we need to unescape in a safe way -fn get_clean_script_payload(payload: &ScriptPayload, version: i64) -> ScriptPayloadClean { - ScriptPayloadClean { - code: payload.code.clone(), - type_arguments: payload.type_arguments.clone(), - arguments: payload - .arguments - .iter() - .map(|arg| { - serde_json::from_str(arg).unwrap_or_else(|_| { - tracing::error!(version = version, "Unable to serialize payload into value"); - panic!() - }) - }) - .collect(), - } -} - -pub fn parse_timestamp(ts: &Timestamp, version: i64) -> chrono::NaiveDateTime { - chrono::NaiveDateTime::from_timestamp_opt(ts.seconds, ts.nanos as u32) - .unwrap_or_else(|| panic!("Could not parse timestamp {:?} for version {}", ts, version)) -} - -pub fn parse_timestamp_secs(ts: u64, version: i64) -> chrono::NaiveDateTime { - chrono::NaiveDateTime::from_timestamp_opt( - std::cmp::min(ts, MAX_TIMESTAMP_SECS as u64) as i64, - 0, - ) - .unwrap_or_else(|| panic!("Could not parse timestamp {:?} for version {}", ts, version)) -} - -pub fn remove_null_bytes serde::Deserialize<'de>>(input: &T) -> T { - let mut txn_json = serde_json::to_value(input).unwrap(); - recurse_remove_null_bytes_from_json(&mut txn_json); - serde_json::from_value::(txn_json).unwrap() -} - -fn recurse_remove_null_bytes_from_json(sub_json: &mut Value) { - match sub_json { - Value::Array(array) => { - for item in array { - recurse_remove_null_bytes_from_json(item); - } - }, - Value::Object(object) => { - for (_key, value) in object { - recurse_remove_null_bytes_from_json(value); - } - }, - Value::String(str) => { - if !str.is_empty() { - let replacement = string_null_byte_replacement(str); - *str = replacement; - } - }, - _ => {}, - } -} - -fn string_null_byte_replacement(value: &mut str) -> String { - value.replace('\u{0000}', "").replace("\\u0000", "") -} - -/// convert the bcs encoded inner value of property_map to its original value in string format -pub fn deserialize_property_map_from_bcs_hexstring<'de, D>( - deserializer: D, -) -> core::result::Result -where - D: Deserializer<'de>, -{ - let s = serde_json::Value::deserialize(deserializer)?; - // iterate the json string to convert key-value pair - // assume the format of {“map”: {“data”: [{“key”: “Yuri”, “value”: {“type”: “String”, “value”: “0x42656e”}}, {“key”: “Tarded”, “value”: {“type”: “String”, “value”: “0x446f766572"}}]}} - // if successfully parsing we return the decoded property_map string otherwise return the original string - 
Ok(convert_bcs_propertymap(s.clone()).unwrap_or(s)) -} - -/// convert the bcs encoded inner value of property_map to its original value in string format -pub fn deserialize_token_object_property_map_from_bcs_hexstring<'de, D>( - deserializer: D, -) -> core::result::Result -where - D: Deserializer<'de>, -{ - let s = serde_json::Value::deserialize(deserializer)?; - // iterate the json string to convert key-value pair - Ok(convert_bcs_token_object_propertymap(s.clone()).unwrap_or(s)) -} - -pub fn deserialize_string_from_hexstring<'de, D>( - deserializer: D, -) -> core::result::Result -where - D: Deserializer<'de>, -{ - let s = ::deserialize(deserializer)?; - Ok(convert_hex(s.clone()).unwrap_or(s)) -} - -/// Convert the bcs serialized vector to its original string format -pub fn convert_bcs_hex(typ: String, value: String) -> Option { - let decoded = hex::decode(value.strip_prefix("0x").unwrap_or(&*value)).ok()?; - - match typ.as_str() { - "0x1::string::String" => bcs::from_bytes::(decoded.as_slice()), - "u8" => bcs::from_bytes::(decoded.as_slice()).map(|e| e.to_string()), - "u64" => bcs::from_bytes::(decoded.as_slice()).map(|e| e.to_string()), - "u128" => bcs::from_bytes::(decoded.as_slice()).map(|e| e.to_string()), - "bool" => bcs::from_bytes::(decoded.as_slice()).map(|e| e.to_string()), - "address" => bcs::from_bytes::(decoded.as_slice()).map(|e| format!("0x{}", e)), - _ => Ok(value), - } - .ok() -} - -/// Convert the bcs serialized vector to its original string format for token v2 property map. -pub fn convert_bcs_hex_new(typ: u8, value: String) -> Option { - let decoded = hex::decode(value.strip_prefix("0x").unwrap_or(&*value)).ok()?; - - match typ { - 0 /* bool */ => bcs::from_bytes::(decoded.as_slice()).map(|e| e.to_string()), - 1 /* u8 */ => bcs::from_bytes::(decoded.as_slice()).map(|e| e.to_string()), - 2 /* u16 */ => bcs::from_bytes::(decoded.as_slice()).map(|e| e.to_string()), - 3 /* u32 */ => bcs::from_bytes::(decoded.as_slice()).map(|e| e.to_string()), - 4 /* u64 */ => bcs::from_bytes::(decoded.as_slice()).map(|e| e.to_string()), - 5 /* u128 */ => bcs::from_bytes::(decoded.as_slice()).map(|e| e.to_string()), - 6 /* u256 */ => bcs::from_bytes::(decoded.as_slice()).map(|e| e.to_string()), - 7 /* address */ => bcs::from_bytes::(decoded.as_slice()).map(|e| format!("0x{}", e)), - 8 /* byte_vector */ => bcs::from_bytes::>(decoded.as_slice()).map(|e| format!("0x{}", hex::encode(e))), - 9 /* string */ => bcs::from_bytes::(decoded.as_slice()), - _ => Ok(value), - } - .ok() -} - -/// Convert the json serialized PropertyMap's inner BCS fields to their original value in string format -pub fn convert_bcs_propertymap(s: Value) -> Option { - match PropertyMap::from_bcs_encode_str(s) { - Some(e) => match serde_json::to_value(&e) { - Ok(val) => Some(val), - Err(_) => None, - }, - None => None, - } -} - -pub fn convert_bcs_token_object_propertymap(s: Value) -> Option { - match TokenObjectPropertyMap::from_bcs_encode_str(s) { - Some(e) => match serde_json::to_value(&e) { - Ok(val) => Some(val), - Err(_) => None, - }, - None => None, - } -} - -/// Convert the vector that is directly generated from b"xxx" -pub fn convert_hex(val: String) -> Option { - let decoded = hex::decode(val.strip_prefix("0x").unwrap_or(&*val)).ok()?; - String::from_utf8(decoded).ok() -} - -/// Deserialize from string to type T -pub fn deserialize_from_string<'de, D, T>(deserializer: D) -> Result -where - D: Deserializer<'de>, - T: FromStr, - ::Err: std::fmt::Display, -{ - use serde::de::Error; - - let s = 
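convert_bcs_hex relies on property values arriving as hex-encoded BCS bytes; for a Move string that is a ULEB128 length prefix followed by UTF-8. A small illustrative check, consistent with the "domain" fixture used in the tests further below:

// 0x06 is the BCS length prefix (6 bytes); the remaining bytes are "domain" in UTF-8.
fn demo_decode_bcs_string() {
    let decoded = hex::decode("06646f6d61696e").unwrap();
    let s: String = bcs::from_bytes(&decoded).unwrap();
    assert_eq!(s, "domain");
}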
::deserialize(deserializer)?; - s.parse::().map_err(D::Error::custom) -} - -#[cfg(test)] -mod tests { - use super::*; - use chrono::Datelike; - use serde::Serialize; - - #[derive(Serialize, Deserialize, Debug)] - struct TypeInfoMock { - #[serde(deserialize_with = "deserialize_string_from_hexstring")] - pub module_name: String, - #[serde(deserialize_with = "deserialize_string_from_hexstring")] - pub struct_name: String, - } - - #[derive(Serialize, Deserialize, Debug)] - struct TokenDataMock { - #[serde(deserialize_with = "deserialize_property_map_from_bcs_hexstring")] - pub default_properties: serde_json::Value, - } - - #[derive(Serialize, Deserialize, Debug)] - struct TokenObjectDataMock { - #[serde(deserialize_with = "deserialize_token_object_property_map_from_bcs_hexstring")] - pub default_properties: serde_json::Value, - } - - #[test] - fn test_parse_timestamp() { - let ts = parse_timestamp( - &Timestamp { - seconds: 1649560602, - nanos: 0, - }, - 1, - ); - assert_eq!(ts.timestamp(), 1649560602); - assert_eq!(ts.year(), 2022); - - let ts2 = parse_timestamp_secs(600000000000000, 2); - assert_eq!(ts2.year(), 9999); - - let ts3 = parse_timestamp_secs(1659386386, 2); - assert_eq!(ts3.timestamp(), 1659386386); - } - - #[test] - fn test_deserialize_string_from_bcs() { - let test_struct = TypeInfoMock { - module_name: String::from("0x6170746f735f636f696e"), - struct_name: String::from("0x4170746f73436f696e"), - }; - let val = serde_json::to_string(&test_struct).unwrap(); - let d: TypeInfoMock = serde_json::from_str(val.as_str()).unwrap(); - assert_eq!(d.module_name.as_str(), "aptos_coin"); - assert_eq!(d.struct_name.as_str(), "AptosCoin"); - } - - #[test] - fn test_deserialize_property_map() { - let test_property_json = r#" - { - "map":{ - "data":[ - { - "key":"type", - "value":{ - "type":"0x1::string::String", - "value":"0x06646f6d61696e" - } - }, - { - "key":"creation_time_sec", - "value":{ - "type":"u64", - "value":"0x140f4f6300000000" - } - }, - { - "key":"expiration_time_sec", - "value":{ - "type":"u64", - "value":"0x9442306500000000" - } - } - ] - } - }"#; - let test_property_json: serde_json::Value = - serde_json::from_str(test_property_json).unwrap(); - let test_struct = TokenDataMock { - default_properties: test_property_json, - }; - let val = serde_json::to_string(&test_struct).unwrap(); - let d: TokenDataMock = serde_json::from_str(val.as_str()).unwrap(); - assert_eq!(d.default_properties["type"], "domain"); - assert_eq!(d.default_properties["creation_time_sec"], "1666125588"); - assert_eq!(d.default_properties["expiration_time_sec"], "1697661588"); - } - - #[test] - fn test_empty_property_map() { - let test_property_json = r#"{"map": {"data": []}}"#; - let test_property_json: serde_json::Value = - serde_json::from_str(test_property_json).unwrap(); - let test_struct = TokenDataMock { - default_properties: test_property_json, - }; - let val = serde_json::to_string(&test_struct).unwrap(); - let d: TokenDataMock = serde_json::from_str(val.as_str()).unwrap(); - assert_eq!(d.default_properties, Value::Object(serde_json::Map::new())); - } - - #[test] - fn test_deserialize_token_object_property_map() { - let test_property_json = r#" - { - "data": [{ - "key": "Rank", - "value": { - "type": 9, - "value": "0x0642726f6e7a65" - } - }, - { - "key": "address_property", - "value": { - "type": 7, - "value": "0x2b4d540735a4e128fda896f988415910a45cab41c9ddd802b32dd16e8f9ca3cd" - } - }, - { - "key": "bytes_property", - "value": { - "type": 8, - "value": "0x0401020304" - } - }, - { - "key": 
"u64_property", - "value": { - "type": 4, - "value": "0x0000000000000001" - } - } - ] - } - "#; - let test_property_json: serde_json::Value = - serde_json::from_str(test_property_json).unwrap(); - let test_struct = TokenObjectDataMock { - default_properties: test_property_json, - }; - let val = serde_json::to_string(&test_struct).unwrap(); - let d: TokenObjectDataMock = serde_json::from_str(val.as_str()).unwrap(); - assert_eq!(d.default_properties["Rank"], "Bronze"); - assert_eq!( - d.default_properties["address_property"], - "0x2b4d540735a4e128fda896f988415910a45cab41c9ddd802b32dd16e8f9ca3cd" - ); - assert_eq!(d.default_properties["bytes_property"], "0x01020304"); - assert_eq!(d.default_properties["u64_property"], "72057594037927936"); - } - - #[test] - fn test_empty_token_object_property_map() { - let test_property_json = r#"{"data": []}"#; - let test_property_json: serde_json::Value = - serde_json::from_str(test_property_json).unwrap(); - let test_struct = TokenObjectDataMock { - default_properties: test_property_json, - }; - let val = serde_json::to_string(&test_struct).unwrap(); - let d: TokenObjectDataMock = serde_json::from_str(val.as_str()).unwrap(); - assert_eq!(d.default_properties, Value::Object(serde_json::Map::new())); - } -} - -// #[derive(Ord, PartialOrd, Eq, PartialEq, Hash, Clone, Copy)] -// pub struct AccountAddress([u8; 32]); - -// impl FromStr for AccountAddress { -// type Err = AccountAddressParseError; - -// fn from_str(s: &str) -> Result { -// // Accept 0xADDRESS or ADDRESS -// if let Ok(address) = AccountAddress::from_hex_literal(s) { -// Ok(address) -// } else { -// Self::from_hex(s) -// } -// } -// } diff --git a/ecosystem/indexer-grpc/indexer-grpc-parser/src/worker.rs b/ecosystem/indexer-grpc/indexer-grpc-parser/src/worker.rs deleted file mode 100644 index 41e69baa0a246..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-parser/src/worker.rs +++ /dev/null @@ -1,482 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -use crate::{ - models::{ledger_info::LedgerInfo, processor_status::ProcessorStatusQuery}, - processors::{ - coin_processor::CoinTransactionProcessor, - default_processor::DefaultTransactionProcessor, - processor_trait::{ProcessingResult, ProcessorTrait}, - stake_processor::StakeTransactionProcessor, - token_processor::TokenTransactionProcessor, - Processor, - }, - schema::ledger_infos, - utils::{ - counters::{ - LATEST_PROCESSED_VERSION, PROCESSOR_DATA_PROCESSED_LATENCY_IN_SECS, - PROCESSOR_DATA_RECEIVED_LATENCY_IN_SECS, PROCESSOR_ERRORS_COUNT, - PROCESSOR_INVOCATIONS_COUNT, PROCESSOR_SUCCESSES_COUNT, - }, - database::{execute_with_better_error, new_db_pool, PgDbPool}, - }, -}; -use anyhow::Context; -use aptos_indexer_grpc_utils::{ - constants::BLOB_STORAGE_SIZE, time_diff_since_pb_timestamp_in_secs, -}; -use aptos_moving_average::MovingAverage; -use aptos_protos::indexer::v1::{ - raw_data_client::RawDataClient, GetTransactionsRequest, TransactionsResponse, -}; -use diesel_migrations::{embed_migrations, EmbeddedMigrations, MigrationHarness}; -use futures::StreamExt; -use std::sync::Arc; -use tracing::{error, info}; - -pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!(); - -pub struct Worker { - pub db_pool: PgDbPool, - pub processor_name: String, - pub postgres_connection_string: String, - pub indexer_grpc_data_service_address: String, - pub indexer_grpc_http2_ping_interval: std::time::Duration, - pub indexer_grpc_http2_ping_timeout: std::time::Duration, - pub auth_token: String, - pub starting_version: 
Option, - pub ending_version: Option, - pub number_concurrent_processing_tasks: usize, - pub ans_address: Option, - pub nft_points_contract: Option, -} - -impl Worker { - pub async fn new( - processor_name: String, - postgres_connection_string: String, - indexer_grpc_data_service_address: String, - indexer_grpc_http2_ping_interval: std::time::Duration, - indexer_grpc_http2_ping_timeout: std::time::Duration, - auth_token: String, - starting_version: Option, - ending_version: Option, - number_concurrent_processing_tasks: Option, - ans_address: Option, - nft_points_contract: Option, - ) -> Self { - info!(processor_name = processor_name, "[Parser] Kicking off"); - - info!( - processor_name = processor_name, - "[Parser] Creating connection pool" - ); - let conn_pool = - new_db_pool(&postgres_connection_string).expect("Failed to create connection pool"); - info!( - processor_name = processor_name, - "[Parser] Finish creating the connection pool" - ); - let number_concurrent_processing_tasks = number_concurrent_processing_tasks.unwrap_or(10); - Self { - db_pool: conn_pool, - processor_name, - postgres_connection_string, - indexer_grpc_data_service_address, - indexer_grpc_http2_ping_interval, - indexer_grpc_http2_ping_timeout, - starting_version, - ending_version, - auth_token, - number_concurrent_processing_tasks, - ans_address, - nft_points_contract, - } - } - - pub async fn run(&mut self) { - let processor_name = self.processor_name.clone(); - - info!( - processor_name = processor_name, - stream_address = self.indexer_grpc_data_service_address.clone(), - "[Parser] Connecting to GRPC endpoint", - ); - - let channel = tonic::transport::Channel::from_shared(format!( - "http://{}", - self.indexer_grpc_data_service_address.clone() - )) - .expect("[Parser] Endpoint is not a valid URI") - .http2_keep_alive_interval(self.indexer_grpc_http2_ping_interval) - .keep_alive_timeout(self.indexer_grpc_http2_ping_timeout); - - let mut rpc_client = match RawDataClient::connect(channel).await { - Ok(client) => client, - Err(e) => { - error!( - processor_name = processor_name, - stream_address = self.indexer_grpc_data_service_address.clone(), - error = ?e, - "[Parser] Error connecting to grpc_stream" - ); - panic!(); - }, - }; - info!( - processor_name = processor_name, - stream_address = self.indexer_grpc_data_service_address.clone(), - "[Parser] Connected to GRPC endpoint", - ); - - info!( - processor_name = processor_name, - "[Parser] Running migrations" - ); - self.run_migrations(); - info!( - processor_name = processor_name, - "[Parser] Finished migrations" - ); - - let starting_version_from_db = self - .get_start_version() - .expect("[Parser] Database error when getting starting version") - .unwrap_or_else(|| { - info!( - processor_name = processor_name, - "[Parser] No starting version from db so starting from version 0" - ); - 0 - }); - - let starting_version = match self.starting_version { - None => starting_version_from_db, - Some(version) => version, - }; - - info!( - processor_name = processor_name, - stream_address = self.indexer_grpc_data_service_address.clone(), - final_start_version = starting_version, - start_version_from_config = self.starting_version, - start_version_from_db = starting_version_from_db, - "[Parser] Making request to GRPC endpoint", - ); - - let request = grpc_request_builder( - starting_version, - self.ending_version - .map(|v| (v as i64 - starting_version as i64 + 1) as u64), - self.auth_token.clone(), - self.processor_name.clone(), - ); - - let mut resp_stream = rpc_client - 
.get_transactions(request) - .await - .expect("[Parser] Failed to get grpc response. Is the server running?") - .into_inner(); - - let concurrent_tasks = self.number_concurrent_processing_tasks; - info!( - processor_name = processor_name, - stream_address = self.indexer_grpc_data_service_address.clone(), - starting_version = starting_version, - concurrent_tasks = concurrent_tasks, - "[Parser] Successfully connected to GRPC endpoint. Now instantiating processor", - ); - - // Instantiates correct processor based on config - let processor_enum = Processor::from_string(&processor_name); - let processor: Arc = match processor_enum { - Processor::CoinProcessor => { - Arc::new(CoinTransactionProcessor::new(self.db_pool.clone())) - }, - Processor::DefaultProcessor => { - Arc::new(DefaultTransactionProcessor::new(self.db_pool.clone())) - }, - Processor::TokenProcessor => Arc::new(TokenTransactionProcessor::new( - self.db_pool.clone(), - self.ans_address.clone(), - self.nft_points_contract.clone(), - )), - Processor::StakeProcessor => { - Arc::new(StakeTransactionProcessor::new(self.db_pool.clone())) - }, - }; - let processor_name = processor.name(); - - let mut ma = MovingAverage::new(10_000); - info!(processor_name = processor_name, "[Parser] Starting stream"); - let mut batch_start_version = starting_version; - let mut chain_matched = false; - - loop { - let mut transactions_batches = vec![]; - if let Some(ending_version) = self.ending_version { - if ending_version < batch_start_version { - info!( - processor_name = processor_name, - ending_version = self.ending_version, - batch_start_version = batch_start_version, - "[Parser] We reached the end version." - ); - break; - } - } - // Gets a batch of transactions from the stream. Batch size is set in the grpc server. - // The number of batches depends on our config - // There could be several special scenarios: - // 1. If we're at the head, we will break out of the loop as soon as we get a partial (transaction counts < 1000) batch. - // 2. If we lose the connection, we will panic. - // 3. If we specified an end version and we hit that, we will break out of the loop as soon as we get an empty batch. - for _ in 0..concurrent_tasks { - let next_stream = match resp_stream.next().await { - Some(Ok(r)) => { - if !chain_matched { - self.validate_grpc_chain_id(r.clone()) - .await - .expect("[Parser] Invalid grpc response with INIT frame."); - chain_matched = true; - } - let start_version = r.transactions.as_slice().first().unwrap().version; - let end_version = r.transactions.as_slice().last().unwrap().version; - info!( - start_version = start_version, - end_version = end_version, - "[Parser] Received chunk of transactions." - ); - r - }, - None => { - // If we get a None, then the stream has ended, i.e., this is a finite stream. - break; - }, - _ => { - panic!("[Parser] Error receiving datastream response."); - }, - }; - let transactions = next_stream.transactions; - - let current_batch_size = transactions.len(); - if current_batch_size == 0 { - error!( - batch_start_version = batch_start_version, - "[Parser] Received empty batch from GRPC stream" - ); - panic!(); - } - transactions_batches.push(transactions); - // If it is a partial batch, then skip polling and head to process it first. - if current_batch_size < BLOB_STORAGE_SIZE { - break; - } - } - - // Process the transactions in parallel - let mut tasks = vec![]; - if transactions_batches.is_empty() { - // If we get an empty batch, we want to skip and continue polling. 
- continue; - } - for transactions in transactions_batches { - let processor_clone = processor.clone(); - let auth_token = self.auth_token.clone(); - let task = tokio::spawn(async move { - let start_version = transactions.as_slice().first().unwrap().version; - let end_version = transactions.as_slice().last().unwrap().version; - let txn_time = transactions.as_slice().first().unwrap().timestamp.clone(); - if let Some(ref t) = txn_time { - PROCESSOR_DATA_RECEIVED_LATENCY_IN_SECS - .with_label_values(&[auth_token.as_str(), processor_name]) - .set(time_diff_since_pb_timestamp_in_secs(t)); - } - PROCESSOR_INVOCATIONS_COUNT - .with_label_values(&[processor_name]) - .inc(); - let processed_result = processor_clone - .process_transactions(transactions, start_version, end_version) - .await; - if let Some(ref t) = txn_time { - PROCESSOR_DATA_PROCESSED_LATENCY_IN_SECS - .with_label_values(&[auth_token.as_str(), processor_name]) - .set(time_diff_since_pb_timestamp_in_secs(t)); - } - processed_result - }); - tasks.push(task); - } - let batches = match futures::future::try_join_all(tasks).await { - Ok(res) => res, - Err(err) => panic!("[Parser] Error processing transaction batches: {:?}", err), - }; - // Update states depending on results of the batch processing - let mut processed_versions = vec![]; - for res in batches { - let processed: ProcessingResult = match res { - Ok(versions) => { - PROCESSOR_SUCCESSES_COUNT - .with_label_values(&[processor_name]) - .inc(); - versions - }, - Err(e) => { - error!( - processor_name = processor_name, - stream_address = self.indexer_grpc_data_service_address.clone(), - error = ?e, - "[Parser] Error processing transactions" - ); - PROCESSOR_ERRORS_COUNT - .with_label_values(&[processor_name]) - .inc(); - panic!(); - }, - }; - processed_versions.push(processed); - } - - // Make sure there are no gaps and advance states - processed_versions.sort(); - let mut prev_start = None; - let mut prev_end = None; - let processed_versions_sorted = processed_versions.clone(); - for (start, end) in processed_versions { - if prev_start.is_none() { - prev_start = Some(start); - prev_end = Some(end); - } else { - if prev_end.unwrap() + 1 != start { - error!( - processor_name = processor_name, - stream_address = self.indexer_grpc_data_service_address.clone(), - processed_versions = processed_versions_sorted - .iter() - .map(|(s, e)| format!("{}-{}", s, e)) - .collect::>() - .join(", "), - "[Parser] Gaps in processing stream" - ); - panic!(); - } - prev_start = Some(start); - prev_end = Some(end); - } - } - let batch_start = processed_versions_sorted.first().unwrap().0; - let batch_end = processed_versions_sorted.last().unwrap().1; - batch_start_version = batch_end + 1; - - LATEST_PROCESSED_VERSION - .with_label_values(&[processor_name]) - .set(batch_end as i64); - processor - .update_last_processed_version(batch_end) - .await - .unwrap(); - - ma.tick_now(batch_end - batch_start + 1); - info!( - processor_name = processor_name, - start_version = batch_start, - end_version = batch_end, - batch_size = batch_end - batch_start + 1, - tps = (ma.avg() * 1000.0) as u64, - "[Parser] Processed transactions.", - ); - } - } - - fn run_migrations(&self) { - let _ = &self - .db_pool - .get() - .expect("[Parser] Could not get connection for migrations") - .run_pending_migrations(MIGRATIONS) - .expect("[Parser] migrations failed!"); - } - - /// Gets the start version for the processor. If not found, start from 0. 
-    pub fn get_start_version(&self) -> anyhow::Result<Option<u64>> {
-        let mut conn = self.db_pool.get()?;
-
-        match ProcessorStatusQuery::get_by_processor(&self.processor_name, &mut conn)? {
-            Some(status) => Ok(Some(status.last_success_version as u64 + 1)),
-            None => Ok(None),
-        }
-    }
-
-    /// Verify the chain id from GRPC against the database.
-    pub async fn check_or_update_chain_id(&self, grpc_chain_id: i64) -> anyhow::Result<u64> {
-        info!(
-            processor_name = self.processor_name.as_str(),
-            "[Parser] Checking if chain id is correct"
-        );
-        let mut conn = self.db_pool.get()?;
-
-        let maybe_existing_chain_id = LedgerInfo::get(&mut conn)?.map(|li| li.chain_id);
-
-        match maybe_existing_chain_id {
-            Some(chain_id) => {
-                anyhow::ensure!(chain_id == grpc_chain_id, "[Parser] Wrong chain detected! Trying to index chain {} now but existing data is for chain {}", grpc_chain_id, chain_id);
-                info!(
-                    processor_name = self.processor_name.as_str(),
-                    chain_id = chain_id,
-                    "[Parser] Chain id matches! Continue to index...",
-                );
-                Ok(chain_id as u64)
-            },
-            None => {
-                info!(
-                    processor_name = self.processor_name.as_str(),
-                    chain_id = grpc_chain_id,
-                    "[Parser] Adding chain id to db, continue to index.."
-                );
-                execute_with_better_error(
-                    &mut conn,
-                    diesel::insert_into(ledger_infos::table).values(LedgerInfo {
-                        chain_id: grpc_chain_id,
-                    }),
-                    None,
-                )
-                .context(r#"[Parser] Error updating chain_id!"#)
-                .map(|_| grpc_chain_id as u64)
-            },
-        }
-    }
-
-    /// GRPC validation
-    pub async fn validate_grpc_chain_id(
-        &self,
-        response: TransactionsResponse,
-    ) -> anyhow::Result<()> {
-        let grpc_chain_id = response
-            .chain_id
-            .ok_or_else(|| anyhow::Error::msg("Chain Id doesn't exist."))?;
-        let _chain_id = self.check_or_update_chain_id(grpc_chain_id as i64).await?;
-        Ok(())
-    }
-}
-
-pub fn grpc_request_builder(
-    starting_version: u64,
-    transactions_count: Option<u64>,
-    grpc_auth_token: String,
-    processor_name: String,
-) -> tonic::Request<GetTransactionsRequest> {
-    let mut request = tonic::Request::new(GetTransactionsRequest {
-        starting_version: Some(starting_version),
-        transactions_count,
-        ..GetTransactionsRequest::default()
-    });
-    request.metadata_mut().insert(
-        aptos_indexer_grpc_utils::constants::GRPC_AUTH_TOKEN_HEADER,
-        grpc_auth_token.parse().unwrap(),
-    );
-    request.metadata_mut().insert(
-        aptos_indexer_grpc_utils::constants::GRPC_REQUEST_NAME_HEADER,
-        processor_name.parse().unwrap(),
-    );
-    request
-}
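
The part of the removed worker most worth understanding before switching to the submodule is its gap detection over processed version ranges: each parallel task reports an inclusive (start, end) pair, the pairs are sorted, and every range must begin exactly one version past the previous range's end, otherwise the worker refuses to advance. Below is a minimal standalone sketch of that check; the function name find_version_gap and the free-standing form are illustrative only and do not appear in either repository.

/// Sketch of the worker's gap check: sort the inclusive (start, end) ranges
/// returned by the processing tasks and verify each one starts exactly one
/// past the previous end. Returns the first gap as (expected_start, actual_start).
fn find_version_gap(mut processed: Vec<(u64, u64)>) -> Option<(u64, u64)> {
    processed.sort();
    let mut prev_end: Option<u64> = None;
    for (start, end) in processed {
        if let Some(prev) = prev_end {
            if prev + 1 != start {
                return Some((prev + 1, start));
            }
        }
        prev_end = Some(end);
    }
    None
}

fn main() {
    // Contiguous batches: no gap.
    assert_eq!(find_version_gap(vec![(0, 99), (100, 199), (200, 299)]), None);
    // A batch starting at 210 leaves versions 200..=209 unprocessed.
    assert_eq!(
        find_version_gap(vec![(0, 99), (100, 199), (210, 299)]),
        Some((200, 210))
    );
    println!("gap check sketch OK");
}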
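
Likewise, the property-map helpers removed above reduce to one idea: a token v2 property value arrives as a hex string whose bytes are BCS-encoded, and a numeric type tag selects which Rust type to decode into. The following reduced sketch assumes the bcs and hex crates as dependencies; decode_property_value is a made-up name and only a few of the ten tags are shown.

// Sketch of the type-tag dispatch: strip an optional "0x" prefix, hex-decode,
// then BCS-decode according to the tag. The removed code also handles
// u8/u16/u32/u128/u256 and addresses.
fn decode_property_value(typ: u8, value: &str) -> Option<String> {
    let decoded = hex::decode(value.strip_prefix("0x").unwrap_or(value)).ok()?;
    match typ {
        0 /* bool */ => bcs::from_bytes::<bool>(&decoded).map(|v| v.to_string()).ok(),
        4 /* u64 */ => bcs::from_bytes::<u64>(&decoded).map(|v| v.to_string()).ok(),
        8 /* byte_vector */ => bcs::from_bytes::<Vec<u8>>(&decoded)
            .map(|v| format!("0x{}", hex::encode(v)))
            .ok(),
        9 /* string */ => bcs::from_bytes::<String>(&decoded).ok(),
        _ => Some(value.to_string()),
    }
}

fn main() {
    // "0x0642726f6e7a65" is a BCS string: length byte 0x06 followed by "Bronze".
    assert_eq!(decode_property_value(9, "0x0642726f6e7a65").as_deref(), Some("Bronze"));
    // "0x0000000000000001" is a little-endian u64, i.e. 1 << 56.
    assert_eq!(
        decode_property_value(4, "0x0000000000000001").as_deref(),
        Some("72057594037927936")
    );
}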