diff --git a/rust/.cargo/config.toml b/rust/.cargo/config.toml
new file mode 100644
index 000000000..3e8e420f4
--- /dev/null
+++ b/rust/.cargo/config.toml
@@ -0,0 +1,17 @@
+[alias]
+xclippy = [
+    "clippy",
+    "--workspace",
+    "--all-targets",
+    "--",
+    "-Dwarnings",
+    "-Wclippy::all",
+    "-Aclippy::upper_case_acronyms",
+    "-Aclippy::enum-variant-names",
+    "-Aclippy::result-large-err",
+    "-Aclippy::mutable-key-type",
+    "-Wclippy::needless-borrow",
+]
+
+[build]
+rustflags = ["--cfg", "tokio_unstable"]
diff --git a/rust/.dockerignore b/rust/.dockerignore
new file mode 100644
index 000000000..b1285602d
--- /dev/null
+++ b/rust/.dockerignore
@@ -0,0 +1,7 @@
+target
+Dockerfile
+.dockerignore
+.git
+.gitignore
+examples
+framework
diff --git a/rust/Cargo.lock b/rust/Cargo.lock
new file mode 100644
index 000000000..90bdee22e
--- /dev/null
+++ b/rust/Cargo.lock
@@ -0,0 +1,4764 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "addr2line"
+version = "0.20.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f4fa78e18c64fce05e902adecd7a5eed15a5e0a3439f7b0e169f0252214865e3"
+dependencies = [
+ "gimli",
+]
+
+[[package]]
+name = "adler"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
+
+[[package]]
+name = "ahash"
+version = "0.8.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "77c3a9648d43b9cd48db467b3f87fdd6e146bcc88ab0180006cef2179fe11d01"
+dependencies = [
+ "cfg-if",
+ "const-random",
+ "getrandom",
+ "once_cell",
+ "serde",
+ "version_check",
+ "zerocopy",
+]
+
+[[package]]
+name = "aho-corasick"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "43f6cb1bf222025340178f382c426f13757b2960e89779dfcb319c32542a5a41"
+dependencies = [
+ "memchr",
+]
+
+[[package]]
+name = "alloc-no-stdlib"
+version = "2.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cc7bb162ec39d46ab1ca8c77bf72e890535becd1751bb45f64c597edb4c8c6b3"
+
+[[package]]
+name = "alloc-stdlib"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "94fb8275041c72129eb51b7d0322c29b8387a0386127718b096429201a5d6ece"
+dependencies = [
+ "alloc-no-stdlib",
+]
+
+[[package]]
+name = "allocative"
+version = "0.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "082af274fd02beef17b7f0725a49ecafe6c075ef56cac9d6363eb3916a9817ae"
+dependencies = [
+ "allocative_derive",
+ "ctor",
+]
+
+[[package]]
+name = "allocative_derive"
+version = "0.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fe233a377643e0fc1a56421d7c90acdec45c291b30345eb9f08e8d0ddce5a4ab"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.48",
+]
+
+[[package]]
+name = "android-tzdata"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0"
+
+[[package]]
+name = "android_system_properties"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "anstream"
+version = "0.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = 
"0ca84f3628370c59db74ee214b3263d58f9aadd9b4fe7e711fd87dc452b7f163" +dependencies = [ + "anstyle", + "anstyle-parse", + "anstyle-query", + "anstyle-wincon", + "colorchoice", + "is-terminal", + "utf8parse", +] + +[[package]] +name = "anstyle" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a30da5c5f2d5e72842e00bcb57657162cdabef0931f40e2deb9b4140440cecd" + +[[package]] +name = "anstyle-parse" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "938874ff5980b03a87c5524b3ae5b59cf99b1d6bc836848df7bc5ada9643c333" +dependencies = [ + "utf8parse", +] + +[[package]] +name = "anstyle-query" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b" +dependencies = [ + "windows-sys 0.48.0", +] + +[[package]] +name = "anstyle-wincon" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "180abfa45703aebe0093f79badacc01b8fd4ea2e35118747e5811127f926e188" +dependencies = [ + "anstyle", + "windows-sys 0.48.0", +] + +[[package]] +name = "antidote" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34fde25430d87a9388dadbe6e34d7f72a462c8b43ac8d309b42b0a8505d7e2a5" + +[[package]] +name = "anyhow" +version = "1.0.71" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c7d0618f0e0b7e8ff11427422b64564d5fb0be1940354bfe2e0529b18a9d9b8" + +[[package]] +name = "aptos-moving-average" +version = "0.1.0" +dependencies = [ + "chrono", +] + +[[package]] +name = "aptos-profiler" +version = "0.1.0" +source = "git+https://github.com/aptos-labs/aptos-core.git?rev=4541add3fd29826ec57f22658ca286d2d6134b93#4541add3fd29826ec57f22658ca286d2d6134b93" +dependencies = [ + "anyhow", + "backtrace", + "jemalloc-sys", + "jemallocator", + "pprof", + "regex", +] + +[[package]] +name = "aptos-protos" +version = "1.3.0" +source = "git+https://github.com/aptos-labs/aptos-core.git?rev=d76b5bb423b78b2b9affc72d3853f0d973d3f11f#d76b5bb423b78b2b9affc72d3853f0d973d3f11f" +dependencies = [ + "futures-core", + "pbjson", + "prost 0.12.3", + "serde", + "tonic 0.11.0", +] + +[[package]] +name = "aptos-system-utils" +version = "0.1.0" +source = "git+https://github.com/aptos-labs/aptos-core.git?rev=4541add3fd29826ec57f22658ca286d2d6134b93#4541add3fd29826ec57f22658ca286d2d6134b93" +dependencies = [ + "anyhow", + "aptos-profiler", + "async-mutex", + "http", + "hyper", + "lazy_static", + "mime", + "pprof", + "regex", + "rstack-self", + "tokio", + "tracing", + "url", +] + +[[package]] +name = "arrayvec" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" + +[[package]] +name = "arrow" +version = "52.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ae9728f104939be6d8d9b368a354b4929b0569160ea1641f0721b55a861ce38" +dependencies = [ + "arrow-arith", + "arrow-array", + "arrow-buffer", + "arrow-cast", + "arrow-csv", + "arrow-data", + "arrow-ipc", + "arrow-json", + "arrow-ord", + "arrow-row", + "arrow-schema", + "arrow-select", + "arrow-string", +] + +[[package]] +name = "arrow-arith" +version = "52.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7029a5b3efbeafbf4a12d12dc16b8f9e9bff20a410b8c25c5d28acc089e1043" +dependencies = [ + "arrow-array", + "arrow-buffer", + 
"arrow-data", + "arrow-schema", + "chrono", + "half", + "num", +] + +[[package]] +name = "arrow-array" +version = "52.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d33238427c60271710695f17742f45b1a5dc5bcfc5c15331c25ddfe7abf70d97" +dependencies = [ + "ahash", + "arrow-buffer", + "arrow-data", + "arrow-schema", + "chrono", + "half", + "hashbrown 0.14.0", + "num", +] + +[[package]] +name = "arrow-buffer" +version = "52.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe9b95e825ae838efaf77e366c00d3fc8cca78134c9db497d6bda425f2e7b7c1" +dependencies = [ + "bytes", + "half", + "num", +] + +[[package]] +name = "arrow-cast" +version = "52.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87cf8385a9d5b5fcde771661dd07652b79b9139fea66193eda6a88664400ccab" +dependencies = [ + "arrow-array", + "arrow-buffer", + "arrow-data", + "arrow-schema", + "arrow-select", + "atoi", + "base64 0.22.0", + "chrono", + "half", + "lexical-core", + "num", + "ryu", +] + +[[package]] +name = "arrow-csv" +version = "52.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cea5068bef430a86690059665e40034625ec323ffa4dd21972048eebb0127adc" +dependencies = [ + "arrow-array", + "arrow-buffer", + "arrow-cast", + "arrow-data", + "arrow-schema", + "chrono", + "csv", + "csv-core", + "lazy_static", + "lexical-core", + "regex", +] + +[[package]] +name = "arrow-data" +version = "52.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb29be98f987bcf217b070512bb7afba2f65180858bca462edf4a39d84a23e10" +dependencies = [ + "arrow-buffer", + "arrow-schema", + "half", + "num", +] + +[[package]] +name = "arrow-ipc" +version = "52.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffc68f6523970aa6f7ce1dc9a33a7d9284cfb9af77d4ad3e617dbe5d79cc6ec8" +dependencies = [ + "arrow-array", + "arrow-buffer", + "arrow-cast", + "arrow-data", + "arrow-schema", + "flatbuffers", +] + +[[package]] +name = "arrow-json" +version = "52.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2041380f94bd6437ab648e6c2085a045e45a0c44f91a1b9a4fe3fed3d379bfb1" +dependencies = [ + "arrow-array", + "arrow-buffer", + "arrow-cast", + "arrow-data", + "arrow-schema", + "chrono", + "half", + "indexmap 2.0.0", + "lexical-core", + "num", + "serde", + "serde_json", +] + +[[package]] +name = "arrow-ord" +version = "52.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcb56ed1547004e12203652f12fe12e824161ff9d1e5cf2a7dc4ff02ba94f413" +dependencies = [ + "arrow-array", + "arrow-buffer", + "arrow-data", + "arrow-schema", + "arrow-select", + "half", + "num", +] + +[[package]] +name = "arrow-row" +version = "52.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "575b42f1fc588f2da6977b94a5ca565459f5ab07b60545e17243fb9a7ed6d43e" +dependencies = [ + "ahash", + "arrow-array", + "arrow-buffer", + "arrow-data", + "arrow-schema", + "half", + "hashbrown 0.14.0", +] + +[[package]] +name = "arrow-schema" +version = "52.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32aae6a60458a2389c0da89c9de0b7932427776127da1a738e2efc21d32f3393" + +[[package]] +name = "arrow-select" +version = "52.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de36abaef8767b4220d7b4a8c2fe5ffc78b47db81b03d77e2136091c3ba39102" +dependencies = [ + "ahash", + 
"arrow-array", + "arrow-buffer", + "arrow-data", + "arrow-schema", + "num", +] + +[[package]] +name = "arrow-string" +version = "52.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e435ada8409bcafc910bc3e0077f532a4daa20e99060a496685c0e3e53cc2597" +dependencies = [ + "arrow-array", + "arrow-buffer", + "arrow-data", + "arrow-schema", + "arrow-select", + "memchr", + "num", + "regex", + "regex-syntax 0.8.3", +] + +[[package]] +name = "async-channel" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35" +dependencies = [ + "concurrent-queue", + "event-listener", + "futures-core", +] + +[[package]] +name = "async-mutex" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "479db852db25d9dbf6204e6cb6253698f175c15726470f78af0d918e99d6156e" +dependencies = [ + "event-listener", +] + +[[package]] +name = "async-stream" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" +dependencies = [ + "async-stream-impl", + "futures-core", + "pin-project-lite", +] + +[[package]] +name = "async-stream-impl" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.48", +] + +[[package]] +name = "async-trait" +version = "0.1.71" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a564d521dd56509c4c47480d00b80ee55f7e385ae48db5744c67ad50c92d2ebf" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.48", +] + +[[package]] +name = "atoi" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f28d99ec8bfea296261ca1af174f24225171fea9664ba9003cbebee704810528" +dependencies = [ + "num-traits", +] + +[[package]] +name = "autocfg" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" + +[[package]] +name = "axum" +version = "0.6.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8175979259124331c1d7bf6586ee7e0da434155e4b2d48ec2c8386281d8df39" +dependencies = [ + "async-trait", + "axum-core", + "bitflags 1.3.2", + "bytes", + "futures-util", + "http", + "http-body", + "hyper", + "itoa", + "matchit", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite", + "rustversion", + "serde", + "sync_wrapper", + "tower", + "tower-layer", + "tower-service", +] + +[[package]] +name = "axum-core" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c" +dependencies = [ + "async-trait", + "bytes", + "futures-util", + "http", + "http-body", + "mime", + "rustversion", + "tower-layer", + "tower-service", +] + +[[package]] +name = "backtrace" +version = "0.3.68" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4319208da049c43661739c5fade2ba182f09d1dc2299b32298d3a31692b17e12" +dependencies = [ + "addr2line", + "cc", + "cfg-if", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", +] + +[[package]] +name = "base64" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" + +[[package]] +name = "base64" +version = "0.21.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "604178f6c5c21f02dc555784810edfb88d34ac2c73b2eae109655649ee73ce3d" + +[[package]] +name = "base64" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9475866fec1451be56a3c2400fd081ff546538961565ccb5b7142cbd22bc7a51" + +[[package]] +name = "base64ct" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" + +[[package]] +name = "bb8" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "98b4b0f25f18bcdc3ac72bdb486ed0acf7e185221fd4dc985bc15db5800b0ba2" +dependencies = [ + "async-trait", + "futures-channel", + "futures-util", + "parking_lot", + "tokio", +] + +[[package]] +name = "bcs" +version = "0.1.4" +source = "git+https://github.com/aptos-labs/bcs.git?rev=d31fab9d81748e2594be5cd5cdf845786a30562d#d31fab9d81748e2594be5cd5cdf845786a30562d" +dependencies = [ + "serde", + "thiserror", +] + +[[package]] +name = "bigdecimal" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c06619be423ea5bb86c95f087d5707942791a08a85530df0db2209a3ecfb8bc9" +dependencies = [ + "autocfg", + "libm", + "num-bigint", + "num-integer", + "num-traits", + "serde", +] + +[[package]] +name = "bincode" +version = "1.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" +dependencies = [ + "serde", +] + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "bitflags" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1" + +[[package]] +name = "block-buffer" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" +dependencies = [ + "block-padding", + "generic-array", +] + +[[package]] +name = "block-buffer" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" +dependencies = [ + "generic-array", +] + +[[package]] +name = "block-padding" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" + +[[package]] +name = "brotli" +version = "6.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74f7971dbd9326d58187408ab83117d8ac1bb9c17b085fdacd1cf2f598719b6b" +dependencies = [ + "alloc-no-stdlib", + "alloc-stdlib", + "brotli-decompressor", +] + +[[package]] +name = "brotli-decompressor" +version = "4.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a45bd2e4095a8b518033b128020dd4a55aab1c0a381ba4404a472630f4bc362" +dependencies = [ + "alloc-no-stdlib", + "alloc-stdlib", +] + +[[package]] +name = "bumpalo" +version = "3.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1" + +[[package]] +name = "bytemuck" +version = "1.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d6d68c57235a3a081186990eca2867354726650f42f7516ca50c28d6281fd15" + +[[package]] +name = "byteorder" +version = "1.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" + +[[package]] +name = "bytes" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" + +[[package]] +name = "canonical_json" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f89083fd014d71c47a718d7f4ac050864dac8587668dbe90baf9e261064c5710" +dependencies = [ + "hex", + "regex", + "serde", + "serde_json", + "thiserror", +] + +[[package]] +name = "cc" +version = "1.0.83" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" +dependencies = [ + "jobserver", + "libc", +] + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "chrono" +version = "0.4.38" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401" +dependencies = [ + "android-tzdata", + "iana-time-zone", + "js-sys", + "num-traits", + "serde", + "wasm-bindgen", + "windows-targets 0.52.5", +] + +[[package]] +name = "clap" +version = "4.3.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1640e5cc7fb47dbb8338fd471b105e7ed6c3cb2aeb00c2e067127ffd3764a05d" +dependencies = [ + "clap_builder", + "clap_derive", + "once_cell", +] + +[[package]] +name = "clap_builder" +version = "4.3.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "98c59138d527eeaf9b53f35a77fcc1fad9d883116070c63d5de1c7dc7b00c72b" +dependencies = [ + "anstream", + "anstyle", + "clap_lex", + "strsim", +] + +[[package]] +name = "clap_derive" +version = "4.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8cd2b2a819ad6eec39e8f1d6b53001af1e5469f8c177579cdaeb313115b825f" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn 2.0.48", +] + +[[package]] +name = "clap_lex" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2da6da31387c7e4ef160ffab6d5e7f00c42626fe39aea70a7b0f1773f7dd6c1b" + +[[package]] +name = "colorchoice" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" + +[[package]] +name = "concurrent-queue" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62ec6771ecfa0762d24683ee5a32ad78487a3d3afdc0fb8cae19d2c5deb50b7c" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "const-oid" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4c78c047431fee22c1a7bb92e00ad095a02a983affe4d8a72e2a2c62c1b94f3" + +[[package]] +name = "const-random" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"87e00182fe74b066627d63b85fd550ac2998d4b0bd86bfed477a0ae4c7c71359" +dependencies = [ + "const-random-macro", +] + +[[package]] +name = "const-random-macro" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9d839f2a20b0aee515dc581a6172f2321f96cab76c1a38a4c584a194955390e" +dependencies = [ + "getrandom", + "once_cell", + "tiny-keccak", +] + +[[package]] +name = "cookie" +version = "0.16.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e859cd57d0710d9e06c381b550c06e76992472a8c6d527aecd2fc673dcc231fb" +dependencies = [ + "percent-encoding", + "time", + "version_check", +] + +[[package]] +name = "cookie_store" +version = "0.16.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d606d0fba62e13cf04db20536c05cb7f13673c161cb47a47a82b9b9e7d3f1daa" +dependencies = [ + "cookie", + "idna 0.2.3", + "log", + "publicsuffix", + "serde", + "serde_derive", + "serde_json", + "time", + "url", +] + +[[package]] +name = "core-foundation" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "core-foundation-sys" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" + +[[package]] +name = "cpp_demangle" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e8227005286ec39567949b33df9896bcadfa6051bccca2488129f108ca23119" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "cpufeatures" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a17b76ff3a4162b0b27f354a0c87015ddad39d35f9c0c36607a3bdd175dde1f1" +dependencies = [ + "libc", +] + +[[package]] +name = "crc32fast" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" + +[[package]] +name = "crunchy" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" + +[[package]] +name = "crypto-bigint" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03c6a1d5fa1de37e071642dfa44ec552ca5b299adb128fab16138e24b548fd21" +dependencies = [ + "generic-array", + "subtle", +] + +[[package]] +name = "crypto-common" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" +dependencies = [ + "generic-array", + "typenum", +] + +[[package]] +name = "csv" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac574ff4d437a7b5ad237ef331c17ccca63c46479e5b5453eb8e10bb99a759fe" +dependencies = [ + "csv-core", + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "csv-core" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5efa2b3d7902f4b634a20cae3c9c4e6209dc4779feb6863329607560143efa70" +dependencies 
= [ + "memchr", +] + +[[package]] +name = "ctor" +version = "0.1.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d2301688392eb071b0bf1a37be05c469d3cc4dbbd95df672fe28ab021e6a096" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "debugid" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef552e6f588e446098f6ba40d89ac146c8c7b64aade83c051ee00bb5d2bc18d" +dependencies = [ + "uuid", +] + +[[package]] +name = "der" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6919815d73839e7ad218de758883aae3a257ba6759ce7a9992501efbb53d705c" +dependencies = [ + "const-oid", + "crypto-bigint", + "pem-rfc7468", +] + +[[package]] +name = "deranged" +version = "0.3.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" +dependencies = [ + "powerfmt", + "serde", +] + +[[package]] +name = "diesel" +version = "2.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62c6fcf842f17f8c78ecf7c81d75c5ce84436b41ee07e03f490fbb5f5a8731d8" +dependencies = [ + "bigdecimal", + "bitflags 2.5.0", + "byteorder", + "chrono", + "diesel_derives", + "itoa", + "num-bigint", + "num-integer", + "num-traits", + "pq-sys", + "serde_json", +] + +[[package]] +name = "diesel-async" +version = "0.4.1" +source = "git+https://github.com/weiznich/diesel_async.git?rev=d02798c67065d763154d7272dd0c09b39757d0f2#d02798c67065d763154d7272dd0c09b39757d0f2" +dependencies = [ + "async-trait", + "bb8", + "diesel", + "futures-util", + "scoped-futures", + "tokio", + "tokio-postgres", +] + +[[package]] +name = "diesel_derives" +version = "2.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef8337737574f55a468005a83499da720f20c65586241ffea339db9ecdfd2b44" +dependencies = [ + "diesel_table_macro_syntax", + "proc-macro2", + "quote", + "syn 2.0.48", +] + +[[package]] +name = "diesel_migrations" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6036b3f0120c5961381b570ee20a02432d7e2d27ea60de9578799cf9156914ac" +dependencies = [ + "diesel", + "migrations_internals", + "migrations_macros", +] + +[[package]] +name = "diesel_table_macro_syntax" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc5557efc453706fed5e4fa85006fe9817c224c3f480a34c7e5959fd700921c5" +dependencies = [ + "syn 2.0.48", +] + +[[package]] +name = "digest" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" +dependencies = [ + "generic-array", +] + +[[package]] +name = "digest" +version = "0.10.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" +dependencies = [ + "block-buffer 0.10.4", + "crypto-common", + "subtle", +] + +[[package]] +name = "dw" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef0ed82b765c2ab79fb48e4bf2c95bd583202f4078a702bc714cc6e6f3ca80c3" +dependencies = [ + "dw-sys", + "foreign-types 0.5.0", + "libc", +] + +[[package]] +name = "dw-sys" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14eb35c87ff6626cd1021bb32bc7d9a5372ea72547e1eaf0343a841d9d55a973" +dependencies = [ + "libc", 
+ "pkg-config", +] + +[[package]] +name = "either" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91" + +[[package]] +name = "encoding_rs" +version = "0.8.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "071a31f4ee85403370b58aca746f01041ede6f0da2730960ad001edc2b71b394" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "enum_dispatch" +version = "0.3.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f33313078bb8d4d05a2733a94ac4c2d8a0df9a2b84424ebf4f33bfc224a890e" +dependencies = [ + "once_cell", + "proc-macro2", + "quote", + "syn 2.0.48", +] + +[[package]] +name = "equivalent" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" + +[[package]] +name = "errno" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4bcfec3a70f97c962c307b2d2c56e358cf1d00b558d74262b5f929ee8cc7e73a" +dependencies = [ + "errno-dragonfly", + "libc", + "windows-sys 0.48.0", +] + +[[package]] +name = "errno-dragonfly" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" +dependencies = [ + "cc", + "libc", +] + +[[package]] +name = "event-listener" +version = "2.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" + +[[package]] +name = "fallible-iterator" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" + +[[package]] +name = "fastrand" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" +dependencies = [ + "instant", +] + +[[package]] +name = "field_count" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "284d5f85dd574cf01094bca24aefa69a43539dbfc72b1326f038d540b2daadc7" +dependencies = [ + "field_count_derive", +] + +[[package]] +name = "field_count_derive" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1320970ff3b1c1cacc6a38e8cdb1aced955f29627697cd992c5ded82eb646a8" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "findshlibs" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "40b9e59cd0f7e0806cca4be089683ecb6434e602038df21fe6bf6711b2f07f64" +dependencies = [ + "cc", + "lazy_static", + "libc", + "winapi", +] + +[[package]] +name = "finl_unicode" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fcfdc7a0362c9f4444381a9e697c79d435fe65b52a37466fc2c1184cee9edc6" + +[[package]] +name = "flatbuffers" +version = "24.3.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8add37afff2d4ffa83bc748a70b4b1370984f6980768554182424ef71447c35f" +dependencies = [ + "bitflags 1.3.2", + "rustc_version", +] + +[[package]] +name = "flate2" +version = "1.0.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b9429470923de8e8cbd4d2dc513535400b4b3fef0319fb5c4e1f520a7bef743" +dependencies = [ + 
"crc32fast", + "miniz_oxide", +] + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "foreign-types" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +dependencies = [ + "foreign-types-shared 0.1.1", +] + +[[package]] +name = "foreign-types" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d737d9aa519fb7b749cbc3b962edcf310a8dd1f4b67c91c4f83975dbdd17d965" +dependencies = [ + "foreign-types-macros", + "foreign-types-shared 0.3.1", +] + +[[package]] +name = "foreign-types-macros" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a5c6c585bc94aaf2c7b51dd4c2ba22680844aba4c687be581871a6f518c5742" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.48", +] + +[[package]] +name = "foreign-types-shared" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" + +[[package]] +name = "foreign-types-shared" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa9a19cbb55df58761df49b23516a86d432839add4af60fc256da840f66ed35b" + +[[package]] +name = "form_urlencoded" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652" +dependencies = [ + "percent-encoding", +] + +[[package]] +name = "futures" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" +dependencies = [ + "futures-channel", + "futures-core", + "futures-executor", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-channel" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" +dependencies = [ + "futures-core", + "futures-sink", +] + +[[package]] +name = "futures-core" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" + +[[package]] +name = "futures-executor" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-io" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" + +[[package]] +name = "futures-macro" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.48", +] + +[[package]] +name = "futures-sink" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" + +[[package]] +name = "futures-task" +version = "0.3.30" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" + +[[package]] +name = "futures-util" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" +dependencies = [ + "futures-channel", + "futures-core", + "futures-io", + "futures-macro", + "futures-sink", + "futures-task", + "memchr", + "pin-project-lite", + "pin-utils", + "slab", +] + +[[package]] +name = "generic-array" +version = "0.14.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +dependencies = [ + "typenum", + "version_check", +] + +[[package]] +name = "getrandom" +version = "0.2.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" +dependencies = [ + "cfg-if", + "libc", + "wasi", +] + +[[package]] +name = "gimli" +version = "0.27.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c80984affa11d98d1b88b66ac8853f143217b399d3c74116778ff8fdb4ed2e" + +[[package]] +name = "google-cloud-auth" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "931bedb2264cb00f914b0a6a5c304e34865c34306632d3932e0951a073e4a67d" +dependencies = [ + "async-trait", + "base64 0.21.2", + "google-cloud-metadata", + "google-cloud-token", + "home", + "jsonwebtoken", + "reqwest", + "serde", + "serde_json", + "thiserror", + "time", + "tokio", + "tracing", + "urlencoding", +] + +[[package]] +name = "google-cloud-gax" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8bdaaa4bc036e8318274d1b25f0f2265b3e95418b765fd1ea1c7ef938fd69bd" +dependencies = [ + "google-cloud-token", + "http", + "thiserror", + "tokio", + "tokio-retry", + "tonic 0.9.2", + "tower", + "tracing", +] + +[[package]] +name = "google-cloud-googleapis" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a3b24a3f57be08afc02344e693afb55e48172c9c2ab86ff3fdb8efff550e4b9" +dependencies = [ + "prost 0.11.9", + "prost-types", + "tonic 0.9.2", +] + +[[package]] +name = "google-cloud-metadata" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96e4ad0802d3f416f62e7ce01ac1460898ee0efc98f8b45cd4aab7611607012f" +dependencies = [ + "reqwest", + "thiserror", + "tokio", +] + +[[package]] +name = "google-cloud-pubsub" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "095b104502b6e1abbad9b9768af944b9202e032dbc7f0947d3c30d4191761071" +dependencies = [ + "async-channel", + "async-stream", + "google-cloud-auth", + "google-cloud-gax", + "google-cloud-googleapis", + "google-cloud-token", + "prost-types", + "thiserror", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "google-cloud-storage" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22c57ca1d971d7c6f852c02eda4e87e88b1247b6ed8be9fa5b2768c68b0f2ca5" +dependencies = [ + "async-stream", + "base64 0.21.2", + "bytes", + "futures-util", + "google-cloud-auth", + "google-cloud-metadata", + "google-cloud-token", + "hex", + "once_cell", + "percent-encoding", + "regex", + "reqwest", + "ring 0.16.20", + "rsa", + "serde", + "serde_json", + "sha2 0.10.8", + "thiserror", + 
"time", + "tokio", + "tracing", + "url", +] + +[[package]] +name = "google-cloud-token" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fcd62eb34e3de2f085bcc33a09c3e17c4f65650f36d53eb328b00d63bcb536a" +dependencies = [ + "async-trait", +] + +[[package]] +name = "h2" +version = "0.3.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" +dependencies = [ + "bytes", + "fnv", + "futures-core", + "futures-sink", + "futures-util", + "http", + "indexmap 2.0.0", + "slab", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "half" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888" +dependencies = [ + "cfg-if", + "crunchy", + "num-traits", +] + +[[package]] +name = "hashbrown" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" + +[[package]] +name = "hashbrown" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a" + +[[package]] +name = "headers" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3e372db8e5c0d213e0cd0b9be18be2aca3d44cf2fe30a9d46a65581cd454584" +dependencies = [ + "base64 0.13.1", + "bitflags 1.3.2", + "bytes", + "headers-core", + "http", + "httpdate", + "mime", + "sha1", +] + +[[package]] +name = "headers-core" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7f66481bfee273957b1f20485a4ff3362987f85b2c236580d81b4eb7a326429" +dependencies = [ + "http", +] + +[[package]] +name = "heck" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" + +[[package]] +name = "hermit-abi" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "443144c8cdadd93ebf52ddb4056d257f5b52c04d3c804e657d19eb73fc33668b" + +[[package]] +name = "hex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" + +[[package]] +name = "hmac" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" +dependencies = [ + "digest 0.10.7", +] + +[[package]] +name = "home" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5444c27eef6923071f7ebcc33e3444508466a76f7a2b93da00ed6e19f30c1ddb" +dependencies = [ + "windows-sys 0.48.0", +] + +[[package]] +name = "http" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + +[[package]] +name = "http-body" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" +dependencies = [ + "bytes", + "http", + "pin-project-lite", +] + +[[package]] +name = "httparse" +version = "1.8.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" + +[[package]] +name = "httpdate" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" + +[[package]] +name = "hyper" +version = "0.14.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468" +dependencies = [ + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "h2", + "http", + "http-body", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "socket2 0.4.9", + "tokio", + "tower-service", + "tracing", + "want", +] + +[[package]] +name = "hyper-timeout" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" +dependencies = [ + "hyper", + "pin-project-lite", + "tokio", + "tokio-io-timeout", +] + +[[package]] +name = "hyper-tls" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" +dependencies = [ + "bytes", + "hyper", + "native-tls", + "tokio", + "tokio-native-tls", +] + +[[package]] +name = "iana-time-zone" +version = "0.1.57" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2fad5b825842d2b38bd206f3e81d6957625fd7f0a361e345c30e01a0ae2dd613" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "wasm-bindgen", + "windows", +] + +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" +dependencies = [ + "cc", +] + +[[package]] +name = "idna" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "418a0a6fab821475f634efe3ccc45c013f742efe03d853e8d3355d5cb850ecf8" +dependencies = [ + "matches", + "unicode-bidi", + "unicode-normalization", +] + +[[package]] +name = "idna" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6" +dependencies = [ + "unicode-bidi", + "unicode-normalization", +] + +[[package]] +name = "idna" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" +dependencies = [ + "unicode-bidi", + "unicode-normalization", +] + +[[package]] +name = "indexer-metrics" +version = "1.0.0" +dependencies = [ + "anyhow", + "async-trait", + "chrono", + "clap", + "futures", + "once_cell", + "prometheus", + "reqwest", + "serde", + "serde_json", + "server-framework", + "tokio", + "tracing", +] + +[[package]] +name = "indexmap" +version = "1.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" +dependencies = [ + "autocfg", + "hashbrown 0.12.3", +] + +[[package]] +name = "indexmap" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d" +dependencies = [ + "equivalent", + "hashbrown 0.14.0", +] + +[[package]] +name = "inferno" 
+version = "0.11.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "321f0f839cd44a4686e9504b0a62b4d69a50b62072144c71c68f5873c167b8d9" +dependencies = [ + "ahash", + "indexmap 2.0.0", + "is-terminal", + "itoa", + "log", + "num-format", + "once_cell", + "quick-xml", + "rgb", + "str_stack", +] + +[[package]] +name = "instant" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "integer-encoding" +version = "3.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8bb03732005da905c88227371639bf1ad885cc712789c011c31c5fb3ab3ccf02" + +[[package]] +name = "io-lifetimes" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" +dependencies = [ + "hermit-abi", + "libc", + "windows-sys 0.48.0", +] + +[[package]] +name = "ipnet" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28b29a3cd74f0f4598934efe3aeba42bae0eb4680554128851ebbecb02af14e6" + +[[package]] +name = "is-terminal" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" +dependencies = [ + "hermit-abi", + "rustix 0.38.4", + "windows-sys 0.48.0", +] + +[[package]] +name = "itertools" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +dependencies = [ + "either", +] + +[[package]] +name = "itertools" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62b02a5381cc465bd3041d84623d0fa3b66738b52b8e2fc3bab8ad63ab032f4a" + +[[package]] +name = "jemalloc-sys" +version = "0.5.4+5.3.0-patched" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac6c1946e1cea1788cbfde01c993b52a10e2da07f4bac608228d1bed20bfebf2" +dependencies = [ + "cc", + "libc", +] + +[[package]] +name = "jemallocator" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0de374a9f8e63150e6f5e8a60cc14c668226d7a347d8aee1a45766e3c4dd3bc" +dependencies = [ + "jemalloc-sys", + "libc", +] + +[[package]] +name = "jobserver" +version = "0.1.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "685a7d121ee3f65ae4fddd72b25a04bb36b6af81bc0828f7d5434c0fe60fa3a2" +dependencies = [ + "libc", +] + +[[package]] +name = "js-sys" +version = "0.3.64" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a" +dependencies = [ + "wasm-bindgen", +] + +[[package]] +name = "jsonwebtoken" +version = "8.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378" +dependencies = [ + "base64 0.21.2", + "pem", + "ring 0.16.20", + "serde", + "serde_json", + "simple_asn1", +] + +[[package]] +name = "kanal" +version = "0.1.0-pre8" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "b05d55519627edaf7fd0f29981f6dc03fb52df3f5b257130eb8d0bf2801ea1d7" +dependencies = [ + "futures-core", + "lock_api", +] + +[[package]] +name = "keccak" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f6d5ed8676d904364de097082f4e7d240b571b67989ced0240f08b7f966f940" +dependencies = [ + "cpufeatures", +] + +[[package]] +name = "lazy_static" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +dependencies = [ + "spin 0.5.2", +] + +[[package]] +name = "lexical-core" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2cde5de06e8d4c2faabc400238f9ae1c74d5412d03a7bd067645ccbc47070e46" +dependencies = [ + "lexical-parse-float", + "lexical-parse-integer", + "lexical-util", + "lexical-write-float", + "lexical-write-integer", +] + +[[package]] +name = "lexical-parse-float" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "683b3a5ebd0130b8fb52ba0bdc718cc56815b6a097e28ae5a6997d0ad17dc05f" +dependencies = [ + "lexical-parse-integer", + "lexical-util", + "static_assertions", +] + +[[package]] +name = "lexical-parse-integer" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d0994485ed0c312f6d965766754ea177d07f9c00c9b82a5ee62ed5b47945ee9" +dependencies = [ + "lexical-util", + "static_assertions", +] + +[[package]] +name = "lexical-util" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5255b9ff16ff898710eb9eb63cb39248ea8a5bb036bea8085b1a767ff6c4e3fc" +dependencies = [ + "static_assertions", +] + +[[package]] +name = "lexical-write-float" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accabaa1c4581f05a3923d1b4cfd124c329352288b7b9da09e766b0668116862" +dependencies = [ + "lexical-util", + "lexical-write-integer", + "static_assertions", +] + +[[package]] +name = "lexical-write-integer" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1b6f3d1f4422866b68192d62f77bc5c700bee84f3069f2469d7bc8c77852446" +dependencies = [ + "lexical-util", + "static_assertions", +] + +[[package]] +name = "libc" +version = "0.2.153" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" + +[[package]] +name = "libm" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" + +[[package]] +name = "linked-hash-map" +version = "0.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" + +[[package]] +name = "linux-raw-sys" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" + +[[package]] +name = "linux-raw-sys" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09fc20d2ca12cb9f044c93e3bd6d32d523e6e2ec3db4f7b2939cd99026ecd3f0" + +[[package]] +name = "lock_api" +version = "0.4.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"c1cc9717a20b1bb222f333e6a92fd32f7d8a18ddc5a3191a11af45dcbf4dcd16" +dependencies = [ + "autocfg", + "scopeguard", +] + +[[package]] +name = "log" +version = "0.4.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4" + +[[package]] +name = "lz4_flex" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75761162ae2b0e580d7e7c390558127e5f01b4194debd6221fd8c207fc80e3f5" +dependencies = [ + "twox-hash", +] + +[[package]] +name = "matchers" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" +dependencies = [ + "regex-automata 0.1.10", +] + +[[package]] +name = "matches" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" + +[[package]] +name = "matchit" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b87248edafb776e59e6ee64a79086f65890d3510f2c656c000bf2a7e8a0aea40" + +[[package]] +name = "md-5" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d89e7ee0cfbedfc4da3340218492196241d89eefb6dab27de5df917a6d2e78cf" +dependencies = [ + "cfg-if", + "digest 0.10.7", +] + +[[package]] +name = "memchr" +version = "2.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c8640c5d730cb13ebd907d8d04b52f55ac9a2eec55b440c8892f40d56c76c1d" + +[[package]] +name = "memmap2" +version = "0.5.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83faa42c0a078c393f6b29d5db232d8be22776a891f8f56e5284faee4a20b327" +dependencies = [ + "libc", +] + +[[package]] +name = "migrations_internals" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f23f71580015254b020e856feac3df5878c2c7a8812297edd6c0a485ac9dada" +dependencies = [ + "serde", + "toml", +] + +[[package]] +name = "migrations_macros" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cce3325ac70e67bbab5bd837a31cae01f1a6db64e0e744a33cb03a543469ef08" +dependencies = [ + "migrations_internals", + "proc-macro2", + "quote", +] + +[[package]] +name = "mime" +version = "0.3.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" + +[[package]] +name = "mime_guess" +version = "2.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4192263c238a5f0d0c6bfd21f336a313a4ce1c450542449ca191bb657b4642ef" +dependencies = [ + "mime", + "unicase", +] + +[[package]] +name = "miniz_oxide" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" +dependencies = [ + "adler", +] + +[[package]] +name = "mio" +version = "0.8.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f3d0b296e374a4e6f3c7b0a1f5a51d748a0d34c85e7dc48fc3fa9a87657fe09" +dependencies = [ + "libc", + "wasi", + "windows-sys 0.48.0", +] + +[[package]] +name = "multer" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01acbdc23469fd8fe07ab135923371d5f5a422fbf9c522158677c8eb15bc51c2" +dependencies = [ + "bytes", + 
"encoding_rs", + "futures-util", + "http", + "httparse", + "log", + "memchr", + "mime", + "spin 0.9.8", + "version_check", +] + +[[package]] +name = "native-tls" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e" +dependencies = [ + "lazy_static", + "libc", + "log", + "openssl", + "openssl-probe", + "openssl-sys", + "schannel", + "security-framework", + "security-framework-sys", + "tempfile", +] + +[[package]] +name = "nix" +version = "0.26.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "598beaf3cc6fdd9a5dfb1630c2800c7acd31df7aaf0f565796fba2b53ca1af1b" +dependencies = [ + "bitflags 1.3.2", + "cfg-if", + "libc", +] + +[[package]] +name = "nu-ansi-term" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" +dependencies = [ + "overload", + "winapi", +] + +[[package]] +name = "num" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35bd024e8b2ff75562e5f34e7f4905839deb4b22955ef5e73d2fea1b9813cb23" +dependencies = [ + "num-bigint", + "num-complex", + "num-integer", + "num-iter", + "num-rational", + "num-traits", +] + +[[package]] +name = "num-bigint" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c165a9ab64cf766f73521c0dd2cfdff64f488b8f0b3e621face3462d3db536d7" +dependencies = [ + "num-integer", + "num-traits", +] + +[[package]] +name = "num-bigint-dig" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc84195820f291c7697304f3cbdadd1cb7199c0efc917ff5eafd71225c136151" +dependencies = [ + "byteorder", + "lazy_static", + "libm", + "num-integer", + "num-iter", + "num-traits", + "rand", + "smallvec", + "zeroize", +] + +[[package]] +name = "num-complex" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73f88a1307638156682bada9d7604135552957b7818057dcef22705b4d509495" +dependencies = [ + "num-traits", +] + +[[package]] +name = "num-conv" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" + +[[package]] +name = "num-format" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a652d9771a63711fd3c3deb670acfbe5c30a4072e664d7a3bf5a9e1056ac72c3" +dependencies = [ + "arrayvec", + "itoa", +] + +[[package]] +name = "num-integer" +version = "0.1.46" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" +dependencies = [ + "num-traits", +] + +[[package]] +name = "num-iter" +version = "0.1.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-rational" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f83d14da390562dca69fc84082e73e548e1ad308d24accdedd2720017cb37824" +dependencies = [ + "num-bigint", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-traits" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +dependencies = [ + "autocfg", + "libm", +] + +[[package]] +name = "num_cpus" +version = "1.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" +dependencies = [ + "hermit-abi", + "libc", +] + +[[package]] +name = "object" +version = "0.31.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8bda667d9f2b5051b8833f59f3bf748b28ef54f850f4fcb389a252aa383866d1" +dependencies = [ + "memchr", +] + +[[package]] +name = "once_cell" +version = "1.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" + +[[package]] +name = "opaque-debug" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" + +[[package]] +name = "openssl" +version = "0.10.62" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8cde4d2d9200ad5909f8dac647e29482e07c3a35de8a13fce7c9c7747ad9f671" +dependencies = [ + "bitflags 2.5.0", + "cfg-if", + "foreign-types 0.3.2", + "libc", + "once_cell", + "openssl-macros", + "openssl-sys", +] + +[[package]] +name = "openssl-macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.48", +] + +[[package]] +name = "openssl-probe" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" + +[[package]] +name = "openssl-sys" +version = "0.9.98" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1665caf8ab2dc9aef43d1c0023bd904633a6a05cb30b0ad59bec2ae986e57a7" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "ordered-float" +version = "2.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68f19d67e5a2795c94e73e0bb1cc1a7edeb2e28efd39e2e1c9b7a40c1108b11c" +dependencies = [ + "num-traits", +] + +[[package]] +name = "overload" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" + +[[package]] +name = "parking_lot" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" +dependencies = [ + "lock_api", + "parking_lot_core", +] + +[[package]] +name = "parking_lot_core" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447" +dependencies = [ + "cfg-if", + "libc", + "redox_syscall", + "smallvec", + "windows-targets 0.48.1", +] + +[[package]] +name = "parquet" +version = "52.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29c3b5322cc1bbf67f11c079c42be41a55949099b78732f7dba9e15edde40eab" +dependencies = [ + "ahash", + "arrow-array", + "arrow-buffer", + "arrow-cast", + "arrow-data", + "arrow-ipc", + "arrow-schema", + "arrow-select", + "base64 0.22.0", + "brotli", + "bytes", + "chrono", + "flate2", + "half", + "hashbrown 0.14.0", + "lz4_flex", + "num", + 
"num-bigint", + "paste", + "seq-macro", + "snap", + "thrift", + "twox-hash", + "zstd 0.13.0", + "zstd-sys", +] + +[[package]] +name = "parquet_derive" +version = "52.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05a70674ad0e9e49f583a03e477c23cc0116cc49a001c52178f00fb25eb0a882" +dependencies = [ + "parquet", + "proc-macro2", + "quote", + "syn 2.0.48", +] + +[[package]] +name = "paste" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" + +[[package]] +name = "pbjson" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "048f9ac93c1eab514f9470c4bc8d97ca2a0a236b84f45cc19d69a59fc11467f6" +dependencies = [ + "base64 0.13.1", + "serde", +] + +[[package]] +name = "pem" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8835c273a76a90455d7344889b0964598e3316e2a79ede8e36f16bdcf2228b8" +dependencies = [ + "base64 0.13.1", +] + +[[package]] +name = "pem-rfc7468" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01de5d978f34aa4b2296576379fcc416034702fd94117c56ffd8a1a767cefb30" +dependencies = [ + "base64ct", +] + +[[package]] +name = "percent-encoding" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" + +[[package]] +name = "phf" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ade2d8b8f33c7333b51bcf0428d37e217e9f32192ae4772156f65063b8ce03dc" +dependencies = [ + "phf_shared", +] + +[[package]] +name = "phf_shared" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90fcb95eef784c2ac79119d1dd819e162b5da872ce6f3c3abe1e8ca1c082f72b" +dependencies = [ + "siphasher", +] + +[[package]] +name = "pin-project" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "030ad2bc4db10a8944cb0d837f158bdfec4d4a4873ab701a95046770d11f8842" +dependencies = [ + "pin-project-internal", +] + +[[package]] +name = "pin-project-internal" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec2e072ecce94ec471b13398d5402c188e76ac03cf74dd1a975161b23a3f6d9c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.48", +] + +[[package]] +name = "pin-project-lite" +version = "0.2.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "pkcs1" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a78f66c04ccc83dd4486fd46c33896f4e17b24a7a3a6400dedc48ed0ddd72320" +dependencies = [ + "der", + "pkcs8", + "zeroize", +] + +[[package]] +name = "pkcs8" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7cabda3fb821068a9a4fab19a683eac3af12edf0f34b94a8be53c4972b8149d0" +dependencies = [ + "der", + "spki", + "zeroize", +] + +[[package]] +name = "pkg-config" +version = "0.3.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" + +[[package]] +name = "postgres-native-tls" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d442770e2b1e244bb5eb03b31c79b65bb2568f413b899eaba850fa945a65954" +dependencies = [ + "futures", + "native-tls", + "tokio", + "tokio-native-tls", + "tokio-postgres", +] + +[[package]] +name = "postgres-protocol" +version = "0.6.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49b6c5ef183cd3ab4ba005f1ca64c21e8bd97ce4699cfea9e8d9a2c4958ca520" +dependencies = [ + "base64 0.21.2", + "byteorder", + "bytes", + "fallible-iterator", + "hmac", + "md-5", + "memchr", + "rand", + "sha2 0.10.8", + "stringprep", +] + +[[package]] +name = "postgres-types" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d2234cdee9408b523530a9b6d2d6b373d1db34f6a8e51dc03ded1828d7fb67c" +dependencies = [ + "bytes", + "fallible-iterator", + "postgres-protocol", +] + +[[package]] +name = "powerfmt" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" + +[[package]] +name = "pprof" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "196ded5d4be535690899a4631cc9f18cdc41b7ebf24a79400f46f48e49a11059" +dependencies = [ + "backtrace", + "cfg-if", + "findshlibs", + "inferno", + "libc", + "log", + "nix", + "once_cell", + "parking_lot", + "protobuf", + "protobuf-codegen-pure", + "smallvec", + "symbolic-demangle", + "tempfile", + "thiserror", +] + +[[package]] +name = "ppv-lite86" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" + +[[package]] +name = "pq-sys" +version = "0.4.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31c0052426df997c0cbd30789eb44ca097e3541717a7b8fa36b1c464ee7edebd" +dependencies = [ + "vcpkg", +] + +[[package]] +name = "proc-macro2" +version = "1.0.76" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95fc56cda0b5c3325f5fbbd7ff9fda9e02bb00bb3dac51252d2f1bfa1cb8cc8c" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "processor" +version = "1.0.0" +dependencies = [ + "ahash", + "allocative", + "allocative_derive", + "anyhow", + "aptos-moving-average", + "aptos-protos", + "arrow", + "async-trait", + "bcs", + "bigdecimal", + "bitflags 2.5.0", + "canonical_json", + "chrono", + "clap", + "diesel", + "diesel-async", + "diesel_migrations", + "enum_dispatch", + "field_count", + "futures", + "futures-util", + "google-cloud-googleapis", + "google-cloud-pubsub", + "google-cloud-storage", + "hex", + "hyper", + "itertools 0.12.1", + "jemallocator", + "kanal", + "lazy_static", + "native-tls", + "num", + "num_cpus", + "once_cell", + "parquet", + "parquet_derive", + "postgres-native-tls", + "prometheus", + "prost 0.12.3", + "regex", + "serde", + "serde_json", + "server-framework", + "sha2 0.9.9", + "sha3", + "strum", + "tiny-keccak", + "tokio", + "tokio-postgres", + "tonic 0.11.0", + "tracing", + "unescape", + "url", + "uuid", +] + +[[package]] +name = "prometheus" +version = "0.13.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "449811d15fbdf5ceb5c1144416066429cf82316e2ec8ce0c1f6f8a02e7bbcf8c" +dependencies = [ + "cfg-if", + "fnv", + "lazy_static", + "memchr", + 
"parking_lot", + "thiserror", +] + +[[package]] +name = "prost" +version = "0.11.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b82eaa1d779e9a4bc1c3217db8ffbeabaae1dca241bf70183242128d48681cd" +dependencies = [ + "bytes", + "prost-derive 0.11.9", +] + +[[package]] +name = "prost" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "146c289cda302b98a28d40c8b3b90498d6e526dd24ac2ecea73e4e491685b94a" +dependencies = [ + "bytes", + "prost-derive 0.12.3", +] + +[[package]] +name = "prost-derive" +version = "0.11.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" +dependencies = [ + "anyhow", + "itertools 0.10.5", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "prost-derive" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "efb6c9a1dd1def8e2124d17e83a20af56f1570d6c2d2bd9e266ccb768df3840e" +dependencies = [ + "anyhow", + "itertools 0.10.5", + "proc-macro2", + "quote", + "syn 2.0.48", +] + +[[package]] +name = "prost-types" +version = "0.11.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "213622a1460818959ac1181aaeb2dc9c7f63df720db7d788b3e24eacd1983e13" +dependencies = [ + "prost 0.11.9", +] + +[[package]] +name = "protobuf" +version = "2.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "106dd99e98437432fed6519dedecfade6a06a73bb7b2a1e019fdd2bee5778d94" + +[[package]] +name = "protobuf-codegen" +version = "2.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "033460afb75cf755fcfc16dfaed20b86468082a2ea24e05ac35ab4a099a017d6" +dependencies = [ + "protobuf", +] + +[[package]] +name = "protobuf-codegen-pure" +version = "2.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95a29399fc94bcd3eeaa951c715f7bea69409b2445356b00519740bcd6ddd865" +dependencies = [ + "protobuf", + "protobuf-codegen", +] + +[[package]] +name = "psl-types" +version = "2.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33cb294fe86a74cbcf50d4445b37da762029549ebeea341421c7c70370f86cac" + +[[package]] +name = "publicsuffix" +version = "2.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96a8c1bda5ae1af7f99a2962e49df150414a43d62404644d98dd5c3a93d07457" +dependencies = [ + "idna 0.3.0", + "psl-types", +] + +[[package]] +name = "quick-xml" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f50b1c63b38611e7d4d7f68b82d3ad0cc71a2ad2e7f61fc10f1328d917c93cd" +dependencies = [ + "memchr", +] + +[[package]] +name = "quote" +version = "1.0.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha", + "rand_core", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = 
"rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom", +] + +[[package]] +name = "redox_syscall" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" +dependencies = [ + "bitflags 1.3.2", +] + +[[package]] +name = "regex" +version = "1.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c117dbdfde9c8308975b6a18d71f3f385c89461f7b3fb054288ecf2a2058ba4c" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata 0.4.6", + "regex-syntax 0.8.3", +] + +[[package]] +name = "regex-automata" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" +dependencies = [ + "regex-syntax 0.6.29", +] + +[[package]] +name = "regex-automata" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86b83b8b9847f9bf95ef68afb0b8e6cdb80f498442f5179a29fad448fcc1eaea" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax 0.8.3", +] + +[[package]] +name = "regex-syntax" +version = "0.6.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" + +[[package]] +name = "regex-syntax" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adad44e29e4c806119491a7f06f03de4d1af22c3a680dd47f1e6e179439d1f56" + +[[package]] +name = "reqwest" +version = "0.11.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e9ad3fe7488d7e34558a2033d45a0c90b72d97b4f80705666fea71472e2e6a1" +dependencies = [ + "base64 0.21.2", + "bytes", + "cookie", + "cookie_store", + "encoding_rs", + "futures-core", + "futures-util", + "h2", + "http", + "http-body", + "hyper", + "hyper-tls", + "ipnet", + "js-sys", + "log", + "mime", + "mime_guess", + "native-tls", + "once_cell", + "percent-encoding", + "pin-project-lite", + "serde", + "serde_json", + "serde_urlencoded", + "tokio", + "tokio-native-tls", + "tokio-util", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "wasm-streams", + "web-sys", + "winreg", +] + +[[package]] +name = "rgb" +version = "0.8.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05aaa8004b64fd573fc9d002f4e632d51ad4f026c2b5ba95fcb6c2f32c2c47d8" +dependencies = [ + "bytemuck", +] + +[[package]] +name = "ring" +version = "0.16.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" +dependencies = [ + "cc", + "libc", + "once_cell", + "spin 0.5.2", + "untrusted 0.7.1", + "web-sys", + "winapi", +] + +[[package]] +name = "ring" +version = "0.17.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" +dependencies = [ + "cc", + "cfg-if", + "getrandom", + "libc", + "spin 0.9.8", + "untrusted 0.9.0", + "windows-sys 0.52.0", +] + +[[package]] +name = "rsa" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cf22754c49613d2b3b119f0e5d46e34a2c628a937e3024b8762de4e7d8c710b" +dependencies = [ + "byteorder", + "digest 0.10.7", + "num-bigint-dig", 
+ "num-integer", + "num-iter", + "num-traits", + "pkcs1", + "pkcs8", + "rand_core", + "smallvec", + "subtle", + "zeroize", +] + +[[package]] +name = "rstack" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7df9d3ebd4f17b52e6134efe2fa20021c80688cbe823d481a729a993b730493" +dependencies = [ + "cfg-if", + "dw", + "lazy_static", + "libc", + "log", +] + +[[package]] +name = "rstack-self" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6dd5030da3aba0ec731502f74ec38e63798eea6bc8b8ba5972129afe3eababd2" +dependencies = [ + "antidote", + "backtrace", + "bincode", + "lazy_static", + "libc", + "rstack", + "serde", +] + +[[package]] +name = "rustc-demangle" +version = "0.1.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" + +[[package]] +name = "rustc_version" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +dependencies = [ + "semver", +] + +[[package]] +name = "rustix" +version = "0.37.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d69718bf81c6127a49dc64e44a742e8bb9213c0ff8869a22c308f84c1d4ab06" +dependencies = [ + "bitflags 1.3.2", + "errno", + "io-lifetimes", + "libc", + "linux-raw-sys 0.3.8", + "windows-sys 0.48.0", +] + +[[package]] +name = "rustix" +version = "0.38.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a962918ea88d644592894bc6dc55acc6c0956488adcebbfb6e273506b7fd6e5" +dependencies = [ + "bitflags 2.5.0", + "errno", + "libc", + "linux-raw-sys 0.4.3", + "windows-sys 0.48.0", +] + +[[package]] +name = "rustls" +version = "0.20.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fff78fc74d175294f4e83b28343315ffcfb114b156f0185e9741cb5570f50e2f" +dependencies = [ + "log", + "ring 0.16.20", + "sct", + "webpki", +] + +[[package]] +name = "rustls" +version = "0.21.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd8d6c9f025a446bc4d18ad9632e69aec8f287aa84499ee335599fabd20c3fd8" +dependencies = [ + "log", + "ring 0.16.20", + "rustls-webpki 0.101.4", + "sct", +] + +[[package]] +name = "rustls" +version = "0.22.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99008d7ad0bbbea527ec27bddbc0e432c5b87d8175178cee68d2eec9c4a1813c" +dependencies = [ + "log", + "ring 0.17.8", + "rustls-pki-types", + "rustls-webpki 0.102.2", + "subtle", + "zeroize", +] + +[[package]] +name = "rustls-native-certs" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f1fb85efa936c42c6d5fc28d2629bb51e4b2f4b8a5211e297d599cc5a093792" +dependencies = [ + "openssl-probe", + "rustls-pemfile 2.1.2", + "rustls-pki-types", + "schannel", + "security-framework", +] + +[[package]] +name = "rustls-pemfile" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d3987094b1d07b653b7dfdc3f70ce9a1da9c51ac18c1b06b662e4f9a0e9f4b2" +dependencies = [ + "base64 0.21.2", +] + +[[package]] +name = "rustls-pemfile" +version = "2.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29993a25686778eb88d4189742cd713c9bce943bc54251a33509dc63cbacf73d" +dependencies = [ + "base64 0.22.0", + "rustls-pki-types", +] + +[[package]] +name = "rustls-pki-types" +version = 
"1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecd36cc4259e3e4514335c4a138c6b43171a8d61d8f5c9348f9fc7529416f247" + +[[package]] +name = "rustls-webpki" +version = "0.100.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6207cd5ed3d8dca7816f8f3725513a34609c0c765bf652b8c3cb4cfd87db46b" +dependencies = [ + "ring 0.16.20", + "untrusted 0.7.1", +] + +[[package]] +name = "rustls-webpki" +version = "0.101.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d93931baf2d282fff8d3a532bbfd7653f734643161b87e3e01e59a04439bf0d" +dependencies = [ + "ring 0.16.20", + "untrusted 0.7.1", +] + +[[package]] +name = "rustls-webpki" +version = "0.102.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "faaa0a62740bedb9b2ef5afa303da42764c012f743917351dc9a237ea1663610" +dependencies = [ + "ring 0.17.8", + "rustls-pki-types", + "untrusted 0.9.0", +] + +[[package]] +name = "rustversion" +version = "1.0.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc31bd9b61a32c31f9650d18add92aa83a49ba979c143eefd27fe7177b05bd5f" + +[[package]] +name = "ryu" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" + +[[package]] +name = "schannel" +version = "0.1.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" +dependencies = [ + "windows-sys 0.48.0", +] + +[[package]] +name = "scoped-futures" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1473e24c637950c9bd38763220bea91ec3e095a89f672bbd7a10d03e77ba467" +dependencies = [ + "cfg-if", + "pin-utils", +] + +[[package]] +name = "scoped-tls" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294" + +[[package]] +name = "scopeguard" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" + +[[package]] +name = "sct" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" +dependencies = [ + "ring 0.16.20", + "untrusted 0.7.1", +] + +[[package]] +name = "security-framework" +version = "2.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fc758eb7bffce5b308734e9b0c1468893cae9ff70ebf13e7090be8dcbcc83a8" +dependencies = [ + "bitflags 1.3.2", + "core-foundation", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework-sys" +version = "2.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f51d0c0d83bec45f16480d0ce0058397a69e48fcdc52d1dc8855fb68acbd31a7" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "semver" +version = "1.0.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" + +[[package]] +name = "seq-macro" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3f0bf26fd526d2a95683cd0f87bf103b8539e2ca1ef48ce002d67aad59aa0b4" + +[[package]] +name = "serde" +version = 
"1.0.193" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25dd9975e68d0cb5aa1120c288333fc98731bd1dd12f561e468ea4728c042b89" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.193" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43576ca501357b9b071ac53cdc7da8ef0cbd9493d8df094cd821777ea6e894d3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.48", +] + +[[package]] +name = "serde_json" +version = "1.0.99" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46266871c240a00b8f503b877622fe33430b3c7d963bdc0f2adc511e54a1eae3" +dependencies = [ + "indexmap 2.0.0", + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "serde_spanned" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96426c9936fd7a0124915f9185ea1d20aa9445cc9821142f0a73bc9207a2e186" +dependencies = [ + "serde", +] + +[[package]] +name = "serde_urlencoded" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" +dependencies = [ + "form_urlencoded", + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "serde_yaml" +version = "0.8.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "578a7433b776b56a35785ed5ce9a7e777ac0598aac5a6dd1b4b18a307c7fc71b" +dependencies = [ + "indexmap 1.9.3", + "ryu", + "serde", + "yaml-rust", +] + +[[package]] +name = "server-framework" +version = "1.0.0" +dependencies = [ + "anyhow", + "aptos-system-utils", + "async-trait", + "backtrace", + "clap", + "prometheus", + "serde", + "serde_yaml", + "tempfile", + "tokio", + "toml", + "tracing", + "tracing-subscriber", + "warp", +] + +[[package]] +name = "sha1" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest 0.10.7", +] + +[[package]] +name = "sha2" +version = "0.9.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" +dependencies = [ + "block-buffer 0.9.0", + "cfg-if", + "cpufeatures", + "digest 0.9.0", + "opaque-debug", +] + +[[package]] +name = "sha2" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest 0.10.7", +] + +[[package]] +name = "sha3" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f81199417d4e5de3f04b1e871023acea7389672c4135918f05aa9cbf2f2fa809" +dependencies = [ + "block-buffer 0.9.0", + "digest 0.9.0", + "keccak", + "opaque-debug", +] + +[[package]] +name = "sharded-slab" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "900fba806f70c630b0a382d0d825e17a0f19fcd059a2ade1ff237bcddf446b31" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "signal-hook-registry" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" +dependencies = [ + "libc", +] + +[[package]] +name = "simple_asn1" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"adc4e5204eb1910f40f9cfa375f6f05b68c3abac4b6fd879c8ff5e7ae8a0a085" +dependencies = [ + "num-bigint", + "num-traits", + "thiserror", + "time", +] + +[[package]] +name = "siphasher" +version = "0.3.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38b58827f4464d87d377d175e90bf58eb00fd8716ff0a62f80356b5e61555d0d" + +[[package]] +name = "slab" +version = "0.4.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6528351c9bc8ab22353f9d776db39a20288e8d6c37ef8cfe3317cf875eecfc2d" +dependencies = [ + "autocfg", +] + +[[package]] +name = "smallvec" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62bb4feee49fdd9f707ef802e22365a35de4b7b299de4763d44bfea899442ff9" + +[[package]] +name = "snap" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b6b67fb9a61334225b5b790716f609cd58395f895b3fe8b328786812a40bc3b" + +[[package]] +name = "socket2" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" +dependencies = [ + "libc", + "winapi", +] + +[[package]] +name = "socket2" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" +dependencies = [ + "libc", + "windows-sys 0.48.0", +] + +[[package]] +name = "spin" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" + +[[package]] +name = "spin" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" + +[[package]] +name = "spki" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44d01ac02a6ccf3e07db148d2be087da624fea0221a16152ed01f0496a6b0a27" +dependencies = [ + "base64ct", + "der", +] + +[[package]] +name = "stable_deref_trait" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" + +[[package]] +name = "static_assertions" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" + +[[package]] +name = "str_stack" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9091b6114800a5f2141aee1d1b9d6ca3592ac062dc5decb3764ec5895a47b4eb" + +[[package]] +name = "stringprep" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb41d74e231a107a1b4ee36bd1214b11285b77768d2e3824aedafa988fd36ee6" +dependencies = [ + "finl_unicode", + "unicode-bidi", + "unicode-normalization", +] + +[[package]] +name = "strsim" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" + +[[package]] +name = "strum" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "063e6045c0e62079840579a7e47a355ae92f60eb74daaf156fb1e84ba164e63f" +dependencies = [ + "strum_macros", +] + +[[package]] +name = "strum_macros" +version = "0.24.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "rustversion", + "syn 1.0.109", +] + +[[package]] +name = "subtle" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" + +[[package]] +name = "symbolic-common" +version = "10.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b55cdc318ede251d0957f07afe5fed912119b8c1bc5a7804151826db999e737" +dependencies = [ + "debugid", + "memmap2", + "stable_deref_trait", + "uuid", +] + +[[package]] +name = "symbolic-demangle" +version = "10.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79be897be8a483a81fff6a3a4e195b4ac838ef73ca42d348b3f722da9902e489" +dependencies = [ + "cpp_demangle", + "rustc-demangle", + "symbolic-common", +] + +[[package]] +name = "syn" +version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.48" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f3531638e407dfc0814761abb7c00a5b54992b849452a0646b7f65c9f770f3f" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "sync_wrapper" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" + +[[package]] +name = "tempfile" +version = "3.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31c0432476357e58790aaa47a8efb0c5138f137343f3b5f23bd36a27e3b0a6d6" +dependencies = [ + "autocfg", + "cfg-if", + "fastrand", + "redox_syscall", + "rustix 0.37.23", + "windows-sys 0.48.0", +] + +[[package]] +name = "thiserror" +version = "1.0.56" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d54378c645627613241d077a3a79db965db602882668f9136ac42af9ecb730ad" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.56" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa0faa943b50f3db30a20aa7e265dbc66076993efed8463e8de414e5d06d3471" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.48", +] + +[[package]] +name = "thread_local" +version = "1.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" +dependencies = [ + "cfg-if", + "once_cell", +] + +[[package]] +name = "thrift" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e54bc85fc7faa8bc175c4bab5b92ba8d9a3ce893d0e9f42cc455c8ab16a9e09" +dependencies = [ + "byteorder", + "integer-encoding", + "ordered-float", +] + +[[package]] +name = "time" +version = "0.3.36" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" +dependencies = [ + "deranged", + "itoa", + "num-conv", + "powerfmt", + "serde", + "time-core", + "time-macros", +] + +[[package]] +name = "time-core" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" + +[[package]] +name = 
"time-macros" +version = "0.2.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" +dependencies = [ + "num-conv", + "time-core", +] + +[[package]] +name = "tiny-keccak" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" +dependencies = [ + "crunchy", +] + +[[package]] +name = "tinyvec" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" + +[[package]] +name = "tokio" +version = "1.35.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c89b4efa943be685f629b149f53829423f8f5531ea21249408e8e2f8671ec104" +dependencies = [ + "backtrace", + "bytes", + "libc", + "mio", + "num_cpus", + "parking_lot", + "pin-project-lite", + "signal-hook-registry", + "socket2 0.5.5", + "tokio-macros", + "windows-sys 0.48.0", +] + +[[package]] +name = "tokio-io-timeout" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" +dependencies = [ + "pin-project-lite", + "tokio", +] + +[[package]] +name = "tokio-macros" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.48", +] + +[[package]] +name = "tokio-native-tls" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" +dependencies = [ + "native-tls", + "tokio", +] + +[[package]] +name = "tokio-postgres" +version = "0.7.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d340244b32d920260ae7448cb72b6e238bddc3d4f7603394e7dd46ed8e48f5b8" +dependencies = [ + "async-trait", + "byteorder", + "bytes", + "fallible-iterator", + "futures-channel", + "futures-util", + "log", + "parking_lot", + "percent-encoding", + "phf", + "pin-project-lite", + "postgres-protocol", + "postgres-types", + "rand", + "socket2 0.5.5", + "tokio", + "tokio-util", + "whoami", +] + +[[package]] +name = "tokio-retry" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f57eb36ecbe0fc510036adff84824dd3c24bb781e21bfa67b69d556aa85214f" +dependencies = [ + "pin-project", + "rand", + "tokio", +] + +[[package]] +name = "tokio-rustls" +version = "0.23.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" +dependencies = [ + "rustls 0.20.8", + "tokio", + "webpki", +] + +[[package]] +name = "tokio-rustls" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" +dependencies = [ + "rustls 0.21.7", + "tokio", +] + +[[package]] +name = "tokio-rustls" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f" +dependencies = [ + "rustls 0.22.3", + "rustls-pki-types", + "tokio", +] + +[[package]] +name = "tokio-stream" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" +dependencies = [ + "futures-core", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "tokio-tungstenite" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "54319c93411147bced34cb5609a80e0a8e44c5999c93903a81cd866630ec0bfd" +dependencies = [ + "futures-util", + "log", + "tokio", + "tungstenite", +] + +[[package]] +name = "tokio-util" +version = "0.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "806fe8c2c87eccc8b3267cbae29ed3ab2d0bd37fca70ab622e46aaa9375ddb7d" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "pin-project-lite", + "tokio", + "tracing", +] + +[[package]] +name = "toml" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c17e963a819c331dcacd7ab957d80bc2b9a9c1e71c804826d2f283dd65306542" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit", +] + +[[package]] +name = "toml_datetime" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b" +dependencies = [ + "serde", +] + +[[package]] +name = "toml_edit" +version = "0.19.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f8751d9c1b03c6500c387e96f81f815a4f8e72d142d2d4a9ffa6fedd51ddee7" +dependencies = [ + "indexmap 2.0.0", + "serde", + "serde_spanned", + "toml_datetime", + "winnow", +] + +[[package]] +name = "tonic" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3082666a3a6433f7f511c7192923fa1fe07c69332d3c6a2e6bb040b569199d5a" +dependencies = [ + "async-stream", + "async-trait", + "axum", + "base64 0.21.2", + "bytes", + "flate2", + "futures-core", + "futures-util", + "h2", + "http", + "http-body", + "hyper", + "hyper-timeout", + "percent-encoding", + "pin-project", + "prost 0.11.9", + "rustls-pemfile 1.0.3", + "tokio", + "tokio-rustls 0.24.1", + "tokio-stream", + "tower", + "tower-layer", + "tower-service", + "tracing", + "webpki-roots", +] + +[[package]] +name = "tonic" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76c4eb7a4e9ef9d4763600161f12f5070b92a578e1b634db88a6887844c91a13" +dependencies = [ + "async-stream", + "async-trait", + "axum", + "base64 0.21.2", + "bytes", + "flate2", + "h2", + "http", + "http-body", + "hyper", + "hyper-timeout", + "percent-encoding", + "pin-project", + "prost 0.12.3", + "rustls-native-certs", + "rustls-pemfile 2.1.2", + "rustls-pki-types", + "tokio", + "tokio-rustls 0.25.0", + "tokio-stream", + "tower", + "tower-layer", + "tower-service", + "tracing", + "zstd 0.12.4", +] + +[[package]] +name = "tower" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" +dependencies = [ + "futures-core", + "futures-util", + "indexmap 1.9.3", + "pin-project", + "pin-project-lite", + "rand", + "slab", + "tokio", + "tokio-util", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-layer" +version = "0.3.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" + +[[package]] +name = "tower-service" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" + +[[package]] +name = "tracing" +version = "0.1.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" +dependencies = [ + "cfg-if", + "log", + "pin-project-lite", + "tracing-attributes", + "tracing-core", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.48", +] + +[[package]] +name = "tracing-core" +version = "0.1.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0955b8137a1df6f1a2e9a37d8a6656291ff0297c1a97c24e0d8425fe2312f79a" +dependencies = [ + "once_cell", + "valuable", +] + +[[package]] +name = "tracing-log" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78ddad33d2d10b1ed7eb9d1f518a5674713876e97e5bb9b7345a7984fbb4f922" +dependencies = [ + "lazy_static", + "log", + "tracing-core", +] + +[[package]] +name = "tracing-serde" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc6b213177105856957181934e4920de57730fc69bf42c37ee5bb664d406d9e1" +dependencies = [ + "serde", + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.3.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30a651bc37f915e81f087d86e62a18eec5f79550c7faff886f7090b4ea757c77" +dependencies = [ + "matchers", + "nu-ansi-term", + "once_cell", + "regex", + "serde", + "serde_json", + "sharded-slab", + "smallvec", + "thread_local", + "tracing", + "tracing-core", + "tracing-log", + "tracing-serde", +] + +[[package]] +name = "try-lock" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" + +[[package]] +name = "tungstenite" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30ee6ab729cd4cf0fd55218530c4522ed30b7b6081752839b68fcec8d0960788" +dependencies = [ + "base64 0.13.1", + "byteorder", + "bytes", + "http", + "httparse", + "log", + "rand", + "sha1", + "thiserror", + "url", + "utf-8", +] + +[[package]] +name = "twox-hash" +version = "1.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" +dependencies = [ + "cfg-if", + "static_assertions", +] + +[[package]] +name = "typenum" +version = "1.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" + +[[package]] +name = "unescape" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccb97dac3243214f8d8507998906ca3e2e0b900bf9bf4870477f125b82e68f6e" + +[[package]] +name = "unicase" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" +dependencies = [ + 
"version_check", +] + +[[package]] +name = "unicode-bidi" +version = "0.3.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" + +[[package]] +name = "unicode-ident" +version = "1.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22049a19f4a68748a168c0fc439f9516686aa045927ff767eca0a85101fb6e73" + +[[package]] +name = "unicode-normalization" +version = "0.1.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" +dependencies = [ + "tinyvec", +] + +[[package]] +name = "untrusted" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" + +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + +[[package]] +name = "url" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50bff7831e19200a85b17131d085c25d7811bc4e186efdaf54bbd132994a88cb" +dependencies = [ + "form_urlencoded", + "idna 0.4.0", + "percent-encoding", + "serde", +] + +[[package]] +name = "urlencoding" +version = "2.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da" + +[[package]] +name = "utf-8" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" + +[[package]] +name = "utf8parse" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" + +[[package]] +name = "uuid" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a183cf7feeba97b4dd1c0d46788634f6221d87fa961b305bed08c851829efcc0" +dependencies = [ + "getrandom", +] + +[[package]] +name = "valuable" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" + +[[package]] +name = "vcpkg" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" + +[[package]] +name = "version_check" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" + +[[package]] +name = "want" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" +dependencies = [ + "try-lock", +] + +[[package]] +name = "warp" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba431ef570df1287f7f8b07e376491ad54f84d26ac473489427231e1718e1f69" +dependencies = [ + "bytes", + "futures-channel", + "futures-util", + "headers", + "http", + "hyper", + "log", + "mime", + "mime_guess", + "multer", + "percent-encoding", + "pin-project", + "rustls-pemfile 1.0.3", + "scoped-tls", + "serde", + "serde_json", + "serde_urlencoded", + "tokio", + "tokio-rustls 0.23.4", + "tokio-stream", + 
"tokio-tungstenite", + "tokio-util", + "tower-service", + "tracing", +] + +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + +[[package]] +name = "wasm-bindgen" +version = "0.2.87" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" +dependencies = [ + "cfg-if", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.87" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" +dependencies = [ + "bumpalo", + "log", + "once_cell", + "proc-macro2", + "quote", + "syn 2.0.48", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c02dbc21516f9f1f04f187958890d7e6026df8d16540b7ad9492bc34a67cea03" +dependencies = [ + "cfg-if", + "js-sys", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.87" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.87" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.48", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.87" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" + +[[package]] +name = "wasm-streams" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4609d447824375f43e1ffbc051b50ad8f4b3ae8219680c94452ea05eb240ac7" +dependencies = [ + "futures-util", + "js-sys", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + +[[package]] +name = "web-sys" +version = "0.3.64" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "webpki" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd" +dependencies = [ + "ring 0.16.20", + "untrusted 0.7.1", +] + +[[package]] +name = "webpki-roots" +version = "0.23.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b03058f88386e5ff5310d9111d53f48b17d732b401aeb83a8d5190f2ac459338" +dependencies = [ + "rustls-webpki 0.100.1", +] + +[[package]] +name = "whoami" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22fc3756b8a9133049b26c7f61ab35416c130e8c09b660f5b3958b446f52cc50" +dependencies = [ + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + 
"winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" +dependencies = [ + "windows-targets 0.48.1", +] + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.1", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.5", +] + +[[package]] +name = "windows-targets" +version = "0.48.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05d4b17490f70499f20b9e791dcf6a299785ce8af4d709018206dc5b4953e95f" +dependencies = [ + "windows_aarch64_gnullvm 0.48.0", + "windows_aarch64_msvc 0.48.0", + "windows_i686_gnu 0.48.0", + "windows_i686_msvc 0.48.0", + "windows_x86_64_gnu 0.48.0", + "windows_x86_64_gnullvm 0.48.0", + "windows_x86_64_msvc 0.48.0", +] + +[[package]] +name = "windows-targets" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb" +dependencies = [ + "windows_aarch64_gnullvm 0.52.5", + "windows_aarch64_msvc 0.52.5", + "windows_i686_gnu 0.52.5", + "windows_i686_gnullvm", + "windows_i686_msvc 0.52.5", + "windows_x86_64_gnu 0.52.5", + "windows_x86_64_gnullvm 0.52.5", + "windows_x86_64_msvc 0.52.5", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6" + +[[package]] +name = "windows_i686_gnu" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9" + +[[package]] +name = "windows_i686_msvc" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0" + +[[package]] +name = "winnow" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81fac9742fd1ad1bd9643b991319f72dd031016d44b77039a26977eb667141e7" +dependencies = [ + "memchr", +] + +[[package]] +name = "winreg" +version = "0.50.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" +dependencies = [ + "cfg-if", + "windows-sys 0.48.0", +] + +[[package]] +name = "yaml-rust" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" +dependencies = [ + "linked-hash-map", +] + +[[package]] +name = "zerocopy" +version = "0.7.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.7.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.48", +] + +[[package]] +name = "zeroize" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9" + +[[package]] +name = "zstd" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a27595e173641171fc74a1232b7b1c7a7cb6e18222c11e9dfb9888fa424c53c" +dependencies = [ + "zstd-safe 6.0.6", +] + +[[package]] +name = "zstd" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum 
= "bffb3309596d527cfcba7dfc6ed6052f1d39dfbd7c867aa2e865e4a449c10110" +dependencies = [ + "zstd-safe 7.0.0", +] + +[[package]] +name = "zstd-safe" +version = "6.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee98ffd0b48ee95e6c5168188e44a54550b1564d9d530ee21d5f0eaed1069581" +dependencies = [ + "libc", + "zstd-sys", +] + +[[package]] +name = "zstd-safe" +version = "7.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43747c7422e2924c11144d5229878b98180ef8b06cca4ab5af37afc8a8d8ea3e" +dependencies = [ + "zstd-sys", +] + +[[package]] +name = "zstd-sys" +version = "2.0.9+zstd.1.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e16efa8a874a0481a574084d34cc26fdb3b99627480f785888deb6386506656" +dependencies = [ + "cc", + "pkg-config", +] diff --git a/rust/Cargo.toml b/rust/Cargo.toml new file mode 100644 index 000000000..39e776297 --- /dev/null +++ b/rust/Cargo.toml @@ -0,0 +1,124 @@ +[workspace] +resolver = "2" + +members = ["indexer-metrics", "moving-average", "processor", "server-framework"] + +[workspace.package] +authors = ["Aptos Labs "] +edition = "2021" +homepage = "https://aptoslabs.com" +license = "Apache-2.0" +publish = false +repository = "https://github.com/aptos-labs/aptos-indexer-processors" +rust-version = "1.75" + +[workspace.dependencies] +processor = { path = "processor" } +server-framework = { path = "server-framework" } +aptos-moving-average = { path = "moving-average" } + +ahash = { version = "0.8.7", features = ["serde"] } +anyhow = "1.0.62" +aptos-protos = { git = "https://github.com/aptos-labs/aptos-core.git", rev = "d76b5bb423b78b2b9affc72d3853f0d973d3f11f" } +aptos-system-utils = { git = "https://github.com/aptos-labs/aptos-core.git", rev = "4541add3fd29826ec57f22658ca286d2d6134b93" } +async-trait = "0.1.53" +backtrace = "0.3.58" +base64 = "0.13.0" +bb8 = "0.8.1" +bcs = { git = "https://github.com/aptos-labs/bcs.git", rev = "d31fab9d81748e2594be5cd5cdf845786a30562d" } +bigdecimal = { version = "0.4.0", features = ["serde"] } +bitflags = "2.5.0" +chrono = { version = "0.4.19", features = ["clock", "serde"] } +clap = { version = "4.3.5", features = ["derive", "unstable-styles"] } +# Do NOT enable the postgres feature here, it is conditionally enabled in a feature +# block in the Cargo.toml file for the processor crate. 
+# https://github.com/aptos-labs/aptos-indexer-processors/pull/325 +diesel = { version = "2.1", features = [ + "chrono", + "postgres_backend", + "numeric", + "serde_json", +] } +# Use the crate version once this feature gets released on crates.io: +# https://github.com/weiznich/diesel_async/commit/e165e8c96a6c540ebde2d6d7c52df5c5620a4bf1 +diesel-async = { git = "https://github.com/weiznich/diesel_async.git", rev = "d02798c67065d763154d7272dd0c09b39757d0f2", features = [ + "async-connection-wrapper", + "postgres", + "bb8", + "tokio", +] } +diesel_migrations = { version = "2.1.0", features = ["postgres"] } +diesel_async_migrations = { git = "https://github.com/niroco/diesel_async_migrations", rev = "11f331b73c5cfcc894380074f748d8fda710ac12" } +enum_dispatch = "0.3.12" +field_count = "0.1.1" +futures = "0.3.30" +futures-core = "0.3.25" +futures-util = "0.3.21" +gcloud-sdk = { version = "0.20.4", features = [ + "google-cloud-bigquery-storage-v1", +] } +cloud-storage = { version = "0.11.1", features = ["global-client"] } +google-cloud-googleapis = "0.10.0" +google-cloud-pubsub = "0.18.0" +hex = "0.4.3" +itertools = "0.12.1" +lazy_static = "1.4.0" +jemallocator = { version = "0.5.0", features = [ + "profiling", + "unprefixed_malloc_on_supported_platforms", +] } +kanal = { version = "0.1.0-pre8", features = ["async"] } +once_cell = "1.10.0" +num_cpus = "1.16.0" +pbjson = "0.5.1" +prometheus = { version = "0.13.0", default-features = false } +prost = { version = "0.12.3", features = ["no-recursion-limit"] } +prost-types = "0.12.3" +regex = "1.5.5" +reqwest = { version = "0.11.20", features = [ + "blocking", + "cookies", + "json", + "stream", +] } +serde = { version = "1.0.193", features = ["derive", "rc"] } +serde_json = { version = "1.0.81", features = ["preserve_order"] } +serde_yaml = "0.8.24" +sha2 = "0.9.3" +sha3 = "0.9.1" +strum = { version = "0.24.1", features = ["derive"] } +tempfile = "3.3.0" +toml = "0.7.4" +tracing-subscriber = { version = "0.3.17", features = ["json", "env-filter"] } +tiny-keccak = { version = "2.0.2", features = ["keccak", "sha3"] } +tokio = { version = "1.35.1", features = ["full"] } +tonic = { version = "0.11.0", features = [ + "tls", + "tls-roots", + "transport", + "prost", + "gzip", + "codegen", + "zstd", +] } +tracing = "0.1.34" +unescape = "0.1.0" +url = { version = "2.4.0", features = ["serde"] } +warp = { version = "0.3.5", features = ["tls"] } + +# Postgres SSL support +native-tls = "0.2.11" +postgres-native-tls = "0.5.0" +tokio-postgres = "0.7.10" + +# Parquet support +arrow = "52.0.0" +parquet = "52.0.0" +num = "0.4.0" +google-cloud-storage = "0.13.0" +hyper = { version = "0.14.18", features = ["full"] } +parquet_derive = { version = "52.0.0" } +canonical_json = "0.5.0" +allocative = "0.3.3" +allocative_derive = "0.3.3" +uuid = { version = "1.8.0", features = ["v4"] } diff --git a/rust/Dockerfile b/rust/Dockerfile new file mode 100644 index 000000000..fbf528cdd --- /dev/null +++ b/rust/Dockerfile @@ -0,0 +1,59 @@ +### Indexer Processor Image ### + +# Stage 1: Build the binary + +FROM rust:slim-bullseye as builder + +WORKDIR /app + +COPY --link . 
/app + +RUN apt-get update && apt-get install -y cmake curl clang git pkg-config libssl-dev libdw-dev libpq-dev lld +ENV CARGO_NET_GIT_FETCH_WITH_CLI true +RUN cargo build --locked --release -p processor +RUN cp target/release/processor /usr/local/bin +RUN cargo build --locked --release -p indexer-metrics +RUN cp target/release/indexer-metrics /usr/local/bin + +# add build info +ARG GIT_TAG +ENV GIT_TAG ${GIT_TAG} +ARG GIT_BRANCH +ENV GIT_BRANCH ${GIT_BRANCH} +ARG GIT_SHA +ENV GIT_SHA ${GIT_SHA} + +# Stage 2: Create the final image + +FROM debian:bullseye-slim + +COPY --from=builder /usr/local/bin/processor /usr/local/bin +COPY --from=builder /usr/local/bin/indexer-metrics /usr/local/bin + +RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked \ + apt-get update && apt-get install --no-install-recommends -y \ + libssl1.1 \ + ca-certificates \ + net-tools \ + tcpdump \ + iproute2 \ + netcat \ + libdw-dev \ + libpq-dev \ + curl + +ENV RUST_LOG_FORMAT=json + +# add build info +ARG GIT_TAG +ENV GIT_TAG ${GIT_TAG} +ARG GIT_BRANCH +ENV GIT_BRANCH ${GIT_BRANCH} +ARG GIT_SHA +ENV GIT_SHA ${GIT_SHA} + +# The health check port +EXPOSE 8084 + +ENTRYPOINT ["/usr/local/bin/processor"] diff --git a/rust/clippy.toml b/rust/clippy.toml new file mode 100644 index 000000000..f0b060c59 --- /dev/null +++ b/rust/clippy.toml @@ -0,0 +1,8 @@ +# cyclomatic complexity is not always useful +cognitive-complexity-threshold = 100 +# types are used for safety encoding +type-complexity-threshold = 10000 +# The state sync driver requires a lot of wiring and channel handles +too-many-arguments-threshold = 14 +# Reasonably large enum variants are okay +enum-variant-size-threshold = 1000 diff --git a/rust/indexer-metrics/Cargo.toml b/rust/indexer-metrics/Cargo.toml new file mode 100644 index 000000000..15ec54671 --- /dev/null +++ b/rust/indexer-metrics/Cargo.toml @@ -0,0 +1,27 @@ +[package] +name = "indexer-metrics" +version = "1.0.0" + +# Workspace inherited keys +authors = { workspace = true } +edition = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +publish = { workspace = true } +repository = { workspace = true } +rust-version = { workspace = true } + +[dependencies] +anyhow = { workspace = true } +async-trait = { workspace = true } +chrono = { workspace = true } +clap = { workspace = true } +futures = { workspace = true } +once_cell = { workspace = true } +prometheus = { workspace = true } +reqwest = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +server-framework = { workspace = true } +tokio = { workspace = true } +tracing = { workspace = true } diff --git a/rust/indexer-metrics/README.md b/rust/indexer-metrics/README.md new file mode 100644 index 000000000..b98a98808 --- /dev/null +++ b/rust/indexer-metrics/README.md @@ -0,0 +1,7 @@ +This is effectively a cron job that checks key metrics for Indexer and publishes them to Prometheus.
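+
+The service is configured via the `PostProcessorConfig` struct in `src/main.rs` (`hasura_graphql_endpoint`, `fullnode_rest_api_endpoint`, `chain_name`). As a rough sketch, a `config.yaml` could look like the following; the `health_check_port`/`server_config` nesting is an assumption about how the server-framework wraps runnable configs, and the URLs are placeholders to replace with your own endpoints:
+```
+# Hypothetical example only; the server_config nesting and the values are assumptions.
+health_check_port: 8084
+server_config:
+  hasura_graphql_endpoint: "https://example-hasura.endpoint/v1/graphql"
+  fullnode_rest_api_endpoint: "https://example-fullnode.endpoint/v1"
+  chain_name: "testnet"
+```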
+ +## How to run +``` +cargo run --release -- -c config.yaml +``` +You should also be able to see metrics moving by navigating to `0.0.0.0:{health_check_port}/metrics` diff --git a/rust/indexer-metrics/src/lib.rs b/rust/indexer-metrics/src/lib.rs new file mode 100644 index 000000000..f8b239aa7 --- /dev/null +++ b/rust/indexer-metrics/src/lib.rs @@ -0,0 +1,5 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +pub mod metrics; +pub mod util; diff --git a/rust/indexer-metrics/src/main.rs b/rust/indexer-metrics/src/main.rs new file mode 100644 index 000000000..f1515f85f --- /dev/null +++ b/rust/indexer-metrics/src/main.rs @@ -0,0 +1,194 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use anyhow::Result; +use chrono::NaiveDateTime; +use clap::Parser; +use indexer_metrics::{ + metrics::{ + HASURA_API_LATEST_TRANSACTION_LATENCY_IN_SECS, HASURA_API_LATEST_TRANSACTION_TIMESTAMP, + HASURA_API_LATEST_VERSION, HASURA_API_LATEST_VERSION_TIMESTAMP, PFN_LEDGER_TIMESTAMP, + PFN_LEDGER_VERSION, TASK_FAILURE_COUNT, + }, + util::{deserialize_from_string, fetch_processor_status_with_timeout, get_url_with_timeout}, +}; +use serde::{Deserialize, Serialize}; +use server_framework::{RunnableConfig, ServerArgs}; +use tokio::time::Duration; + +const QUERY_TIMEOUT_MS: u64 = 500; +const MIN_TIME_QUERIES_MS: u64 = 500; +const MICROSECONDS_MULTIPLIER: f64 = 1_000_000.0; + +#[derive(Debug, Deserialize, Serialize)] +struct FullnodeResponse { + #[serde(deserialize_with = "deserialize_from_string")] + ledger_version: u64, + #[serde(deserialize_with = "deserialize_from_string")] + ledger_timestamp: u64, +} + +#[derive(Debug, Deserialize, Serialize)] +struct ProcessorStatus { + processor: String, + #[serde(deserialize_with = "deserialize_from_string")] + last_updated: NaiveDateTime, + last_success_version: u64, + #[serde(deserialize_with = "deserialize_from_string")] + last_transaction_timestamp: NaiveDateTime, +} + +#[derive(Debug, Deserialize, Serialize)] +struct ProcessorsResponseInner { + processor_status: Vec<ProcessorStatus>, +} + +#[derive(Debug, Deserialize, Serialize)] +struct ProcessorsResponse { + data: ProcessorsResponseInner, +} + +#[derive(Clone, Debug, Deserialize, Serialize)] +#[serde(deny_unknown_fields)] +pub struct PostProcessorConfig { + pub hasura_graphql_endpoint: Option<String>, + pub fullnode_rest_api_endpoint: Option<String>, + pub chain_name: String, +} + +#[async_trait::async_trait] +impl RunnableConfig for PostProcessorConfig { + async fn run(&self) -> Result<()> { + let mut tasks = vec![]; + let hasura_graphql_endpoint = self.hasura_graphql_endpoint.clone(); + let fullnode_rest_api_endpoint = self.fullnode_rest_api_endpoint.clone(); + let chain_name = self.chain_name.clone(); + + if let Some(endpoint) = hasura_graphql_endpoint { + tasks.push(tokio::spawn(start_processor_status_fetch( + endpoint, + chain_name.clone(), + ))); + } + if let Some(fullnode) = fullnode_rest_api_endpoint { + tasks.push(tokio::spawn(start_fn_fetch(fullnode, chain_name))); + } + + let _ = futures::future::join_all(tasks).await; + unreachable!("All tasks should run forever"); + } + + fn get_server_name(&self) -> String { + "idxbg".to_string() + } +} + +#[tokio::main] +async fn main() -> Result<()> { + let args = ServerArgs::parse(); + args.run::<PostProcessorConfig>(tokio::runtime::Handle::current()) + .await +} + +async fn start_fn_fetch(url: String, chain_name: String) { + loop { + let result = get_url_with_timeout(&url, QUERY_TIMEOUT_MS).await; + let time_now = tokio::time::Instant::now(); + + // Handle the result +
match result { + Ok(Ok(response)) => match response.json::<FullnodeResponse>().await { + Ok(resp) => { + tracing::info!(url = &url, response = ?resp, "Request succeeded"); + PFN_LEDGER_VERSION + .with_label_values(&[&chain_name]) + .set(resp.ledger_version as i64); + PFN_LEDGER_TIMESTAMP + .with_label_values(&[&chain_name]) + .set(resp.ledger_timestamp as f64 / MICROSECONDS_MULTIPLIER); + }, + Err(err) => { + tracing::error!(url = &url, error = ?err, "Parsing error"); + TASK_FAILURE_COUNT + .with_label_values(&["fullnode", &chain_name]) + .inc(); + }, + }, + Ok(Err(err)) => { + // Request encountered an error within the timeout + tracing::error!(url = &url, error = ?err, "Request error"); + TASK_FAILURE_COUNT + .with_label_values(&["fullnode", &chain_name]) + .inc(); + }, + Err(_) => { + // Request timed out + tracing::error!(url = &url, "Request timed out"); + TASK_FAILURE_COUNT + .with_label_values(&["fullnode", &chain_name]) + .inc(); + }, + } + let elapsed = time_now.elapsed().as_millis() as u64; + // Sleep for a max of 500ms between queries + if elapsed < MIN_TIME_QUERIES_MS { + tokio::time::sleep(Duration::from_millis(MIN_TIME_QUERIES_MS - elapsed)).await; + } + } +} + +async fn start_processor_status_fetch(url: String, chain_name: String) { + loop { + let result = fetch_processor_status_with_timeout(&url, QUERY_TIMEOUT_MS).await; + let time_now = tokio::time::Instant::now(); + + // Handle the result + match result { + Ok(Ok(response)) => match response.json::<ProcessorsResponse>().await { + Ok(resp) => { + tracing::info!(url = &url, response = ?resp, "Request succeeded"); + // Process the data as needed + let system_time_now = chrono::Utc::now().naive_utc(); + for processor in resp.data.processor_status { + HASURA_API_LATEST_VERSION + .with_label_values(&[&processor.processor, &chain_name]) + .set(processor.last_success_version as i64); + HASURA_API_LATEST_VERSION_TIMESTAMP + .with_label_values(&[&processor.processor, &chain_name]) + .set(processor.last_updated.and_utc().timestamp_micros() as f64 * 1e-6); + HASURA_API_LATEST_TRANSACTION_TIMESTAMP + .with_label_values(&[&processor.processor, &chain_name]) + .set( + processor.last_transaction_timestamp.and_utc().timestamp_micros() as f64 + * 1e-6, + ); + let latency = system_time_now - processor.last_transaction_timestamp; + HASURA_API_LATEST_TRANSACTION_LATENCY_IN_SECS + .with_label_values(&[&processor.processor, &chain_name]) + .set(latency.num_milliseconds() as f64 * 1e-3); + } + }, + Err(err) => { + tracing::error!(url = &url, error = ?err, "Parsing error"); + // Increment failure count or other error handling + }, + }, + Ok(Err(err)) => { + // Request encountered an error within the timeout + tracing::error!(url = &url, error = ?err, "Request error"); + // Increment failure count or other error handling + }, + Err(_) => { + // Request timed out + tracing::error!(url = &url, "Request timed out"); + // Increment failure count or other error handling + }, + } + + let elapsed = time_now.elapsed().as_millis() as u64; + // Sleep for a max of 500ms between queries + if elapsed < MIN_TIME_QUERIES_MS { + tokio::time::sleep(Duration::from_millis(MIN_TIME_QUERIES_MS - elapsed)).await; + } + } +} diff --git a/rust/indexer-metrics/src/metrics.rs b/rust/indexer-metrics/src/metrics.rs new file mode 100644 index 000000000..6d8603fdf --- /dev/null +++ b/rust/indexer-metrics/src/metrics.rs @@ -0,0 +1,72 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use once_cell::sync::Lazy; +use prometheus::{ + register_gauge_vec, register_int_counter_vec,
register_int_gauge_vec, GaugeVec, IntCounterVec, + IntGaugeVec, +}; + +/// Task failure count. +pub static TASK_FAILURE_COUNT: Lazy<IntCounterVec> = Lazy::new(|| { + register_int_counter_vec!( + "indexer_metrics_task_failure_count", + "Task failure count from indexer metrics service", + &["task_name", "chain_name"], + ) + .unwrap() +}); + +pub static HASURA_API_LATEST_VERSION: Lazy<IntGaugeVec> = Lazy::new(|| { + register_int_gauge_vec!( + "indexer_metrics_hasura_latest_version", + "Processor latest version measured from indexer metrics service", + &["processor_name", "chain_name"], + ) + .unwrap() +}); + +pub static HASURA_API_LATEST_VERSION_TIMESTAMP: Lazy<GaugeVec> = Lazy::new(|| { + register_gauge_vec!( + "indexer_metrics_hasura_latest_version_timestamp_secs", + "Processor latest timestamp (unix timestamp) measured from indexer metrics service", + &["processor_name", "chain_name"], + ) + .unwrap() +}); + +pub static HASURA_API_LATEST_TRANSACTION_TIMESTAMP: Lazy<GaugeVec> = Lazy::new(|| { + register_gauge_vec!( + "indexer_metrics_hasura_latest_transaction_timestamp_secs", + "Latest transaction timestamp (unix timestamp), i.e., block timestamp.", + &["processor_name", "chain_name"], + ) + .unwrap() +}); + +pub static HASURA_API_LATEST_TRANSACTION_LATENCY_IN_SECS: Lazy<GaugeVec> = Lazy::new(|| { + register_gauge_vec!( + "indexer_metrics_hasura_latest_transaction_latency_secs", + "Latest transaction e2e latency, from block timestamp to insertion time of db.", + &["processor_name", "chain_name"], + ) + .unwrap() +}); + +pub static PFN_LEDGER_VERSION: Lazy<IntGaugeVec> = Lazy::new(|| { + register_int_gauge_vec!( + "indexer_metrics_pfn_ledger_version", + "Ledger latest version measured from indexer metrics service", + &["chain_name"], + ) + .unwrap() +}); + +pub static PFN_LEDGER_TIMESTAMP: Lazy<GaugeVec> = Lazy::new(|| { + register_gauge_vec!( + "indexer_metrics_pfn_ledger_timestamp_secs", + "Ledger latest timestamp (unix timestamp) measured from indexer metrics service", + &["chain_name"], + ) + .unwrap() +}); diff --git a/rust/indexer-metrics/src/util.rs b/rust/indexer-metrics/src/util.rs new file mode 100644 index 000000000..732270492 --- /dev/null +++ b/rust/indexer-metrics/src/util.rs @@ -0,0 +1,66 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use reqwest::Client; +use serde::{Deserialize, Deserializer}; +use std::{str::FromStr, time::Duration}; +use tokio::time::{error::Elapsed, timeout}; + +/// Deserialize from string to type T +pub fn deserialize_from_string<'de, D, T>(deserializer: D) -> Result<T, D::Error> +where + D: Deserializer<'de>, + T: FromStr, + <T as FromStr>::Err: std::fmt::Display, +{ + use serde::de::Error; + + let s = <String>::deserialize(deserializer)?; + s.parse::<T>().map_err(D::Error::custom) +} + +pub async fn fetch_processor_status_with_timeout( + url: &str, + timeout_ms: u64, +) -> Result<Result<reqwest::Response, reqwest::Error>, Elapsed> { + let data = serde_json::json!({ + "query": r#" + { + processor_status { + processor + last_updated + last_success_version + last_transaction_timestamp + } + } + "# + }); + post_url_with_timeout(url, data, timeout_ms).await +} + +async fn post_url_with_timeout( + url: &str, + data: serde_json::Value, + timeout_ms: u64, +) -> Result<Result<reqwest::Response, reqwest::Error>, Elapsed> { + let client = Client::new(); + + // Set the timeout duration + let timeout_duration = Duration::from_millis(timeout_ms); + + // Use tokio::time::timeout to set a timeout for the request + timeout(timeout_duration, client.post(url).json(&data).send()).await +} + +pub async fn get_url_with_timeout( + url: &str, + timeout_ms: u64, +) -> Result<Result<reqwest::Response, reqwest::Error>, Elapsed> { + let client = Client::new(); + + // Set the timeout
duration + let timeout_duration = Duration::from_millis(timeout_ms); + + // Use tokio::time::timeout to set a timeout for the request + timeout(timeout_duration, client.get(url).send()).await +} diff --git a/rust/moving-average/Cargo.toml b/rust/moving-average/Cargo.toml new file mode 100644 index 000000000..657a33534 --- /dev/null +++ b/rust/moving-average/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "aptos-moving-average" +description = "Utility to calculate moving average such as TPS" +version = "0.1.0" + +# Workspace inherited keys +authors = { workspace = true } +edition = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +publish = { workspace = true } +repository = { workspace = true } +rust-version = { workspace = true } + +[dependencies] +chrono = { workspace = true } diff --git a/rust/moving-average/src/lib.rs b/rust/moving-average/src/lib.rs new file mode 100644 index 000000000..826949de5 --- /dev/null +++ b/rust/moving-average/src/lib.rs @@ -0,0 +1,84 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +#![forbid(unsafe_code)] + +use std::collections::VecDeque; + +// TPS data +pub struct MovingAverage { + window_millis: u64, + // (timestamp_millis, value) + values: VecDeque<(u64, u64)>, + sum: u64, +} + +impl MovingAverage { + pub fn new(window_millis: u64) -> Self { + let now = chrono::Utc::now().naive_utc().and_utc().timestamp_millis() as u64; + let mut queue = VecDeque::new(); + queue.push_back((now, 0)); + Self { + window_millis, + values: queue, + sum: 0, + } + } + + pub fn tick_now(&mut self, value: u64) { + let now = chrono::Utc::now().naive_utc().and_utc().timestamp_millis() as u64; + self.tick(now, value); + } + + pub fn tick(&mut self, timestamp_millis: u64, value: u64) -> f64 { + self.values.push_back((timestamp_millis, value)); + self.sum += value; + while self.values.len() > 2 { + match self.values.front() { + None => break, + Some((ts, val)) => { + if timestamp_millis - ts > self.window_millis { + self.sum -= val; + self.values.pop_front(); + } else { + break; + } + }, + } + } + self.avg() + } + + // Should only be called after tick_now/tick has been called. + pub fn avg(&self) -> f64 { + if self.values.len() < 2 { + 0.0 + } else { + let elapsed = self.values.back().unwrap().0 - self.values.front().unwrap().0; + (self.sum * 1000) as f64 / elapsed as f64 + } + } + + pub fn sum(&self) -> u64 { + self.sum + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test_moving_average() { + // 10 Second window. + let mut ma = MovingAverage::new(10_000); + // 9 seconds spent at 100 TPS. + for _ in 0..9 { + ma.tick_now(100); + std::thread::sleep(std::time::Duration::from_secs(1)); + } + // No matter what algorithm we use, the average should be 99 at least.
+ let avg = ma.avg(); + assert!(avg >= 99.0, "Average is too low: {}", avg); + } +} diff --git a/rust/parquet-bq-scripts/move_resources-create.sql b/rust/parquet-bq-scripts/move_resources-create.sql new file mode 100644 index 000000000..67197dd02 --- /dev/null +++ b/rust/parquet-bq-scripts/move_resources-create.sql @@ -0,0 +1,21 @@ +CREATE TABLE `{}` +( + txn_version INT64, + write_set_change_index INT64, + block_height INT64, + block_timestamp TIMESTAMP, + resource_address STRING, + resource_type STRING, + module STRING, + fun STRING, + is_deleted BOOL, + generic_type_params STRING, + data STRING, + state_key_hash STRING, + + bq_inserted_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP(), + PRIMARY KEY(txn_version, write_set_change_index) NOT ENFORCED +) +PARTITION BY TIMESTAMP_TRUNC(block_timestamp, DAY) +CLUSTER BY txn_version, resource_type, block_height, state_key_hash, resource_address +; diff --git a/rust/parquet-bq-scripts/move_resources-merge.sql b/rust/parquet-bq-scripts/move_resources-merge.sql new file mode 100644 index 000000000..0fa25b6a9 --- /dev/null +++ b/rust/parquet-bq-scripts/move_resources-merge.sql @@ -0,0 +1,49 @@ +MERGE INTO `{}` AS main +USING ( + SELECT * + FROM ( + SELECT + *, + ROW_NUMBER() OVER ( + PARTITION BY -- primary key(s) + txn_version, + write_set_change_index + ) AS row_num + FROM `{}` + ) AS foo + WHERE foo.row_num = 1 +) AS staging + ON + main.txn_version = staging.txn_version -- primary key(s) + AND main.write_set_change_index = staging.write_set_change_index +WHEN NOT MATCHED BY TARGET +THEN + INSERT ( + txn_version, + write_set_change_index, + block_height, + block_timestamp, + resource_address, + resource_type, + module, + fun, + is_deleted, + generic_type_params, + data, + state_key_hash + ) + VALUES ( + staging.txn_version, + staging.write_set_change_index, + staging.block_height, + staging.block_timestamp, + staging.resource_address, + staging.resource_type, + staging.module, + staging.fun, + staging.is_deleted, + staging.generic_type_params, + staging.data, + staging.state_key_hash + ); + \ No newline at end of file diff --git a/rust/parquet-bq-scripts/transactions-create.sql b/rust/parquet-bq-scripts/transactions-create.sql new file mode 100644 index 000000000..bcf75efa1 --- /dev/null +++ b/rust/parquet-bq-scripts/transactions-create.sql @@ -0,0 +1,26 @@ +CREATE TABLE `{}` +( + txn_version INT64, + block_height INT64, + epoch INT64, + txn_type STRING, + payload STRING, + payload_type STRING, + gas_used INT64, + success BOOL, + vm_status STRING, + num_events INT64, + num_write_set_changes INT64, + txn_hash STRING, + state_change_hash STRING, + event_root_hash STRING, + state_checkpoint_hash STRING, + accumulator_root_hash STRING, + block_timestamp TIMESTAMP, + -- + bq_inserted_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP(), + PRIMARY KEY(txn_version) NOT ENFORCED +) +PARTITION BY TIMESTAMP_TRUNC(block_timestamp,DAY) +CLUSTER BY txn_version, txn_type, block_height, txn_hash +; diff --git a/rust/parquet-bq-scripts/transactions-merge.sql b/rust/parquet-bq-scripts/transactions-merge.sql new file mode 100644 index 000000000..fe5fd7cd2 --- /dev/null +++ b/rust/parquet-bq-scripts/transactions-merge.sql @@ -0,0 +1,57 @@ +MERGE INTO `{}` AS main +USING ( + SELECT * + FROM + ( + SELECT + *, + ROW_NUMBER() OVER ( + PARTITION BY -- primary key(s) + txn_version + ) AS row_num + FROM `{}` + ) AS foo + WHERE foo.row_num = 1 +) AS staging + ON main.txn_version = staging.txn_version -- primary key(s) +WHEN NOT MATCHED BY TARGET +THEN + INSERT ( + txn_version, + block_height, 
+ epoch, + txn_type, + payload, + payload_type, + gas_used, + success, + vm_status, + num_events, + num_write_set_changes, + txn_hash, + state_change_hash, + event_root_hash, + state_checkpoint_hash, + accumulator_root_hash, + block_timestamp + ) + VALUES ( + staging.txn_version, + staging.block_height, + staging.epoch, + staging.txn_type, + staging.payload, + staging.payload_type, + staging.gas_used, + staging.success, + staging.vm_status, + staging.num_events, + staging.num_write_set_changes, + staging.txn_hash, + staging.state_change_hash, + staging.event_root_hash, + staging.state_checkpoint_hash, + staging.accumulator_root_hash, + staging.block_timestamp + ) +; diff --git a/rust/parquet-bq-scripts/write_set_changes_create.sql b/rust/parquet-bq-scripts/write_set_changes_create.sql new file mode 100644 index 000000000..8cd33f7e9 --- /dev/null +++ b/rust/parquet-bq-scripts/write_set_changes_create.sql @@ -0,0 +1,16 @@ +CREATE TABLE `{}` +( + txn_version INT64, + write_set_change_index INT64, + state_key_hash STRING, + change_type STRING, + resource_address STRING, + block_height INT64, + block_timestamp TIMESTAMP, + -- + bq_inserted_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP(), + PRIMARY KEY(txn_version, write_set_change_index) NOT ENFORCED +) +PARTITION BY TIMESTAMP_TRUNC(block_timestamp, DAY) +CLUSTER BY txn_version, change_type, block_height, state_key_hash +; \ No newline at end of file diff --git a/rust/parquet-bq-scripts/write_set_changes_merge.sql b/rust/parquet-bq-scripts/write_set_changes_merge.sql new file mode 100644 index 000000000..025c38b5b --- /dev/null +++ b/rust/parquet-bq-scripts/write_set_changes_merge.sql @@ -0,0 +1,40 @@ +MERGE INTO `{}` AS main +USING ( + SELECT * + FROM + ( + SELECT + *, + ROW_NUMBER() OVER ( + PARTITION BY -- primary key(s) + txn_version, + write_set_change_index + ORDER BY inserted_at DESC + ) AS row_num + FROM `{}` + ) AS foo + WHERE foo.row_num = 1 +) AS staging + ON main.txn_version = staging.txn_version -- primary key(s) + AND main.write_set_change_index = staging.write_set_change_index +WHEN NOT MATCHED BY TARGET +THEN + INSERT ( + txn_version, + write_set_change_index, + state_key_hash, + change_type, + resource_address, + block_height, + block_timestamp + ) + VALUES ( + staging.txn_version, + staging.write_set_change_index, + staging.state_key_hash, + staging.change_type, + staging.resource_address, + staging.block_height, + staging.block_timestamp + ) +; diff --git a/rust/processor/Cargo.lock b/rust/processor/Cargo.lock new file mode 100644 index 000000000..07ea9c44e --- /dev/null +++ b/rust/processor/Cargo.lock @@ -0,0 +1,2382 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing.
+version = 3 + +[[package]] +name = "addr2line" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4fa78e18c64fce05e902adecd7a5eed15a5e0a3439f7b0e169f0252214865e3" +dependencies = [ + "gimli", +] + +[[package]] +name = "adler" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" + +[[package]] +name = "android-tzdata" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" + +[[package]] +name = "android_system_properties" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" +dependencies = [ + "libc", +] + +[[package]] +name = "anstream" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ca84f3628370c59db74ee214b3263d58f9aadd9b4fe7e711fd87dc452b7f163" +dependencies = [ + "anstyle", + "anstyle-parse", + "anstyle-query", + "anstyle-wincon", + "colorchoice", + "is-terminal", + "utf8parse", +] + +[[package]] +name = "anstyle" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a30da5c5f2d5e72842e00bcb57657162cdabef0931f40e2deb9b4140440cecd" + +[[package]] +name = "anstyle-parse" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "938874ff5980b03a87c5524b3ae5b59cf99b1d6bc836848df7bc5ada9643c333" +dependencies = [ + "utf8parse", +] + +[[package]] +name = "anstyle-query" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b" +dependencies = [ + "windows-sys", +] + +[[package]] +name = "anstyle-wincon" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "180abfa45703aebe0093f79badacc01b8fd4ea2e35118747e5811127f926e188" +dependencies = [ + "anstyle", + "windows-sys", +] + +[[package]] +name = "anyhow" +version = "1.0.71" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c7d0618f0e0b7e8ff11427422b64564d5fb0be1940354bfe2e0529b18a9d9b8" + +[[package]] +name = "aptos-indexer-grpc-parser" +version = "1.0.0" +dependencies = [ + "anyhow", + "async-trait", + "base64 0.13.1", + "bcs", + "bigdecimal", + "chrono", + "clap", + "diesel", + "diesel_migrations", + "field_count", + "futures", + "futures-util", + "gcloud-sdk", + "hex", + "once_cell", + "prost", + "prost-types", + "serde", + "serde_json", + "sha2", + "tokio", + "tonic 0.8.3", + "tracing", +] + +[[package]] +name = "async-compression" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62b74f44609f0f91493e3082d3734d98497e094777144380ea4db9f9905dd5b6" +dependencies = [ + "flate2", + "futures-core", + "memchr", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "async-stream" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" +dependencies = [ + "async-stream-impl", + "futures-core", + "pin-project-lite", +] + +[[package]] +name = "async-stream-impl" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.25", +] + +[[package]] +name = "async-trait" +version = "0.1.71" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a564d521dd56509c4c47480d00b80ee55f7e385ae48db5744c67ad50c92d2ebf" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.25", +] + +[[package]] +name = "autocfg" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" + +[[package]] +name = "axum" +version = "0.6.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8175979259124331c1d7bf6586ee7e0da434155e4b2d48ec2c8386281d8df39" +dependencies = [ + "async-trait", + "axum-core", + "bitflags 1.3.2", + "bytes", + "futures-util", + "http", + "http-body", + "hyper", + "itoa", + "matchit", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite", + "rustversion", + "serde", + "sync_wrapper", + "tower", + "tower-layer", + "tower-service", +] + +[[package]] +name = "axum-core" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c" +dependencies = [ + "async-trait", + "bytes", + "futures-util", + "http", + "http-body", + "mime", + "rustversion", + "tower-layer", + "tower-service", +] + +[[package]] +name = "backtrace" +version = "0.3.68" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4319208da049c43661739c5fade2ba182f09d1dc2299b32298d3a31692b17e12" +dependencies = [ + "addr2line", + "cc", + "cfg-if", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", +] + +[[package]] +name = "base64" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" + +[[package]] +name = "base64" +version = "0.21.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "604178f6c5c21f02dc555784810edfb88d34ac2c73b2eae109655649ee73ce3d" + +[[package]] +name = "bcs" +version = "0.1.4" +source = "git+https://github.com/aptos-labs/bcs.git?rev=d31fab9d81748e2594be5cd5cdf845786a30562d#d31fab9d81748e2594be5cd5cdf845786a30562d" +dependencies = [ + "serde", + "thiserror", +] + +[[package]] +name = "bigdecimal" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6773ddc0eafc0e509fb60e48dff7f450f8e674a0686ae8605e8d9901bd5eefa" +dependencies = [ + "num-bigint", + "num-integer", + "num-traits", + "serde", +] + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "bitflags" +version = "2.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "630be753d4e58660abd17930c71b647fe46c27ea6b63cc59e1e3851406972e42" + +[[package]] +name = "block-buffer" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" +dependencies = [ + "generic-array", +] + +[[package]] +name = "bumpalo" +version = "3.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1" + +[[package]] +name = 
"byteorder" +version = "1.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" + +[[package]] +name = "bytes" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" + +[[package]] +name = "cc" +version = "1.0.79" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "chrono" +version = "0.4.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec837a71355b28f6556dbd569b37b3f363091c0bd4b2e735674521b4c5fd9bc5" +dependencies = [ + "android-tzdata", + "iana-time-zone", + "js-sys", + "num-traits", + "serde", + "time 0.1.45", + "wasm-bindgen", + "winapi", +] + +[[package]] +name = "clap" +version = "4.3.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1640e5cc7fb47dbb8338fd471b105e7ed6c3cb2aeb00c2e067127ffd3764a05d" +dependencies = [ + "clap_builder", + "clap_derive", + "once_cell", +] + +[[package]] +name = "clap_builder" +version = "4.3.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "98c59138d527eeaf9b53f35a77fcc1fad9d883116070c63d5de1c7dc7b00c72b" +dependencies = [ + "anstream", + "anstyle", + "clap_lex", + "strsim", +] + +[[package]] +name = "clap_derive" +version = "4.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8cd2b2a819ad6eec39e8f1d6b53001af1e5469f8c177579cdaeb313115b825f" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn 2.0.25", +] + +[[package]] +name = "clap_lex" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2da6da31387c7e4ef160ffab6d5e7f00c42626fe39aea70a7b0f1773f7dd6c1b" + +[[package]] +name = "colorchoice" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" + +[[package]] +name = "core-foundation" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "core-foundation-sys" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" + +[[package]] +name = "cpufeatures" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a17b76ff3a4162b0b27f354a0c87015ddad39d35f9c0c36607a3bdd175dde1f1" +dependencies = [ + "libc", +] + +[[package]] +name = "crc32fast" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "diesel" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7a532c1f99a0f596f6960a60d1e119e91582b24b39e2d83a190e61262c3ef0c" +dependencies = [ + "bigdecimal", + "bitflags 2.3.3", + "byteorder", + "chrono", + "diesel_derives", + 
"itoa", + "num-bigint", + "num-integer", + "num-traits", + "pq-sys", + "r2d2", + "serde_json", +] + +[[package]] +name = "diesel_derives" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74398b79d81e52e130d991afeed9c86034bb1b7735f46d2f5bf7deb261d80303" +dependencies = [ + "diesel_table_macro_syntax", + "proc-macro2", + "quote", + "syn 2.0.25", +] + +[[package]] +name = "diesel_migrations" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6036b3f0120c5961381b570ee20a02432d7e2d27ea60de9578799cf9156914ac" +dependencies = [ + "diesel", + "migrations_internals", + "migrations_macros", +] + +[[package]] +name = "diesel_table_macro_syntax" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc5557efc453706fed5e4fa85006fe9817c224c3f480a34c7e5959fd700921c5" +dependencies = [ + "syn 2.0.25", +] + +[[package]] +name = "digest" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" +dependencies = [ + "generic-array", +] + +[[package]] +name = "either" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91" + +[[package]] +name = "encoding_rs" +version = "0.8.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "071a31f4ee85403370b58aca746f01041ede6f0da2730960ad001edc2b71b394" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "equivalent" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" + +[[package]] +name = "errno" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4bcfec3a70f97c962c307b2d2c56e358cf1d00b558d74262b5f929ee8cc7e73a" +dependencies = [ + "errno-dragonfly", + "libc", + "windows-sys", +] + +[[package]] +name = "errno-dragonfly" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" +dependencies = [ + "cc", + "libc", +] + +[[package]] +name = "field_count" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "284d5f85dd574cf01094bca24aefa69a43539dbfc72b1326f038d540b2daadc7" +dependencies = [ + "field_count_derive", +] + +[[package]] +name = "field_count_derive" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1320970ff3b1c1cacc6a38e8cdb1aced955f29627697cd992c5ded82eb646a8" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "flate2" +version = "1.0.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b9429470923de8e8cbd4d2dc513535400b4b3fef0319fb5c4e1f520a7bef743" +dependencies = [ + "crc32fast", + "miniz_oxide", +] + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "form_urlencoded" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652" +dependencies = [ + "percent-encoding", +] + +[[package]] +name = "futures" +version = 
"0.3.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f21eda599937fba36daeb58a22e8f5cee2d14c4a17b5b7739c7c8e5e3b8230c" +dependencies = [ + "futures-channel", + "futures-core", + "futures-executor", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-channel" +version = "0.3.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2" +dependencies = [ + "futures-core", + "futures-sink", +] + +[[package]] +name = "futures-core" +version = "0.3.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c" + +[[package]] +name = "futures-executor" +version = "0.3.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-io" +version = "0.3.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" + +[[package]] +name = "futures-macro" +version = "0.3.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.25", +] + +[[package]] +name = "futures-sink" +version = "0.3.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e" + +[[package]] +name = "futures-task" +version = "0.3.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" + +[[package]] +name = "futures-util" +version = "0.3.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533" +dependencies = [ + "futures-channel", + "futures-core", + "futures-io", + "futures-macro", + "futures-sink", + "futures-task", + "memchr", + "pin-project-lite", + "pin-utils", + "slab", +] + +[[package]] +name = "gcemeta" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47d460327b24cc34c86d53d60a90e9e6044817f7906ebd9baa5c3d0ee13e1ecf" +dependencies = [ + "bytes", + "hyper", + "serde", + "serde_json", + "thiserror", + "tokio", + "tracing", +] + +[[package]] +name = "gcloud-sdk" +version = "0.20.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b46b7a2dbf38a2c5558ebc6b9053ca468155e95db4da6f5f376e5608dd26ee7" +dependencies = [ + "async-trait", + "chrono", + "futures", + "gcemeta", + "hyper", + "jsonwebtoken", + "once_cell", + "prost", + "prost-types", + "reqwest", + "secret-vault-value", + "serde", + "serde_json", + "tokio", + "tonic 0.9.2", + "tower", + "tower-layer", + "tower-util", + "tracing", + "url", +] + +[[package]] +name = "generic-array" +version = "0.14.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +dependencies = [ + "typenum", + "version_check", +] + +[[package]] +name = "getrandom" +version = "0.2.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" +dependencies = [ + "cfg-if", + "libc", + "wasi 0.11.0+wasi-snapshot-preview1", +] + +[[package]] +name = "gimli" +version = "0.27.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c80984affa11d98d1b88b66ac8853f143217b399d3c74116778ff8fdb4ed2e" + +[[package]] +name = "h2" +version = "0.3.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97ec8491ebaf99c8eaa73058b045fe58073cd6be7f596ac993ced0b0a0c01049" +dependencies = [ + "bytes", + "fnv", + "futures-core", + "futures-sink", + "futures-util", + "http", + "indexmap 1.9.3", + "slab", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "hashbrown" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" + +[[package]] +name = "hashbrown" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a" + +[[package]] +name = "heck" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" + +[[package]] +name = "hermit-abi" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "443144c8cdadd93ebf52ddb4056d257f5b52c04d3c804e657d19eb73fc33668b" + +[[package]] +name = "hex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" + +[[package]] +name = "http" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + +[[package]] +name = "http-body" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" +dependencies = [ + "bytes", + "http", + "pin-project-lite", +] + +[[package]] +name = "httparse" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" + +[[package]] +name = "httpdate" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" + +[[package]] +name = "hyper" +version = "0.14.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468" +dependencies = [ + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "h2", + "http", + "http-body", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "socket2", + "tokio", + "tower-service", + "tracing", + "want", +] + +[[package]] +name = "hyper-rustls" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d78e1e73ec14cf7375674f74d7dde185c8206fd9dea6fb6295e8a98098aaa97" +dependencies = [ + "futures-util", + "http", + "hyper", + "rustls 0.21.5", + "tokio", + "tokio-rustls 0.24.1", +] + +[[package]] +name = "hyper-timeout" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" +dependencies = [ + "hyper", + "pin-project-lite", + "tokio", + "tokio-io-timeout", +] + +[[package]] +name = "iana-time-zone" +version = "0.1.57" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2fad5b825842d2b38bd206f3e81d6957625fd7f0a361e345c30e01a0ae2dd613" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "wasm-bindgen", + "windows", +] + +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" +dependencies = [ + "cc", +] + +[[package]] +name = "idna" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" +dependencies = [ + "unicode-bidi", + "unicode-normalization", +] + +[[package]] +name = "indexmap" +version = "1.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" +dependencies = [ + "autocfg", + "hashbrown 0.12.3", +] + +[[package]] +name = "indexmap" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d" +dependencies = [ + "equivalent", + "hashbrown 0.14.0", +] + +[[package]] +name = "ipnet" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28b29a3cd74f0f4598934efe3aeba42bae0eb4680554128851ebbecb02af14e6" + +[[package]] +name = "is-terminal" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" +dependencies = [ + "hermit-abi", + "rustix", + "windows-sys", +] + +[[package]] +name = "itertools" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62b02a5381cc465bd3041d84623d0fa3b66738b52b8e2fc3bab8ad63ab032f4a" + +[[package]] +name = "js-sys" +version = "0.3.64" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a" +dependencies = [ + "wasm-bindgen", +] + +[[package]] +name = "jsonwebtoken" +version = "8.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378" +dependencies = [ + "base64 0.21.2", + "pem", + "ring", + "serde", + "serde_json", + "simple_asn1", +] + +[[package]] +name = "libc" +version = "0.2.147" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3" + +[[package]] +name = "linux-raw-sys" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09fc20d2ca12cb9f044c93e3bd6d32d523e6e2ec3db4f7b2939cd99026ecd3f0" + +[[package]] +name = "lock_api" +version = "0.4.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1cc9717a20b1bb222f333e6a92fd32f7d8a18ddc5a3191a11af45dcbf4dcd16" 
+dependencies = [ + "autocfg", + "scopeguard", +] + +[[package]] +name = "log" +version = "0.4.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4" + +[[package]] +name = "matchit" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b87248edafb776e59e6ee64a79086f65890d3510f2c656c000bf2a7e8a0aea40" + +[[package]] +name = "memchr" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" + +[[package]] +name = "migrations_internals" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f23f71580015254b020e856feac3df5878c2c7a8812297edd6c0a485ac9dada" +dependencies = [ + "serde", + "toml", +] + +[[package]] +name = "migrations_macros" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cce3325ac70e67bbab5bd837a31cae01f1a6db64e0e744a33cb03a543469ef08" +dependencies = [ + "migrations_internals", + "proc-macro2", + "quote", +] + +[[package]] +name = "mime" +version = "0.3.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" + +[[package]] +name = "mime_guess" +version = "2.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4192263c238a5f0d0c6bfd21f336a313a4ce1c450542449ca191bb657b4642ef" +dependencies = [ + "mime", + "unicase", +] + +[[package]] +name = "miniz_oxide" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" +dependencies = [ + "adler", +] + +[[package]] +name = "mio" +version = "0.8.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2" +dependencies = [ + "libc", + "wasi 0.11.0+wasi-snapshot-preview1", + "windows-sys", +] + +[[package]] +name = "num-bigint" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-integer" +version = "0.1.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" +dependencies = [ + "autocfg", + "num-traits", +] + +[[package]] +name = "num-traits" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" +dependencies = [ + "autocfg", +] + +[[package]] +name = "num_cpus" +version = "1.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" +dependencies = [ + "hermit-abi", + "libc", +] + +[[package]] +name = "object" +version = "0.31.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8bda667d9f2b5051b8833f59f3bf748b28ef54f850f4fcb389a252aa383866d1" +dependencies = [ + "memchr", +] + +[[package]] +name = "once_cell" +version = "1.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" 
+ +[[package]] +name = "opaque-debug" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" + +[[package]] +name = "openssl-probe" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" + +[[package]] +name = "parking_lot" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" +dependencies = [ + "lock_api", + "parking_lot_core", +] + +[[package]] +name = "parking_lot_core" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447" +dependencies = [ + "cfg-if", + "libc", + "redox_syscall", + "smallvec", + "windows-targets", +] + +[[package]] +name = "pem" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8835c273a76a90455d7344889b0964598e3316e2a79ede8e36f16bdcf2228b8" +dependencies = [ + "base64 0.13.1", +] + +[[package]] +name = "percent-encoding" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" + +[[package]] +name = "pin-project" +version = "0.4.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ef0f924a5ee7ea9cbcea77529dba45f8a9ba9f622419fe3386ca581a3ae9d5a" +dependencies = [ + "pin-project-internal 0.4.30", +] + +[[package]] +name = "pin-project" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "030ad2bc4db10a8944cb0d837f158bdfec4d4a4873ab701a95046770d11f8842" +dependencies = [ + "pin-project-internal 1.1.2", +] + +[[package]] +name = "pin-project-internal" +version = "0.4.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "851c8d0ce9bebe43790dedfc86614c23494ac9f423dd618d3a61fc693eafe61e" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "pin-project-internal" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec2e072ecce94ec471b13398d5402c188e76ac03cf74dd1a975161b23a3f6d9c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.25", +] + +[[package]] +name = "pin-project-lite" +version = "0.2.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c40d25201921e5ff0c862a505c6557ea88568a4e3ace775ab55e93f2f4f9d57" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "ppv-lite86" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" + +[[package]] +name = "pq-sys" +version = "0.4.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31c0052426df997c0cbd30789eb44ca097e3541717a7b8fa36b1c464ee7edebd" +dependencies = [ + "vcpkg", +] + +[[package]] +name = "proc-macro2" +version = "1.0.64" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78803b62cbf1f46fde80d7c0e803111524b9877184cfe7c3033659490ac7a7da" +dependencies = [ + "unicode-ident", +] + 
+[[package]] +name = "prost" +version = "0.11.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b82eaa1d779e9a4bc1c3217db8ffbeabaae1dca241bf70183242128d48681cd" +dependencies = [ + "bytes", + "prost-derive", +] + +[[package]] +name = "prost-derive" +version = "0.11.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" +dependencies = [ + "anyhow", + "itertools", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "prost-types" +version = "0.11.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "213622a1460818959ac1181aaeb2dc9c7f63df720db7d788b3e24eacd1983e13" +dependencies = [ + "prost", +] + +[[package]] +name = "quote" +version = "1.0.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "573015e8ab27661678357f27dc26460738fd2b6c86e46f386fde94cb5d913105" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "r2d2" +version = "0.8.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51de85fb3fb6524929c8a2eb85e6b6d363de4e8c48f9e2c2eac4944abc181c93" +dependencies = [ + "log", + "parking_lot", + "scheduled-thread-pool", +] + +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha", + "rand_core", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom", +] + +[[package]] +name = "redox_syscall" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" +dependencies = [ + "bitflags 1.3.2", +] + +[[package]] +name = "reqwest" +version = "0.11.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cde824a14b7c14f85caff81225f411faacc04a2013f41670f41443742b1c1c55" +dependencies = [ + "async-compression", + "base64 0.21.2", + "bytes", + "encoding_rs", + "futures-core", + "futures-util", + "h2", + "http", + "http-body", + "hyper", + "hyper-rustls", + "ipnet", + "js-sys", + "log", + "mime", + "mime_guess", + "once_cell", + "percent-encoding", + "pin-project-lite", + "rustls 0.21.5", + "rustls-pemfile", + "serde", + "serde_json", + "serde_urlencoded", + "tokio", + "tokio-rustls 0.24.1", + "tokio-util", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "wasm-streams", + "web-sys", + "webpki-roots", + "winreg", +] + +[[package]] +name = "ring" +version = "0.16.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" +dependencies = [ + "cc", + "libc", + "once_cell", + "spin", + "untrusted", + "web-sys", + "winapi", +] + +[[package]] +name = "rustc-demangle" +version = "0.1.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" + 
+[[package]] +name = "rustix" +version = "0.38.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a962918ea88d644592894bc6dc55acc6c0956488adcebbfb6e273506b7fd6e5" +dependencies = [ + "bitflags 2.3.3", + "errno", + "libc", + "linux-raw-sys", + "windows-sys", +] + +[[package]] +name = "rustls" +version = "0.20.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fff78fc74d175294f4e83b28343315ffcfb114b156f0185e9741cb5570f50e2f" +dependencies = [ + "log", + "ring", + "sct", + "webpki", +] + +[[package]] +name = "rustls" +version = "0.21.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79ea77c539259495ce8ca47f53e66ae0330a8819f67e23ac96ca02f50e7b7d36" +dependencies = [ + "log", + "ring", + "rustls-webpki", + "sct", +] + +[[package]] +name = "rustls-native-certs" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" +dependencies = [ + "openssl-probe", + "rustls-pemfile", + "schannel", + "security-framework", +] + +[[package]] +name = "rustls-pemfile" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d3987094b1d07b653b7dfdc3f70ce9a1da9c51ac18c1b06b662e4f9a0e9f4b2" +dependencies = [ + "base64 0.21.2", +] + +[[package]] +name = "rustls-webpki" +version = "0.101.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15f36a6828982f422756984e47912a7a51dcbc2a197aa791158f8ca61cd8204e" +dependencies = [ + "ring", + "untrusted", +] + +[[package]] +name = "rustversion" +version = "1.0.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc31bd9b61a32c31f9650d18add92aa83a49ba979c143eefd27fe7177b05bd5f" + +[[package]] +name = "ryu" +version = "1.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe232bdf6be8c8de797b22184ee71118d63780ea42ac85b61d1baa6d3b782ae9" + +[[package]] +name = "schannel" +version = "0.1.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" +dependencies = [ + "windows-sys", +] + +[[package]] +name = "scheduled-thread-pool" +version = "0.2.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3cbc66816425a074528352f5789333ecff06ca41b36b0b0efdfbb29edc391a19" +dependencies = [ + "parking_lot", +] + +[[package]] +name = "scopeguard" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" + +[[package]] +name = "sct" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" +dependencies = [ + "ring", + "untrusted", +] + +[[package]] +name = "secret-vault-value" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eddaf2631e82016a3262ce75575ec245ceef9a7115ddf8576851302efe6bdece" +dependencies = [ + "prost", + "prost-types", + "serde", + "serde_json", + "zeroize", +] + +[[package]] +name = "security-framework" +version = "2.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fc758eb7bffce5b308734e9b0c1468893cae9ff70ebf13e7090be8dcbcc83a8" +dependencies = [ + "bitflags 1.3.2", + "core-foundation", + "core-foundation-sys", + "libc", + 
"security-framework-sys", +] + +[[package]] +name = "security-framework-sys" +version = "2.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f51d0c0d83bec45f16480d0ce0058397a69e48fcdc52d1dc8855fb68acbd31a7" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "serde" +version = "1.0.171" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30e27d1e4fd7659406c492fd6cfaf2066ba8773de45ca75e855590f856dc34a9" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.171" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "389894603bd18c46fa56231694f8d827779c0951a667087194cf9de94ed24682" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.25", +] + +[[package]] +name = "serde_json" +version = "1.0.102" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5062a995d481b2308b6064e9af76011f2921c35f97b0468811ed9f6cd91dfed" +dependencies = [ + "indexmap 2.0.0", + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "serde_spanned" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96426c9936fd7a0124915f9185ea1d20aa9445cc9821142f0a73bc9207a2e186" +dependencies = [ + "serde", +] + +[[package]] +name = "serde_urlencoded" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" +dependencies = [ + "form_urlencoded", + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "sha2" +version = "0.9.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" +dependencies = [ + "block-buffer", + "cfg-if", + "cpufeatures", + "digest", + "opaque-debug", +] + +[[package]] +name = "signal-hook-registry" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" +dependencies = [ + "libc", +] + +[[package]] +name = "simple_asn1" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adc4e5204eb1910f40f9cfa375f6f05b68c3abac4b6fd879c8ff5e7ae8a0a085" +dependencies = [ + "num-bigint", + "num-traits", + "thiserror", + "time 0.3.23", +] + +[[package]] +name = "slab" +version = "0.4.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6528351c9bc8ab22353f9d776db39a20288e8d6c37ef8cfe3317cf875eecfc2d" +dependencies = [ + "autocfg", +] + +[[package]] +name = "smallvec" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62bb4feee49fdd9f707ef802e22365a35de4b7b299de4763d44bfea899442ff9" + +[[package]] +name = "socket2" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" +dependencies = [ + "libc", + "winapi", +] + +[[package]] +name = "spin" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" + +[[package]] +name = "strsim" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" + +[[package]] +name = "syn" +version = "1.0.109" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15e3fc8c0c74267e2df136e5e5fb656a464158aa57624053375eb9c8c6e25ae2" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "sync_wrapper" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" + +[[package]] +name = "thiserror" +version = "1.0.43" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a35fc5b8971143ca348fa6df4f024d4d55264f3468c71ad1c2f365b0a4d58c42" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.43" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "463fe12d7993d3b327787537ce8dd4dfa058de32fc2b195ef3cde03dc4771e8f" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.25", +] + +[[package]] +name = "time" +version = "0.1.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b797afad3f312d1c66a56d11d0316f916356d11bd158fbc6ca6389ff6bf805a" +dependencies = [ + "libc", + "wasi 0.10.0+wasi-snapshot-preview1", + "winapi", +] + +[[package]] +name = "time" +version = "0.3.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59e399c068f43a5d116fedaf73b203fa4f9c519f17e2b34f63221d3792f81446" +dependencies = [ + "itoa", + "serde", + "time-core", + "time-macros", +] + +[[package]] +name = "time-core" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" + +[[package]] +name = "time-macros" +version = "0.2.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96ba15a897f3c86766b757e5ac7221554c6750054d74d5b28844fce5fb36a6c4" +dependencies = [ + "time-core", +] + +[[package]] +name = "tinyvec" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" + +[[package]] +name = "tokio" +version = "1.29.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "532826ff75199d5833b9d2c5fe410f29235e25704ee5f0ef599fb51c21f4a4da" +dependencies = [ + "autocfg", + "backtrace", + "bytes", + "libc", + "mio", + "num_cpus", + "parking_lot", + "pin-project-lite", + "signal-hook-registry", + "socket2", + "tokio-macros", + "windows-sys", +] + +[[package]] +name = "tokio-io-timeout" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" +dependencies = [ + "pin-project-lite", + "tokio", +] + +[[package]] +name = "tokio-macros" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.25", +] + +[[package]] +name 
= "tokio-rustls" +version = "0.23.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" +dependencies = [ + "rustls 0.20.8", + "tokio", + "webpki", +] + +[[package]] +name = "tokio-rustls" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" +dependencies = [ + "rustls 0.21.5", + "tokio", +] + +[[package]] +name = "tokio-stream" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" +dependencies = [ + "futures-core", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "tokio-util" +version = "0.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "806fe8c2c87eccc8b3267cbae29ed3ab2d0bd37fca70ab622e46aaa9375ddb7d" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "pin-project-lite", + "tokio", + "tracing", +] + +[[package]] +name = "toml" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c17e963a819c331dcacd7ab957d80bc2b9a9c1e71c804826d2f283dd65306542" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit", +] + +[[package]] +name = "toml_datetime" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b" +dependencies = [ + "serde", +] + +[[package]] +name = "toml_edit" +version = "0.19.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c500344a19072298cd05a7224b3c0c629348b78692bf48466c5238656e315a78" +dependencies = [ + "indexmap 2.0.0", + "serde", + "serde_spanned", + "toml_datetime", + "winnow", +] + +[[package]] +name = "tonic" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f219fad3b929bef19b1f86fbc0358d35daed8f2cac972037ac0dc10bbb8d5fb" +dependencies = [ + "async-stream", + "async-trait", + "axum", + "base64 0.13.1", + "bytes", + "flate2", + "futures-core", + "futures-util", + "h2", + "http", + "http-body", + "hyper", + "hyper-timeout", + "percent-encoding", + "pin-project 1.1.2", + "prost", + "prost-derive", + "rustls-native-certs", + "rustls-pemfile", + "tokio", + "tokio-rustls 0.23.4", + "tokio-stream", + "tokio-util", + "tower", + "tower-layer", + "tower-service", + "tracing", + "tracing-futures", +] + +[[package]] +name = "tonic" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3082666a3a6433f7f511c7192923fa1fe07c69332d3c6a2e6bb040b569199d5a" +dependencies = [ + "async-stream", + "async-trait", + "axum", + "base64 0.21.2", + "bytes", + "futures-core", + "futures-util", + "h2", + "http", + "http-body", + "hyper", + "hyper-timeout", + "percent-encoding", + "pin-project 1.1.2", + "prost", + "rustls-native-certs", + "rustls-pemfile", + "tokio", + "tokio-rustls 0.24.1", + "tokio-stream", + "tower", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" +dependencies = [ + "futures-core", + "futures-util", + "indexmap 1.9.3", + "pin-project 1.1.2", + "pin-project-lite", + "rand", + "slab", + "tokio", + "tokio-util", + 
"tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-layer" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" + +[[package]] +name = "tower-service" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" + +[[package]] +name = "tower-util" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1093c19826d33807c72511e68f73b4a0469a3f22c2bd5f7d5212178b4b89674" +dependencies = [ + "futures-core", + "futures-util", + "pin-project 0.4.30", + "tower-service", +] + +[[package]] +name = "tracing" +version = "0.1.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" +dependencies = [ + "cfg-if", + "log", + "pin-project-lite", + "tracing-attributes", + "tracing-core", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.25", +] + +[[package]] +name = "tracing-core" +version = "0.1.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0955b8137a1df6f1a2e9a37d8a6656291ff0297c1a97c24e0d8425fe2312f79a" +dependencies = [ + "once_cell", +] + +[[package]] +name = "tracing-futures" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" +dependencies = [ + "pin-project 1.1.2", + "tracing", +] + +[[package]] +name = "try-lock" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" + +[[package]] +name = "typenum" +version = "1.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" + +[[package]] +name = "unicase" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" +dependencies = [ + "version_check", +] + +[[package]] +name = "unicode-bidi" +version = "0.3.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" + +[[package]] +name = "unicode-ident" +version = "1.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22049a19f4a68748a168c0fc439f9516686aa045927ff767eca0a85101fb6e73" + +[[package]] +name = "unicode-normalization" +version = "0.1.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" +dependencies = [ + "tinyvec", +] + +[[package]] +name = "untrusted" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" + +[[package]] +name = "url" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50bff7831e19200a85b17131d085c25d7811bc4e186efdaf54bbd132994a88cb" +dependencies = [ + "form_urlencoded", + 
"idna", + "percent-encoding", +] + +[[package]] +name = "utf8parse" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" + +[[package]] +name = "vcpkg" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" + +[[package]] +name = "version_check" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" + +[[package]] +name = "want" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" +dependencies = [ + "try-lock", +] + +[[package]] +name = "wasi" +version = "0.10.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" + +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + +[[package]] +name = "wasm-bindgen" +version = "0.2.87" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" +dependencies = [ + "cfg-if", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.87" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" +dependencies = [ + "bumpalo", + "log", + "once_cell", + "proc-macro2", + "quote", + "syn 2.0.25", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c02dbc21516f9f1f04f187958890d7e6026df8d16540b7ad9492bc34a67cea03" +dependencies = [ + "cfg-if", + "js-sys", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.87" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.87" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.25", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.87" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" + +[[package]] +name = "wasm-streams" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6bbae3363c08332cadccd13b67db371814cd214c2524020932f0804b8cf7c078" +dependencies = [ + "futures-util", + "js-sys", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + +[[package]] +name = "web-sys" +version = "0.3.64" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b" +dependencies = [ + "js-sys", + "wasm-bindgen", 
+] + +[[package]] +name = "webpki" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd" +dependencies = [ + "ring", + "untrusted", +] + +[[package]] +name = "webpki-roots" +version = "0.22.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c71e40d7d2c34a5106301fb632274ca37242cd0c9d3e64dbece371a40a2d87" +dependencies = [ + "webpki", +] + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-targets" +version = "0.48.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05d4b17490f70499f20b9e791dcf6a299785ce8af4d709018206dc5b4953e95f" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" + +[[package]] +name = "windows_i686_gnu" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" + +[[package]] +name = "windows_i686_msvc" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" + +[[package]] +name = "winnow" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81a2094c43cc94775293eaa0e499fbc30048a6d824ac82c0351a8c0bf9112529" +dependencies = [ + "memchr", +] + +[[package]] +name = "winreg" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "80d0f4e272c85def139476380b12f9ac60926689dd2e01d4923222f40580869d" +dependencies = [ + "winapi", +] + +[[package]] +name = "zeroize" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9" +dependencies = [ + "zeroize_derive", +] + +[[package]] +name = "zeroize_derive" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.25", +] diff --git a/rust/processor/Cargo.toml b/rust/processor/Cargo.toml new file mode 100644 index 000000000..e944d5a86 --- /dev/null +++ b/rust/processor/Cargo.toml @@ -0,0 +1,82 @@ +[package] +name = "processor" +description = "Indexer GRPC processor in Rust." +version = "1.0.0" + +# Workspace inherited keys +authors = ["Aptos Labs "] +edition = "2021" +homepage = "https://aptoslabs.com" +license = "Apache-2.0" +publish = false +repository = "https://github.com/aptos-labs/aptos-core" +rust-version = { workspace = true } + +[dependencies] +ahash = { workspace = true } +anyhow = { workspace = true } +aptos-moving-average = { workspace = true } +aptos-protos = { workspace = true } +async-trait = { workspace = true } +bcs = { workspace = true } +bigdecimal = { workspace = true } +bitflags = { workspace = true } +chrono = { workspace = true } +clap = { workspace = true } +diesel = { workspace = true } +diesel-async = { workspace = true } +diesel_migrations = { workspace = true } +enum_dispatch = { workspace = true } +field_count = { workspace = true } +futures = { workspace = true } +futures-util = { workspace = true } +google-cloud-googleapis = { workspace = true } +google-cloud-pubsub = { workspace = true } +hex = { workspace = true } +itertools = { workspace = true } +kanal = { workspace = true } +lazy_static = { workspace = true } +num_cpus = { workspace = true } +once_cell = { workspace = true } +prometheus = { workspace = true } +prost = { workspace = true } +regex = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +server-framework = { workspace = true } +sha2 = { workspace = true } +sha3 = { workspace = true } +strum = { workspace = true } +tokio = { workspace = true } +tonic = { workspace = true } +tracing = { workspace = true } +unescape = { workspace = true } +url = { workspace = true } + +# Postgres SSL support +native-tls = { workspace = true } +postgres-native-tls = { workspace = true } +tiny-keccak = { workspace = true } +tokio-postgres = { workspace = true } + +[target.'cfg(unix)'.dependencies] +jemallocator = { workspace = true } + +# Parquet support +parquet = { workspace = true } +arrow = { workspace = true } +num = { workspace = true } +google-cloud-storage = { workspace = true } +hyper = { workspace = true } +lazy_static = { workspace = true } +parquet_derive = { workspace = true } +canonical_json = { workspace = true } +allocative = { workspace = true } +allocative_derive = { workspace = true } +uuid = { workspace = true } 
+[features]
+libpq = ["diesel/postgres"]
+# When using the default features we enable the diesel/postgres feature. We configure
+# it in a feature so the CLI can opt out, since it cannot tolerate the libpq dep.
+# Recall that features should always be additive.
+default = ["libpq"]
diff --git a/rust/processor/README.md b/rust/processor/README.md
new file mode 100644
index 000000000..ad6338be6
--- /dev/null
+++ b/rust/processor/README.md
@@ -0,0 +1,76 @@
+# Indexer GRPC Parser
+
+The Indexer GRPC parser is an indexer data processor that leverages the indexer GRPC data.
+
+- **Note: We'll launch an official endpoint soon; stay tuned!**
+
+## Tutorial
+
+### Prerequisites
+
+- A running PostgreSQL instance with a valid database. A setup tutorial can be
+  found [here](https://github.com/aptos-labs/aptos-core/tree/main/crates/indexer#postgres).
+- A config YAML file, for example `parser.yaml`:
+
+```yaml
+health_check_port: 8084
+server_config:
+  processor_config:
+    type: default_processor
+  postgres_connection_string: postgresql://postgres:@localhost:5432/postgres_v2
+  indexer_grpc_data_service_address: 127.0.0.1:50051
+  indexer_grpc_http2_ping_interval_in_secs: 60
+  indexer_grpc_http2_ping_timeout_in_secs: 10
+  number_concurrent_processing_tasks: 10
+  auth_token: AUTH_TOKEN
+  starting_version: 0 # optional
+  ending_version: 0 # optional
+  transaction_filter:
+    # Only allow transactions from these contract addresses
+    # focus_contract_addresses:
+    #   - "0x0"
+    # Skip transactions from these sender addresses
+    skip_sender_addresses:
+      - "0x07"
+    # Skip all transactions that aren't user transactions
+    focus_user_transactions: false
+  deprecated_tables: [
+    "MOVE_RESOURCES",
+    "WRITE_SET_CHANGES",
+    "TRANSACTIONS",
+  ]
+```
+
+#### Config Explanation
+
+- `type` in `processor_config`: purpose of this processor; also used for monitoring purposes.
+- `postgres_connection_string`: PostgreSQL DB connection string.
+- `indexer_grpc_data_service_address`: Data service non-TLS endpoint address.
+- `indexer_grpc_http2_ping_interval_in_secs`: client-side gRPC HTTP2 ping interval.
+- `indexer_grpc_http2_ping_timeout_in_secs`: client-side gRPC HTTP2 ping timeout.
+- `auth_token`: Auth token used for the connection.
+- `starting_version`: start the processor at this version.
+- `ending_version`: stop the processor after this version.
+- `number_concurrent_processing_tasks`: number of tasks that parse and insert; 1 means sequential processing, otherwise
+  transactions are split into tasks and inserted in arbitrary order.
+- `deprecated_tables`: a list of tables to skip writing to AlloyDB.
+
+### Use docker image for existing parsers (only for **Unix/Linux**)
+
+- Use the provided `Dockerfile` and `config.yaml` (update accordingly)
+  - Build: `cd ecosystem/indexer-grpc/indexer-grpc-parser && docker build . -t indexer-processor`
+  - Run: `docker run indexer-processor:latest`
+
+### Use source code for existing parsers
+
+- Use the provided `Dockerfile` and `config.yaml` (update accordingly)
+- Run `cd rust/processor && cargo run --release -- -c config.yaml`
+
+### Use a custom parser
+
+- Check our [indexer processors](https://github.com/aptos-labs/aptos-indexer-processors)!
+
+### Manually running diesel-cli
+
+- `cd` into the database folder you use under `src/db/` (e.g. `src/db/postgres`), then run the `diesel` commands from there.
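For the diesel-cli step, a minimal sketch of the commands looks like the following. It assumes `diesel_cli` is installed with Postgres support, that the migrations live in the example folder mentioned above, and that the database URL matches the `postgres_connection_string` from your config; adjust both to your setup.

```bash
# One-time setup: install diesel_cli with only the Postgres backend.
cargo install diesel_cli --no-default-features --features postgres

# Run pending migrations from the database folder used by this processor
# (example path and URL; substitute your own).
cd rust/processor/src/db/postgres
diesel migration run --database-url postgresql://postgres:@localhost:5432/postgres_v2
```
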
diff --git a/rust/processor/parser.yaml b/rust/processor/parser.yaml new file mode 100644 index 000000000..63fef1540 --- /dev/null +++ b/rust/processor/parser.yaml @@ -0,0 +1,26 @@ +# This is a template yaml for the indexer processor. It assumes you have a data service +# running locally, for example as part of local testnet you ran with this command: +# aptos node run-local-testnet + health_check_port: 8084 + server_config: + processor_config: + type: default_parquet_processor + bucket_name: "aptos-indexer-data-etl-yuun" + parquet_handler_response_channel_size: 100 + postgres_connection_string: postgresql://postgres:@localhost:5432/postgres + # indexer_grpc_data_service_address: 127.0.0.1:50057 + indexer_grpc_data_service_address: "https://grpc.testnet.aptoslabs.com:443" + indexer_grpc_http2_ping_interval_in_secs: 60 + indexer_grpc_http2_ping_timeout_in_secs: 10 + grpc_response_item_timeout_in_secs: 300 + number_concurrent_processing_tasks: 10 + pb_channel_txn_chunk_size: 1000000 + auth_token: aptoslabs_L7pzPsqiShp_EwzUCbhb6ehTe8PSvFp6ZeUr3B3gBdGaX + starting_version: 2000000 + is_parquet_processor: true +# deprecated_tables: [ +## "MOVE_RESOURCES", +## "WRITE_SET_CHANGES", +## "TRANSACTIONS" +# ] + diff --git a/rust/processor/src/config.rs b/rust/processor/src/config.rs new file mode 100644 index 000000000..93d209e19 --- /dev/null +++ b/rust/processor/src/config.rs @@ -0,0 +1,167 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + gap_detectors::DEFAULT_GAP_DETECTION_BATCH_SIZE, processors::ProcessorConfig, + transaction_filter::TransactionFilter, worker::Worker, +}; +use ahash::AHashMap; +use anyhow::{Context, Result}; +use serde::{Deserialize, Serialize}; +use server_framework::RunnableConfig; +use std::{collections::HashSet, time::Duration}; +use url::Url; + +pub const QUERY_DEFAULT_RETRIES: u32 = 5; +pub const QUERY_DEFAULT_RETRY_DELAY_MS: u64 = 500; + +#[derive(Clone, Debug, Deserialize, Serialize)] +#[serde(deny_unknown_fields)] +pub struct IndexerGrpcProcessorConfig { + pub processor_config: ProcessorConfig, + pub postgres_connection_string: String, + // TODO: Add TLS support. + pub indexer_grpc_data_service_address: Url, + #[serde(flatten)] + pub grpc_http2_config: IndexerGrpcHttp2Config, + pub auth_token: String, + // Version to start indexing from + pub starting_version: Option, + // Version to end indexing at + pub ending_version: Option, + // Number of tasks waiting to pull transaction batches from the channel and process them + pub number_concurrent_processing_tasks: Option, + // Size of the pool for writes/reads to the DB. Limits maximum number of queries in flight + pub db_pool_size: Option, + // Maximum number of batches "missing" before we assume we have an issue with gaps and abort + #[serde(default = "IndexerGrpcProcessorConfig::default_gap_detection_batch_size")] + pub gap_detection_batch_size: u64, + // Maximum number of batches "missing" before we assume we have an issue with gaps and abort + #[serde(default = "IndexerGrpcProcessorConfig::default_gap_detection_batch_size")] + pub parquet_gap_detection_batch_size: u64, + // Number of protobuff transactions to send per chunk to the processor tasks + #[serde(default = "IndexerGrpcProcessorConfig::default_pb_channel_txn_chunk_size")] + pub pb_channel_txn_chunk_size: usize, + // Number of rows to insert, per chunk, for each DB table. 
Default per table is ~32,768 (2**16/2) + #[serde(default = "AHashMap::new")] + pub per_table_chunk_sizes: AHashMap, + pub enable_verbose_logging: Option, + + #[serde(default = "IndexerGrpcProcessorConfig::default_grpc_response_item_timeout_in_secs")] + pub grpc_response_item_timeout_in_secs: u64, + + #[serde(default)] + pub transaction_filter: TransactionFilter, + // String vector for deprecated tables to skip db writes + #[serde(default)] + pub deprecated_tables: HashSet, + + pub parquet_bucket_name: Option, + + pub is_parquet_processor: Option, +} + +impl IndexerGrpcProcessorConfig { + pub const fn default_gap_detection_batch_size() -> u64 { + DEFAULT_GAP_DETECTION_BATCH_SIZE + } + + pub const fn default_query_retries() -> u32 { + QUERY_DEFAULT_RETRIES + } + + pub const fn default_query_retry_delay_ms() -> u64 { + QUERY_DEFAULT_RETRY_DELAY_MS + } + + /// Make the default very large on purpose so that by default it's not chunked + /// This prevents any unexpected changes in behavior + pub const fn default_pb_channel_txn_chunk_size() -> usize { + 100_000 + } + + /// Default timeout for grpc response item in seconds. Defaults to 60 seconds. + pub const fn default_grpc_response_item_timeout_in_secs() -> u64 { + 60 + } +} + +#[async_trait::async_trait] +impl RunnableConfig for IndexerGrpcProcessorConfig { + async fn run(&self) -> Result<()> { + let mut worker = Worker::new( + self.processor_config.clone(), + self.postgres_connection_string.clone(), + self.indexer_grpc_data_service_address.clone(), + self.grpc_http2_config.clone(), + self.auth_token.clone(), + self.starting_version, + self.ending_version, + self.number_concurrent_processing_tasks, + self.db_pool_size, + self.gap_detection_batch_size, + self.parquet_gap_detection_batch_size, + self.pb_channel_txn_chunk_size, + self.per_table_chunk_sizes.clone(), + self.enable_verbose_logging, + self.transaction_filter.clone(), + self.grpc_response_item_timeout_in_secs, + self.deprecated_tables.clone(), + self.is_parquet_processor, + ) + .await + .context("Failed to build worker")?; + worker.run().await; + Ok(()) + } + + fn get_server_name(&self) -> String { + // Get the part before the first _ and trim to 12 characters. + let before_underscore = self + .processor_config + .name() + .split('_') + .next() + .unwrap_or("unknown"); + before_underscore[..before_underscore.len().min(12)].to_string() + } +} + +#[derive(Clone, Debug, Deserialize, Serialize)] +#[serde(deny_unknown_fields)] +#[serde(default)] +pub struct IndexerGrpcHttp2Config { + /// Indexer GRPC http2 ping interval in seconds. Defaults to 30. + /// Tonic ref: https://docs.rs/tonic/latest/tonic/transport/channel/struct.Endpoint.html#method.http2_keep_alive_interval + indexer_grpc_http2_ping_interval_in_secs: u64, + + /// Indexer GRPC http2 ping timeout in seconds. Defaults to 10. + indexer_grpc_http2_ping_timeout_in_secs: u64, + + /// Seconds before timeout for grpc connection. 
+ indexer_grpc_connection_timeout_secs: u64, +} + +impl IndexerGrpcHttp2Config { + pub fn grpc_http2_ping_interval_in_secs(&self) -> Duration { + Duration::from_secs(self.indexer_grpc_http2_ping_interval_in_secs) + } + + pub fn grpc_http2_ping_timeout_in_secs(&self) -> Duration { + Duration::from_secs(self.indexer_grpc_http2_ping_timeout_in_secs) + } + + pub fn grpc_connection_timeout_secs(&self) -> Duration { + Duration::from_secs(self.indexer_grpc_connection_timeout_secs) + } +} + +impl Default for IndexerGrpcHttp2Config { + fn default() -> Self { + Self { + indexer_grpc_http2_ping_interval_in_secs: 30, + indexer_grpc_http2_ping_timeout_in_secs: 10, + indexer_grpc_connection_timeout_secs: 5, + } + } +} diff --git a/rust/processor/src/db/common/mod.rs b/rust/processor/src/db/common/mod.rs new file mode 100644 index 000000000..c446ac883 --- /dev/null +++ b/rust/processor/src/db/common/mod.rs @@ -0,0 +1 @@ +pub mod models; diff --git a/rust/processor/src/db/common/models/account_transaction_models/account_transactions.rs b/rust/processor/src/db/common/models/account_transaction_models/account_transactions.rs new file mode 100644 index 000000000..fcf0c4666 --- /dev/null +++ b/rust/processor/src/db/common/models/account_transaction_models/account_transactions.rs @@ -0,0 +1,153 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +// This is required because a diesel macro makes clippy sad +#![allow(clippy::extra_unused_lifetimes)] +#![allow(clippy::unused_unit)] + +use crate::{ + db::common::models::{ + object_models::v2_object_utils::ObjectWithMetadata, + user_transactions_models::user_transactions::UserTransaction, + }, + schema::account_transactions, + utils::{counters::PROCESSOR_UNKNOWN_TYPE_COUNT, util::standardize_address}, +}; +use ahash::AHashMap; +use aptos_protos::transaction::v1::{ + transaction::TxnData, write_set_change::Change, DeleteResource, Event, Transaction, + WriteResource, +}; +use field_count::FieldCount; +use serde::{Deserialize, Serialize}; + +pub type AccountTransactionPK = (String, i64); + +#[derive(Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key(account_address, transaction_version))] +#[diesel(table_name = account_transactions)] +pub struct AccountTransaction { + pub transaction_version: i64, + pub account_address: String, +} + +impl AccountTransaction { + /// This table will record every transaction that touch an account which could be + /// a user account, an object, or a resource account. + /// We will consider all transactions that modify a resource or event associated with a particular account. + /// We will do 1 level of redirection for now (e.g. if it's an object, we will record the owner as account address). + /// We will also consider transactions that the account signed or is part of a multi sig / multi agent. 
+ /// TODO: recursively find the parent account of an object + /// TODO: include table items in the detection path + pub fn from_transaction(transaction: &Transaction) -> AHashMap { + let txn_version = transaction.version as i64; + let txn_data = match transaction.txn_data.as_ref() { + Some(data) => data, + None => { + PROCESSOR_UNKNOWN_TYPE_COUNT + .with_label_values(&["AccountTransaction"]) + .inc(); + tracing::warn!( + transaction_version = transaction.version, + "Transaction data doesn't exist", + ); + return AHashMap::new(); + }, + }; + let transaction_info = transaction.info.as_ref().unwrap_or_else(|| { + panic!("Transaction info doesn't exist for version {}", txn_version) + }); + let wscs = &transaction_info.changes; + let (events, signatures) = match txn_data { + TxnData::User(inner) => ( + &inner.events, + UserTransaction::get_signatures( + inner.request.as_ref().unwrap_or_else(|| { + panic!("User request doesn't exist for version {}", txn_version) + }), + txn_version, + transaction.block_height as i64, + ), + ), + TxnData::Genesis(inner) => (&inner.events, vec![]), + TxnData::BlockMetadata(inner) => (&inner.events, vec![]), + _ => { + return AHashMap::new(); + }, + }; + let mut account_transactions = AHashMap::new(); + for sig in &signatures { + account_transactions.insert((sig.signer.clone(), txn_version), Self { + transaction_version: txn_version, + account_address: sig.signer.clone(), + }); + } + for event in events { + account_transactions.extend(Self::from_event(event, txn_version)); + } + for wsc in wscs { + match wsc.change.as_ref().unwrap() { + Change::DeleteResource(res) => { + account_transactions + .extend(Self::from_delete_resource(res, txn_version).unwrap()); + }, + Change::WriteResource(res) => { + account_transactions + .extend(Self::from_write_resource(res, txn_version).unwrap()); + }, + _ => {}, + } + } + account_transactions + } + + /// Base case, record event account address. We don't really have to worry about + /// objects here because it'll be taken care of in the resource section + fn from_event(event: &Event, txn_version: i64) -> AHashMap { + let account_address = + standardize_address(event.key.as_ref().unwrap().account_address.as_str()); + AHashMap::from([((account_address.clone(), txn_version), Self { + transaction_version: txn_version, + account_address, + })]) + } + + /// Base case, record resource account. If the resource is an object, then we record the owner as well + /// This handles partial deletes as well + fn from_write_resource( + write_resource: &WriteResource, + txn_version: i64, + ) -> anyhow::Result> { + let mut result = AHashMap::new(); + let account_address = standardize_address(write_resource.address.as_str()); + result.insert((account_address.clone(), txn_version), Self { + transaction_version: txn_version, + account_address, + }); + if let Some(inner) = &ObjectWithMetadata::from_write_resource(write_resource, txn_version)? + { + result.insert((inner.object_core.get_owner_address(), txn_version), Self { + transaction_version: txn_version, + account_address: inner.object_core.get_owner_address(), + }); + } + Ok(result) + } + + /// Base case, record resource account. + /// TODO: If the resource is an object, then we need to look for the latest owner. 
This isn't really possible + /// right now given we have parallel threads so it'll be very difficult to ensure that we have the correct + /// latest owner + fn from_delete_resource( + delete_resource: &DeleteResource, + txn_version: i64, + ) -> anyhow::Result> { + let mut result = AHashMap::new(); + let account_address = standardize_address(delete_resource.address.as_str()); + result.insert((account_address.clone(), txn_version), Self { + transaction_version: txn_version, + account_address, + }); + Ok(result) + } +} diff --git a/rust/processor/src/db/common/models/account_transaction_models/mod.rs b/rust/processor/src/db/common/models/account_transaction_models/mod.rs new file mode 100644 index 000000000..450747921 --- /dev/null +++ b/rust/processor/src/db/common/models/account_transaction_models/mod.rs @@ -0,0 +1,4 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +pub mod account_transactions; diff --git a/rust/processor/src/db/common/models/ans_models/ans_lookup.rs b/rust/processor/src/db/common/models/ans_models/ans_lookup.rs new file mode 100644 index 000000000..dc7a52cbb --- /dev/null +++ b/rust/processor/src/db/common/models/ans_models/ans_lookup.rs @@ -0,0 +1,324 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +// This is required because a diesel macro makes clippy sad +#![allow(clippy::extra_unused_lifetimes)] +#![allow(clippy::unused_unit)] + +use super::ans_utils::AnsTableItem; +use crate::{ + schema::{ans_lookup, ans_primary_name, current_ans_lookup, current_ans_primary_name}, + utils::util::{get_name_from_unnested_move_type, standardize_address}, +}; +use aptos_protos::transaction::v1::{DeleteTableItem, WriteTableItem}; +use diesel::prelude::*; +use field_count::FieldCount; +use serde::{Deserialize, Serialize}; + +type Domain = String; +type Subdomain = String; +// PK of current_ans_lookup, i.e. domain and subdomain name +type CurrentAnsLookupPK = (Domain, Subdomain); +// PK of current_ans_primary_name, i.e. 
registered_address +type CurrentAnsPrimaryNamePK = String; + +#[derive( + Clone, + Default, + Debug, + Deserialize, + FieldCount, + Identifiable, + Insertable, + Serialize, + PartialEq, + Eq, +)] +#[diesel(primary_key(domain, subdomain))] +#[diesel(table_name = current_ans_lookup)] +#[diesel(treat_none_as_null = true)] +pub struct CurrentAnsLookup { + pub domain: String, + pub subdomain: String, + pub registered_address: Option, + pub last_transaction_version: i64, + pub expiration_timestamp: chrono::NaiveDateTime, + pub token_name: String, + pub is_deleted: bool, +} + +#[derive(Clone, Default, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key(transaction_version, write_set_change_index))] +#[diesel(table_name = ans_lookup)] +#[diesel(treat_none_as_null = true)] +pub struct AnsLookup { + pub transaction_version: i64, + pub write_set_change_index: i64, + pub domain: String, + pub subdomain: String, + pub registered_address: Option, + pub expiration_timestamp: chrono::NaiveDateTime, + pub token_name: String, + pub is_deleted: bool, +} + +#[derive( + Clone, + Default, + Debug, + Deserialize, + FieldCount, + Identifiable, + Insertable, + Serialize, + PartialEq, + Eq, +)] +#[diesel(primary_key(registered_address, token_name))] +#[diesel(table_name = current_ans_primary_name)] +#[diesel(treat_none_as_null = true)] +pub struct CurrentAnsPrimaryName { + pub registered_address: String, + pub domain: Option, + pub subdomain: Option, + pub token_name: Option, + pub is_deleted: bool, + pub last_transaction_version: i64, +} + +#[derive(Clone, Default, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key(transaction_version, write_set_change_index, domain, subdomain))] +#[diesel(table_name = ans_primary_name)] +#[diesel(treat_none_as_null = true)] +pub struct AnsPrimaryName { + pub transaction_version: i64, + pub write_set_change_index: i64, + pub registered_address: String, + pub domain: Option, + pub subdomain: Option, + pub token_name: Option, + pub is_deleted: bool, +} + +impl Ord for CurrentAnsLookup { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.domain + .cmp(&other.domain) + .then(self.subdomain.cmp(&other.subdomain)) + } +} + +impl PartialOrd for CurrentAnsLookup { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl CurrentAnsLookup { + pub fn pk(&self) -> CurrentAnsLookupPK { + (self.domain.clone(), self.subdomain.clone()) + } + + // Parse name record from write table item. + // The table key has the domain and subdomain. + // The table value data has the metadata (expiration, property version, target address). + pub fn parse_name_record_from_write_table_item_v1( + write_table_item: &WriteTableItem, + ans_v1_name_records_table_handle: &str, + txn_version: i64, + write_set_change_index: i64, + ) -> anyhow::Result> { + let table_handle = standardize_address(&write_table_item.handle.to_string()); + if table_handle == standardize_address(ans_v1_name_records_table_handle) { + if let Some(data) = write_table_item.data.as_ref() { + // Get the name only, e.g. 0x1::domain::Name. This will return Name + let key_type_name = get_name_from_unnested_move_type(data.key_type.as_ref()); + + if let Some(AnsTableItem::NameRecordKeyV1(name_record_key)) = + &AnsTableItem::from_table_item(key_type_name, &data.key, txn_version)? 
+ { + let value_type_name: &str = + get_name_from_unnested_move_type(data.value_type.as_ref()); + if let Some(AnsTableItem::NameRecordV1(name_record)) = + &AnsTableItem::from_table_item(value_type_name, &data.value, txn_version)? + { + return Ok(Some(( + Self { + domain: name_record_key.get_domain_trunc(), + subdomain: name_record_key.get_subdomain_trunc(), + registered_address: name_record.get_target_address(), + expiration_timestamp: name_record.get_expiration_time(), + token_name: name_record_key.get_token_name(), + last_transaction_version: txn_version, + is_deleted: false, + }, + AnsLookup { + transaction_version: txn_version, + write_set_change_index, + domain: name_record_key.get_domain_trunc(), + subdomain: name_record_key.get_subdomain_trunc(), + registered_address: name_record.get_target_address(), + expiration_timestamp: name_record.get_expiration_time(), + token_name: name_record_key.get_token_name(), + is_deleted: false, + }, + ))); + } + } + } + } + Ok(None) + } + + // Parse name record from delete table item. + // This change results in marking the domain name record as deleted and setting + // the rest of the fields to default values. + pub fn parse_name_record_from_delete_table_item_v1( + delete_table_item: &DeleteTableItem, + ans_v1_name_records_table_handle: &str, + txn_version: i64, + write_set_change_index: i64, + ) -> anyhow::Result> { + let table_handle = standardize_address(&delete_table_item.handle.to_string()); + if table_handle == standardize_address(ans_v1_name_records_table_handle) { + if let Some(data) = delete_table_item.data.as_ref() { + let key_type_name = get_name_from_unnested_move_type(data.key_type.as_ref()); + + if let Some(AnsTableItem::NameRecordKeyV1(name_record_key)) = + &AnsTableItem::from_table_item(key_type_name, &data.key, txn_version)? + { + return Ok(Some(( + Self { + domain: name_record_key.get_domain_trunc(), + subdomain: name_record_key.get_subdomain_trunc(), + registered_address: None, + expiration_timestamp: chrono::NaiveDateTime::default(), + token_name: name_record_key.get_token_name(), + last_transaction_version: txn_version, + is_deleted: true, + }, + AnsLookup { + transaction_version: txn_version, + write_set_change_index, + domain: name_record_key.get_domain_trunc(), + subdomain: name_record_key.get_subdomain_trunc(), + registered_address: None, + expiration_timestamp: chrono::NaiveDateTime::default(), + token_name: name_record_key.get_token_name(), + is_deleted: true, + }, + ))); + } + } + } + Ok(None) + } +} + +impl Ord for CurrentAnsPrimaryName { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.registered_address.cmp(&other.registered_address) + } +} + +impl PartialOrd for CurrentAnsPrimaryName { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl CurrentAnsPrimaryName { + pub fn pk(&self) -> CurrentAnsPrimaryNamePK { + self.registered_address.clone() + } + + // Parse the primary name reverse record from write table item. + // The table key is the target address the primary name points to. + // The table value data has the domain and subdomain of the primary name. 
+ pub fn parse_primary_name_record_from_write_table_item_v1( + write_table_item: &WriteTableItem, + ans_v1_primary_names_table_handle: &str, + txn_version: i64, + write_set_change_index: i64, + ) -> anyhow::Result> { + let table_handle = standardize_address(&write_table_item.handle.to_string()); + if table_handle == standardize_address(ans_v1_primary_names_table_handle) { + if let Some(data) = write_table_item.data.as_ref() { + // Return early if key is not address type. This should not be possible but just a precaution + // in case we input the wrong table handle + if data.key_type != "address" { + return Ok(None); + } + let decoded_key: String = serde_json::from_str(data.key.as_str()).unwrap(); + let registered_address = standardize_address(decoded_key.as_str()); + let value_type_name = get_name_from_unnested_move_type(data.value_type.as_ref()); + if let Some(AnsTableItem::NameRecordKeyV1(name_record_key)) = + &AnsTableItem::from_table_item(value_type_name, &data.value, txn_version)? + { + return Ok(Some(( + Self { + registered_address: registered_address.clone(), + domain: Some(name_record_key.get_domain_trunc()), + subdomain: Some(name_record_key.get_subdomain_trunc()), + token_name: Some(name_record_key.get_token_name()), + last_transaction_version: txn_version, + is_deleted: false, + }, + AnsPrimaryName { + transaction_version: txn_version, + write_set_change_index, + registered_address, + domain: Some(name_record_key.get_domain_trunc()), + subdomain: Some(name_record_key.get_subdomain_trunc()), + token_name: Some(name_record_key.get_token_name()), + is_deleted: false, + }, + ))); + } + } + } + Ok(None) + } + + // Parse primary name from delete table item + // We need to lookup which domain the address points to so we can mark it as non-primary. + pub fn parse_primary_name_record_from_delete_table_item_v1( + delete_table_item: &DeleteTableItem, + ans_v1_primary_names_table_handle: &str, + txn_version: i64, + write_set_change_index: i64, + ) -> anyhow::Result> { + let table_handle = standardize_address(&delete_table_item.handle.to_string()); + if table_handle == standardize_address(ans_v1_primary_names_table_handle) { + if let Some(data) = delete_table_item.data.as_ref() { + // Return early if key is not address type. 
This should not be possible but just a precaution + // in case we input the wrong table handle + if data.key_type != "address" { + return Ok(None); + } + let decoded_key: String = serde_json::from_str(data.key.as_str()).unwrap(); + let registered_address = standardize_address(decoded_key.as_str()); + return Ok(Some(( + Self { + registered_address: registered_address.clone(), + domain: None, + subdomain: None, + token_name: None, + last_transaction_version: txn_version, + is_deleted: true, + }, + AnsPrimaryName { + transaction_version: txn_version, + write_set_change_index, + registered_address, + domain: None, + subdomain: None, + token_name: None, + is_deleted: true, + }, + ))); + } + } + Ok(None) + } +} diff --git a/rust/processor/src/db/common/models/ans_models/ans_lookup_v2.rs b/rust/processor/src/db/common/models/ans_models/ans_lookup_v2.rs new file mode 100644 index 000000000..7b68d7891 --- /dev/null +++ b/rust/processor/src/db/common/models/ans_models/ans_lookup_v2.rs @@ -0,0 +1,330 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +// This is required because a diesel macro makes clippy sad +#![allow(clippy::extra_unused_lifetimes)] +#![allow(clippy::unused_unit)] + +use super::{ + ans_lookup::{AnsLookup, AnsPrimaryName, CurrentAnsLookup, CurrentAnsPrimaryName}, + ans_utils::{get_token_name, NameRecordV2, SetReverseLookupEvent, SubdomainExtV2}, +}; +use crate::{ + db::common::models::token_v2_models::v2_token_utils::TokenStandard, + schema::{ + ans_lookup_v2, ans_primary_name_v2, current_ans_lookup_v2, current_ans_primary_name_v2, + }, + utils::util::standardize_address, +}; +use ahash::AHashMap; +use aptos_protos::transaction::v1::{Event, WriteResource}; +use diesel::prelude::*; +use field_count::FieldCount; +use serde::{Deserialize, Serialize}; + +type Domain = String; +type Subdomain = String; +type TokenStandardType = String; +type RegisteredAddress = String; +// PK of current_ans_lookup_v2 +type CurrentAnsLookupV2PK = (Domain, Subdomain, TokenStandardType); +// PK of current_ans_primary_name +type CurrentAnsPrimaryNameV2PK = (RegisteredAddress, TokenStandardType); + +#[derive( + Clone, + Default, + Debug, + Deserialize, + FieldCount, + Identifiable, + Insertable, + Serialize, + PartialEq, + Eq, +)] +#[diesel(primary_key(domain, subdomain, token_standard))] +#[diesel(table_name = current_ans_lookup_v2)] +#[diesel(treat_none_as_null = true)] +pub struct CurrentAnsLookupV2 { + pub domain: String, + pub subdomain: String, + pub token_standard: String, + pub registered_address: Option, + pub last_transaction_version: i64, + pub expiration_timestamp: chrono::NaiveDateTime, + pub token_name: String, + pub is_deleted: bool, + pub subdomain_expiration_policy: Option, +} + +#[derive(Clone, Default, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key(transaction_version, write_set_change_index))] +#[diesel(table_name = ans_lookup_v2)] +#[diesel(treat_none_as_null = true)] +pub struct AnsLookupV2 { + pub transaction_version: i64, + pub write_set_change_index: i64, + pub domain: String, + pub subdomain: String, + pub token_standard: String, + pub registered_address: Option, + pub expiration_timestamp: chrono::NaiveDateTime, + pub token_name: String, + pub is_deleted: bool, + pub subdomain_expiration_policy: Option, +} + +#[derive( + Clone, + Default, + Debug, + Deserialize, + FieldCount, + Identifiable, + Insertable, + Serialize, + PartialEq, + Eq, +)] +#[diesel(primary_key(registered_address, token_standard))] 
+#[diesel(table_name = current_ans_primary_name_v2)] +#[diesel(treat_none_as_null = true)] +pub struct CurrentAnsPrimaryNameV2 { + pub registered_address: String, + pub token_standard: String, + pub domain: Option, + pub subdomain: Option, + pub token_name: Option, + pub is_deleted: bool, + pub last_transaction_version: i64, +} + +#[derive(Clone, Default, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key(transaction_version, write_set_change_index))] +#[diesel(table_name = ans_primary_name_v2)] +#[diesel(treat_none_as_null = true)] +pub struct AnsPrimaryNameV2 { + pub transaction_version: i64, + pub write_set_change_index: i64, + pub registered_address: String, + pub token_standard: String, + pub domain: Option, + pub subdomain: Option, + pub token_name: Option, + pub is_deleted: bool, +} + +impl Ord for CurrentAnsLookupV2 { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.domain + .cmp(&other.domain) + .then(self.subdomain.cmp(&other.subdomain)) + } +} + +impl PartialOrd for CurrentAnsLookupV2 { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl CurrentAnsLookupV2 { + pub fn pk(&self) -> CurrentAnsLookupV2PK { + ( + self.domain.clone(), + self.subdomain.clone(), + self.token_standard.clone(), + ) + } + + pub fn get_v2_from_v1( + v1_current_ans_lookup: CurrentAnsLookup, + v1_ans_lookup: AnsLookup, + ) -> (Self, AnsLookupV2) { + ( + Self { + domain: v1_current_ans_lookup.domain, + subdomain: v1_current_ans_lookup.subdomain, + token_standard: TokenStandard::V1.to_string(), + registered_address: v1_current_ans_lookup.registered_address, + last_transaction_version: v1_current_ans_lookup.last_transaction_version, + expiration_timestamp: v1_current_ans_lookup.expiration_timestamp, + token_name: v1_current_ans_lookup.token_name, + is_deleted: v1_current_ans_lookup.is_deleted, + subdomain_expiration_policy: None, + }, + AnsLookupV2 { + transaction_version: v1_ans_lookup.transaction_version, + write_set_change_index: v1_ans_lookup.write_set_change_index, + domain: v1_ans_lookup.domain, + subdomain: v1_ans_lookup.subdomain, + token_standard: TokenStandard::V1.to_string(), + registered_address: v1_ans_lookup.registered_address, + expiration_timestamp: v1_ans_lookup.expiration_timestamp, + token_name: v1_ans_lookup.token_name, + is_deleted: v1_ans_lookup.is_deleted, + subdomain_expiration_policy: None, + }, + ) + } + + pub fn parse_name_record_from_write_resource_v2( + write_resource: &WriteResource, + ans_v2_contract_address: &str, + txn_version: i64, + write_set_change_index: i64, + address_to_subdomain_ext: &AHashMap, + ) -> anyhow::Result> { + if let Some(inner) = + NameRecordV2::from_write_resource(write_resource, ans_v2_contract_address, txn_version) + .unwrap() + { + // If this resource account has a SubdomainExt, then it's a subdomain + let (subdomain_name, subdomain_expiration_policy) = match address_to_subdomain_ext + .get(&standardize_address(write_resource.address.as_str())) + { + Some(s) => (s.get_subdomain_trunc(), Some(s.subdomain_expiration_policy)), + None => ("".to_string(), None), + }; + + let token_name = get_token_name( + inner.get_domain_trunc().as_str(), + subdomain_name.clone().as_str(), + ); + + return Ok(Some(( + Self { + domain: inner.get_domain_trunc(), + subdomain: subdomain_name.clone().to_string(), + token_standard: TokenStandard::V2.to_string(), + registered_address: inner.get_target_address(), + expiration_timestamp: inner.get_expiration_time(), + token_name: token_name.clone(), 
+ last_transaction_version: txn_version, + is_deleted: false, + subdomain_expiration_policy, + }, + AnsLookupV2 { + transaction_version: txn_version, + write_set_change_index, + domain: inner.get_domain_trunc().clone(), + subdomain: subdomain_name.clone().to_string(), + token_standard: TokenStandard::V2.to_string(), + registered_address: inner.get_target_address().clone(), + expiration_timestamp: inner.get_expiration_time(), + token_name, + is_deleted: false, + subdomain_expiration_policy, + }, + ))); + } + Ok(None) + } +} + +impl Ord for CurrentAnsPrimaryNameV2 { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.registered_address.cmp(&other.registered_address) + } +} + +impl PartialOrd for CurrentAnsPrimaryNameV2 { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl CurrentAnsPrimaryNameV2 { + pub fn pk(&self) -> CurrentAnsPrimaryNameV2PK { + (self.registered_address.clone(), self.token_standard.clone()) + } + + pub fn get_v2_from_v1( + v1_current_primary_name: CurrentAnsPrimaryName, + v1_primary_name: AnsPrimaryName, + ) -> (Self, AnsPrimaryNameV2) { + ( + Self { + registered_address: v1_current_primary_name.registered_address, + token_standard: TokenStandard::V1.to_string(), + domain: v1_current_primary_name.domain, + subdomain: v1_current_primary_name.subdomain, + token_name: v1_current_primary_name.token_name, + is_deleted: v1_current_primary_name.is_deleted, + last_transaction_version: v1_current_primary_name.last_transaction_version, + }, + AnsPrimaryNameV2 { + transaction_version: v1_primary_name.transaction_version, + write_set_change_index: v1_primary_name.write_set_change_index, + registered_address: v1_primary_name.registered_address, + token_standard: TokenStandard::V1.to_string(), + domain: v1_primary_name.domain, + subdomain: v1_primary_name.subdomain, + token_name: v1_primary_name.token_name, + is_deleted: v1_primary_name.is_deleted, + }, + ) + } + + // Parse v2 primary name record from SetReverseLookupEvent + pub fn parse_v2_primary_name_record_from_event( + event: &Event, + txn_version: i64, + event_index: i64, + ans_v2_contract_address: &str, + ) -> anyhow::Result> { + if let Some(set_reverse_lookup_event) = + SetReverseLookupEvent::from_event(event, ans_v2_contract_address, txn_version).unwrap() + { + if set_reverse_lookup_event.get_curr_domain_trunc().is_empty() { + // Handle case where the address's primary name is unset + return Ok(Some(( + Self { + registered_address: set_reverse_lookup_event.get_account_addr().clone(), + token_standard: TokenStandard::V2.to_string(), + domain: None, + subdomain: None, + token_name: None, + last_transaction_version: txn_version, + is_deleted: true, + }, + AnsPrimaryNameV2 { + transaction_version: txn_version, + write_set_change_index: -(event_index + 1), + registered_address: set_reverse_lookup_event.get_account_addr().clone(), + token_standard: TokenStandard::V2.to_string(), + domain: None, + subdomain: None, + token_name: None, + is_deleted: true, + }, + ))); + } else { + // Handle case where the address is set to a new primary name + return Ok(Some(( + Self { + registered_address: set_reverse_lookup_event.get_account_addr().clone(), + token_standard: TokenStandard::V2.to_string(), + domain: Some(set_reverse_lookup_event.get_curr_domain_trunc()), + subdomain: Some(set_reverse_lookup_event.get_curr_subdomain_trunc()), + token_name: Some(set_reverse_lookup_event.get_curr_token_name()), + last_transaction_version: txn_version, + is_deleted: false, + }, + AnsPrimaryNameV2 { + 
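+ // Note: this record is derived from an event rather than a write set change, so a
+ // negative index based on the event index is used as the write_set_change_index below,
+ // presumably to keep the (transaction_version, write_set_change_index) key unique
+ // without clashing with indices of real write set changes.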
transaction_version: txn_version, + write_set_change_index: -(event_index + 1), + registered_address: set_reverse_lookup_event.get_account_addr().clone(), + token_standard: TokenStandard::V2.to_string(), + domain: Some(set_reverse_lookup_event.get_curr_domain_trunc()), + subdomain: Some(set_reverse_lookup_event.get_curr_subdomain_trunc()), + token_name: Some(set_reverse_lookup_event.get_curr_token_name()), + is_deleted: false, + }, + ))); + } + } + Ok(None) + } +} diff --git a/rust/processor/src/db/common/models/ans_models/ans_utils.rs b/rust/processor/src/db/common/models/ans_models/ans_utils.rs new file mode 100644 index 000000000..4f6ee6ed9 --- /dev/null +++ b/rust/processor/src/db/common/models/ans_models/ans_utils.rs @@ -0,0 +1,368 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +// This is required because a diesel macro makes clippy sad +#![allow(clippy::extra_unused_lifetimes)] + +use crate::{ + db::common::models::default_models::move_resources::MoveResource, + utils::util::{ + bigdecimal_to_u64, deserialize_from_string, parse_timestamp_secs, standardize_address, + truncate_str, + }, +}; +use anyhow::Context; +use aptos_protos::transaction::v1::{Event, WriteResource}; +use bigdecimal::BigDecimal; +use serde::{Deserialize, Serialize}; + +pub const DOMAIN_LENGTH: usize = 64; + +#[derive(Serialize, Deserialize, Debug, Clone)] +struct OptionalString { + vec: Vec, +} + +impl OptionalString { + fn get_string(&self) -> Option { + if self.vec.is_empty() { + None + } else { + Some(self.vec[0].clone()) + } + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct OptionalBigDecimal { + vec: Vec, +} + +pub fn get_token_name(domain_name: &str, subdomain_name: &str) -> String { + let domain = truncate_str(domain_name, DOMAIN_LENGTH); + let subdomain = truncate_str(subdomain_name, DOMAIN_LENGTH); + let mut token_name = format!("{}.apt", &domain); + if !subdomain.is_empty() { + token_name = format!("{}.{}", &subdomain, token_name); + } + token_name +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +struct BigDecimalWrapper(#[serde(deserialize_with = "deserialize_from_string")] pub BigDecimal); + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct NameRecordKeyV1 { + domain_name: String, + subdomain_name: OptionalString, +} + +impl NameRecordKeyV1 { + pub fn get_domain_trunc(&self) -> String { + truncate_str(self.domain_name.as_str(), DOMAIN_LENGTH) + } + + pub fn get_subdomain_trunc(&self) -> String { + truncate_str( + self.subdomain_name + .get_string() + .unwrap_or_default() + .as_str(), + DOMAIN_LENGTH, + ) + } + + pub fn get_token_name(&self) -> String { + let domain = self.get_domain_trunc(); + let subdomain = self.get_subdomain_trunc(); + get_token_name(&domain, &subdomain) + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct NameRecordV1 { + #[serde(deserialize_with = "deserialize_from_string")] + expiration_time_sec: BigDecimal, + #[serde(deserialize_with = "deserialize_from_string")] + property_version: BigDecimal, + target_address: OptionalString, +} + +impl NameRecordV1 { + pub fn get_expiration_time(&self) -> chrono::NaiveDateTime { + parse_timestamp_secs(bigdecimal_to_u64(&self.expiration_time_sec), 0) + } + + pub fn get_target_address(&self) -> Option { + self.target_address + .get_string() + .map(|addr| standardize_address(&addr)) + } +} + +pub enum AnsTableItem { + NameRecordKeyV1(NameRecordKeyV1), + NameRecordV1(NameRecordV1), +} + +impl AnsTableItem { + /// Matches based on the type name (last part of a 
full qualified type) instead of the fully qualified type + /// because we already know what the table handle is + pub fn from_table_item( + data_type_name: &str, + data: &str, + txn_version: i64, + ) -> anyhow::Result> { + match data_type_name { + "NameRecordKeyV1" => { + serde_json::from_str(data).map(|inner| Some(Self::NameRecordKeyV1(inner))) + }, + "NameRecordV1" => { + serde_json::from_str(data).map(|inner| Some(Self::NameRecordV1(inner))) + }, + _ => Ok(None), + } + .context(format!( + "version {} failed! failed to parse type {}, data {:?}", + txn_version, data_type_name, data + )) + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct NameRecordV2 { + domain_name: String, + #[serde(deserialize_with = "deserialize_from_string")] + expiration_time_sec: BigDecimal, + target_address: OptionalString, +} + +impl NameRecordV2 { + pub fn get_domain_trunc(&self) -> String { + truncate_str(self.domain_name.as_str(), DOMAIN_LENGTH) + } + + pub fn get_expiration_time(&self) -> chrono::NaiveDateTime { + parse_timestamp_secs(bigdecimal_to_u64(&self.expiration_time_sec), 0) + } + + pub fn get_target_address(&self) -> Option { + self.target_address + .get_string() + .map(|addr| standardize_address(&addr)) + } + + pub fn from_write_resource( + write_resource: &WriteResource, + ans_v2_contract_address: &str, + txn_version: i64, + ) -> anyhow::Result> { + if let Some(AnsWriteResource::NameRecordV2(inner)) = AnsWriteResource::from_write_resource( + write_resource, + ans_v2_contract_address, + txn_version, + )? { + Ok(Some(inner)) + } else { + Ok(None) + } + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct SubdomainExtV2 { + pub subdomain_expiration_policy: i64, + subdomain_name: String, +} + +impl SubdomainExtV2 { + pub fn get_subdomain_trunc(&self) -> String { + truncate_str(self.subdomain_name.as_str(), DOMAIN_LENGTH) + } + + pub fn from_write_resource( + write_resource: &WriteResource, + ans_v2_contract_address: &str, + txn_version: i64, + ) -> anyhow::Result> { + if let Some(AnsWriteResource::SubdomainExtV2(inner)) = + AnsWriteResource::from_write_resource( + write_resource, + ans_v2_contract_address, + txn_version, + )? + { + Ok(Some(inner)) + } else { + Ok(None) + } + } +} + +pub enum AnsWriteResource { + NameRecordV2(NameRecordV2), + SubdomainExtV2(SubdomainExtV2), +} + +impl AnsWriteResource { + pub fn from_write_resource( + write_resource: &WriteResource, + ans_v2_contract_address: &str, + txn_version: i64, + ) -> anyhow::Result> { + let type_str = MoveResource::get_outer_type_from_write_resource(write_resource); + let data = write_resource.data.as_str(); + + match type_str.clone() { + x if x == format!("{}::v2_1_domains::NameRecord", ans_v2_contract_address) => { + serde_json::from_str(data).map(|inner| Some(Self::NameRecordV2(inner))) + }, + x if x == format!("{}::v2_1_domains::SubdomainExt", ans_v2_contract_address) => { + serde_json::from_str(data).map(|inner| Some(Self::SubdomainExtV2(inner))) + }, + _ => Ok(None), + } + .context(format!( + "version {} failed! 
failed to parse type {}, data {:?}", + txn_version, + type_str.clone(), + data + )) + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct RenewNameEvent { + domain_name: String, + #[serde(deserialize_with = "deserialize_from_string")] + expiration_time_secs: BigDecimal, + is_primary_name: bool, + subdomain_name: OptionalString, + target_address: OptionalString, +} + +impl RenewNameEvent { + pub fn from_event( + event: &Event, + ans_v2_contract_address: &str, + txn_version: i64, + ) -> anyhow::Result> { + if let Some(V2AnsEvent::RenewNameEvent(inner)) = + V2AnsEvent::from_event(event, ans_v2_contract_address, txn_version).unwrap() + { + Ok(Some(inner)) + } else { + Ok(None) + } + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct SetReverseLookupEvent { + account_addr: String, + curr_domain_name: OptionalString, + curr_expiration_time_secs: OptionalBigDecimal, + curr_subdomain_name: OptionalString, + prev_domain_name: OptionalString, + prev_expiration_time_secs: OptionalBigDecimal, + prev_subdomain_name: OptionalString, +} + +impl SetReverseLookupEvent { + pub fn get_account_addr(&self) -> String { + standardize_address(&self.account_addr) + } + + pub fn get_curr_domain_trunc(&self) -> String { + truncate_str( + self.curr_domain_name + .get_string() + .unwrap_or_default() + .as_str(), + DOMAIN_LENGTH, + ) + } + + pub fn get_curr_subdomain_trunc(&self) -> String { + truncate_str( + self.curr_subdomain_name + .get_string() + .unwrap_or_default() + .as_str(), + DOMAIN_LENGTH, + ) + } + + pub fn get_curr_token_name(&self) -> String { + let domain = self.get_curr_domain_trunc(); + let subdomain = self.get_curr_subdomain_trunc(); + get_token_name(&domain, &subdomain) + } + + pub fn from_event( + event: &Event, + ans_v2_contract_address: &str, + txn_version: i64, + ) -> anyhow::Result> { + if let Some(V2AnsEvent::SetReverseLookupEvent(inner)) = + V2AnsEvent::from_event(event, ans_v2_contract_address, txn_version).unwrap() + { + Ok(Some(inner)) + } else { + Ok(None) + } + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub enum V2AnsEvent { + SetReverseLookupEvent(SetReverseLookupEvent), + RenewNameEvent(RenewNameEvent), +} + +impl V2AnsEvent { + pub fn is_event_supported(event_type: &str, ans_v2_contract_address: &str) -> bool { + [ + format!( + "{}::v2_1_domains::SetReverseLookupEvent", + ans_v2_contract_address + ), + format!("{}::v2_1_domains::RenewNameEvent", ans_v2_contract_address), + ] + .contains(&event_type.to_string()) + } + + pub fn from_event( + event: &Event, + ans_v2_contract_address: &str, + txn_version: i64, + ) -> anyhow::Result> { + let type_str: String = event.type_str.clone(); + let data = event.data.as_str(); + + if !Self::is_event_supported(type_str.as_str(), ans_v2_contract_address) { + return Ok(None); + } + + match type_str.clone() { + x if x + == format!( + "{}::v2_1_domains::SetReverseLookupEvent", + ans_v2_contract_address + ) => + { + serde_json::from_str(data).map(|inner| Some(Self::SetReverseLookupEvent(inner))) + }, + x if x == format!("{}::v2_1_domains::RenewNameEvent", ans_v2_contract_address) => { + serde_json::from_str(data).map(|inner| Some(Self::RenewNameEvent(inner))) + }, + _ => Ok(None), + } + .context(format!( + "version {} failed! 
failed to parse type {}, data {:?}", + txn_version, + type_str.clone(), + data + )) + } +} diff --git a/rust/processor/src/db/common/models/ans_models/mod.rs b/rust/processor/src/db/common/models/ans_models/mod.rs new file mode 100644 index 000000000..5b82f289c --- /dev/null +++ b/rust/processor/src/db/common/models/ans_models/mod.rs @@ -0,0 +1,6 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +pub mod ans_lookup; +pub mod ans_lookup_v2; +pub mod ans_utils; diff --git a/rust/processor/src/db/common/models/coin_models/coin_activities.rs b/rust/processor/src/db/common/models/coin_models/coin_activities.rs new file mode 100644 index 000000000..e1f1cdf1d --- /dev/null +++ b/rust/processor/src/db/common/models/coin_models/coin_activities.rs @@ -0,0 +1,312 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +// This is required because a diesel macro makes clippy sad +#![allow(clippy::extra_unused_lifetimes)] +#![allow(clippy::unused_unit)] + +use super::{ + coin_balances::{CoinBalance, CurrentCoinBalance}, + coin_infos::CoinInfo, + coin_utils::{CoinEvent, EventGuidResource}, +}; +use crate::{ + db::common::models::{ + fungible_asset_models::{ + v2_fungible_asset_activities::{ + CoinType, CurrentCoinBalancePK, EventToCoinType, BURN_GAS_EVENT_CREATION_NUM, + BURN_GAS_EVENT_INDEX, GAS_FEE_EVENT, + }, + v2_fungible_asset_utils::FeeStatement, + }, + user_transactions_models::signatures::Signature, + }, + schema::coin_activities, + utils::{ + counters::PROCESSOR_UNKNOWN_TYPE_COUNT, + util::{ + get_entry_function_from_user_request, standardize_address, u64_to_bigdecimal, + APTOS_COIN_TYPE_STR, + }, + }, +}; +use ahash::AHashMap; +use aptos_protos::transaction::v1::{ + transaction::TxnData, write_set_change::Change as WriteSetChangeEnum, Event as EventPB, + Transaction as TransactionPB, TransactionInfo, UserTransactionRequest, +}; +use bigdecimal::{BigDecimal, Zero}; +use chrono::NaiveDateTime; +use field_count::FieldCount; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key( + transaction_version, + event_account_address, + event_creation_number, + event_sequence_number +))] +#[diesel(table_name = coin_activities)] +pub struct CoinActivity { + pub transaction_version: i64, + pub event_account_address: String, + pub event_creation_number: i64, + pub event_sequence_number: i64, + pub owner_address: String, + pub coin_type: String, + pub amount: BigDecimal, + pub activity_type: String, + pub is_gas_fee: bool, + pub is_transaction_success: bool, + pub entry_function_id_str: Option, + pub block_height: i64, + pub transaction_timestamp: chrono::NaiveDateTime, + pub event_index: Option, + pub gas_fee_payer_address: Option, + pub storage_refund_amount: BigDecimal, +} + +impl CoinActivity { + /// There are different objects containing different information about balances and coins. + /// Events: Withdraw and Deposit event containing amounts. There is no coin type so we need to get that from Resources. (from event guid) + /// CoinInfo Resource: Contains name, symbol, decimals and supply. 
(if supply is aggregator, however, actual supply amount will live in a separate table) + /// CoinStore Resource: Contains owner address and coin type information used to complete events + /// Aggregator Table Item: Contains current supply of a coin + /// Note, we're not currently tracking supply + pub fn from_transaction( + transaction: &TransactionPB, + ) -> ( + Vec, + Vec, + AHashMap, + AHashMap, + ) { + // All the items we want to track + let mut coin_activities = Vec::new(); + let mut coin_balances = Vec::new(); + let mut coin_infos: AHashMap = AHashMap::new(); + let mut current_coin_balances: AHashMap = + AHashMap::new(); + // This will help us get the coin type when we see coin deposit/withdraw events for coin activities + let mut all_event_to_coin_type: EventToCoinType = AHashMap::new(); + + // Extracts events and user request from genesis and user transactions. Other transactions won't have coin events + let txn_data = match transaction.txn_data.as_ref() { + Some(data) => data, + None => { + PROCESSOR_UNKNOWN_TYPE_COUNT + .with_label_values(&["CoinActivity"]) + .inc(); + tracing::warn!( + transaction_version = transaction.version, + "Transaction data doesn't exist", + ); + return Default::default(); + }, + }; + let (events, maybe_user_request): (&Vec, Option<&UserTransactionRequest>) = + match txn_data { + TxnData::Genesis(inner) => (&inner.events, None), + TxnData::User(inner) => (&inner.events, inner.request.as_ref()), + _ => return Default::default(), + }; + + // The rest are fields common to all transactions + let txn_version = transaction.version as i64; + let block_height = transaction.block_height as i64; + let transaction_info = transaction + .info + .as_ref() + .expect("Transaction info doesn't exist!"); + let txn_timestamp = transaction + .timestamp + .as_ref() + .expect("Transaction timestamp doesn't exist!") + .seconds; + #[allow(deprecated)] + let txn_timestamp = + NaiveDateTime::from_timestamp_opt(txn_timestamp, 0).expect("Txn Timestamp is invalid!"); + + // Handling gas first + let mut entry_function_id_str = None; + if let Some(user_request) = maybe_user_request { + let fee_statement = events.iter().find_map(|event| { + let event_type = event.type_str.as_str(); + FeeStatement::from_event(event_type, &event.data, txn_version) + }); + + entry_function_id_str = get_entry_function_from_user_request(user_request); + coin_activities.push(Self::get_gas_event( + transaction_info, + user_request, + &entry_function_id_str, + txn_version, + txn_timestamp, + block_height, + fee_statement, + )); + } + + // Need coin info from move resources + for wsc in &transaction_info.changes { + let (maybe_coin_info, maybe_coin_balance_data) = + if let WriteSetChangeEnum::WriteResource(write_resource) = + &wsc.change.as_ref().unwrap() + { + ( + CoinInfo::from_write_resource(write_resource, txn_version, txn_timestamp) + .unwrap(), + CoinBalance::from_write_resource( + write_resource, + txn_version, + txn_timestamp, + ) + .unwrap(), + ) + } else { + (None, None) + }; + + if let Some(coin_info) = maybe_coin_info { + coin_infos.insert(coin_info.coin_type.clone(), coin_info); + } + if let Some((coin_balance, current_coin_balance, event_to_coin_type)) = + maybe_coin_balance_data + { + current_coin_balances.insert( + ( + coin_balance.owner_address.clone(), + coin_balance.coin_type.clone(), + ), + current_coin_balance, + ); + coin_balances.push(coin_balance); + all_event_to_coin_type.extend(event_to_coin_type); + } + } + for (index, event) in events.iter().enumerate() { + let event_type = 
event.type_str.clone(); + if let Some(parsed_event) = + CoinEvent::from_event(event_type.as_str(), &event.data, txn_version).unwrap() + { + coin_activities.push(Self::from_parsed_event( + &event_type, + event, + &parsed_event, + txn_version, + &all_event_to_coin_type, + block_height, + &entry_function_id_str, + txn_timestamp, + index as i64, + )); + }; + } + ( + coin_activities, + coin_balances, + coin_infos, + current_coin_balances, + ) + } + + fn from_parsed_event( + event_type: &str, + event: &EventPB, + coin_event: &CoinEvent, + txn_version: i64, + event_to_coin_type: &EventToCoinType, + block_height: i64, + entry_function_id_str: &Option, + transaction_timestamp: chrono::NaiveDateTime, + event_index: i64, + ) -> Self { + let amount = match coin_event { + CoinEvent::WithdrawCoinEvent(inner) => inner.amount.clone(), + CoinEvent::DepositCoinEvent(inner) => inner.amount.clone(), + }; + let event_move_guid = EventGuidResource { + addr: standardize_address(event.key.as_ref().unwrap().account_address.as_str()), + creation_num: event.key.as_ref().unwrap().creation_number as i64, + }; + let coin_type = + event_to_coin_type + .get(&event_move_guid) + .unwrap_or_else(|| { + panic!( + "Could not find event in resources (CoinStore), version: {}, event guid: {:?}, mapping: {:?}", + txn_version, event_move_guid, event_to_coin_type + ) + }).clone(); + + Self { + transaction_version: txn_version, + event_account_address: standardize_address( + &event.key.as_ref().unwrap().account_address, + ), + event_creation_number: event.key.as_ref().unwrap().creation_number as i64, + event_sequence_number: event.sequence_number as i64, + owner_address: standardize_address(&event.key.as_ref().unwrap().account_address), + coin_type, + amount, + activity_type: event_type.to_string(), + is_gas_fee: false, + is_transaction_success: true, + entry_function_id_str: entry_function_id_str.clone(), + block_height, + transaction_timestamp, + event_index: Some(event_index), + gas_fee_payer_address: None, + storage_refund_amount: BigDecimal::zero(), + } + } + + pub fn get_gas_event( + txn_info: &TransactionInfo, + user_transaction_request: &UserTransactionRequest, + entry_function_id_str: &Option, + transaction_version: i64, + transaction_timestamp: chrono::NaiveDateTime, + block_height: i64, + fee_statement: Option, + ) -> Self { + let aptos_coin_burned = + BigDecimal::from(txn_info.gas_used * user_transaction_request.gas_unit_price); + let signature = user_transaction_request + .signature + .as_ref() + .unwrap_or_else(|| { + tracing::error!( + transaction_version = transaction_version, + "User transaction must have signature" + ); + panic!("User transaction must have signature") + }); + let gas_fee_payer_address = + Signature::get_fee_payer_address(signature, transaction_version); + + Self { + transaction_version, + event_account_address: standardize_address( + &user_transaction_request.sender.to_string(), + ), + event_creation_number: BURN_GAS_EVENT_CREATION_NUM, + event_sequence_number: user_transaction_request.sequence_number as i64, + owner_address: standardize_address(&user_transaction_request.sender.to_string()), + coin_type: APTOS_COIN_TYPE_STR.to_string(), + amount: aptos_coin_burned, + activity_type: GAS_FEE_EVENT.to_string(), + is_gas_fee: true, + is_transaction_success: txn_info.success, + entry_function_id_str: entry_function_id_str.clone(), + block_height, + transaction_timestamp, + event_index: Some(BURN_GAS_EVENT_INDEX), + gas_fee_payer_address, + storage_refund_amount: fee_statement + .map(|fs| 
u64_to_bigdecimal(fs.storage_fee_refund_octas)) + .unwrap_or(BigDecimal::zero()), + } + } +} diff --git a/rust/processor/src/db/common/models/coin_models/coin_balances.rs b/rust/processor/src/db/common/models/coin_models/coin_balances.rs new file mode 100644 index 000000000..a50691a77 --- /dev/null +++ b/rust/processor/src/db/common/models/coin_models/coin_balances.rs @@ -0,0 +1,94 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +// This is required because a diesel macro makes clippy sad +#![allow(clippy::extra_unused_lifetimes)] +#![allow(clippy::unused_unit)] + +use super::coin_utils::{CoinInfoType, CoinResource}; +use crate::{ + db::common::models::fungible_asset_models::v2_fungible_asset_activities::EventToCoinType, + schema::{coin_balances, current_coin_balances}, + utils::util::standardize_address, +}; +use ahash::AHashMap; +use aptos_protos::transaction::v1::WriteResource; +use bigdecimal::BigDecimal; +use field_count::FieldCount; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key(transaction_version, owner_address, coin_type))] +#[diesel(table_name = coin_balances)] +pub struct CoinBalance { + pub transaction_version: i64, + pub owner_address: String, + pub coin_type_hash: String, + pub coin_type: String, + pub amount: BigDecimal, + pub transaction_timestamp: chrono::NaiveDateTime, +} + +#[derive(Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key(owner_address, coin_type))] +#[diesel(table_name = current_coin_balances)] +pub struct CurrentCoinBalance { + pub owner_address: String, + pub coin_type_hash: String, + pub coin_type: String, + pub amount: BigDecimal, + pub last_transaction_version: i64, + pub last_transaction_timestamp: chrono::NaiveDateTime, +} + +impl CoinBalance { + /// Getting coin balances from resources + pub fn from_write_resource( + write_resource: &WriteResource, + txn_version: i64, + txn_timestamp: chrono::NaiveDateTime, + ) -> anyhow::Result> { + match &CoinResource::from_write_resource(write_resource, txn_version)? 
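+ // Sketch of the CoinStore write this match handles; the type string and field values
+ // are illustrative placeholders, not data from a real transaction:
+ //   type_str: "0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin>"
+ //   data: { "coin": { "value": "1000" }, "frozen": false,
+ //           "deposit_events":  { "guid": { "id": { "addr": "0x...", "creation_num": "2" } } },
+ //           "withdraw_events": { "guid": { "id": { "addr": "0x...", "creation_num": "3" } } } }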
{ + Some(CoinResource::CoinStoreResource(inner)) => { + let coin_info_type = &CoinInfoType::from_move_type( + &write_resource.r#type.as_ref().unwrap().generic_type_params[0], + write_resource.type_str.as_ref(), + txn_version, + ); + let owner_address = standardize_address(write_resource.address.as_str()); + let coin_balance = Self { + transaction_version: txn_version, + owner_address: owner_address.clone(), + coin_type_hash: coin_info_type.to_hash(), + coin_type: coin_info_type.get_coin_type_trunc(), + amount: inner.coin.value.clone(), + transaction_timestamp: txn_timestamp, + }; + let current_coin_balance = CurrentCoinBalance { + owner_address, + coin_type_hash: coin_info_type.to_hash(), + coin_type: coin_info_type.get_coin_type_trunc(), + amount: inner.coin.value.clone(), + last_transaction_version: txn_version, + last_transaction_timestamp: txn_timestamp, + }; + let event_to_coin_mapping: EventToCoinType = AHashMap::from([ + ( + inner.withdraw_events.guid.id.get_standardized(), + coin_balance.coin_type.clone(), + ), + ( + inner.deposit_events.guid.id.get_standardized(), + coin_balance.coin_type.clone(), + ), + ]); + Ok(Some(( + coin_balance, + current_coin_balance, + event_to_coin_mapping, + ))) + }, + _ => Ok(None), + } + } +} diff --git a/rust/processor/src/db/common/models/coin_models/coin_infos.rs b/rust/processor/src/db/common/models/coin_models/coin_infos.rs new file mode 100644 index 000000000..9efca28d7 --- /dev/null +++ b/rust/processor/src/db/common/models/coin_models/coin_infos.rs @@ -0,0 +1,65 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +// This is required because a diesel macro makes clippy sad +#![allow(clippy::extra_unused_lifetimes)] +#![allow(clippy::unused_unit)] + +use super::coin_utils::{CoinInfoType, CoinResource}; +use crate::schema::coin_infos; +use aptos_protos::transaction::v1::WriteResource; +use field_count::FieldCount; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key(coin_type_hash))] +#[diesel(table_name = coin_infos)] +pub struct CoinInfo { + pub coin_type_hash: String, + pub coin_type: String, + pub transaction_version_created: i64, + pub creator_address: String, + pub name: String, + pub symbol: String, + pub decimals: i32, + pub transaction_created_timestamp: chrono::NaiveDateTime, + pub supply_aggregator_table_handle: Option, + pub supply_aggregator_table_key: Option, +} + +impl CoinInfo { + /// We can find coin info from resources. If the coin info appears multiple times we will only keep the first transaction because it can't be modified. + pub fn from_write_resource( + write_resource: &WriteResource, + txn_version: i64, + txn_timestamp: chrono::NaiveDateTime, + ) -> anyhow::Result> { + match &CoinResource::from_write_resource(write_resource, txn_version)? 
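+ // Sketch of the CoinInfo write this match handles; the values are illustrative
+ // placeholders, not data from a real transaction:
+ //   type_str: "0x1::coin::CoinInfo<0x1::aptos_coin::AptosCoin>"
+ //   data: { "name": "Example Coin", "symbol": "EXC", "decimals": 8,
+ //           "supply": { "vec": [ { "aggregator": { "vec": [ { "handle": "0x...", "key": "0x..." } ] },
+ //                                  "integer":    { "vec": [] } } ] } }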
{ + Some(CoinResource::CoinInfoResource(inner)) => { + let coin_info_type = &CoinInfoType::from_move_type( + &write_resource.r#type.as_ref().unwrap().generic_type_params[0], + write_resource.type_str.as_ref(), + txn_version, + ); + let (supply_aggregator_table_handle, supply_aggregator_table_key) = inner + .get_aggregator_metadata() + .map(|agg| (Some(agg.handle), Some(agg.key))) + .unwrap_or((None, None)); + + Ok(Some(Self { + coin_type_hash: coin_info_type.to_hash(), + coin_type: coin_info_type.get_coin_type_trunc(), + transaction_version_created: txn_version, + creator_address: coin_info_type.get_creator_address(), + name: inner.get_name_trunc(), + symbol: inner.get_symbol_trunc(), + decimals: inner.decimals, + transaction_created_timestamp: txn_timestamp, + supply_aggregator_table_handle, + supply_aggregator_table_key, + })) + }, + _ => Ok(None), + } + } +} diff --git a/rust/processor/src/db/common/models/coin_models/coin_supply.rs b/rust/processor/src/db/common/models/coin_models/coin_supply.rs new file mode 100644 index 000000000..f3942dae8 --- /dev/null +++ b/rust/processor/src/db/common/models/coin_models/coin_supply.rs @@ -0,0 +1,88 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +// This is required because a diesel macro makes clippy sad +#![allow(clippy::extra_unused_lifetimes)] +#![allow(clippy::unused_unit)] + +use crate::{ + db::common::models::default_models::move_tables::TableItem, + schema::coin_supply, + utils::util::{hash_str, APTOS_COIN_TYPE_STR}, +}; +use anyhow::Context; +use aptos_protos::transaction::v1::WriteTableItem; +use bigdecimal::BigDecimal; +use field_count::FieldCount; +use serde::{Deserialize, Serialize}; + +const APTOS_COIN_SUPPLY_TABLE_HANDLE: &str = + "0x1b854694ae746cdbd8d44186ca4929b2b337df21d1c74633be19b2710552fdca"; +const APTOS_COIN_SUPPLY_TABLE_KEY: &str = + "0x619dc29a0aac8fa146714058e8dd6d2d0f3bdf5f6331907bf91f3acd81e6935"; + +#[derive(Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key(transaction_version, coin_type_hash))] +#[diesel(table_name = coin_supply)] +pub struct CoinSupply { + pub transaction_version: i64, + pub coin_type_hash: String, + pub coin_type: String, + pub supply: BigDecimal, + pub transaction_timestamp: chrono::NaiveDateTime, + pub transaction_epoch: i64, +} + +impl CoinSupply { + /// Currently only supports aptos_coin. Aggregator table detail is in CoinInfo which for aptos coin appears during genesis. + /// We query for the aggregator table details (handle and key) once upon indexer initiation and use it to fetch supply. + pub fn from_write_table_item( + write_table_item: &WriteTableItem, + txn_version: i64, + txn_timestamp: chrono::NaiveDateTime, + txn_epoch: i64, + ) -> anyhow::Result> { + if let Some(data) = &write_table_item.data { + // Return early if not aggregator table type + if !(data.key_type == "address" && data.value_type == "u128") { + return Ok(None); + } + // Return early if not aggregator table handle + if write_table_item.handle.as_str() != APTOS_COIN_SUPPLY_TABLE_HANDLE { + return Ok(None); + } + + // Convert to TableItem model. Some fields are just placeholders + let (table_item_model, _) = + TableItem::from_write_table_item(write_table_item, 0, txn_version, 0); + + // Return early if not aptos coin aggregator key + let table_key = table_item_model.decoded_key.as_str().unwrap(); + if table_key != APTOS_COIN_SUPPLY_TABLE_KEY { + return Ok(None); + } + // Everything matches. 
Get the coin supply + let supply = table_item_model + .decoded_value + .as_ref() + .unwrap() + .as_str() + .unwrap() + .parse::() + .context(format!( + "cannot parse string as u128: {:?}, version {}", + table_item_model.decoded_value.as_ref(), + txn_version + ))?; + return Ok(Some(Self { + transaction_version: txn_version, + coin_type_hash: hash_str(APTOS_COIN_TYPE_STR), + coin_type: APTOS_COIN_TYPE_STR.to_string(), + supply, + transaction_timestamp: txn_timestamp, + transaction_epoch: txn_epoch, + })); + } + Ok(None) + } +} diff --git a/rust/processor/src/db/common/models/coin_models/coin_utils.rs b/rust/processor/src/db/common/models/coin_models/coin_utils.rs new file mode 100644 index 000000000..ec46f532c --- /dev/null +++ b/rust/processor/src/db/common/models/coin_models/coin_utils.rs @@ -0,0 +1,334 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +// This is required because a diesel macro makes clippy sad +#![allow(clippy::extra_unused_lifetimes)] + +use crate::{ + db::common::models::default_models::move_resources::MoveResource, + utils::util::{deserialize_from_string, hash_str, standardize_address, truncate_str}, +}; +use anyhow::{bail, Context, Result}; +use aptos_protos::transaction::v1::{move_type::Content, DeleteResource, MoveType, WriteResource}; +use bigdecimal::BigDecimal; +use once_cell::sync::Lazy; +use regex::Regex; +use serde::{Deserialize, Serialize}; +use tracing::error; + +pub const COIN_ADDR: &str = "0x0000000000000000000000000000000000000000000000000000000000000001"; +const COIN_TYPE_HASH_LENGTH: usize = 5000; +const COIN_TYPE_MAX: usize = 1000; + +/** + * This file defines deserialized coin types as defined in our 0x1 contracts. + */ +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct CoinInfoResource { + name: String, + symbol: String, + pub decimals: i32, + pub supply: OptionalAggregatorWrapperResource, +} + +impl CoinInfoResource { + pub fn get_name_trunc(&self) -> String { + truncate_str(&self.name, 32) + } + + pub fn get_symbol_trunc(&self) -> String { + truncate_str(&self.symbol, 10) + } + + /// Getting the table item location of the supply aggregator + pub fn get_aggregator_metadata(&self) -> Option { + if let Some(inner) = self.supply.vec.first() { + inner.aggregator.get_aggregator_metadata() + } else { + None + } + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct OptionalAggregatorWrapperResource { + pub vec: Vec, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct OptionalAggregatorResource { + pub aggregator: AggregatorWrapperResource, + pub integer: IntegerWrapperResource, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct AggregatorWrapperResource { + pub vec: Vec, +} + +impl AggregatorWrapperResource { + /// In case we do want to track supply + pub fn get_aggregator_metadata(&self) -> Option { + self.vec.first().cloned() + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct IntegerWrapperResource { + pub vec: Vec, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct AggregatorResource { + pub handle: String, + pub key: String, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct IntegerResource { + #[serde(deserialize_with = "deserialize_from_string")] + pub value: BigDecimal, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct CoinStoreResource { + pub coin: Coin, + pub deposit_events: DepositEventResource, + pub withdraw_events: WithdrawEventResource, + pub frozen: bool, +} + +#[derive(Serialize, Deserialize, 
Debug, Clone)] +pub struct Coin { + #[serde(deserialize_with = "deserialize_from_string")] + pub value: BigDecimal, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct DepositEventResource { + pub guid: EventGuidResourceWrapper, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct WithdrawEventResource { + pub guid: EventGuidResourceWrapper, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct EventGuidResourceWrapper { + pub id: EventGuidResource, +} + +#[derive(Serialize, Deserialize, Debug, Clone, Hash, Eq, PartialEq)] +pub struct EventGuidResource { + pub addr: String, + #[serde(deserialize_with = "deserialize_from_string")] + pub creation_num: i64, +} + +impl EventGuidResource { + pub fn get_address(&self) -> String { + standardize_address(&self.addr) + } + + pub fn get_standardized(&self) -> Self { + Self { + addr: self.get_address(), + creation_num: self.creation_num, + } + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct WithdrawCoinEvent { + #[serde(deserialize_with = "deserialize_from_string")] + pub amount: BigDecimal, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct DepositCoinEvent { + #[serde(deserialize_with = "deserialize_from_string")] + pub amount: BigDecimal, +} + +pub struct CoinInfoType { + coin_type: String, + creator_address: String, +} + +static RE: Lazy = Lazy::new(|| Regex::new(r"(<(.*)>)").unwrap()); + +static COIN_RESOURCES: Lazy<[String; 2]> = Lazy::new(|| { + [ + format!("{}::coin::CoinInfo", COIN_ADDR), + format!("{}::coin::CoinStore", COIN_ADDR), + ] +}); + +impl CoinInfoType { + /// get creator address from move_type, and get coin type from move_type_str + /// Since move_type_str will contain things we don't need, e.g. 0x1::coin::CoinInfo. We will use + /// regex to extract T. + pub fn from_move_type(move_type: &MoveType, move_type_str: &str, txn_version: i64) -> Self { + if let Content::Struct(struct_tag) = move_type.content.as_ref().unwrap() { + let matched = RE.captures(move_type_str).unwrap_or_else(|| { + error!( + txn_version = txn_version, + move_type_str = move_type_str, + "move_type should look like 0x1::coin::CoinInfo" + ); + panic!(); + }); + let coin_type = matched.get(2).unwrap().as_str(); + Self { + coin_type: coin_type.to_string(), + creator_address: struct_tag.address.clone(), + } + } else { + error!(txn_version = txn_version, move_type = ?move_type, "Expected struct tag"); + panic!(); + } + } + + pub fn get_creator_address(&self) -> String { + standardize_address(&self.creator_address) + } + + pub fn to_hash(&self) -> String { + hash_str(&self.coin_type.to_string()) + } + + /// This function gets the hash of the owner address and the coin type, similar to + /// how token v2 gets the named object address for the fungible asset store. 
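+ ///
+ /// A minimal usage sketch (the coin type and owner address are placeholder examples,
+ /// not values taken from chain):
+ /// ```ignore
+ /// let id = CoinInfoType::get_storage_id("0x1::aptos_coin::AptosCoin", "0xa550c18");
+ /// // The key hashed internally is "0xa550c18::0x1::aptos_coin::AptosCoin".
+ /// assert!(id.starts_with("0x"));
+ /// ```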
+ pub fn get_storage_id(coin_type: &str, owner_address: &str) -> String { + let key = format!("{}::{}", owner_address, coin_type); + format!("0x{}", hash_str(&key)) + } + + pub fn get_coin_type_trunc(&self) -> String { + truncate_str(&self.coin_type, COIN_TYPE_HASH_LENGTH) + } + + pub fn get_coin_type_below_max(&self) -> Option { + if self.coin_type.len() > COIN_TYPE_MAX { + None + } else { + Some(self.coin_type.clone()) + } + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub enum CoinResource { + CoinInfoResource(CoinInfoResource), + CoinStoreResource(CoinStoreResource), + CoinInfoDeletion, + CoinStoreDeletion, +} + +impl CoinResource { + pub fn is_resource_supported(data_type: &str) -> bool { + COIN_RESOURCES.contains(&data_type.to_string()) + } + + pub fn from_resource( + data_type: &str, + data: &serde_json::Value, + txn_version: i64, + ) -> Result { + match data_type { + x if x == format!("{}::coin::CoinInfo", COIN_ADDR) => { + serde_json::from_value(data.clone()) + .map(|inner| Some(CoinResource::CoinInfoResource(inner))) + }, + x if x == format!("{}::coin::CoinStore", COIN_ADDR) => { + serde_json::from_value(data.clone()) + .map(|inner| Some(CoinResource::CoinStoreResource(inner))) + }, + _ => Ok(None), + } + .context(format!( + "version {} failed! failed to parse type {}, data {:?}", + txn_version, data_type, data + ))? + .context(format!( + "Resource unsupported! Call is_resource_supported first. version {} type {}", + txn_version, data_type + )) + } + + fn from_delete_resource_internal(data_type: &str, txn_version: i64) -> Result { + match data_type { + x if x == format!("{}::coin::CoinInfo", COIN_ADDR) => { + Ok(CoinResource::CoinInfoDeletion) + }, + x if x == format!("{}::coin::CoinStore", COIN_ADDR) => { + Ok(CoinResource::CoinStoreDeletion) + }, + _ => bail!( + "Resource unsupported! Call is_resource_supported first. version {} type {}", + txn_version, + data_type + ), + } + } + + pub fn from_write_resource( + write_resource: &WriteResource, + txn_version: i64, + ) -> Result> { + let type_str = MoveResource::get_outer_type_from_write_resource(write_resource); + if !CoinResource::is_resource_supported(type_str.as_str()) { + return Ok(None); + } + let resource = MoveResource::from_write_resource( + write_resource, + 0, // Placeholder, this isn't used anyway + txn_version, + 0, // Placeholder, this isn't used anyway + ); + Ok(Some(Self::from_resource( + &type_str, + resource.data.as_ref().unwrap(), + txn_version, + )?)) + } + + pub fn from_delete_resource( + delete_resource: &DeleteResource, + txn_version: i64, + ) -> Result> { + let type_str = MoveResource::get_outer_type_from_delete_resource(delete_resource); + if !CoinResource::is_resource_supported(type_str.as_str()) { + return Ok(None); + } + Ok(Some(Self::from_delete_resource_internal( + &type_str, + txn_version, + )?)) + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub enum CoinEvent { + WithdrawCoinEvent(WithdrawCoinEvent), + DepositCoinEvent(DepositCoinEvent), +} + +impl CoinEvent { + pub fn from_event(data_type: &str, data: &str, txn_version: i64) -> Result> { + match data_type { + "0x1::coin::WithdrawEvent" => { + serde_json::from_str(data).map(|inner| Some(CoinEvent::WithdrawCoinEvent(inner))) + }, + "0x1::coin::DepositEvent" => { + serde_json::from_str(data).map(|inner| Some(CoinEvent::DepositCoinEvent(inner))) + }, + _ => Ok(None), + } + .context(format!( + "version {} failed! 
failed to parse type {}, data {:?}", + txn_version, data_type, data + )) + } +} diff --git a/rust/processor/src/db/common/models/coin_models/mod.rs b/rust/processor/src/db/common/models/coin_models/mod.rs new file mode 100644 index 000000000..c748a32a0 --- /dev/null +++ b/rust/processor/src/db/common/models/coin_models/mod.rs @@ -0,0 +1,8 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +pub mod coin_activities; +pub mod coin_balances; +pub mod coin_infos; +pub mod coin_supply; +pub mod coin_utils; diff --git a/rust/processor/src/db/common/models/default_models/block_metadata_transactions.rs b/rust/processor/src/db/common/models/default_models/block_metadata_transactions.rs new file mode 100644 index 000000000..6c7f30c2e --- /dev/null +++ b/rust/processor/src/db/common/models/default_models/block_metadata_transactions.rs @@ -0,0 +1,63 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +// This is required because a diesel macro makes clippy sad +#![allow(clippy::extra_unused_lifetimes)] +#![allow(clippy::unused_unit)] + +use super::transactions::Transaction; +use crate::{ + schema::block_metadata_transactions, + utils::util::{parse_timestamp, standardize_address}, +}; +use aptos_protos::{ + transaction::v1::BlockMetadataTransaction as BlockMetadataTransactionPB, + util::timestamp::Timestamp, +}; +use field_count::FieldCount; +use serde::{Deserialize, Serialize}; + +#[derive( + Associations, Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize, +)] +#[diesel(belongs_to(Transaction, foreign_key = version))] +#[diesel(primary_key(version))] +#[diesel(table_name = block_metadata_transactions)] +pub struct BlockMetadataTransaction { + pub version: i64, + pub block_height: i64, + pub id: String, + pub round: i64, + pub epoch: i64, + pub previous_block_votes_bitvec: serde_json::Value, + pub proposer: String, + pub failed_proposer_indices: serde_json::Value, + pub timestamp: chrono::NaiveDateTime, +} + +impl BlockMetadataTransaction { + pub fn from_transaction( + txn: &BlockMetadataTransactionPB, + version: i64, + block_height: i64, + epoch: i64, + timestamp: &Timestamp, + ) -> Self { + Self { + version, + block_height, + id: txn.id.to_string(), + epoch, + round: txn.round as i64, + proposer: standardize_address(txn.proposer.as_str()), + failed_proposer_indices: serde_json::to_value(&txn.failed_proposer_indices).unwrap(), + previous_block_votes_bitvec: serde_json::to_value(&txn.previous_block_votes_bitvec) + .unwrap(), + // time is in microseconds + timestamp: parse_timestamp(timestamp, version), + } + } +} + +// Prevent conflicts with other things named `Transaction` +pub type BlockMetadataTransactionModel = BlockMetadataTransaction; diff --git a/rust/processor/src/db/common/models/default_models/mod.rs b/rust/processor/src/db/common/models/default_models/mod.rs new file mode 100644 index 000000000..d3d54d58f --- /dev/null +++ b/rust/processor/src/db/common/models/default_models/mod.rs @@ -0,0 +1,15 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +pub mod block_metadata_transactions; +pub mod move_modules; +pub mod move_resources; +pub mod move_tables; +pub mod transactions; +pub mod write_set_changes; + +// parquet models +pub mod parquet_move_resources; +pub mod parquet_move_tables; +pub mod parquet_transactions; +pub mod parquet_write_set_changes; diff --git a/rust/processor/src/db/common/models/default_models/move_modules.rs 
b/rust/processor/src/db/common/models/default_models/move_modules.rs new file mode 100644 index 000000000..3315fe456 --- /dev/null +++ b/rust/processor/src/db/common/models/default_models/move_modules.rs @@ -0,0 +1,129 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +#![allow(clippy::extra_unused_lifetimes)] + +use super::transactions::Transaction; +use crate::{schema::move_modules, utils::util::standardize_address}; +use aptos_protos::transaction::v1::{ + DeleteModule, MoveModule as MoveModulePB, MoveModuleBytecode, WriteModule, +}; +use field_count::FieldCount; +use serde::{Deserialize, Serialize}; + +#[derive( + Associations, Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize, +)] +#[diesel(belongs_to(Transaction, foreign_key = transaction_version))] +#[diesel(primary_key(transaction_version, write_set_change_index))] +#[diesel(table_name = move_modules)] +pub struct MoveModule { + pub transaction_version: i64, + pub write_set_change_index: i64, + pub transaction_block_height: i64, + pub name: String, + pub address: String, + pub bytecode: Option>, + pub exposed_functions: Option, + pub friends: Option, + pub structs: Option, + pub is_deleted: bool, +} + +#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct MoveModuleByteCodeParsed { + pub address: String, + pub name: String, + pub bytecode: Vec, + pub exposed_functions: serde_json::Value, + pub friends: serde_json::Value, + pub structs: serde_json::Value, +} + +impl MoveModule { + pub fn from_write_module( + write_module: &WriteModule, + write_set_change_index: i64, + transaction_version: i64, + transaction_block_height: i64, + ) -> Self { + let parsed_data = Self::convert_move_module_bytecode(write_module.data.as_ref().unwrap()); + Self { + transaction_version, + transaction_block_height, + write_set_change_index, + // TODO: remove the useless_asref lint when new clippy nighly is released. + #[allow(clippy::useless_asref)] + name: parsed_data + .clone() + .map(|d| d.name.clone()) + .unwrap_or_default(), + address: standardize_address(&write_module.address.to_string()), + bytecode: parsed_data.clone().map(|d| d.bytecode.clone()), + exposed_functions: parsed_data.clone().map(|d| d.exposed_functions.clone()), + friends: parsed_data.clone().map(|d| d.friends.clone()), + structs: parsed_data.map(|d| d.structs.clone()), + is_deleted: false, + } + } + + pub fn from_delete_module( + delete_module: &DeleteModule, + write_set_change_index: i64, + transaction_version: i64, + transaction_block_height: i64, + ) -> Self { + Self { + transaction_version, + transaction_block_height, + write_set_change_index, + // TODO: remove the useless_asref lint when new clippy nighly is released. 
+ #[allow(clippy::useless_asref)] + name: delete_module + .module + .clone() + .map(|d| d.name.clone()) + .unwrap_or_default(), + address: standardize_address(&delete_module.address.to_string()), + bytecode: None, + exposed_functions: None, + friends: None, + structs: None, + is_deleted: true, + } + } + + pub fn convert_move_module_bytecode( + mmb: &MoveModuleBytecode, + ) -> Option { + mmb.abi + .as_ref() + .map(|abi| Self::convert_move_module(abi, mmb.bytecode.clone())) + } + + pub fn convert_move_module( + move_module: &MoveModulePB, + bytecode: Vec, + ) -> MoveModuleByteCodeParsed { + MoveModuleByteCodeParsed { + address: standardize_address(&move_module.address.to_string()), + name: move_module.name.clone(), + bytecode, + exposed_functions: move_module + .exposed_functions + .iter() + .map(|move_func| serde_json::to_value(move_func).unwrap()) + .collect(), + friends: move_module + .friends + .iter() + .map(|move_module_id| serde_json::to_value(move_module_id).unwrap()) + .collect(), + structs: move_module + .structs + .iter() + .map(|move_struct| serde_json::to_value(move_struct).unwrap()) + .collect(), + } + } +} diff --git a/rust/processor/src/db/common/models/default_models/move_resources.rs b/rust/processor/src/db/common/models/default_models/move_resources.rs new file mode 100644 index 000000000..ac73336bd --- /dev/null +++ b/rust/processor/src/db/common/models/default_models/move_resources.rs @@ -0,0 +1,148 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +#![allow(clippy::extra_unused_lifetimes)] + +use super::transactions::Transaction; +use crate::{schema::move_resources, utils::util::standardize_address}; +use anyhow::{Context, Result}; +use aptos_protos::transaction::v1::{ + DeleteResource, MoveStructTag as MoveStructTagPB, WriteResource, +}; +use field_count::FieldCount; +use serde::{Deserialize, Serialize}; + +#[derive( + Associations, Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize, +)] +#[diesel(belongs_to(Transaction, foreign_key = transaction_version))] +#[diesel(primary_key(transaction_version, write_set_change_index))] +#[diesel(table_name = move_resources)] +pub struct MoveResource { + pub transaction_version: i64, + pub write_set_change_index: i64, + pub transaction_block_height: i64, + pub name: String, + pub type_: String, + pub address: String, + pub module: String, + pub generic_type_params: Option, + pub data: Option, + pub is_deleted: bool, + pub state_key_hash: String, +} + +pub struct MoveStructTag { + address: String, + pub module: String, + pub name: String, + pub generic_type_params: Option, +} + +impl MoveResource { + pub fn from_write_resource( + write_resource: &WriteResource, + write_set_change_index: i64, + transaction_version: i64, + transaction_block_height: i64, + ) -> Self { + let parsed_data = Self::convert_move_struct_tag( + write_resource + .r#type + .as_ref() + .expect("MoveStructTag Not Exists."), + ); + Self { + transaction_version, + transaction_block_height, + write_set_change_index, + type_: write_resource.type_str.clone(), + name: parsed_data.name.clone(), + address: standardize_address(&write_resource.address.to_string()), + module: parsed_data.module.clone(), + generic_type_params: parsed_data.generic_type_params, + data: Some(serde_json::from_str(write_resource.data.as_str()).unwrap()), + is_deleted: false, + state_key_hash: standardize_address( + hex::encode(write_resource.state_key_hash.as_slice()).as_str(), + ), + } + } + + pub fn from_delete_resource( + delete_resource: 
&DeleteResource, + write_set_change_index: i64, + transaction_version: i64, + transaction_block_height: i64, + ) -> Self { + let parsed_data = Self::convert_move_struct_tag( + delete_resource + .r#type + .as_ref() + .expect("MoveStructTag Not Exists."), + ); + Self { + transaction_version, + transaction_block_height, + write_set_change_index, + type_: delete_resource.type_str.clone(), + name: parsed_data.name.clone(), + address: standardize_address(&delete_resource.address.to_string()), + module: parsed_data.module.clone(), + generic_type_params: parsed_data.generic_type_params, + data: None, + is_deleted: true, + state_key_hash: standardize_address( + hex::encode(delete_resource.state_key_hash.as_slice()).as_str(), + ), + } + } + + pub fn convert_move_struct_tag(struct_tag: &MoveStructTagPB) -> MoveStructTag { + MoveStructTag { + address: standardize_address(struct_tag.address.as_str()), + module: struct_tag.module.to_string(), + name: struct_tag.name.to_string(), + generic_type_params: struct_tag + .generic_type_params + .iter() + .map(|move_type| -> Result> { + Ok(Some( + serde_json::to_value(move_type).context("Failed to parse move type")?, + )) + }) + .collect::>>() + .unwrap_or(None), + } + } + + pub fn get_outer_type_from_write_resource(write_resource: &WriteResource) -> String { + let move_struct_tag = + Self::convert_move_struct_tag(write_resource.r#type.as_ref().unwrap()); + + format!( + "{}::{}::{}", + move_struct_tag.get_address(), + move_struct_tag.module, + move_struct_tag.name, + ) + } + + pub fn get_outer_type_from_delete_resource(delete_resource: &DeleteResource) -> String { + let move_struct_tag = + Self::convert_move_struct_tag(delete_resource.r#type.as_ref().unwrap()); + + format!( + "{}::{}::{}", + move_struct_tag.get_address(), + move_struct_tag.module, + move_struct_tag.name, + ) + } +} + +impl MoveStructTag { + pub fn get_address(&self) -> String { + standardize_address(self.address.as_str()) + } +} diff --git a/rust/processor/src/db/common/models/default_models/move_tables.rs b/rust/processor/src/db/common/models/default_models/move_tables.rs new file mode 100644 index 000000000..f8899d011 --- /dev/null +++ b/rust/processor/src/db/common/models/default_models/move_tables.rs @@ -0,0 +1,141 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +#![allow(clippy::extra_unused_lifetimes)] + +use super::transactions::Transaction; +use crate::{ + schema::{current_table_items, table_items, table_metadatas}, + utils::util::{hash_str, standardize_address}, +}; +use aptos_protos::transaction::v1::{DeleteTableItem, WriteTableItem}; +use field_count::FieldCount; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key(table_handle, key_hash))] +#[diesel(table_name = current_table_items)] +pub struct CurrentTableItem { + pub table_handle: String, + pub key_hash: String, + pub key: String, + pub decoded_key: serde_json::Value, + pub decoded_value: Option, + pub last_transaction_version: i64, + pub is_deleted: bool, +} + +#[derive( + Associations, Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize, +)] +#[diesel(belongs_to(Transaction, foreign_key = transaction_version))] +#[diesel(primary_key(transaction_version, write_set_change_index))] +#[diesel(table_name = table_items)] +pub struct TableItem { + pub transaction_version: i64, + pub write_set_change_index: i64, + pub transaction_block_height: i64, + pub key: String, + pub table_handle: 
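A small, self-contained sketch (illustrative only, not part of the patch) of what get_outer_type_from_write_resource above produces: the parsed struct tag is flattened into the address::module::name string used as the resource's outer type. ParsedTag and the literal values are stand-ins; in the model itself the address has already been run through standardize_address.

struct ParsedTag {
    address: String,
    module: String,
    name: String,
}

// Mirrors the format! call in get_outer_type_from_write_resource.
fn outer_type(tag: &ParsedTag) -> String {
    format!("{}::{}::{}", tag.address, tag.module, tag.name)
}

fn main() {
    let tag = ParsedTag {
        address: "0x1".to_string(),
        module: "coin".to_string(),
        name: "CoinStore".to_string(),
    };
    assert_eq!(outer_type(&tag), "0x1::coin::CoinStore");
}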
String, + pub decoded_key: serde_json::Value, + pub decoded_value: Option, + pub is_deleted: bool, +} + +#[derive(Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key(handle))] +#[diesel(table_name = table_metadatas)] +pub struct TableMetadata { + pub handle: String, + pub key_type: String, + pub value_type: String, +} + +impl TableItem { + pub fn from_write_table_item( + write_table_item: &WriteTableItem, + write_set_change_index: i64, + transaction_version: i64, + transaction_block_height: i64, + ) -> (Self, CurrentTableItem) { + ( + Self { + transaction_version, + write_set_change_index, + transaction_block_height, + key: write_table_item.key.to_string(), + table_handle: standardize_address(&write_table_item.handle.to_string()), + decoded_key: serde_json::from_str( + write_table_item.data.as_ref().unwrap().key.as_str(), + ) + .unwrap(), + decoded_value: serde_json::from_str( + write_table_item.data.as_ref().unwrap().value.as_str(), + ) + .unwrap(), + is_deleted: false, + }, + CurrentTableItem { + table_handle: standardize_address(&write_table_item.handle.to_string()), + key_hash: hash_str(&write_table_item.key.to_string()), + key: write_table_item.key.to_string(), + decoded_key: serde_json::from_str( + write_table_item.data.as_ref().unwrap().key.as_str(), + ) + .unwrap(), + decoded_value: serde_json::from_str( + write_table_item.data.as_ref().unwrap().value.as_str(), + ) + .unwrap(), + last_transaction_version: transaction_version, + is_deleted: false, + }, + ) + } + + pub fn from_delete_table_item( + delete_table_item: &DeleteTableItem, + write_set_change_index: i64, + transaction_version: i64, + transaction_block_height: i64, + ) -> (Self, CurrentTableItem) { + ( + Self { + transaction_version, + write_set_change_index, + transaction_block_height, + key: delete_table_item.key.to_string(), + table_handle: standardize_address(&delete_table_item.handle.to_string()), + decoded_key: serde_json::from_str( + delete_table_item.data.as_ref().unwrap().key.as_str(), + ) + .unwrap(), + + decoded_value: None, + is_deleted: true, + }, + CurrentTableItem { + table_handle: standardize_address(&delete_table_item.handle.to_string()), + key_hash: hash_str(&delete_table_item.key.to_string()), + key: delete_table_item.key.to_string(), + decoded_key: serde_json::from_str( + delete_table_item.data.as_ref().unwrap().key.as_str(), + ) + .unwrap(), + decoded_value: None, + last_transaction_version: transaction_version, + is_deleted: true, + }, + ) + } +} + +impl TableMetadata { + pub fn from_write_table_item(table_item: &WriteTableItem) -> Self { + Self { + handle: table_item.handle.to_string(), + key_type: table_item.data.as_ref().unwrap().key_type.clone(), + value_type: table_item.data.as_ref().unwrap().value_type.clone(), + } + } +} diff --git a/rust/processor/src/db/common/models/default_models/parquet_move_resources.rs b/rust/processor/src/db/common/models/default_models/parquet_move_resources.rs new file mode 100644 index 000000000..d4b8b312d --- /dev/null +++ b/rust/processor/src/db/common/models/default_models/parquet_move_resources.rs @@ -0,0 +1,153 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +#![allow(clippy::extra_unused_lifetimes)] + +use crate::{ + parquet_processors::generic_parquet_processor::{HasVersion, NamedTable}, + utils::util::standardize_address, +}; +use allocative_derive::Allocative; +use anyhow::{Context, Result}; +use aptos_protos::transaction::v1::{ + DeleteResource, MoveStructTag as MoveStructTagPB, 
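The current_table_items model above is keyed by (table_handle, key_hash) and carries last_transaction_version, which implies a latest-wins merge when a batch touches the same table key more than once; that merge happens in the processor, outside this hunk, so the sketch below (local Pk and CurrentRow types, illustrative only) only restates the assumed intent.

use std::collections::HashMap;

// (table_handle, key_hash) -> most recent row seen for that table key.
type Pk = (String, String);

struct CurrentRow {
    last_transaction_version: i64,
}

fn merge_latest(rows: Vec<(Pk, CurrentRow)>) -> HashMap<Pk, CurrentRow> {
    let mut current: HashMap<Pk, CurrentRow> = HashMap::new();
    for (pk, row) in rows {
        match current.get(&pk) {
            // Keep the existing row if it is already at a newer version.
            Some(existing) if existing.last_transaction_version >= row.last_transaction_version => {},
            _ => {
                current.insert(pk, row);
            },
        }
    }
    current
}

fn main() {
    let pk: Pk = ("0xhandle".to_string(), "0xkeyhash".to_string());
    let merged = merge_latest(vec![
        (pk.clone(), CurrentRow { last_transaction_version: 10 }),
        (pk.clone(), CurrentRow { last_transaction_version: 12 }),
    ]);
    assert_eq!(merged[&pk].last_transaction_version, 12);
}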
WriteResource, +}; +use field_count::FieldCount; +use parquet_derive::ParquetRecordWriter; +use serde::{Deserialize, Serialize}; + +#[derive( + Allocative, Clone, Debug, Default, Deserialize, FieldCount, Serialize, ParquetRecordWriter, +)] +pub struct MoveResource { + pub txn_version: i64, + pub write_set_change_index: i64, + pub block_height: i64, + #[allocative(skip)] + pub block_timestamp: chrono::NaiveDateTime, + pub resource_address: String, + pub resource_type: String, + pub module: String, + pub fun: String, + pub is_deleted: bool, + pub generic_type_params: Option, + pub data: Option, + pub state_key_hash: String, +} + +impl NamedTable for MoveResource { + const TABLE_NAME: &'static str = "move_resources"; +} + +impl HasVersion for MoveResource { + fn version(&self) -> i64 { + self.txn_version + } +} + +pub struct MoveStructTag { + resource_address: String, + pub module: String, + pub fun: String, + pub generic_type_params: Option, +} + +impl MoveResource { + pub fn from_write_resource( + write_resource: &WriteResource, + write_set_change_index: i64, + txn_version: i64, + block_height: i64, + block_timestamp: chrono::NaiveDateTime, + ) -> Self { + let parsed_data = Self::convert_move_struct_tag( + write_resource + .r#type + .as_ref() + .expect("MoveStructTag Not Exists."), + ); + Self { + txn_version, + block_height, + write_set_change_index, + resource_type: write_resource.type_str.clone(), + fun: parsed_data.fun.clone(), + resource_address: standardize_address(&write_resource.address.to_string()), + module: parsed_data.module.clone(), + generic_type_params: parsed_data.generic_type_params, + data: Some(write_resource.data.clone()), + is_deleted: false, + state_key_hash: standardize_address( + hex::encode(write_resource.state_key_hash.as_slice()).as_str(), + ), + block_timestamp, + } + } + + pub fn from_delete_resource( + delete_resource: &DeleteResource, + write_set_change_index: i64, + txn_version: i64, + block_height: i64, + block_timestamp: chrono::NaiveDateTime, + ) -> Self { + let parsed_data = Self::convert_move_struct_tag( + delete_resource + .r#type + .as_ref() + .expect("MoveStructTag Not Exists."), + ); + Self { + txn_version, + block_height, + write_set_change_index, + resource_type: delete_resource.type_str.clone(), + fun: parsed_data.fun.clone(), + resource_address: standardize_address(&delete_resource.address.to_string()), + module: parsed_data.module.clone(), + generic_type_params: parsed_data.generic_type_params, + data: None, + is_deleted: true, + state_key_hash: standardize_address( + hex::encode(delete_resource.state_key_hash.as_slice()).as_str(), + ), + block_timestamp, + } + } + + pub fn convert_move_struct_tag(struct_tag: &MoveStructTagPB) -> MoveStructTag { + MoveStructTag { + resource_address: standardize_address(struct_tag.address.as_str()), + module: struct_tag.module.to_string(), + fun: struct_tag.name.to_string(), + generic_type_params: struct_tag + .generic_type_params + .iter() + .map(|move_type| -> Result> { + Ok(Some( + serde_json::to_string(move_type).context("Failed to parse move type")?, + )) + }) + .collect::>>() + .unwrap_or(None), + } + } + + pub fn get_outer_type_from_resource(write_resource: &WriteResource) -> String { + let move_struct_tag = + Self::convert_move_struct_tag(write_resource.r#type.as_ref().unwrap()); + + format!( + "{}::{}::{}", + move_struct_tag.get_address(), + move_struct_tag.module, + move_struct_tag.fun, + ) + } +} + +impl MoveStructTag { + pub fn get_address(&self) -> String { + 
standardize_address(self.resource_address.as_str()) + } +} diff --git a/rust/processor/src/db/common/models/default_models/parquet_move_tables.rs b/rust/processor/src/db/common/models/default_models/parquet_move_tables.rs new file mode 100644 index 000000000..2bf336e19 --- /dev/null +++ b/rust/processor/src/db/common/models/default_models/parquet_move_tables.rs @@ -0,0 +1,139 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +#![allow(clippy::extra_unused_lifetimes)] + +use crate::{ + parquet_processors::generic_parquet_processor::{HasVersion, NamedTable}, + utils::util::{hash_str, standardize_address}, +}; +use allocative_derive::Allocative; +use aptos_protos::transaction::v1::{DeleteTableItem, WriteTableItem}; +use field_count::FieldCount; +use parquet_derive::ParquetRecordWriter; +use serde::{Deserialize, Serialize}; + +#[derive( + Allocative, Clone, Debug, Default, Deserialize, FieldCount, Serialize, ParquetRecordWriter, +)] +pub struct TableItem { + pub txn_version: i64, + #[allocative(skip)] + pub block_timestamp: chrono::NaiveDateTime, + pub write_set_change_index: i64, + pub transaction_block_height: i64, + pub table_key: String, + pub table_handle: String, + pub decoded_key: String, + pub decoded_value: Option, + pub is_deleted: bool, +} + +impl NamedTable for TableItem { + const TABLE_NAME: &'static str = "table_items"; +} + +impl HasVersion for TableItem { + fn version(&self) -> i64 { + self.txn_version + } +} +#[derive(Clone, Debug, Deserialize, FieldCount, Serialize)] +pub struct CurrentTableItem { + pub table_handle: String, + pub key_hash: String, + pub key: String, + pub decoded_key: serde_json::Value, + pub decoded_value: Option, + pub last_transaction_version: i64, + pub is_deleted: bool, +} +#[derive(Clone, Debug, Deserialize, FieldCount, Serialize)] +pub struct TableMetadata { + pub handle: String, + pub key_type: String, + pub value_type: String, +} + +impl TableItem { + pub fn from_write_table_item( + write_table_item: &WriteTableItem, + write_set_change_index: i64, + txn_version: i64, + transaction_block_height: i64, + block_timestamp: chrono::NaiveDateTime, + ) -> (Self, CurrentTableItem) { + ( + Self { + txn_version, + write_set_change_index, + transaction_block_height, + table_key: write_table_item.key.to_string(), + table_handle: standardize_address(&write_table_item.handle.to_string()), + decoded_key: write_table_item.data.as_ref().unwrap().key.clone(), + decoded_value: Some(write_table_item.data.as_ref().unwrap().value.clone()), + is_deleted: false, + block_timestamp, + }, + CurrentTableItem { + table_handle: standardize_address(&write_table_item.handle.to_string()), + key_hash: hash_str(&write_table_item.key.to_string()), + key: write_table_item.key.to_string(), + decoded_key: serde_json::from_str( + write_table_item.data.as_ref().unwrap().key.as_str(), + ) + .unwrap(), + decoded_value: serde_json::from_str( + write_table_item.data.as_ref().unwrap().value.as_str(), + ) + .unwrap(), + last_transaction_version: txn_version, + is_deleted: false, + }, + ) + } + + pub fn from_delete_table_item( + delete_table_item: &DeleteTableItem, + write_set_change_index: i64, + txn_version: i64, + transaction_block_height: i64, + block_timestamp: chrono::NaiveDateTime, + ) -> (Self, CurrentTableItem) { + ( + Self { + txn_version, + write_set_change_index, + transaction_block_height, + table_key: delete_table_item.key.to_string(), + table_handle: standardize_address(&delete_table_item.handle.to_string()), + decoded_key: 
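The parquet models in this directory each implement NamedTable and HasVersion from parquet_processors::generic_parquet_processor. A minimal sketch of how a generic consumer can use those two traits; the trait definitions are restated locally so the example stands alone, and the generic writer itself is not part of this hunk.

trait NamedTable {
    const TABLE_NAME: &'static str;
}

trait HasVersion {
    fn version(&self) -> i64;
}

// Reports which table a batch belongs to and the highest transaction version
// in it, without knowing the concrete model type.
fn describe_batch<T: NamedTable + HasVersion>(rows: &[T]) -> Option<(&'static str, i64)> {
    rows.iter()
        .map(HasVersion::version)
        .max()
        .map(|max_version| (T::TABLE_NAME, max_version))
}

struct Row(i64);
impl NamedTable for Row {
    const TABLE_NAME: &'static str = "rows";
}
impl HasVersion for Row {
    fn version(&self) -> i64 {
        self.0
    }
}

fn main() {
    assert_eq!(describe_batch(&[Row(5), Row(9)]), Some(("rows", 9)));
}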
delete_table_item.data.as_ref().unwrap().key.clone(), + decoded_value: None, + is_deleted: true, + block_timestamp, + }, + CurrentTableItem { + table_handle: standardize_address(&delete_table_item.handle.to_string()), + key_hash: hash_str(&delete_table_item.key.to_string()), + key: delete_table_item.key.to_string(), + decoded_key: serde_json::from_str( + delete_table_item.data.as_ref().unwrap().key.as_str(), + ) + .unwrap(), + decoded_value: None, + last_transaction_version: txn_version, + is_deleted: true, + }, + ) + } +} + +impl TableMetadata { + pub fn from_write_table_item(table_item: &WriteTableItem) -> Self { + Self { + handle: table_item.handle.to_string(), + key_type: table_item.data.as_ref().unwrap().key_type.clone(), + value_type: table_item.data.as_ref().unwrap().value_type.clone(), + } + } +} diff --git a/rust/processor/src/db/common/models/default_models/parquet_transactions.rs b/rust/processor/src/db/common/models/default_models/parquet_transactions.rs new file mode 100644 index 000000000..e4b9d13f0 --- /dev/null +++ b/rust/processor/src/db/common/models/default_models/parquet_transactions.rs @@ -0,0 +1,381 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +// This is required because a diesel macro makes clippy sad +#![allow(clippy::extra_unused_lifetimes)] +#![allow(clippy::unused_unit)] + +use super::{ + block_metadata_transactions::BlockMetadataTransaction, + parquet_write_set_changes::{WriteSetChangeDetail, WriteSetChangeModel}, +}; +use crate::{ + parquet_processors::generic_parquet_processor::{HasVersion, NamedTable}, + utils::{ + counters::PROCESSOR_UNKNOWN_TYPE_COUNT, + util::{get_clean_payload, get_clean_writeset, get_payload_type, standardize_address}, + }, +}; +use ahash::AHashMap; +use allocative_derive::Allocative; +use aptos_protos::transaction::v1::{ + transaction::{TransactionType, TxnData}, + Transaction as TransactionPB, TransactionInfo, +}; +use field_count::FieldCount; +use parquet_derive::ParquetRecordWriter; +use serde::{Deserialize, Serialize}; + +#[derive(Allocative, Clone, Debug, Deserialize, FieldCount, Serialize, ParquetRecordWriter)] +pub struct Transaction { + pub txn_version: i64, + pub block_height: i64, + pub epoch: i64, + pub txn_type: String, + pub payload: Option, + pub payload_type: Option, + pub gas_used: u64, + pub success: bool, + pub vm_status: String, + pub num_events: i64, + pub num_write_set_changes: i64, + pub txn_hash: String, + pub state_change_hash: String, + pub event_root_hash: String, + pub state_checkpoint_hash: Option, + pub accumulator_root_hash: String, + #[allocative(skip)] + pub block_timestamp: chrono::NaiveDateTime, +} + +impl NamedTable for Transaction { + const TABLE_NAME: &'static str = "transactions"; +} + +impl HasVersion for Transaction { + fn version(&self) -> i64 { + self.txn_version + } +} + +impl Default for Transaction { + fn default() -> Self { + Self { + txn_version: 0, + block_height: 0, + txn_hash: "".to_string(), + txn_type: "".to_string(), + payload: None, + state_change_hash: "".to_string(), + event_root_hash: "".to_string(), + state_checkpoint_hash: None, + gas_used: 0, + success: true, + vm_status: "".to_string(), + accumulator_root_hash: "".to_string(), + num_events: 0, + num_write_set_changes: 0, + epoch: 0, + payload_type: None, + #[allow(deprecated)] + block_timestamp: chrono::NaiveDateTime::from_timestamp(0, 0), + } + } +} + +impl Transaction { + fn from_transaction_info( + info: &TransactionInfo, + txn_version: i64, + epoch: i64, + block_height: i64, + ) -> Self { 
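One difference between this parquet TableItem and the diesel TableItem earlier in the diff: here decoded_key and decoded_value are kept as the raw JSON strings from the write set, while the diesel model parses them with serde_json::from_str into serde_json::Value for a JSON column. A small illustration of the two treatments of the same payload (serde_json is already a dependency of this crate).

fn main() -> Result<(), serde_json::Error> {
    let raw_key = r#"{"inner":"0xabc"}"#.to_string();

    // Parquet model: store the string untouched.
    let parquet_decoded_key: String = raw_key.clone();

    // Diesel model: parse into a JSON value before insertion.
    let pg_decoded_key: serde_json::Value = serde_json::from_str(&raw_key)?;

    assert_eq!(pg_decoded_key["inner"], "0xabc");
    assert_eq!(parquet_decoded_key, raw_key);
    Ok(())
}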
+ Self { + txn_version, + block_height, + txn_hash: standardize_address(hex::encode(info.hash.as_slice()).as_str()), + state_change_hash: standardize_address( + hex::encode(info.state_change_hash.as_slice()).as_str(), + ), + event_root_hash: standardize_address( + hex::encode(info.event_root_hash.as_slice()).as_str(), + ), + state_checkpoint_hash: info + .state_checkpoint_hash + .as_ref() + .map(|hash| standardize_address(hex::encode(hash).as_str())), + gas_used: info.gas_used, + success: info.success, + vm_status: info.vm_status.clone(), + accumulator_root_hash: standardize_address( + hex::encode(info.accumulator_root_hash.as_slice()).as_str(), + ), + num_write_set_changes: info.changes.len() as i64, + epoch, + ..Default::default() + } + } + + fn from_transaction_info_with_data( + info: &TransactionInfo, + payload: Option, + payload_type: Option, + txn_version: i64, + txn_type: String, + num_events: i64, + block_height: i64, + epoch: i64, + block_timestamp: chrono::NaiveDateTime, + ) -> Self { + Self { + txn_type, + payload, + txn_version, + block_height, + txn_hash: standardize_address(hex::encode(info.hash.as_slice()).as_str()), + state_change_hash: standardize_address( + hex::encode(info.state_change_hash.as_slice()).as_str(), + ), + event_root_hash: standardize_address( + hex::encode(info.event_root_hash.as_slice()).as_str(), + ), + state_checkpoint_hash: info + .state_checkpoint_hash + .as_ref() + .map(|hash| standardize_address(hex::encode(hash).as_str())), + gas_used: info.gas_used, + success: info.success, + vm_status: info.vm_status.clone(), + accumulator_root_hash: standardize_address( + hex::encode(info.accumulator_root_hash.as_slice()).as_str(), + ), + num_events, + num_write_set_changes: info.changes.len() as i64, + epoch, + payload_type, + block_timestamp, + } + } + + pub fn from_transaction( + transaction: &TransactionPB, + ) -> ( + Self, + Option, + Vec, + Vec, + ) { + let block_height = transaction.block_height as i64; + let epoch = transaction.epoch as i64; + let transaction_info = transaction + .info + .as_ref() + .expect("Transaction info doesn't exist!"); + let txn_data = match transaction.txn_data.as_ref() { + Some(txn_data) => txn_data, + None => { + PROCESSOR_UNKNOWN_TYPE_COUNT + .with_label_values(&["Transaction"]) + .inc(); + tracing::warn!( + transaction_version = transaction.version, + "Transaction data doesn't exist", + ); + let transaction_out = Self::from_transaction_info( + transaction_info, + transaction.version as i64, + epoch, + block_height, + ); + return (transaction_out, None, Vec::new(), Vec::new()); + }, + }; + let txn_version = transaction.version as i64; + let transaction_type = TransactionType::try_from(transaction.r#type) + .expect("Transaction type doesn't exist!") + .as_str_name() + .to_string(); + let timestamp = transaction + .timestamp + .as_ref() + .expect("Transaction timestamp doesn't exist!"); + #[allow(deprecated)] + let block_timestamp = chrono::NaiveDateTime::from_timestamp_opt(timestamp.seconds, 0) + .expect("Txn Timestamp is invalid!"); + match txn_data { + TxnData::User(user_txn) => { + let (wsc, wsc_detail) = WriteSetChangeModel::from_write_set_changes( + &transaction_info.changes, + txn_version, + block_height, + block_timestamp, + ); + let payload = user_txn + .request + .as_ref() + .expect("Getting user request failed.") + .payload + .as_ref() + .expect("Getting payload failed."); + let payload_cleaned = get_clean_payload(payload, txn_version); + let payload_type = get_payload_type(payload); + + // let serialized_payload = 
serde_json::to_string(&payload_cleaned).unwrap(); // Handle errors as needed) + let serialized_payload = + payload_cleaned.map(|payload| canonical_json::to_string(&payload).unwrap()); + ( + Self::from_transaction_info_with_data( + transaction_info, + serialized_payload, + Some(payload_type), + txn_version, + transaction_type, + user_txn.events.len() as i64, + block_height, + epoch, + block_timestamp, + ), + None, + wsc, + wsc_detail, + ) + }, + TxnData::Genesis(genesis_txn) => { + let (wsc, wsc_detail) = WriteSetChangeModel::from_write_set_changes( + &transaction_info.changes, + txn_version, + block_height, + block_timestamp, + ); + let payload = genesis_txn.payload.as_ref().unwrap(); + let payload_cleaned = get_clean_writeset(payload, txn_version); + // It's genesis so no big deal + // let serialized_payload = serde_json::to_string(&payload_cleaned).unwrap(); // Handle errors as needed + let serialized_payload = + payload_cleaned.map(|payload| canonical_json::to_string(&payload).unwrap()); + + let payload_type = None; + ( + Self::from_transaction_info_with_data( + transaction_info, + serialized_payload, + payload_type, + txn_version, + transaction_type, + genesis_txn.events.len() as i64, + block_height, + epoch, + block_timestamp, + ), + None, + wsc, + wsc_detail, + ) + }, + TxnData::BlockMetadata(block_metadata_txn) => { + let (wsc, wsc_detail) = WriteSetChangeModel::from_write_set_changes( + &transaction_info.changes, + txn_version, + block_height, + block_timestamp, + ); + ( + Self::from_transaction_info_with_data( + transaction_info, + None, + None, + txn_version, + transaction_type, + block_metadata_txn.events.len() as i64, + block_height, + epoch, + block_timestamp, + ), + Some(BlockMetadataTransaction::from_transaction( + block_metadata_txn, + txn_version, + block_height, + epoch, + timestamp, + )), + wsc, + wsc_detail, + ) + }, + TxnData::StateCheckpoint(_) => ( + Self::from_transaction_info_with_data( + transaction_info, + None, + None, + txn_version, + transaction_type, + 0, + block_height, + epoch, + block_timestamp, + ), + None, + vec![], + vec![], + ), + TxnData::Validator(_) => ( + Self::from_transaction_info_with_data( + transaction_info, + None, + None, + txn_version, + transaction_type, + 0, + block_height, + epoch, + block_timestamp, + ), + None, + vec![], + vec![], + ), + } + } + + pub fn from_transactions( + transactions: &[TransactionPB], + transaction_version_to_struct_count: &mut AHashMap, + ) -> ( + Vec, + Vec, + Vec, + Vec, + ) { + let mut txns = vec![]; + let mut block_metadata_txns = vec![]; + let mut wscs = vec![]; + let mut wsc_details = vec![]; + + for txn in transactions { + let (txn, block_metadata, mut wsc_list, mut wsc_detail_list) = + Self::from_transaction(txn); + txns.push(txn.clone()); + transaction_version_to_struct_count + .entry(txn.txn_version) + .and_modify(|e| *e += 1) + .or_insert(1); + + if let Some(a) = block_metadata { + block_metadata_txns.push(a.clone()); + // transaction_version_to_struct_count.entry(a.version).and_modify(|e| *e += 1); + } + wscs.append(&mut wsc_list); + + if !wsc_list.is_empty() { + transaction_version_to_struct_count + .entry(wsc_list[0].txn_version) + .and_modify(|e| *e += wsc_list.len() as i64); + } + wsc_details.append(&mut wsc_detail_list); + } + (txns, block_metadata_txns, wscs, wsc_details) + } +} + +// Prevent conflicts with other things named `Transaction` +pub type TransactionModel = Transaction; diff --git a/rust/processor/src/db/common/models/default_models/parquet_write_set_changes.rs 
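Compared with the diesel Transaction model later in this diff, the parquet path stores the cleaned payload as a string; the commented-out serde_json line above was replaced by canonical_json::to_string (already used by this code), presumably to get a stable textual rendering. A small sketch of the shape of that step, with a stand-in payload value.

fn main() {
    // Stand-in for the value produced by get_clean_payload in from_transaction.
    let payload_cleaned: Option<serde_json::Value> = Some(serde_json::json!({
        "function": "0x1::coin::transfer",
        "arguments": []
    }));

    // Same map-then-serialize shape as the code above.
    let serialized_payload: Option<String> =
        payload_cleaned.map(|payload| canonical_json::to_string(&payload).unwrap());

    assert!(serialized_payload.is_some());
}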
b/rust/processor/src/db/common/models/default_models/parquet_write_set_changes.rs new file mode 100644 index 000000000..2e5a70273 --- /dev/null +++ b/rust/processor/src/db/common/models/default_models/parquet_write_set_changes.rs @@ -0,0 +1,254 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +#![allow(clippy::extra_unused_lifetimes)] + +use super::{ + move_modules::MoveModule, + parquet_move_resources::MoveResource, + parquet_move_tables::{CurrentTableItem, TableItem, TableMetadata}, +}; +use crate::{ + parquet_processors::generic_parquet_processor::{HasVersion, NamedTable}, + utils::util::standardize_address, +}; +use allocative_derive::Allocative; +use aptos_protos::transaction::v1::{ + write_set_change::{Change as WriteSetChangeEnum, Type as WriteSetChangeTypeEnum}, + WriteSetChange as WriteSetChangePB, +}; +use field_count::FieldCount; +use parquet_derive::ParquetRecordWriter; +use serde::{Deserialize, Serialize}; + +#[derive(Allocative, Clone, Debug, Deserialize, FieldCount, Serialize, ParquetRecordWriter)] +pub struct WriteSetChange { + pub txn_version: i64, + pub write_set_change_index: i64, + pub state_key_hash: String, + pub change_type: String, + pub resource_address: String, + pub block_height: i64, + #[allocative(skip)] + pub block_timestamp: chrono::NaiveDateTime, +} + +impl NamedTable for WriteSetChange { + const TABLE_NAME: &'static str = "write_set_changes"; +} + +impl HasVersion for WriteSetChange { + fn version(&self) -> i64 { + self.txn_version + } +} + +impl Default for WriteSetChange { + fn default() -> Self { + Self { + txn_version: 0, + write_set_change_index: 0, + state_key_hash: "".to_string(), + change_type: "".to_string(), + resource_address: "".to_string(), + block_height: 0, + #[allow(deprecated)] + block_timestamp: chrono::NaiveDateTime::from_timestamp(0, 0), + } + } +} + +impl WriteSetChange { + pub fn from_write_set_change( + write_set_change: &WriteSetChangePB, + write_set_change_index: i64, + txn_version: i64, + block_height: i64, + block_timestamp: chrono::NaiveDateTime, + ) -> (Self, WriteSetChangeDetail) { + let change_type = Self::get_write_set_change_type(write_set_change); + let change = write_set_change + .change + .as_ref() + .expect("WriteSetChange must have a change"); + match change { + WriteSetChangeEnum::WriteModule(inner) => ( + Self { + txn_version, + state_key_hash: standardize_address( + hex::encode(inner.state_key_hash.as_slice()).as_str(), + ), + block_height, + change_type, + resource_address: standardize_address(&inner.address.to_string()), + write_set_change_index, + block_timestamp, + }, + WriteSetChangeDetail::Module(MoveModule::from_write_module( + inner, + write_set_change_index, + txn_version, + block_height, + )), + ), + WriteSetChangeEnum::DeleteModule(inner) => ( + Self { + txn_version, + state_key_hash: standardize_address( + hex::encode(inner.state_key_hash.as_slice()).as_str(), + ), + block_height, + change_type, + resource_address: standardize_address(&inner.address.to_string()), + write_set_change_index, + block_timestamp, + }, + WriteSetChangeDetail::Module(MoveModule::from_delete_module( + inner, + write_set_change_index, + txn_version, + block_height, + )), + ), + WriteSetChangeEnum::WriteResource(inner) => ( + Self { + txn_version, + state_key_hash: standardize_address( + hex::encode(inner.state_key_hash.as_slice()).as_str(), + ), + block_height, + change_type, + resource_address: standardize_address(&inner.address.to_string()), + write_set_change_index, + block_timestamp, + }, + 
WriteSetChangeDetail::Resource(MoveResource::from_write_resource( + inner, + write_set_change_index, + txn_version, + block_height, + block_timestamp, + )), + ), + WriteSetChangeEnum::DeleteResource(inner) => ( + Self { + txn_version, + state_key_hash: standardize_address( + hex::encode(inner.state_key_hash.as_slice()).as_str(), + ), + block_height, + change_type, + resource_address: standardize_address(&inner.address.to_string()), + write_set_change_index, + block_timestamp, + }, + WriteSetChangeDetail::Resource(MoveResource::from_delete_resource( + inner, + write_set_change_index, + txn_version, + block_height, + block_timestamp, + )), + ), + WriteSetChangeEnum::WriteTableItem(inner) => { + let (ti, cti) = TableItem::from_write_table_item( + inner, + write_set_change_index, + txn_version, + block_height, + block_timestamp, + ); + ( + Self { + txn_version, + state_key_hash: standardize_address( + hex::encode(inner.state_key_hash.as_slice()).as_str(), + ), + block_height, + change_type, + resource_address: String::default(), + write_set_change_index, + block_timestamp, + }, + WriteSetChangeDetail::Table( + ti, + cti, + Some(TableMetadata::from_write_table_item(inner)), + ), + ) + }, + WriteSetChangeEnum::DeleteTableItem(inner) => { + let (ti, cti) = TableItem::from_delete_table_item( + inner, + write_set_change_index, + txn_version, + block_height, + block_timestamp, + ); + ( + Self { + txn_version, + state_key_hash: standardize_address( + hex::encode(inner.state_key_hash.as_slice()).as_str(), + ), + block_height, + change_type, + resource_address: String::default(), + write_set_change_index, + block_timestamp, + }, + WriteSetChangeDetail::Table(ti, cti, None), + ) + }, + } + } + + pub fn from_write_set_changes( + write_set_changes: &[WriteSetChangePB], + txn_version: i64, + block_height: i64, + timestamp: chrono::NaiveDateTime, + ) -> (Vec, Vec) { + write_set_changes + .iter() + .enumerate() + .map(|(write_set_change_index, write_set_change)| { + Self::from_write_set_change( + write_set_change, + write_set_change_index as i64, + txn_version, + block_height, + timestamp, + ) + }) + .collect::>() + .into_iter() + .unzip() + } + + fn get_write_set_change_type(t: &WriteSetChangePB) -> String { + match WriteSetChangeTypeEnum::try_from(t.r#type) + .expect("WriteSetChange must have a valid type.") + { + WriteSetChangeTypeEnum::DeleteModule => "delete_module".to_string(), + WriteSetChangeTypeEnum::DeleteResource => "delete_resource".to_string(), + WriteSetChangeTypeEnum::DeleteTableItem => "delete_table_item".to_string(), + WriteSetChangeTypeEnum::WriteModule => "write_module".to_string(), + WriteSetChangeTypeEnum::WriteResource => "write_resource".to_string(), + WriteSetChangeTypeEnum::WriteTableItem => "write_table_item".to_string(), + WriteSetChangeTypeEnum::Unspecified => { + panic!("WriteSetChange type must be specified.") + }, + } + } +} + +#[derive(Deserialize, Serialize)] +pub enum WriteSetChangeDetail { + Module(MoveModule), + Resource(MoveResource), + Table(TableItem, CurrentTableItem, Option), +} + +// Prevent conflicts with other things named `WriteSetChange` +pub type WriteSetChangeModel = WriteSetChange; diff --git a/rust/processor/src/db/common/models/default_models/transactions.rs b/rust/processor/src/db/common/models/default_models/transactions.rs new file mode 100644 index 000000000..11e1ccc14 --- /dev/null +++ b/rust/processor/src/db/common/models/default_models/transactions.rs @@ -0,0 +1,341 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +// This 
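from_write_set_changes above turns one slice of protobuf write set changes into two parallel vectors, the flat WriteSetChange row and its WriteSetChangeDetail, by enumerating, mapping to pairs, and unzipping. A tiny self-contained sketch of that pattern, with plain strings standing in for the protobuf types.

fn split_rows(changes: &[&str]) -> (Vec<(i64, String)>, Vec<usize>) {
    changes
        .iter()
        .enumerate()
        .map(|(index, change)| {
            // First element plays the role of the row, second the detail.
            ((index as i64, change.to_string()), change.len())
        })
        .collect::<Vec<_>>()
        .into_iter()
        .unzip()
}

fn main() {
    let (rows, details) = split_rows(&["write_module", "write_resource"]);
    assert_eq!(rows[1].0, 1);
    assert_eq!(details[0], "write_module".len());
}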
is required because a diesel macro makes clippy sad +#![allow(clippy::extra_unused_lifetimes)] +#![allow(clippy::unused_unit)] + +use super::{ + block_metadata_transactions::BlockMetadataTransaction, + write_set_changes::{WriteSetChangeDetail, WriteSetChangeModel}, +}; +use crate::{ + schema::transactions, + utils::{ + counters::PROCESSOR_UNKNOWN_TYPE_COUNT, + util::{ + get_clean_payload, get_clean_writeset, get_payload_type, standardize_address, + u64_to_bigdecimal, + }, + }, +}; +use aptos_protos::transaction::v1::{ + transaction::{TransactionType, TxnData}, + Transaction as TransactionPB, TransactionInfo, +}; +use bigdecimal::BigDecimal; +use field_count::FieldCount; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key(version))] +#[diesel(table_name = transactions)] +pub struct Transaction { + pub version: i64, + pub block_height: i64, + pub hash: String, + pub type_: String, + pub payload: Option, + pub state_change_hash: String, + pub event_root_hash: String, + pub state_checkpoint_hash: Option, + pub gas_used: BigDecimal, + pub success: bool, + pub vm_status: String, + pub accumulator_root_hash: String, + pub num_events: i64, + pub num_write_set_changes: i64, + pub epoch: i64, + pub payload_type: Option, +} + +impl Default for Transaction { + fn default() -> Self { + Self { + version: 0, + block_height: 0, + hash: "".to_string(), + type_: "".to_string(), + payload: None, + state_change_hash: "".to_string(), + event_root_hash: "".to_string(), + state_checkpoint_hash: None, + gas_used: BigDecimal::from(0), + success: true, + vm_status: "".to_string(), + accumulator_root_hash: "".to_string(), + num_events: 0, + num_write_set_changes: 0, + epoch: 0, + payload_type: None, + } + } +} + +impl Transaction { + fn from_transaction_info( + info: &TransactionInfo, + version: i64, + epoch: i64, + block_height: i64, + ) -> Self { + Self { + version, + block_height, + hash: standardize_address(hex::encode(info.hash.as_slice()).as_str()), + state_change_hash: standardize_address( + hex::encode(info.state_change_hash.as_slice()).as_str(), + ), + event_root_hash: standardize_address( + hex::encode(info.event_root_hash.as_slice()).as_str(), + ), + state_checkpoint_hash: info + .state_checkpoint_hash + .as_ref() + .map(|hash| standardize_address(hex::encode(hash).as_str())), + gas_used: u64_to_bigdecimal(info.gas_used), + success: info.success, + vm_status: info.vm_status.clone(), + accumulator_root_hash: standardize_address( + hex::encode(info.accumulator_root_hash.as_slice()).as_str(), + ), + num_write_set_changes: info.changes.len() as i64, + epoch, + ..Default::default() + } + } + + fn from_transaction_info_with_data( + info: &TransactionInfo, + payload: Option, + payload_type: Option, + version: i64, + type_: String, + num_events: i64, + block_height: i64, + epoch: i64, + ) -> Self { + Self { + type_, + payload, + version, + block_height, + hash: standardize_address(hex::encode(info.hash.as_slice()).as_str()), + state_change_hash: standardize_address( + hex::encode(info.state_change_hash.as_slice()).as_str(), + ), + event_root_hash: standardize_address( + hex::encode(info.event_root_hash.as_slice()).as_str(), + ), + state_checkpoint_hash: info + .state_checkpoint_hash + .as_ref() + .map(|hash| standardize_address(hex::encode(hash).as_str())), + gas_used: u64_to_bigdecimal(info.gas_used), + success: info.success, + vm_status: info.vm_status.clone(), + accumulator_root_hash: standardize_address( + 
hex::encode(info.accumulator_root_hash.as_slice()).as_str(), + ), + num_events, + num_write_set_changes: info.changes.len() as i64, + epoch, + payload_type, + } + } + + pub fn from_transaction( + transaction: &TransactionPB, + ) -> ( + Self, + Option, + Vec, + Vec, + ) { + let block_height = transaction.block_height as i64; + let epoch = transaction.epoch as i64; + let transaction_info = transaction + .info + .as_ref() + .expect("Transaction info doesn't exist!"); + let txn_data = match transaction.txn_data.as_ref() { + Some(txn_data) => txn_data, + None => { + PROCESSOR_UNKNOWN_TYPE_COUNT + .with_label_values(&["Transaction"]) + .inc(); + tracing::warn!( + transaction_version = transaction.version, + "Transaction data doesn't exist", + ); + let transaction_out = Self::from_transaction_info( + transaction_info, + transaction.version as i64, + epoch, + block_height, + ); + return (transaction_out, None, Vec::new(), Vec::new()); + }, + }; + let version = transaction.version as i64; + let transaction_type = TransactionType::try_from(transaction.r#type) + .expect("Transaction type doesn't exist!") + .as_str_name() + .to_string(); + let timestamp = transaction + .timestamp + .as_ref() + .expect("Transaction timestamp doesn't exist!"); + + let (wsc, wsc_detail) = WriteSetChangeModel::from_write_set_changes( + &transaction_info.changes, + version, + block_height, + ); + + match txn_data { + TxnData::User(user_txn) => { + let payload = user_txn + .request + .as_ref() + .expect("Getting user request failed.") + .payload + .as_ref() + .expect("Getting payload failed."); + let payload_cleaned = get_clean_payload(payload, version); + let payload_type = get_payload_type(payload); + ( + Self::from_transaction_info_with_data( + transaction_info, + payload_cleaned, + Some(payload_type), + version, + transaction_type, + user_txn.events.len() as i64, + block_height, + epoch, + ), + None, + wsc, + wsc_detail, + ) + }, + TxnData::Genesis(genesis_txn) => { + let payload = genesis_txn.payload.as_ref().unwrap(); + let payload_cleaned = get_clean_writeset(payload, version); + // It's genesis so no big deal + let payload_type = None; + ( + Self::from_transaction_info_with_data( + transaction_info, + payload_cleaned, + payload_type, + version, + transaction_type, + genesis_txn.events.len() as i64, + block_height, + epoch, + ), + None, + wsc, + wsc_detail, + ) + }, + TxnData::BlockMetadata(block_metadata_txn) => ( + Self::from_transaction_info_with_data( + transaction_info, + None, + None, + version, + transaction_type, + block_metadata_txn.events.len() as i64, + block_height, + epoch, + ), + Some(BlockMetadataTransaction::from_transaction( + block_metadata_txn, + version, + block_height, + epoch, + timestamp, + )), + wsc, + wsc_detail, + ), + TxnData::StateCheckpoint(_) => ( + Self::from_transaction_info_with_data( + transaction_info, + None, + None, + version, + transaction_type, + 0, + block_height, + epoch, + ), + None, + vec![], + vec![], + ), + TxnData::Validator(_) => ( + Self::from_transaction_info_with_data( + transaction_info, + None, + None, + version, + transaction_type, + 0, + block_height, + epoch, + ), + None, + vec![], + vec![], + ), + TxnData::BlockEpilogue(_) => ( + Self::from_transaction_info_with_data( + transaction_info, + None, + None, + version, + transaction_type, + 0, + block_height, + epoch, + ), + None, + vec![], + vec![], + ), + } + } + + pub fn from_transactions( + transactions: &[TransactionPB], + ) -> ( + Vec, + Vec, + Vec, + Vec, + ) { + let mut txns = vec![]; + let mut 
block_metadata_txns = vec![]; + let mut wscs = vec![]; + let mut wsc_details = vec![]; + + for txn in transactions { + let (txn, block_metadata, mut wsc_list, mut wsc_detail_list) = + Self::from_transaction(txn); + txns.push(txn); + if let Some(a) = block_metadata { + block_metadata_txns.push(a); + } + wscs.append(&mut wsc_list); + wsc_details.append(&mut wsc_detail_list); + } + (txns, block_metadata_txns, wscs, wsc_details) + } +} + +// Prevent conflicts with other things named `Transaction` +pub type TransactionModel = Transaction; diff --git a/rust/processor/src/db/common/models/default_models/write_set_changes.rs b/rust/processor/src/db/common/models/default_models/write_set_changes.rs new file mode 100644 index 000000000..c28a97c51 --- /dev/null +++ b/rust/processor/src/db/common/models/default_models/write_set_changes.rs @@ -0,0 +1,216 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +#![allow(clippy::extra_unused_lifetimes)] + +use super::{ + move_modules::MoveModule, + move_resources::MoveResource, + move_tables::{CurrentTableItem, TableItem, TableMetadata}, + transactions::Transaction, +}; +use crate::{schema::write_set_changes, utils::util::standardize_address}; +use aptos_protos::transaction::v1::{ + write_set_change::{Change as WriteSetChangeEnum, Type as WriteSetChangeTypeEnum}, + WriteSetChange as WriteSetChangePB, +}; +use field_count::FieldCount; +use serde::{Deserialize, Serialize}; + +#[derive( + Associations, Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize, +)] +#[diesel(belongs_to(Transaction, foreign_key = transaction_version))] +#[diesel(primary_key(transaction_version, index))] +#[diesel(table_name = write_set_changes)] +pub struct WriteSetChange { + pub transaction_version: i64, + pub index: i64, + pub hash: String, + transaction_block_height: i64, + pub type_: String, + pub address: String, +} + +impl WriteSetChange { + pub fn from_write_set_change( + write_set_change: &WriteSetChangePB, + index: i64, + transaction_version: i64, + transaction_block_height: i64, + ) -> (Self, WriteSetChangeDetail) { + let type_ = Self::get_write_set_change_type(write_set_change); + let change = write_set_change + .change + .as_ref() + .expect("WriteSetChange must have a change"); + + match change { + WriteSetChangeEnum::WriteModule(inner) => ( + Self { + transaction_version, + hash: standardize_address( + hex::encode(inner.state_key_hash.as_slice()).as_str(), + ), + transaction_block_height, + type_, + address: standardize_address(&inner.address.to_string()), + index, + }, + WriteSetChangeDetail::Module(MoveModule::from_write_module( + inner, + index, + transaction_version, + transaction_block_height, + )), + ), + WriteSetChangeEnum::DeleteModule(inner) => ( + Self { + transaction_version, + hash: standardize_address( + hex::encode(inner.state_key_hash.as_slice()).as_str(), + ), + transaction_block_height, + type_, + address: standardize_address(&inner.address.to_string()), + index, + }, + WriteSetChangeDetail::Module(MoveModule::from_delete_module( + inner, + index, + transaction_version, + transaction_block_height, + )), + ), + WriteSetChangeEnum::WriteResource(inner) => ( + Self { + transaction_version, + hash: standardize_address( + hex::encode(inner.state_key_hash.as_slice()).as_str(), + ), + transaction_block_height, + type_, + address: standardize_address(&inner.address.to_string()), + index, + }, + WriteSetChangeDetail::Resource(MoveResource::from_write_resource( + inner, + index, + transaction_version, + 
transaction_block_height, + )), + ), + WriteSetChangeEnum::DeleteResource(inner) => ( + Self { + transaction_version, + hash: standardize_address( + hex::encode(inner.state_key_hash.as_slice()).as_str(), + ), + transaction_block_height, + type_, + address: standardize_address(&inner.address.to_string()), + index, + }, + WriteSetChangeDetail::Resource(MoveResource::from_delete_resource( + inner, + index, + transaction_version, + transaction_block_height, + )), + ), + WriteSetChangeEnum::WriteTableItem(inner) => { + let (ti, cti) = TableItem::from_write_table_item( + inner, + index, + transaction_version, + transaction_block_height, + ); + ( + Self { + transaction_version, + hash: standardize_address( + hex::encode(inner.state_key_hash.as_slice()).as_str(), + ), + transaction_block_height, + type_, + address: String::default(), + index, + }, + WriteSetChangeDetail::Table( + ti, + cti, + Some(TableMetadata::from_write_table_item(inner)), + ), + ) + }, + WriteSetChangeEnum::DeleteTableItem(inner) => { + let (ti, cti) = TableItem::from_delete_table_item( + inner, + index, + transaction_version, + transaction_block_height, + ); + ( + Self { + transaction_version, + hash: standardize_address( + hex::encode(inner.state_key_hash.as_slice()).as_str(), + ), + transaction_block_height, + type_, + address: String::default(), + index, + }, + WriteSetChangeDetail::Table(ti, cti, None), + ) + }, + } + } + + pub fn from_write_set_changes( + write_set_changes: &[WriteSetChangePB], + transaction_version: i64, + transaction_block_height: i64, + ) -> (Vec, Vec) { + write_set_changes + .iter() + .enumerate() + .map(|(index, write_set_change)| { + Self::from_write_set_change( + write_set_change, + index as i64, + transaction_version, + transaction_block_height, + ) + }) + .collect::>() + .into_iter() + .unzip() + } + + fn get_write_set_change_type(t: &WriteSetChangePB) -> String { + match WriteSetChangeTypeEnum::try_from(t.r#type) + .expect("WriteSetChange must have a valid type.") + { + WriteSetChangeTypeEnum::DeleteModule => "delete_module".to_string(), + WriteSetChangeTypeEnum::DeleteResource => "delete_resource".to_string(), + WriteSetChangeTypeEnum::DeleteTableItem => "delete_table_item".to_string(), + WriteSetChangeTypeEnum::WriteModule => "write_module".to_string(), + WriteSetChangeTypeEnum::WriteResource => "write_resource".to_string(), + WriteSetChangeTypeEnum::WriteTableItem => "write_table_item".to_string(), + WriteSetChangeTypeEnum::Unspecified => { + panic!("WriteSetChange type must be specified.") + }, + } + } +} + +#[derive(Deserialize, Serialize)] +pub enum WriteSetChangeDetail { + Module(MoveModule), + Resource(MoveResource), + Table(TableItem, CurrentTableItem, Option), +} + +// Prevent conflicts with other things named `WriteSetChange` +pub type WriteSetChangeModel = WriteSetChange; diff --git a/rust/processor/src/db/common/models/events_models/events.rs b/rust/processor/src/db/common/models/events_models/events.rs new file mode 100644 index 000000000..6747636ab --- /dev/null +++ b/rust/processor/src/db/common/models/events_models/events.rs @@ -0,0 +1,76 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +#![allow(clippy::extra_unused_lifetimes)] + +use crate::{ + schema::events, + utils::util::{standardize_address, truncate_str}, +}; +use aptos_protos::transaction::v1::Event as EventPB; +use field_count::FieldCount; +use serde::{Deserialize, Serialize}; + +// p99 currently is 303 so using 300 as a safe max length +const EVENT_TYPE_MAX_LENGTH: usize = 300; + 
+#[derive(Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key(transaction_version, event_index))] +#[diesel(table_name = events)] +pub struct Event { + pub sequence_number: i64, + pub creation_number: i64, + pub account_address: String, + pub transaction_version: i64, + pub transaction_block_height: i64, + pub type_: String, + pub data: serde_json::Value, + pub event_index: i64, + pub indexed_type: String, +} + +impl Event { + pub fn from_event( + event: &EventPB, + transaction_version: i64, + transaction_block_height: i64, + event_index: i64, + ) -> Self { + let t: &str = event.type_str.as_ref(); + Event { + account_address: standardize_address( + event.key.as_ref().unwrap().account_address.as_str(), + ), + creation_number: event.key.as_ref().unwrap().creation_number as i64, + sequence_number: event.sequence_number as i64, + transaction_version, + transaction_block_height, + type_: t.to_string(), + data: serde_json::from_str(event.data.as_str()).unwrap(), + event_index, + indexed_type: truncate_str(t, EVENT_TYPE_MAX_LENGTH), + } + } + + pub fn from_events( + events: &[EventPB], + transaction_version: i64, + transaction_block_height: i64, + ) -> Vec { + events + .iter() + .enumerate() + .map(|(index, event)| { + Self::from_event( + event, + transaction_version, + transaction_block_height, + index as i64, + ) + }) + .collect::>() + } +} + +// Prevent conflicts with other things named `Event` +pub type EventModel = Event; diff --git a/rust/processor/src/db/common/models/events_models/mod.rs b/rust/processor/src/db/common/models/events_models/mod.rs new file mode 100644 index 000000000..9d363699e --- /dev/null +++ b/rust/processor/src/db/common/models/events_models/mod.rs @@ -0,0 +1,4 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +pub mod events; diff --git a/rust/processor/src/db/common/models/fungible_asset_models/mod.rs b/rust/processor/src/db/common/models/fungible_asset_models/mod.rs new file mode 100644 index 000000000..d1b93659d --- /dev/null +++ b/rust/processor/src/db/common/models/fungible_asset_models/mod.rs @@ -0,0 +1,7 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +pub mod v2_fungible_asset_activities; +pub mod v2_fungible_asset_balances; +pub mod v2_fungible_asset_utils; +pub mod v2_fungible_metadata; diff --git a/rust/processor/src/db/common/models/fungible_asset_models/v2_fungible_asset_activities.rs b/rust/processor/src/db/common/models/fungible_asset_models/v2_fungible_asset_activities.rs new file mode 100644 index 000000000..c4ee7880b --- /dev/null +++ b/rust/processor/src/db/common/models/fungible_asset_models/v2_fungible_asset_activities.rs @@ -0,0 +1,214 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +// This is required because a diesel macro makes clippy sad +#![allow(clippy::extra_unused_lifetimes)] +#![allow(clippy::unused_unit)] + +use super::v2_fungible_asset_utils::{FeeStatement, FungibleAssetEvent}; +use crate::{ + db::common::models::{ + coin_models::{ + coin_activities::CoinActivity, + coin_utils::{CoinEvent, CoinInfoType, EventGuidResource}, + }, + object_models::v2_object_utils::ObjectAggregatedDataMapping, + token_v2_models::v2_token_utils::TokenStandard, + }, + schema::fungible_asset_activities, + utils::util::standardize_address, +}; +use ahash::AHashMap; +use anyhow::Context; +use aptos_protos::transaction::v1::{Event, TransactionInfo, UserTransactionRequest}; +use bigdecimal::{BigDecimal, Zero}; +use 
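The Event model keeps the full event type in type_ but caps indexed_type at EVENT_TYPE_MAX_LENGTH, chosen as 300 from the observed p99 of about 303 noted above. truncate_str lives in utils::util and its exact semantics are not shown in this hunk; the sketch below truncates on a character boundary purely as an assumed stand-in.

const EVENT_TYPE_MAX_LENGTH: usize = 300;

// Assumed stand-in for utils::util::truncate_str; illustrates the intent only.
fn truncate_chars(s: &str, max_chars: usize) -> String {
    s.chars().take(max_chars).collect()
}

fn main() {
    let long_type = "0x1::coin::DepositEvent".repeat(20);
    let indexed_type = truncate_chars(&long_type, EVENT_TYPE_MAX_LENGTH);
    assert!(indexed_type.chars().count() <= EVENT_TYPE_MAX_LENGTH);
    assert!(long_type.chars().count() > EVENT_TYPE_MAX_LENGTH);
}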
field_count::FieldCount; +use serde::{Deserialize, Serialize}; + +pub const GAS_FEE_EVENT: &str = "0x1::aptos_coin::GasFeeEvent"; +// We will never have a negative number on chain so this will avoid collision in postgres +pub const BURN_GAS_EVENT_CREATION_NUM: i64 = -1; +pub const BURN_GAS_EVENT_INDEX: i64 = -1; + +pub type OwnerAddress = String; +pub type CoinType = String; +// Primary key of the current_coin_balances table, i.e. (owner_address, coin_type) +pub type CurrentCoinBalancePK = (OwnerAddress, CoinType); +pub type EventToCoinType = AHashMap; + +#[derive(Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key(transaction_version, event_index))] +#[diesel(table_name = fungible_asset_activities)] +pub struct FungibleAssetActivity { + pub transaction_version: i64, + pub event_index: i64, + pub owner_address: String, + pub storage_id: String, + pub asset_type: String, + pub is_frozen: Option, + pub amount: Option, + pub type_: String, + pub is_gas_fee: bool, + pub gas_fee_payer_address: Option, + pub is_transaction_success: bool, + pub entry_function_id_str: Option, + pub block_height: i64, + pub token_standard: String, + pub transaction_timestamp: chrono::NaiveDateTime, + pub storage_refund_amount: BigDecimal, +} + +impl FungibleAssetActivity { + pub async fn get_v2_from_event( + event: &Event, + txn_version: i64, + block_height: i64, + txn_timestamp: chrono::NaiveDateTime, + event_index: i64, + entry_function_id_str: &Option, + object_aggregated_data_mapping: &ObjectAggregatedDataMapping, + ) -> anyhow::Result> { + let event_type = event.type_str.clone(); + if let Some(fa_event) = + &FungibleAssetEvent::from_event(event_type.as_str(), &event.data, txn_version)? + { + let storage_id = standardize_address(&event.key.as_ref().unwrap().account_address); + + // The event account address will also help us find fungible store which tells us where to find + // the metadata + if let Some(object_metadata) = object_aggregated_data_mapping.get(&storage_id) { + let object_core = &object_metadata.object.object_core; + let fungible_asset = object_metadata.fungible_asset_store.as_ref().unwrap(); + let asset_type = fungible_asset.metadata.get_reference_address(); + + let (is_frozen, amount) = match fa_event { + FungibleAssetEvent::WithdrawEvent(inner) => (None, Some(inner.amount.clone())), + FungibleAssetEvent::DepositEvent(inner) => (None, Some(inner.amount.clone())), + FungibleAssetEvent::FrozenEvent(inner) => (Some(inner.frozen), None), + }; + + return Ok(Some(Self { + transaction_version: txn_version, + event_index, + owner_address: object_core.get_owner_address(), + storage_id: storage_id.clone(), + asset_type: asset_type.clone(), + is_frozen, + amount, + type_: event_type.clone(), + is_gas_fee: false, + gas_fee_payer_address: None, + is_transaction_success: true, + entry_function_id_str: entry_function_id_str.clone(), + block_height, + token_standard: TokenStandard::V2.to_string(), + transaction_timestamp: txn_timestamp, + storage_refund_amount: BigDecimal::zero(), + })); + } + } + Ok(None) + } + + pub fn get_v1_from_event( + event: &Event, + txn_version: i64, + block_height: i64, + transaction_timestamp: chrono::NaiveDateTime, + entry_function_id_str: &Option, + event_to_coin_type: &EventToCoinType, + event_index: i64, + ) -> anyhow::Result> { + if let Some(inner) = + CoinEvent::from_event(event.type_str.as_str(), &event.data, txn_version)? 
+ { + let amount = match inner { + CoinEvent::WithdrawCoinEvent(inner) => inner.amount, + CoinEvent::DepositCoinEvent(inner) => inner.amount, + }; + let event_key = event.key.as_ref().context("event must have a key")?; + let event_move_guid = EventGuidResource { + addr: standardize_address(event_key.account_address.as_str()), + creation_num: event_key.creation_number as i64, + }; + // Given this mapping only contains coin type < 1000 length, we should not assume that the mapping exists. + // If it doesn't exist, skip. + let coin_type = match event_to_coin_type.get(&event_move_guid) { + Some(coin_type) => coin_type.clone(), + None => { + tracing::warn!( + "Could not find event in resources (CoinStore), version: {}, event guid: {:?}, mapping: {:?}", + txn_version, event_move_guid, event_to_coin_type + ); + return Ok(None); + }, + }; + let storage_id = + CoinInfoType::get_storage_id(coin_type.as_str(), event_move_guid.addr.as_str()); + Ok(Some(Self { + transaction_version: txn_version, + event_index, + owner_address: event_move_guid.addr, + storage_id, + asset_type: coin_type, + is_frozen: None, + amount: Some(amount), + type_: event.type_str.clone(), + is_gas_fee: false, + gas_fee_payer_address: None, + is_transaction_success: true, + entry_function_id_str: entry_function_id_str.clone(), + block_height, + token_standard: TokenStandard::V1.to_string(), + transaction_timestamp, + storage_refund_amount: BigDecimal::zero(), + })) + } else { + Ok(None) + } + } + + /// Artificially creates a gas event. If it's a fee payer, still show gas event to the sender + /// but with an extra field to indicate the fee payer. + pub fn get_gas_event( + txn_info: &TransactionInfo, + user_transaction_request: &UserTransactionRequest, + entry_function_id_str: &Option, + transaction_version: i64, + transaction_timestamp: chrono::NaiveDateTime, + block_height: i64, + fee_statement: Option, + ) -> Self { + let v1_activity = CoinActivity::get_gas_event( + txn_info, + user_transaction_request, + entry_function_id_str, + transaction_version, + transaction_timestamp, + block_height, + fee_statement, + ); + let storage_id = CoinInfoType::get_storage_id( + v1_activity.coin_type.as_str(), + v1_activity.owner_address.as_str(), + ); + Self { + transaction_version, + event_index: v1_activity.event_index.unwrap(), + owner_address: v1_activity.owner_address, + storage_id, + asset_type: v1_activity.coin_type, + is_frozen: None, + amount: Some(v1_activity.amount), + type_: v1_activity.activity_type, + is_gas_fee: v1_activity.is_gas_fee, + gas_fee_payer_address: v1_activity.gas_fee_payer_address, + is_transaction_success: v1_activity.is_transaction_success, + entry_function_id_str: v1_activity.entry_function_id_str, + block_height, + token_standard: TokenStandard::V1.to_string(), + transaction_timestamp, + storage_refund_amount: v1_activity.storage_refund_amount, + } + } +} diff --git a/rust/processor/src/db/common/models/fungible_asset_models/v2_fungible_asset_balances.rs b/rust/processor/src/db/common/models/fungible_asset_models/v2_fungible_asset_balances.rs new file mode 100644 index 000000000..33819b470 --- /dev/null +++ b/rust/processor/src/db/common/models/fungible_asset_models/v2_fungible_asset_balances.rs @@ -0,0 +1,383 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +// This is required because a diesel macro makes clippy sad +#![allow(clippy::extra_unused_lifetimes)] +#![allow(clippy::unused_unit)] + +use super::{ + v2_fungible_asset_activities::EventToCoinType, 
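get_v1_from_event above resolves a v1 coin event to its coin type through the event GUID (account address plus creation number) captured from the owning CoinStore; when the GUID is not in the mapping, for example because the coin type exceeded the length cap and was never indexed, the event is skipped with a warning rather than failing the batch. A minimal sketch of that lookup, with a local Guid type standing in for EventGuidResource.

use std::collections::HashMap;

#[derive(Debug, Hash, PartialEq, Eq)]
struct Guid {
    addr: String,
    creation_num: i64,
}

// None corresponds to the warn-and-return-Ok(None) branch above.
fn coin_type_for_event(mapping: &HashMap<Guid, String>, guid: &Guid) -> Option<String> {
    mapping.get(guid).cloned()
}

fn main() {
    let mut mapping = HashMap::new();
    mapping.insert(
        Guid { addr: "0xcafe".to_string(), creation_num: 2 },
        "0x1::aptos_coin::AptosCoin".to_string(),
    );
    let known = Guid { addr: "0xcafe".to_string(), creation_num: 2 };
    let unknown = Guid { addr: "0xcafe".to_string(), creation_num: 3 };
    assert!(coin_type_for_event(&mapping, &known).is_some());
    assert!(coin_type_for_event(&mapping, &unknown).is_none());
}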
v2_fungible_asset_utils::FungibleAssetStore, +}; +use crate::{ + db::common::models::{ + coin_models::coin_utils::{CoinInfoType, CoinResource}, + object_models::v2_object_utils::ObjectAggregatedDataMapping, + token_v2_models::v2_token_utils::{TokenStandard, V2_STANDARD}, + }, + schema::{ + current_fungible_asset_balances, current_unified_fungible_asset_balances_to_be_renamed, + fungible_asset_balances, + }, + utils::util::{ + hex_to_raw_bytes, sha3_256, standardize_address, APTOS_COIN_TYPE_STR, + APT_METADATA_ADDRESS_HEX, APT_METADATA_ADDRESS_RAW, + }, +}; +use ahash::AHashMap; +use aptos_protos::transaction::v1::{DeleteResource, WriteResource}; +use bigdecimal::{BigDecimal, Zero}; +use field_count::FieldCount; +use serde::{Deserialize, Serialize}; +use std::borrow::Borrow; + +// Storage id +pub type CurrentFungibleAssetBalancePK = String; +pub type CurrentFungibleAssetMapping = + AHashMap; + +#[derive(Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key(transaction_version, write_set_change_index))] +#[diesel(table_name = fungible_asset_balances)] +pub struct FungibleAssetBalance { + pub transaction_version: i64, + pub write_set_change_index: i64, + pub storage_id: String, + pub owner_address: String, + pub asset_type: String, + pub is_primary: bool, + pub is_frozen: bool, + pub amount: BigDecimal, + pub transaction_timestamp: chrono::NaiveDateTime, + pub token_standard: String, +} + +#[derive(Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key(storage_id))] +#[diesel(table_name = current_fungible_asset_balances)] +pub struct CurrentFungibleAssetBalance { + pub storage_id: String, + pub owner_address: String, + pub asset_type: String, + pub is_primary: bool, + pub is_frozen: bool, + pub amount: BigDecimal, + pub last_transaction_version: i64, + pub last_transaction_timestamp: chrono::NaiveDateTime, + pub token_standard: String, +} + +#[derive(Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize, Default)] +#[diesel(primary_key(storage_id))] +#[diesel(table_name = current_unified_fungible_asset_balances_to_be_renamed)] +#[diesel(treat_none_as_null = true)] +pub struct CurrentUnifiedFungibleAssetBalance { + pub storage_id: String, + pub owner_address: String, + // metadata address for (paired) Fungible Asset + pub asset_type_v1: Option, + pub asset_type_v2: Option, + pub is_primary: Option, + pub is_frozen: bool, + pub amount_v1: Option, + pub amount_v2: Option, + pub last_transaction_version_v1: Option, + pub last_transaction_version_v2: Option, + pub last_transaction_timestamp_v1: Option, + pub last_transaction_timestamp_v2: Option, +} + +fn get_paired_metadata_address(coin_type_name: &str) -> String { + if coin_type_name == APTOS_COIN_TYPE_STR { + APT_METADATA_ADDRESS_HEX.clone() + } else { + let mut preimage = APT_METADATA_ADDRESS_RAW.to_vec(); + preimage.extend(coin_type_name.as_bytes()); + preimage.push(0xFE); + format!("0x{}", hex::encode(sha3_256(&preimage))) + } +} + +fn get_primary_fungible_store_address( + owner_address: &str, + metadata_address: &str, +) -> anyhow::Result { + let mut preimage = hex_to_raw_bytes(owner_address)?; + preimage.append(&mut hex_to_raw_bytes(metadata_address)?); + preimage.push(0xFC); + Ok(standardize_address(&hex::encode(sha3_256(&preimage)))) +} + +impl From<&CurrentFungibleAssetBalance> for CurrentUnifiedFungibleAssetBalance { + fn from(cfab: &CurrentFungibleAssetBalance) -> Self { + if cfab.token_standard.as_str() == 
V2_STANDARD.borrow().as_str() { + Self { + storage_id: cfab.storage_id.clone(), + owner_address: cfab.owner_address.clone(), + asset_type_v2: Some(cfab.asset_type.clone()), + asset_type_v1: None, + is_primary: Some(cfab.is_primary), + is_frozen: cfab.is_frozen, + amount_v1: None, + amount_v2: Some(cfab.amount.clone()), + last_transaction_version_v1: None, + last_transaction_version_v2: Some(cfab.last_transaction_version), + last_transaction_timestamp_v1: None, + last_transaction_timestamp_v2: Some(cfab.last_transaction_timestamp), + } + } else { + let metadata_addr = get_paired_metadata_address(&cfab.asset_type); + let pfs_addr = get_primary_fungible_store_address(&cfab.owner_address, &metadata_addr) + .expect("calculate pfs_address failed"); + Self { + storage_id: pfs_addr, + owner_address: cfab.owner_address.clone(), + asset_type_v2: None, + asset_type_v1: Some(cfab.asset_type.clone()), + is_primary: None, + is_frozen: cfab.is_frozen, + amount_v1: Some(cfab.amount.clone()), + amount_v2: None, + last_transaction_version_v1: Some(cfab.last_transaction_version), + last_transaction_version_v2: None, + last_transaction_timestamp_v1: Some(cfab.last_transaction_timestamp), + last_transaction_timestamp_v2: None, + } + } + } +} + +impl FungibleAssetBalance { + /// Basically just need to index FA Store, but we'll need to look up FA metadata + pub async fn get_v2_from_write_resource( + write_resource: &WriteResource, + write_set_change_index: i64, + txn_version: i64, + txn_timestamp: chrono::NaiveDateTime, + object_metadatas: &ObjectAggregatedDataMapping, + ) -> anyhow::Result> { + if let Some(inner) = &FungibleAssetStore::from_write_resource(write_resource, txn_version)? + { + let storage_id = standardize_address(write_resource.address.as_str()); + // Need to get the object of the store + if let Some(object_data) = object_metadatas.get(&storage_id) { + let object = &object_data.object.object_core; + let owner_address = object.get_owner_address(); + let asset_type = inner.metadata.get_reference_address(); + let is_primary = Self::is_primary(&owner_address, &asset_type, &storage_id); + + let concurrent_balance = object_data + .concurrent_fungible_asset_balance + .as_ref() + .map(|concurrent_fungible_asset_balance| { + concurrent_fungible_asset_balance.balance.value.clone() + }); + + let coin_balance = Self { + transaction_version: txn_version, + write_set_change_index, + storage_id: storage_id.clone(), + owner_address: owner_address.clone(), + asset_type: asset_type.clone(), + is_primary, + is_frozen: inner.frozen, + amount: concurrent_balance + .clone() + .unwrap_or_else(|| inner.balance.clone()), + transaction_timestamp: txn_timestamp, + token_standard: TokenStandard::V2.to_string(), + }; + let current_coin_balance = CurrentFungibleAssetBalance { + storage_id, + owner_address, + asset_type: asset_type.clone(), + is_primary, + is_frozen: inner.frozen, + amount: concurrent_balance.unwrap_or_else(|| inner.balance.clone()), + last_transaction_version: txn_version, + last_transaction_timestamp: txn_timestamp, + token_standard: TokenStandard::V2.to_string(), + }; + return Ok(Some((coin_balance, current_coin_balance))); + } + } + + Ok(None) + } + + pub fn get_v1_from_delete_resource( + delete_resource: &DeleteResource, + write_set_change_index: i64, + txn_version: i64, + txn_timestamp: chrono::NaiveDateTime, + ) -> anyhow::Result> { + if let Some(CoinResource::CoinStoreDeletion) = + &CoinResource::from_delete_resource(delete_resource, txn_version)? 
+ { + let coin_info_type = &CoinInfoType::from_move_type( + &delete_resource.r#type.as_ref().unwrap().generic_type_params[0], + delete_resource.type_str.as_ref(), + txn_version, + ); + if let Some(coin_type) = coin_info_type.get_coin_type_below_max() { + let owner_address = standardize_address(delete_resource.address.as_str()); + let storage_id = + CoinInfoType::get_storage_id(coin_type.as_str(), owner_address.as_str()); + let coin_balance = Self { + transaction_version: txn_version, + write_set_change_index, + storage_id: storage_id.clone(), + owner_address: owner_address.clone(), + asset_type: coin_type.clone(), + is_primary: true, + is_frozen: false, + amount: BigDecimal::zero(), + transaction_timestamp: txn_timestamp, + token_standard: TokenStandard::V1.to_string(), + }; + let current_coin_balance = CurrentFungibleAssetBalance { + storage_id, + owner_address, + asset_type: coin_type.clone(), + is_primary: true, + is_frozen: false, + amount: BigDecimal::zero(), + last_transaction_version: txn_version, + last_transaction_timestamp: txn_timestamp, + token_standard: TokenStandard::V1.to_string(), + }; + return Ok(Some(( + coin_balance, + current_coin_balance, + AHashMap::default(), + ))); + } + } + Ok(None) + } + + /// Getting coin balances from resources for v1 + /// If the fully qualified coin type is too long (currently 1000 length), we exclude from indexing + pub fn get_v1_from_write_resource( + write_resource: &WriteResource, + write_set_change_index: i64, + txn_version: i64, + txn_timestamp: chrono::NaiveDateTime, + ) -> anyhow::Result> { + if let Some(CoinResource::CoinStoreResource(inner)) = + &CoinResource::from_write_resource(write_resource, txn_version)? + { + let coin_info_type = &CoinInfoType::from_move_type( + &write_resource.r#type.as_ref().unwrap().generic_type_params[0], + write_resource.type_str.as_ref(), + txn_version, + ); + if let Some(coin_type) = coin_info_type.get_coin_type_below_max() { + let owner_address = standardize_address(write_resource.address.as_str()); + let storage_id = + CoinInfoType::get_storage_id(coin_type.as_str(), owner_address.as_str()); + let coin_balance = Self { + transaction_version: txn_version, + write_set_change_index, + storage_id: storage_id.clone(), + owner_address: owner_address.clone(), + asset_type: coin_type.clone(), + is_primary: true, + is_frozen: inner.frozen, + amount: inner.coin.value.clone(), + transaction_timestamp: txn_timestamp, + token_standard: TokenStandard::V1.to_string(), + }; + let current_coin_balance = CurrentFungibleAssetBalance { + storage_id, + owner_address, + asset_type: coin_type.clone(), + is_primary: true, + is_frozen: inner.frozen, + amount: inner.coin.value.clone(), + last_transaction_version: txn_version, + last_transaction_timestamp: txn_timestamp, + token_standard: TokenStandard::V1.to_string(), + }; + let event_to_coin_mapping: EventToCoinType = AHashMap::from([ + ( + inner.withdraw_events.guid.id.get_standardized(), + coin_type.clone(), + ), + (inner.deposit_events.guid.id.get_standardized(), coin_type), + ]); + return Ok(Some(( + coin_balance, + current_coin_balance, + event_to_coin_mapping, + ))); + } + } + Ok(None) + } + + /// Primary store address are derived from the owner address and object address in this format: sha3_256([source | object addr | 0xFC]). 
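+    /// As a rough sketch (this simply restates `get_primary_fungible_store_address` above,
+    /// it is not an additional code path), the derivation is:
+    /// ```ignore
+    /// let mut preimage = hex_to_raw_bytes(owner_address)?;        // 32 raw bytes
+    /// preimage.append(&mut hex_to_raw_bytes(metadata_address)?);  // + 32 raw bytes
+    /// preimage.push(0xFC);                                        // domain-separator byte
+    /// let derived = standardize_address(&hex::encode(sha3_256(&preimage)));
+    /// ```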
+ /// This function expects the addresses to have length 66 + pub fn is_primary( + owner_address: &str, + metadata_address: &str, + fungible_store_address: &str, + ) -> bool { + fungible_store_address + == get_primary_fungible_store_address(owner_address, metadata_address).unwrap() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_is_primary() { + let owner_address = "0xfd2984f201abdbf30ccd0ec5c2f2357789222c0bbd3c68999acfebe188fdc09d"; + let metadata_address = "0x5dade62351d0b07340ff41763451e05ca2193de583bb3d762193462161888309"; + let fungible_store_address = + "0x5d2c93f23a3964409e8755a179417c4ef842166f6cc41e1416e2c705a02861a6"; + + assert!(FungibleAssetBalance::is_primary( + owner_address, + metadata_address, + fungible_store_address, + )); + } + + #[test] + fn test_is_not_primary() { + let owner_address = "0xfd2984f201abdbf30ccd0ec5c2f2357789222c0bbd3c68999acfebe188fdc09d"; + let metadata_address = "0x5dade62351d0b07340ff41763451e05ca2193de583bb3d762193462161888309"; + let fungible_store_address = "something random"; + + assert!(!FungibleAssetBalance::is_primary( + owner_address, + metadata_address, + fungible_store_address, + )); + } + + #[test] + fn test_zero_prefix() { + let owner_address = "0x049cad43b33c9f907ff80c5f0897ac6bfe6034feea0c9070e37814d1f9efd090"; + let metadata_address = "0x03b0e839106b65826e54fa4c160ca653594b723a5e481a5121c333849bc46f6c"; + let fungible_store_address = + "0xd4af0c43c6228357d7a09da77bf244cd4a1b97a0eb8ef3df43823ff4a807d0b9"; + + assert!(FungibleAssetBalance::is_primary( + owner_address, + metadata_address, + fungible_store_address, + )); + } + + #[test] + fn test_paired_metadata_address() { + assert_eq!( + get_paired_metadata_address("0x1::aptos_coin::AptosCoin"), + *APT_METADATA_ADDRESS_HEX + ); + assert_eq!(get_paired_metadata_address("0x66c34778730acbb120cefa57a3d98fd21e0c8b3a51e9baee530088b2e444e94c::moon_coin::MoonCoin"), "0xf772c28c069aa7e4417d85d771957eb3c5c11b5bf90b1965cda23b899ebc0384"); + } +} diff --git a/rust/processor/src/db/common/models/fungible_asset_models/v2_fungible_asset_utils.rs b/rust/processor/src/db/common/models/fungible_asset_models/v2_fungible_asset_utils.rs new file mode 100644 index 000000000..5de2a9d49 --- /dev/null +++ b/rust/processor/src/db/common/models/fungible_asset_models/v2_fungible_asset_utils.rs @@ -0,0 +1,399 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +// This is required because a diesel macro makes clippy sad +#![allow(clippy::extra_unused_lifetimes)] + +use crate::{ + db::common::models::{ + coin_models::coin_utils::COIN_ADDR, default_models::move_resources::MoveResource, + token_models::token_utils::URI_LENGTH, token_v2_models::v2_token_utils::ResourceReference, + }, + utils::util::{deserialize_from_string, truncate_str, Aggregator}, +}; +use anyhow::{Context, Result}; +use aptos_protos::transaction::v1::WriteResource; +use bigdecimal::BigDecimal; +use field_count::FieldCount; +use serde::{Deserialize, Serialize}; + +const FUNGIBLE_ASSET_LENGTH: usize = 32; +const FUNGIBLE_ASSET_SYMBOL: usize = 10; + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct FeeStatement { + #[serde(deserialize_with = "deserialize_from_string")] + pub storage_fee_refund_octas: u64, +} + +impl FeeStatement { + pub fn from_event(data_type: &str, data: &str, txn_version: i64) -> Option { + if data_type == "0x1::transaction_fee::FeeStatement" { + let fee_statement: FeeStatement = serde_json::from_str(data).unwrap_or_else(|_| { + tracing::error!( + transaction_version = 
txn_version, + data = data, + "failed to parse event for fee statement" + ); + panic!(); + }); + Some(fee_statement) + } else { + None + } + } +} + +/* Section on fungible assets resources */ +#[derive(Serialize, Deserialize, Debug, Clone, FieldCount)] +pub struct FungibleAssetMetadata { + name: String, + symbol: String, + pub decimals: i32, + icon_uri: String, + project_uri: String, +} + +impl FungibleAssetMetadata { + pub fn from_write_resource( + write_resource: &WriteResource, + txn_version: i64, + ) -> anyhow::Result> { + let type_str = MoveResource::get_outer_type_from_write_resource(write_resource); + if !V2FungibleAssetResource::is_resource_supported(type_str.as_str()) { + return Ok(None); + } + let resource = MoveResource::from_write_resource( + write_resource, + 0, // Placeholder, this isn't used anyway + txn_version, + 0, // Placeholder, this isn't used anyway + ); + + if let V2FungibleAssetResource::FungibleAssetMetadata(inner) = + V2FungibleAssetResource::from_resource( + &type_str, + resource.data.as_ref().unwrap(), + txn_version, + )? + { + Ok(Some(inner)) + } else { + Ok(None) + } + } + + pub fn get_name(&self) -> String { + truncate_str(&self.name, FUNGIBLE_ASSET_LENGTH) + } + + pub fn get_symbol(&self) -> String { + truncate_str(&self.symbol, FUNGIBLE_ASSET_SYMBOL) + } + + pub fn get_icon_uri(&self) -> String { + truncate_str(&self.icon_uri, URI_LENGTH) + } + + pub fn get_project_uri(&self) -> String { + truncate_str(&self.project_uri, URI_LENGTH) + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct FungibleAssetStore { + pub metadata: ResourceReference, + #[serde(deserialize_with = "deserialize_from_string")] + pub balance: BigDecimal, + pub frozen: bool, +} + +impl FungibleAssetStore { + pub fn from_write_resource( + write_resource: &WriteResource, + txn_version: i64, + ) -> anyhow::Result> { + let type_str = MoveResource::get_outer_type_from_write_resource(write_resource); + if !V2FungibleAssetResource::is_resource_supported(type_str.as_str()) { + return Ok(None); + } + let resource = MoveResource::from_write_resource( + write_resource, + 0, // Placeholder, this isn't used anyway + txn_version, + 0, // Placeholder, this isn't used anyway + ); + + if let V2FungibleAssetResource::FungibleAssetStore(inner) = + V2FungibleAssetResource::from_resource( + &type_str, + resource.data.as_ref().unwrap(), + txn_version, + )? 
+ { + Ok(Some(inner)) + } else { + Ok(None) + } + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct FungibleAssetSupply { + #[serde(deserialize_with = "deserialize_from_string")] + pub current: BigDecimal, + pub maximum: OptionalBigDecimal, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct OptionalBigDecimal { + vec: Vec, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +struct BigDecimalWrapper(#[serde(deserialize_with = "deserialize_from_string")] pub BigDecimal); + +impl FungibleAssetSupply { + pub fn from_write_resource( + write_resource: &WriteResource, + txn_version: i64, + ) -> anyhow::Result> { + let type_str: String = MoveResource::get_outer_type_from_write_resource(write_resource); + if !V2FungibleAssetResource::is_resource_supported(type_str.as_str()) { + return Ok(None); + } + let resource = MoveResource::from_write_resource( + write_resource, + 0, // Placeholder, this isn't used anyway + txn_version, + 0, // Placeholder, this isn't used anyway + ); + + if let V2FungibleAssetResource::FungibleAssetSupply(inner) = + V2FungibleAssetResource::from_resource( + &type_str, + resource.data.as_ref().unwrap(), + txn_version, + )? + { + Ok(Some(inner)) + } else { + Ok(None) + } + } + + pub fn get_maximum(&self) -> Option { + self.maximum.vec.first().map(|x| x.0.clone()) + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct ConcurrentFungibleAssetSupply { + pub current: Aggregator, +} + +impl ConcurrentFungibleAssetSupply { + pub fn from_write_resource( + write_resource: &WriteResource, + txn_version: i64, + ) -> anyhow::Result> { + let type_str: String = MoveResource::get_outer_type_from_write_resource(write_resource); + if !V2FungibleAssetResource::is_resource_supported(type_str.as_str()) { + return Ok(None); + } + let resource = MoveResource::from_write_resource( + write_resource, + 0, // Placeholder, this isn't used anyway + txn_version, + 0, // Placeholder, this isn't used anyway + ); + + if let V2FungibleAssetResource::ConcurrentFungibleAssetSupply(inner) = + V2FungibleAssetResource::from_resource( + &type_str, + resource.data.as_ref().unwrap(), + txn_version, + )? + { + Ok(Some(inner)) + } else { + Ok(None) + } + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct ConcurrentFungibleAssetBalance { + pub balance: Aggregator, +} + +impl ConcurrentFungibleAssetBalance { + pub fn from_write_resource( + write_resource: &WriteResource, + txn_version: i64, + ) -> anyhow::Result> { + let type_str: String = MoveResource::get_outer_type_from_write_resource(write_resource); + if !V2FungibleAssetResource::is_resource_supported(type_str.as_str()) { + return Ok(None); + } + let resource = MoveResource::from_write_resource( + write_resource, + 0, // Placeholder, this isn't used anyway + txn_version, + 0, // Placeholder, this isn't used anyway + ); + + if let V2FungibleAssetResource::ConcurrentFungibleAssetBalance(inner) = + V2FungibleAssetResource::from_resource( + &type_str, + resource.data.as_ref().unwrap(), + txn_version, + )? 
+ { + Ok(Some(inner)) + } else { + Ok(None) + } + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct DepositEvent { + #[serde(deserialize_with = "deserialize_from_string")] + pub amount: BigDecimal, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct WithdrawEvent { + #[serde(deserialize_with = "deserialize_from_string")] + pub amount: BigDecimal, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct FrozenEvent { + pub frozen: bool, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub enum V2FungibleAssetResource { + FungibleAssetMetadata(FungibleAssetMetadata), + FungibleAssetStore(FungibleAssetStore), + FungibleAssetSupply(FungibleAssetSupply), + ConcurrentFungibleAssetSupply(ConcurrentFungibleAssetSupply), + ConcurrentFungibleAssetBalance(ConcurrentFungibleAssetBalance), +} + +impl V2FungibleAssetResource { + pub fn is_resource_supported(data_type: &str) -> bool { + [ + format!("{}::fungible_asset::Supply", COIN_ADDR), + format!("{}::fungible_asset::ConcurrentSupply", COIN_ADDR), + format!("{}::fungible_asset::Metadata", COIN_ADDR), + format!("{}::fungible_asset::FungibleStore", COIN_ADDR), + format!("{}::fungible_asset::ConcurrentFungibleBalance", COIN_ADDR), + ] + .contains(&data_type.to_string()) + } + + pub fn from_resource( + data_type: &str, + data: &serde_json::Value, + txn_version: i64, + ) -> Result { + match data_type { + x if x == format!("{}::fungible_asset::Supply", COIN_ADDR) => { + serde_json::from_value(data.clone()) + .map(|inner| Some(Self::FungibleAssetSupply(inner))) + }, + x if x == format!("{}::fungible_asset::ConcurrentSupply", COIN_ADDR) => { + serde_json::from_value(data.clone()) + .map(|inner| Some(Self::ConcurrentFungibleAssetSupply(inner))) + }, + x if x == format!("{}::fungible_asset::Metadata", COIN_ADDR) => { + serde_json::from_value(data.clone()) + .map(|inner| Some(Self::FungibleAssetMetadata(inner))) + }, + x if x == format!("{}::fungible_asset::FungibleStore", COIN_ADDR) => { + serde_json::from_value(data.clone()) + .map(|inner| Some(Self::FungibleAssetStore(inner))) + }, + x if x == format!("{}::fungible_asset::ConcurrentFungibleBalance", COIN_ADDR) => { + serde_json::from_value(data.clone()) + .map(|inner| Some(Self::ConcurrentFungibleAssetBalance(inner))) + }, + _ => Ok(None), + } + .context(format!( + "version {} failed! failed to parse type {}, data {:?}", + txn_version, data_type, data + ))? + .context(format!( + "Resource unsupported! Call is_resource_supported first. version {} type {}", + txn_version, data_type + )) + } +} + +pub enum FungibleAssetEvent { + DepositEvent(DepositEvent), + WithdrawEvent(WithdrawEvent), + FrozenEvent(FrozenEvent), +} + +impl FungibleAssetEvent { + pub fn from_event(data_type: &str, data: &str, txn_version: i64) -> Result> { + match data_type { + "0x1::fungible_asset::DepositEvent" => { + serde_json::from_str(data).map(|inner| Some(Self::DepositEvent(inner))) + }, + "0x1::fungible_asset::WithdrawEvent" => { + serde_json::from_str(data).map(|inner| Some(Self::WithdrawEvent(inner))) + }, + "0x1::fungible_asset::FrozenEvent" => { + serde_json::from_str(data).map(|inner| Some(Self::FrozenEvent(inner))) + }, + _ => Ok(None), + } + .context(format!( + "version {} failed! 
failed to parse type {}, data {:?}", + txn_version, data_type, data + )) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_fungible_asset_supply_null() { + let test = r#"{"current": "0", "maximum": {"vec": []}}"#; + let test: serde_json::Value = serde_json::from_str(test).unwrap(); + let supply = serde_json::from_value(test) + .map(V2FungibleAssetResource::FungibleAssetSupply) + .unwrap(); + if let V2FungibleAssetResource::FungibleAssetSupply(supply) = supply { + assert_eq!(supply.current, BigDecimal::from(0)); + assert_eq!(supply.get_maximum(), None); + } else { + panic!("Wrong type") + } + } + + #[test] + fn test_fungible_asset_supply_nonnull() { + let test = r#"{"current": "100", "maximum": {"vec": ["5000"]}}"#; + let test: serde_json::Value = serde_json::from_str(test).unwrap(); + let supply = serde_json::from_value(test) + .map(V2FungibleAssetResource::FungibleAssetSupply) + .unwrap(); + if let V2FungibleAssetResource::FungibleAssetSupply(supply) = supply { + assert_eq!(supply.current, BigDecimal::from(100)); + assert_eq!(supply.get_maximum(), Some(BigDecimal::from(5000))); + } else { + panic!("Wrong type") + } + } + + // TODO: Add similar tests for ConcurrentFungibleAssetSupply. +} diff --git a/rust/processor/src/db/common/models/fungible_asset_models/v2_fungible_metadata.rs b/rust/processor/src/db/common/models/fungible_asset_models/v2_fungible_metadata.rs new file mode 100644 index 000000000..bcb1df18d --- /dev/null +++ b/rust/processor/src/db/common/models/fungible_asset_models/v2_fungible_metadata.rs @@ -0,0 +1,192 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +// This is required because a diesel macro makes clippy sad +#![allow(clippy::extra_unused_lifetimes)] +#![allow(clippy::unused_unit)] + +use super::v2_fungible_asset_utils::FungibleAssetMetadata; +use crate::{ + db::common::models::{ + coin_models::coin_utils::{CoinInfoType, CoinResource}, + object_models::v2_object_utils::ObjectAggregatedDataMapping, + token_v2_models::v2_token_utils::TokenStandard, + }, + schema::fungible_asset_metadata, + utils::util::standardize_address, +}; +use ahash::AHashMap; +use aptos_protos::transaction::v1::{DeleteResource, WriteResource}; +use bigdecimal::BigDecimal; +use diesel::prelude::*; +use field_count::FieldCount; +use serde::{Deserialize, Serialize}; + +// This is the asset type +pub type FungibleAssetMetadataPK = String; +pub type FungibleAssetMetadataMapping = + AHashMap; + +#[derive(Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key(asset_type))] +#[diesel(table_name = fungible_asset_metadata)] +pub struct FungibleAssetMetadataModel { + pub asset_type: String, + pub creator_address: String, + pub name: String, + pub symbol: String, + pub decimals: i32, + pub icon_uri: Option, + pub project_uri: Option, + pub last_transaction_version: i64, + pub last_transaction_timestamp: chrono::NaiveDateTime, + pub supply_aggregator_table_handle_v1: Option, + pub supply_aggregator_table_key_v1: Option, + pub token_standard: String, + pub is_token_v2: Option, + pub supply_v2: Option, + pub maximum_v2: Option, +} + +impl FungibleAssetMetadataModel { + /// Fungible asset is part of an object and we need to get the object first to get owner address + pub fn get_v2_from_write_resource( + write_resource: &WriteResource, + txn_version: i64, + txn_timestamp: chrono::NaiveDateTime, + object_metadatas: &ObjectAggregatedDataMapping, + ) -> anyhow::Result> { + if let Some(inner) = + 
&FungibleAssetMetadata::from_write_resource(write_resource, txn_version)? + { + // the new coin type + let asset_type = standardize_address(&write_resource.address.to_string()); + if let Some(object_metadata) = object_metadatas.get(&asset_type) { + let object = &object_metadata.object.object_core; + let (maximum_v2, supply_v2) = if let Some(fungible_asset_supply) = + object_metadata.fungible_asset_supply.as_ref() + { + ( + fungible_asset_supply.get_maximum(), + Some(fungible_asset_supply.current.clone()), + ) + } else if let Some(concurrent_fungible_asset_supply) = + object_metadata.concurrent_fungible_asset_supply.as_ref() + { + ( + Some(concurrent_fungible_asset_supply.current.max_value.clone()), + Some(concurrent_fungible_asset_supply.current.value.clone()), + ) + } else { + (None, None) + }; + + return Ok(Some(Self { + asset_type: asset_type.clone(), + creator_address: object.get_owner_address(), + name: inner.get_name(), + symbol: inner.get_symbol(), + decimals: inner.decimals, + icon_uri: Some(inner.get_icon_uri()), + project_uri: Some(inner.get_project_uri()), + last_transaction_version: txn_version, + last_transaction_timestamp: txn_timestamp, + supply_aggregator_table_handle_v1: None, + supply_aggregator_table_key_v1: None, + token_standard: TokenStandard::V2.to_string(), + is_token_v2: None, + supply_v2, + maximum_v2, + })); + } + } + Ok(None) + } + + /// We can find v1 coin info from resources + pub fn get_v1_from_write_resource( + write_resource: &WriteResource, + txn_version: i64, + txn_timestamp: chrono::NaiveDateTime, + ) -> anyhow::Result> { + match &CoinResource::from_write_resource(write_resource, txn_version)? { + Some(CoinResource::CoinInfoResource(inner)) => { + let coin_info_type = &CoinInfoType::from_move_type( + &write_resource.r#type.as_ref().unwrap().generic_type_params[0], + write_resource.type_str.as_ref(), + txn_version, + ); + let (supply_aggregator_table_handle, supply_aggregator_table_key) = inner + .get_aggregator_metadata() + .map(|agg| (Some(agg.handle), Some(agg.key))) + .unwrap_or((None, None)); + // If asset type is too long, just ignore + if let Some(asset_type) = coin_info_type.get_coin_type_below_max() { + Ok(Some(Self { + asset_type, + creator_address: coin_info_type.get_creator_address(), + name: inner.get_name_trunc(), + symbol: inner.get_symbol_trunc(), + decimals: inner.decimals, + icon_uri: None, + project_uri: None, + last_transaction_version: txn_version, + last_transaction_timestamp: txn_timestamp, + supply_aggregator_table_handle_v1: supply_aggregator_table_handle, + supply_aggregator_table_key_v1: supply_aggregator_table_key, + token_standard: TokenStandard::V1.to_string(), + is_token_v2: None, + supply_v2: None, + maximum_v2: None, + })) + } else { + Ok(None) + } + }, + _ => Ok(None), + } + } + + pub fn get_v1_from_delete_resource( + delete_resource: &DeleteResource, + txn_version: i64, + txn_timestamp: chrono::NaiveDateTime, + ) -> anyhow::Result> { + match &CoinResource::from_delete_resource(delete_resource, txn_version)? 
{ + Some(CoinResource::CoinInfoResource(inner)) => { + let coin_info_type = &CoinInfoType::from_move_type( + &delete_resource.r#type.as_ref().unwrap().generic_type_params[0], + delete_resource.type_str.as_ref(), + txn_version, + ); + let (supply_aggregator_table_handle, supply_aggregator_table_key) = inner + .get_aggregator_metadata() + .map(|agg| (Some(agg.handle), Some(agg.key))) + .unwrap_or((None, None)); + // If asset type is too long, just ignore + if let Some(asset_type) = coin_info_type.get_coin_type_below_max() { + Ok(Some(Self { + asset_type, + creator_address: coin_info_type.get_creator_address(), + name: inner.get_name_trunc(), + symbol: inner.get_symbol_trunc(), + decimals: inner.decimals, + icon_uri: None, + project_uri: None, + last_transaction_version: txn_version, + last_transaction_timestamp: txn_timestamp, + supply_aggregator_table_handle_v1: supply_aggregator_table_handle, + supply_aggregator_table_key_v1: supply_aggregator_table_key, + token_standard: TokenStandard::V1.to_string(), + is_token_v2: None, + supply_v2: None, + maximum_v2: None, + })) + } else { + Ok(None) + } + }, + _ => Ok(None), + } + } +} diff --git a/rust/processor/src/db/common/models/ledger_info.rs b/rust/processor/src/db/common/models/ledger_info.rs new file mode 100644 index 000000000..f25759ec5 --- /dev/null +++ b/rust/processor/src/db/common/models/ledger_info.rs @@ -0,0 +1,25 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +#![allow(clippy::extra_unused_lifetimes)] + +use crate::{schema::ledger_infos, utils::database::DbPoolConnection}; +use diesel::{OptionalExtension, QueryDsl}; +use diesel_async::RunQueryDsl; + +#[derive(Debug, Identifiable, Insertable, Queryable)] +#[diesel(table_name = ledger_infos)] +#[diesel(primary_key(chain_id))] +pub struct LedgerInfo { + pub chain_id: i64, +} + +impl LedgerInfo { + pub async fn get(conn: &mut DbPoolConnection<'_>) -> diesel::QueryResult> { + ledger_infos::table + .select(ledger_infos::all_columns) + .first::(conn) + .await + .optional() + } +} diff --git a/rust/processor/src/db/common/models/mod.rs b/rust/processor/src/db/common/models/mod.rs new file mode 100644 index 000000000..cf80f3fc0 --- /dev/null +++ b/rust/processor/src/db/common/models/mod.rs @@ -0,0 +1,18 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +pub mod account_transaction_models; +pub mod ans_models; +pub mod coin_models; +pub mod default_models; +pub mod events_models; +pub mod fungible_asset_models; +pub mod ledger_info; +pub mod object_models; +pub mod processor_status; +pub mod property_map; +pub mod stake_models; +pub mod token_models; +pub mod token_v2_models; +pub mod transaction_metadata_model; +pub mod user_transactions_models; diff --git a/rust/processor/src/db/common/models/object_models/mod.rs b/rust/processor/src/db/common/models/object_models/mod.rs new file mode 100644 index 000000000..63812d5c0 --- /dev/null +++ b/rust/processor/src/db/common/models/object_models/mod.rs @@ -0,0 +1,5 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +pub mod v2_object_utils; +pub mod v2_objects; diff --git a/rust/processor/src/db/common/models/object_models/v2_object_utils.rs b/rust/processor/src/db/common/models/object_models/v2_object_utils.rs new file mode 100644 index 000000000..617e98f00 --- /dev/null +++ b/rust/processor/src/db/common/models/object_models/v2_object_utils.rs @@ -0,0 +1,166 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +// This is required because 
a diesel macro makes clippy sad
+#![allow(clippy::extra_unused_lifetimes)]
+#![allow(clippy::unused_unit)]
+
+use crate::{
+    db::common::models::{
+        default_models::move_resources::MoveResource,
+        fungible_asset_models::v2_fungible_asset_utils::{
+            ConcurrentFungibleAssetBalance, ConcurrentFungibleAssetSupply, FungibleAssetMetadata,
+            FungibleAssetStore, FungibleAssetSupply,
+        },
+        token_v2_models::v2_token_utils::{
+            AptosCollection, ConcurrentSupply, FixedSupply, PropertyMapModel, TokenIdentifiers,
+            TokenV2, TransferEvent, UnlimitedSupply, V2TokenResource,
+        },
+    },
+    utils::util::{deserialize_from_string, standardize_address},
+};
+use ahash::AHashMap;
+use aptos_protos::transaction::v1::WriteResource;
+use bigdecimal::BigDecimal;
+use serde::{Deserialize, Serialize};
+
+// PK of current_objects, i.e. object_address
+pub type CurrentObjectPK = String;
+
+/// Tracks all object related metadata in a hashmap for quick access (keyed on address of the object)
+pub type ObjectAggregatedDataMapping = AHashMap<CurrentObjectPK, ObjectAggregatedData>;
+
+/// Index of the event so that we can write its inverse to the db as primary key (to avoid collisions)
+pub type EventIndex = i64;
+
+/// This contains metadata for the object. This only includes fungible asset and token v2 metadata for now.
+#[derive(Serialize, Deserialize, Debug, Clone)]
+pub struct ObjectAggregatedData {
+    pub object: ObjectWithMetadata,
+    // There could be more than one transfers on the same transaction
+    pub transfer_events: Vec<(EventIndex, TransferEvent)>,
+    // This would make transfers impossible
+    pub untransferable: Option<Untransferable>,
+    // Fungible asset structs
+    pub fungible_asset_metadata: Option<FungibleAssetMetadata>,
+    pub fungible_asset_supply: Option<FungibleAssetSupply>,
+    pub concurrent_fungible_asset_supply: Option<ConcurrentFungibleAssetSupply>,
+    pub fungible_asset_store: Option<FungibleAssetStore>,
+    pub concurrent_fungible_asset_balance: Option<ConcurrentFungibleAssetBalance>,
+    // Token v2 structs
+    pub aptos_collection: Option<AptosCollection>,
+    pub fixed_supply: Option<FixedSupply>,
+    pub property_map: Option<PropertyMapModel>,
+    pub token: Option<TokenV2>,
+    pub unlimited_supply: Option<UnlimitedSupply>,
+    pub concurrent_supply: Option<ConcurrentSupply>,
+    pub token_identifier: Option<TokenIdentifiers>,
+}
+
+impl Default for ObjectAggregatedData {
+    fn default() -> Self {
+        Self {
+            object: ObjectWithMetadata {
+                object_core: ObjectCore {
+                    allow_ungated_transfer: false,
+                    guid_creation_num: BigDecimal::default(),
+                    owner: String::default(),
+                },
+                state_key_hash: String::default(),
+            },
+            transfer_events: Vec::new(),
+            untransferable: None,
+            fungible_asset_metadata: None,
+            fungible_asset_supply: None,
+            concurrent_fungible_asset_supply: None,
+            concurrent_fungible_asset_balance: None,
+            fungible_asset_store: None,
+            aptos_collection: None,
+            fixed_supply: None,
+            property_map: None,
+            token: None,
+            unlimited_supply: None,
+            concurrent_supply: None,
+            token_identifier: None,
+        }
+    }
+}
+
+#[derive(Serialize, Deserialize, Debug, Clone)]
+pub struct ObjectCore {
+    pub allow_ungated_transfer: bool,
+    #[serde(deserialize_with = "deserialize_from_string")]
+    pub guid_creation_num: BigDecimal,
+    owner: String,
+}
+
+impl ObjectCore {
+    pub fn get_owner_address(&self) -> String {
+        standardize_address(&self.owner)
+    }
+}
+
+#[derive(Serialize, Deserialize, Debug, Clone)]
+pub struct ObjectWithMetadata {
+    pub object_core: ObjectCore,
+    pub state_key_hash: String,
+}
+
+impl ObjectWithMetadata {
+    pub fn from_write_resource(
+        write_resource: &WriteResource,
+        txn_version: i64,
+    ) -> anyhow::Result<Option<Self>> {
+        let type_str = MoveResource::get_outer_type_from_write_resource(write_resource);
+        if !V2TokenResource::is_resource_supported(type_str.as_str()) {
+            return Ok(None);
+        }
+        if let
V2TokenResource::ObjectCore(inner) = V2TokenResource::from_resource( + &type_str, + &serde_json::from_str(write_resource.data.as_str()).unwrap(), + txn_version, + )? { + Ok(Some(Self { + object_core: inner, + state_key_hash: standardize_address( + hex::encode(write_resource.state_key_hash.as_slice()).as_str(), + ), + })) + } else { + Ok(None) + } + } + + pub fn get_state_key_hash(&self) -> String { + standardize_address(&self.state_key_hash) + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct Untransferable {} + +impl Untransferable { + pub fn from_write_resource( + write_resource: &WriteResource, + txn_version: i64, + ) -> anyhow::Result> { + let type_str = MoveResource::get_outer_type_from_write_resource(write_resource); + if !V2TokenResource::is_resource_supported(type_str.as_str()) { + return Ok(None); + } + let resource = MoveResource::from_write_resource( + write_resource, + 0, // Placeholder, this isn't used anyway + txn_version, + 0, // Placeholder, this isn't used anyway + ); + + if let V2TokenResource::Untransferable(inner) = + V2TokenResource::from_resource(&type_str, resource.data.as_ref().unwrap(), txn_version)? + { + Ok(Some(inner)) + } else { + Ok(None) + } + } +} diff --git a/rust/processor/src/db/common/models/object_models/v2_objects.rs b/rust/processor/src/db/common/models/object_models/v2_objects.rs new file mode 100644 index 000000000..66d67cc63 --- /dev/null +++ b/rust/processor/src/db/common/models/object_models/v2_objects.rs @@ -0,0 +1,229 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +// This is required because a diesel macro makes clippy sad +#![allow(clippy::extra_unused_lifetimes)] +#![allow(clippy::unused_unit)] + +use super::v2_object_utils::{CurrentObjectPK, ObjectAggregatedDataMapping}; +use crate::{ + db::common::models::default_models::move_resources::MoveResource, + schema::{current_objects, objects}, + utils::{database::DbPoolConnection, util::standardize_address}, +}; +use ahash::AHashMap; +use aptos_protos::transaction::v1::{DeleteResource, WriteResource}; +use bigdecimal::BigDecimal; +use diesel::prelude::*; +use diesel_async::RunQueryDsl; +use field_count::FieldCount; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key(transaction_version, write_set_change_index))] +#[diesel(table_name = objects)] +pub struct Object { + pub transaction_version: i64, + pub write_set_change_index: i64, + pub object_address: String, + pub owner_address: String, + pub state_key_hash: String, + pub guid_creation_num: BigDecimal, + pub allow_ungated_transfer: bool, + pub is_deleted: bool, + pub untransferrable: bool, +} + +#[derive(Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key(object_address))] +#[diesel(table_name = current_objects)] +pub struct CurrentObject { + pub object_address: String, + pub owner_address: String, + pub state_key_hash: String, + pub allow_ungated_transfer: bool, + pub last_guid_creation_num: BigDecimal, + pub last_transaction_version: i64, + pub is_deleted: bool, + pub untransferrable: bool, +} + +#[derive(Debug, Deserialize, Identifiable, Queryable, Serialize)] +#[diesel(primary_key(object_address))] +#[diesel(table_name = current_objects)] +pub struct CurrentObjectQuery { + pub object_address: String, + pub owner_address: String, + pub state_key_hash: String, + pub allow_ungated_transfer: bool, + pub last_guid_creation_num: BigDecimal, + pub 
last_transaction_version: i64, + pub is_deleted: bool, + pub inserted_at: chrono::NaiveDateTime, + pub untransferrable: bool, +} + +impl Object { + pub fn from_write_resource( + write_resource: &WriteResource, + txn_version: i64, + write_set_change_index: i64, + object_metadata_mapping: &ObjectAggregatedDataMapping, + ) -> anyhow::Result> { + let address = standardize_address(&write_resource.address.to_string()); + if let Some(object_aggregated_metadata) = object_metadata_mapping.get(&address) { + // do something + let object_with_metadata = object_aggregated_metadata.object.clone(); + let object_core = object_with_metadata.object_core; + + let untransferrable = if object_aggregated_metadata.untransferable.as_ref().is_some() { + true + } else { + !object_core.allow_ungated_transfer + }; + Ok(Some(( + Self { + transaction_version: txn_version, + write_set_change_index, + object_address: address.clone(), + owner_address: object_core.get_owner_address(), + state_key_hash: object_with_metadata.state_key_hash.clone(), + guid_creation_num: object_core.guid_creation_num.clone(), + allow_ungated_transfer: object_core.allow_ungated_transfer, + is_deleted: false, + untransferrable, + }, + CurrentObject { + object_address: address, + owner_address: object_core.get_owner_address(), + state_key_hash: object_with_metadata.state_key_hash, + allow_ungated_transfer: object_core.allow_ungated_transfer, + last_guid_creation_num: object_core.guid_creation_num.clone(), + last_transaction_version: txn_version, + is_deleted: false, + untransferrable, + }, + ))) + } else { + Ok(None) + } + } + + /// This handles the case where the entire object is deleted + /// TODO: We need to detect if an object is only partially deleted + /// using KV store + pub async fn from_delete_resource( + delete_resource: &DeleteResource, + txn_version: i64, + write_set_change_index: i64, + object_mapping: &AHashMap, + conn: &mut DbPoolConnection<'_>, + query_retries: u32, + query_retry_delay_ms: u64, + ) -> anyhow::Result> { + if delete_resource.type_str == "0x1::object::ObjectGroup" { + let resource = MoveResource::from_delete_resource( + delete_resource, + 0, // Placeholder, this isn't used anyway + txn_version, + 0, // Placeholder, this isn't used anyway + ); + let previous_object = if let Some(object) = object_mapping.get(&resource.address) { + object.clone() + } else { + match Self::get_current_object( + conn, + &resource.address, + query_retries, + query_retry_delay_ms, + ) + .await + { + Ok(object) => object, + Err(_) => { + tracing::error!( + transaction_version = txn_version, + lookup_key = &resource.address, + "Missing current_object for object_address: {}. 
You probably should backfill db.", + resource.address, + ); + return Ok(None); + }, + } + }; + Ok(Some(( + Self { + transaction_version: txn_version, + write_set_change_index, + object_address: resource.address.clone(), + owner_address: previous_object.owner_address.clone(), + state_key_hash: resource.state_key_hash.clone(), + guid_creation_num: previous_object.last_guid_creation_num.clone(), + allow_ungated_transfer: previous_object.allow_ungated_transfer, + is_deleted: true, + untransferrable: previous_object.untransferrable, + }, + CurrentObject { + object_address: resource.address, + owner_address: previous_object.owner_address.clone(), + state_key_hash: resource.state_key_hash, + last_guid_creation_num: previous_object.last_guid_creation_num.clone(), + allow_ungated_transfer: previous_object.allow_ungated_transfer, + last_transaction_version: txn_version, + is_deleted: true, + untransferrable: previous_object.untransferrable, + }, + ))) + } else { + Ok(None) + } + } + + /// This is actually not great because object owner can change. The best we can do now though. + /// This will loop forever until we get the object from the db + pub async fn get_current_object( + conn: &mut DbPoolConnection<'_>, + object_address: &str, + query_retries: u32, + query_retry_delay_ms: u64, + ) -> anyhow::Result { + let mut tried = 0; + while tried < query_retries { + tried += 1; + match CurrentObjectQuery::get_by_address(object_address, conn).await { + Ok(res) => { + return Ok(CurrentObject { + object_address: res.object_address, + owner_address: res.owner_address, + state_key_hash: res.state_key_hash, + allow_ungated_transfer: res.allow_ungated_transfer, + last_guid_creation_num: res.last_guid_creation_num, + last_transaction_version: res.last_transaction_version, + is_deleted: res.is_deleted, + untransferrable: res.untransferrable, + }); + }, + Err(_) => { + if tried < query_retries { + tokio::time::sleep(std::time::Duration::from_millis(query_retry_delay_ms)) + .await; + } + }, + } + } + Err(anyhow::anyhow!("Failed to get object owner")) + } +} + +impl CurrentObjectQuery { + /// TODO: Change this to a KV store + pub async fn get_by_address( + object_address: &str, + conn: &mut DbPoolConnection<'_>, + ) -> diesel::QueryResult { + current_objects::table + .filter(current_objects::object_address.eq(object_address)) + .first::(conn) + .await + } +} diff --git a/rust/processor/src/db/common/models/processor_status.rs b/rust/processor/src/db/common/models/processor_status.rs new file mode 100644 index 000000000..2d7928511 --- /dev/null +++ b/rust/processor/src/db/common/models/processor_status.rs @@ -0,0 +1,40 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +#![allow(clippy::extra_unused_lifetimes)] + +use crate::{schema::processor_status, utils::database::DbPoolConnection}; +use diesel::{ExpressionMethods, OptionalExtension, QueryDsl}; +use diesel_async::RunQueryDsl; + +#[derive(AsChangeset, Debug, Insertable)] +#[diesel(table_name = processor_status)] +/// Only tracking the latest version successfully processed +pub struct ProcessorStatus { + pub processor: String, + pub last_success_version: i64, + pub last_transaction_timestamp: Option, +} + +#[derive(AsChangeset, Debug, Queryable)] +#[diesel(table_name = processor_status)] +/// Only tracking the latest version successfully processed +pub struct ProcessorStatusQuery { + pub processor: String, + pub last_success_version: i64, + pub last_updated: chrono::NaiveDateTime, + pub last_transaction_timestamp: Option, +} + +impl 
ProcessorStatusQuery { + pub async fn get_by_processor( + processor_name: &str, + conn: &mut DbPoolConnection<'_>, + ) -> diesel::QueryResult> { + processor_status::table + .filter(processor_status::processor.eq(processor_name)) + .first::(conn) + .await + .optional() + } +} diff --git a/rust/processor/src/db/common/models/property_map.rs b/rust/processor/src/db/common/models/property_map.rs new file mode 100644 index 000000000..c6114f012 --- /dev/null +++ b/rust/processor/src/db/common/models/property_map.rs @@ -0,0 +1,104 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::utils::util; +use ahash::AHashMap; +use serde::{Deserialize, Serialize}; +use serde_json::{Result, Value}; + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct PropertyValue { + value: String, + typ: String, +} + +pub fn create_property_value(typ: String, value: String) -> Result { + Ok(PropertyValue { + value: util::convert_bcs_hex(typ.clone(), value.clone()).unwrap_or(value), + typ, + }) +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct PropertyMap { + data: AHashMap, +} + +impl PropertyMap { + /// Deserializes PropertyValue from bcs encoded json + pub fn from_bcs_encode_str(val: Value) -> Option { + let mut pm = PropertyMap { + data: AHashMap::new(), + }; + let records: &Vec = val.get("map")?.get("data")?.as_array()?; + for entry in records { + let key = entry.get("key")?.as_str()?; + let val = entry.get("value")?.get("value")?.as_str()?; + let typ = entry.get("value")?.get("type")?.as_str()?; + let pv = create_property_value(typ.to_string(), val.to_string()).ok()?; + pm.data.insert(key.to_string(), pv); + } + Some(Self::to_flat_json(pm)) + } + + /// Flattens PropertyMap which can't be easily consumable by downstream. + /// For example: Object {"data": Object {"creation_time_sec": Object {"value": String("1666125588")}}} + /// becomes Object {"creation_time_sec": "1666125588"} + fn to_flat_json(val: PropertyMap) -> Value { + let mut map = AHashMap::new(); + for (k, v) in val.data { + map.insert(k, v.value); + } + serde_json::to_value(map).unwrap() + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct TokenObjectPropertyValue { + value: String, + typ: u8, +} + +pub fn create_token_object_property_value( + typ: u8, + value: String, +) -> Result { + Ok(TokenObjectPropertyValue { + value: util::convert_bcs_hex_new(typ, value.clone()).unwrap_or(value), + typ, + }) +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct TokenObjectPropertyMap { + data: AHashMap, +} + +impl TokenObjectPropertyMap { + /// Deserializes PropertyValue from bcs encoded json + pub fn from_bcs_encode_str(val: Value) -> Option { + let mut pm = TokenObjectPropertyMap { + data: AHashMap::new(), + }; + let records: &Vec = val.get("data")?.as_array()?; + for entry in records { + let key = entry.get("key")?.as_str()?; + let val = entry.get("value")?.get("value")?.as_str()?; + let typ = entry.get("value")?.get("type")?.as_u64()?; + let pv = create_token_object_property_value(typ as u8, val.to_string()).ok()?; + pm.data.insert(key.to_string(), pv); + } + Some(Self::to_flat_json_new(pm)) + } + + /// Flattens PropertyMap which can't be easily consumable by downstream. 
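+    /// This is the same flattening performed inside `from_bcs_encode_str` above; a minimal
+    /// usage sketch (the input value is hypothetical, and decoded values depend on
+    /// `util::convert_bcs_hex_new`):
+    /// ```ignore
+    /// // `raw` is a token v2 property map already parsed into a serde_json::Value.
+    /// if let Some(flat) = TokenObjectPropertyMap::from_bcs_encode_str(raw) {
+    ///     // `flat` is a flat JSON object keyed by property name.
+    /// }
+    /// ```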
+ /// For example: Object {"data": Object {"creation_time_sec": Object {"value": String("1666125588")}}} + /// becomes Object {"creation_time_sec": "1666125588"} + fn to_flat_json_new(val: TokenObjectPropertyMap) -> Value { + let mut map = AHashMap::new(); + for (k, v) in val.data { + map.insert(k, v.value); + } + serde_json::to_value(map).unwrap() + } +} diff --git a/rust/processor/src/db/common/models/stake_models/current_delegated_voter.rs b/rust/processor/src/db/common/models/stake_models/current_delegated_voter.rs new file mode 100644 index 000000000..e87dcde71 --- /dev/null +++ b/rust/processor/src/db/common/models/stake_models/current_delegated_voter.rs @@ -0,0 +1,280 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +// This is required because a diesel macro makes clippy sad +#![allow(clippy::extra_unused_lifetimes)] + +use super::{ + delegator_balances::{CurrentDelegatorBalance, ShareToStakingPoolMapping}, + stake_utils::VoteDelegationTableItem, +}; +use crate::{ + schema::current_delegated_voter, + utils::{database::DbPoolConnection, util::standardize_address}, +}; +use ahash::AHashMap; +use aptos_protos::transaction::v1::WriteTableItem; +use diesel::prelude::*; +use diesel_async::RunQueryDsl; +use field_count::FieldCount; +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Identifiable, Queryable)] +#[diesel(primary_key(delegator_address, delegation_pool_address))] +#[diesel(table_name = current_delegated_voter)] +pub struct CurrentDelegatedVoterQuery { + pub delegation_pool_address: String, + pub delegator_address: String, + pub table_handle: Option, + // vote_delegation table handle + pub voter: Option, + pub pending_voter: Option, + pub last_transaction_version: i64, + pub last_transaction_timestamp: chrono::NaiveDateTime, + pub inserted_at: chrono::NaiveDateTime, +} + +#[derive( + Debug, Deserialize, Eq, FieldCount, Identifiable, Insertable, PartialEq, Serialize, Clone, +)] +#[diesel(primary_key(delegator_address, delegation_pool_address))] +#[diesel(table_name = current_delegated_voter)] +pub struct CurrentDelegatedVoter { + pub delegation_pool_address: String, + pub delegator_address: String, + pub table_handle: Option, + // vote_delegation table handle + pub voter: Option, + pub pending_voter: Option, + // voter to be in the next lockup period + pub last_transaction_version: i64, + pub last_transaction_timestamp: chrono::NaiveDateTime, +} + +// (delegation_pool_address, delegator_address) +type CurrentDelegatedVoterPK = (String, String); +type CurrentDelegatedVoterMap = AHashMap; +// table handle to delegation pool address mapping +type VoteDelegationTableHandleToPool = AHashMap; + +impl CurrentDelegatedVoter { + pub fn pk(&self) -> CurrentDelegatedVoterPK { + ( + self.delegation_pool_address.clone(), + self.delegator_address.clone(), + ) + } + + /// There are 3 pieces of information we need in order to get the delegated voters + /// 1. We need the mapping between pool address and table handle of the governance record. This will help us + /// figure out what the pool address it is + /// 2. We need to parse the governance record itself + /// 3. 
All active shares prior to governance contract need to be tracked as well, the default voters are the delegators themselves + pub async fn from_write_table_item( + write_table_item: &WriteTableItem, + txn_version: i64, + txn_timestamp: chrono::NaiveDateTime, + vote_delegation_handle_to_pool_address: &VoteDelegationTableHandleToPool, + conn: &mut DbPoolConnection<'_>, + query_retries: u32, + query_retry_delay_ms: u64, + ) -> anyhow::Result { + let mut delegated_voter_map: CurrentDelegatedVoterMap = AHashMap::new(); + + let table_item_data = write_table_item.data.as_ref().unwrap(); + let table_handle = standardize_address(&write_table_item.handle); + if let Some(VoteDelegationTableItem::VoteDelegationVector(vote_delegation_vector)) = + VoteDelegationTableItem::from_table_item_type( + table_item_data.value_type.as_str(), + &table_item_data.value, + txn_version, + )? + { + let pool_address = match vote_delegation_handle_to_pool_address.get(&table_handle) { + Some(pool_address) => pool_address.clone(), + None => { + // look up from db + Self::get_delegation_pool_address_by_table_handle(conn, &table_handle, query_retries, query_retry_delay_ms).await + .unwrap_or_else(|_| { + tracing::error!( + transaction_version = txn_version, + lookup_key = &table_handle, + "Missing pool address for table handle. You probably should backfill db.", + ); + "".to_string() + }) + }, + }; + if !pool_address.is_empty() { + for inner in vote_delegation_vector { + let delegator_address = inner.get_delegator_address(); + let voter = inner.value.get_voter(); + let pending_voter = inner.value.get_pending_voter(); + + let delegated_voter = CurrentDelegatedVoter { + delegator_address: delegator_address.clone(), + delegation_pool_address: pool_address.clone(), + voter: Some(voter.clone()), + pending_voter: Some(pending_voter.clone()), + last_transaction_timestamp: txn_timestamp, + last_transaction_version: txn_version, + table_handle: Some(table_handle.clone()), + }; + delegated_voter_map + .insert((pool_address.clone(), delegator_address), delegated_voter); + } + } + } + Ok(delegated_voter_map) + } + + /// For delegators that have delegated before the vote delegation contract deployment, we + /// need to mark them as default voters, but also be careful that we don't override the + /// new data + pub async fn get_delegators_pre_contract_deployment( + write_table_item: &WriteTableItem, + txn_version: i64, + txn_timestamp: chrono::NaiveDateTime, + active_pool_to_staking_pool: &ShareToStakingPoolMapping, + previous_delegated_voters: &CurrentDelegatedVoterMap, + conn: &mut DbPoolConnection<'_>, + query_retries: u32, + query_retry_delay_ms: u64, + ) -> anyhow::Result> { + if let Some((_, active_balance)) = + CurrentDelegatorBalance::get_active_share_from_write_table_item( + write_table_item, + txn_version, + 0, // placeholder + active_pool_to_staking_pool, + ) + .await? 
+ { + let pool_address = active_balance.pool_address.clone(); + let delegator_address = active_balance.delegator_address.clone(); + + let already_exists = match previous_delegated_voters + .get(&(pool_address.clone(), delegator_address.clone())) + { + Some(_) => true, + None => { + // look up from db + Self::get_existence_by_pk( + conn, + &delegator_address, + &pool_address, + query_retries, + query_retry_delay_ms, + ) + .await + }, + }; + if !already_exists { + return Ok(Some(CurrentDelegatedVoter { + delegator_address: delegator_address.clone(), + delegation_pool_address: pool_address, + table_handle: None, + voter: Some(delegator_address.clone()), + pending_voter: Some(delegator_address), + last_transaction_version: txn_version, + last_transaction_timestamp: txn_timestamp, + })); + } + } + Ok(None) + } + + pub async fn get_delegation_pool_address_by_table_handle( + conn: &mut DbPoolConnection<'_>, + table_handle: &str, + query_retries: u32, + query_retry_delay_ms: u64, + ) -> anyhow::Result { + let mut tried = 0; + while tried < query_retries { + tried += 1; + match CurrentDelegatedVoterQuery::get_by_table_handle(conn, table_handle).await { + Ok(current_delegated_voter_query_result) => { + return Ok(current_delegated_voter_query_result.delegation_pool_address); + }, + Err(_) => { + if tried < query_retries { + tokio::time::sleep(std::time::Duration::from_millis(query_retry_delay_ms)) + .await; + } + }, + } + } + Err(anyhow::anyhow!( + "Failed to get delegation pool address from vote delegation write table handle" + )) + } + + pub async fn get_existence_by_pk( + conn: &mut DbPoolConnection<'_>, + delegator_address: &str, + delegation_pool_address: &str, + query_retries: u32, + query_retry_delay_ms: u64, + ) -> bool { + let mut tried = 0; + while tried < query_retries { + tried += 1; + match CurrentDelegatedVoterQuery::get_by_pk( + conn, + delegator_address, + delegation_pool_address, + ) + .await + { + Ok(_) => return true, + Err(_) => { + if tried < query_retries { + tokio::time::sleep(std::time::Duration::from_millis(query_retry_delay_ms)) + .await; + } + }, + } + } + false + } +} + +impl CurrentDelegatedVoterQuery { + pub async fn get_by_table_handle( + conn: &mut DbPoolConnection<'_>, + table_handle: &str, + ) -> diesel::QueryResult { + current_delegated_voter::table + .filter(current_delegated_voter::table_handle.eq(table_handle)) + .first::(conn) + .await + } + + pub async fn get_by_pk( + conn: &mut DbPoolConnection<'_>, + delegator_address: &str, + delegation_pool_address: &str, + ) -> diesel::QueryResult { + current_delegated_voter::table + .filter(current_delegated_voter::delegator_address.eq(delegator_address)) + .filter(current_delegated_voter::delegation_pool_address.eq(delegation_pool_address)) + .first::(conn) + .await + } +} + +impl Ord for CurrentDelegatedVoter { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.delegator_address.cmp(&other.delegator_address).then( + self.delegation_pool_address + .cmp(&other.delegation_pool_address), + ) + } +} + +impl PartialOrd for CurrentDelegatedVoter { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} diff --git a/rust/processor/src/db/common/models/stake_models/delegator_activities.rs b/rust/processor/src/db/common/models/stake_models/delegator_activities.rs new file mode 100644 index 000000000..846f10f7f --- /dev/null +++ b/rust/processor/src/db/common/models/stake_models/delegator_activities.rs @@ -0,0 +1,108 @@ +// Copyright © Aptos Foundation + +// This is required because a diesel 
macro makes clippy sad +#![allow(clippy::extra_unused_lifetimes)] + +use super::stake_utils::StakeEvent; +use crate::{ + schema::delegated_staking_activities, + utils::{ + counters::PROCESSOR_UNKNOWN_TYPE_COUNT, + util::{standardize_address, u64_to_bigdecimal}, + }, +}; +use aptos_protos::transaction::v1::{transaction::TxnData, Transaction}; +use bigdecimal::BigDecimal; +use field_count::FieldCount; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key(transaction_version, event_index))] +#[diesel(table_name = delegated_staking_activities)] +pub struct DelegatedStakingActivity { + pub transaction_version: i64, + pub event_index: i64, + pub delegator_address: String, + pub pool_address: String, + pub event_type: String, + pub amount: BigDecimal, +} + +impl DelegatedStakingActivity { + /// Pretty straightforward parsing from known delegated staking events + pub fn from_transaction(transaction: &Transaction) -> anyhow::Result> { + let mut delegator_activities = vec![]; + let txn_data = match transaction.txn_data.as_ref() { + Some(data) => data, + None => { + PROCESSOR_UNKNOWN_TYPE_COUNT + .with_label_values(&["DelegatedStakingActivity"]) + .inc(); + tracing::warn!( + transaction_version = transaction.version, + "Transaction data doesn't exist", + ); + return Ok(delegator_activities); + }, + }; + + let txn_version = transaction.version as i64; + let events = match txn_data { + TxnData::User(txn) => &txn.events, + TxnData::BlockMetadata(txn) => &txn.events, + _ => return Ok(delegator_activities), + }; + for (index, event) in events.iter().enumerate() { + let event_index = index as i64; + if let Some(staking_event) = + StakeEvent::from_event(event.type_str.as_str(), &event.data, txn_version)? 
+ { + let activity = match staking_event { + StakeEvent::AddStakeEvent(inner) => DelegatedStakingActivity { + transaction_version: txn_version, + event_index, + delegator_address: standardize_address(&inner.delegator_address), + pool_address: standardize_address(&inner.pool_address), + event_type: event.type_str.clone(), + amount: u64_to_bigdecimal(inner.amount_added), + }, + StakeEvent::UnlockStakeEvent(inner) => DelegatedStakingActivity { + transaction_version: txn_version, + event_index, + delegator_address: standardize_address(&inner.delegator_address), + pool_address: standardize_address(&inner.pool_address), + event_type: event.type_str.clone(), + amount: u64_to_bigdecimal(inner.amount_unlocked), + }, + StakeEvent::WithdrawStakeEvent(inner) => DelegatedStakingActivity { + transaction_version: txn_version, + event_index, + delegator_address: standardize_address(&inner.delegator_address), + pool_address: standardize_address(&inner.pool_address), + event_type: event.type_str.clone(), + amount: u64_to_bigdecimal(inner.amount_withdrawn), + }, + StakeEvent::ReactivateStakeEvent(inner) => DelegatedStakingActivity { + transaction_version: txn_version, + event_index, + delegator_address: standardize_address(&inner.delegator_address), + pool_address: standardize_address(&inner.pool_address), + event_type: event.type_str.clone(), + amount: u64_to_bigdecimal(inner.amount_reactivated), + }, + StakeEvent::DistributeRewardsEvent(inner) => DelegatedStakingActivity { + transaction_version: txn_version, + event_index, + delegator_address: "".to_string(), + pool_address: standardize_address(&inner.pool_address), + event_type: event.type_str.clone(), + amount: u64_to_bigdecimal(inner.rewards_amount), + }, + _ => continue, + }; + delegator_activities.push(activity); + } + } + Ok(delegator_activities) + } +} diff --git a/rust/processor/src/db/common/models/stake_models/delegator_balances.rs b/rust/processor/src/db/common/models/stake_models/delegator_balances.rs new file mode 100644 index 000000000..54790f43b --- /dev/null +++ b/rust/processor/src/db/common/models/stake_models/delegator_balances.rs @@ -0,0 +1,514 @@ +// Copyright © Aptos Foundation + +// This is required because a diesel macro makes clippy sad +#![allow(clippy::extra_unused_lifetimes)] + +use super::delegator_pools::{DelegatorPool, DelegatorPoolBalanceMetadata, PoolBalanceMetadata}; +use crate::{ + db::common::models::default_models::move_tables::TableItem, + schema::{current_delegator_balances, delegator_balances}, + utils::{database::DbPoolConnection, util::standardize_address}, +}; +use ahash::AHashMap; +use anyhow::Context; +use aptos_protos::transaction::v1::{ + write_set_change::Change, DeleteTableItem, Transaction, WriteResource, WriteTableItem, +}; +use bigdecimal::{BigDecimal, Zero}; +use diesel::prelude::*; +use diesel_async::RunQueryDsl; +use field_count::FieldCount; +use serde::{Deserialize, Serialize}; + +pub type TableHandle = String; +pub type Address = String; +pub type ShareToStakingPoolMapping = AHashMap; +pub type ShareToPoolMapping = AHashMap; +pub type CurrentDelegatorBalancePK = (Address, Address, String); +pub type CurrentDelegatorBalanceMap = AHashMap; + +#[derive(Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key(delegator_address, pool_address, pool_type))] +#[diesel(table_name = current_delegator_balances)] +pub struct CurrentDelegatorBalance { + pub delegator_address: String, + pub pool_address: String, + pub pool_type: String, + pub table_handle: String, + pub 
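// Illustrative sketch, not part of the diff above: one way DelegatedStakingActivity::from_transaction
// might be driven over a batch of protobuf transactions. The function name and loop are assumptions
// made for the example only.
fn collect_delegated_staking_activities(
    transactions: &[aptos_protos::transaction::v1::Transaction],
) -> anyhow::Result<Vec<DelegatedStakingActivity>> {
    let mut activities = Vec::new();
    for txn in transactions {
        // Each transaction contributes zero or more rows, one per recognized delegation event.
        activities.extend(DelegatedStakingActivity::from_transaction(txn)?);
    }
    Ok(activities)
}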
last_transaction_version: i64, + pub shares: BigDecimal, + pub parent_table_handle: String, +} + +#[derive(Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key(transaction_version, write_set_change_index))] +#[diesel(table_name = delegator_balances)] +pub struct DelegatorBalance { + pub transaction_version: i64, + pub write_set_change_index: i64, + pub delegator_address: String, + pub pool_address: String, + pub pool_type: String, + pub table_handle: String, + pub shares: BigDecimal, + pub parent_table_handle: String, +} + +#[derive(Debug, Identifiable, Queryable)] +#[diesel(primary_key(delegator_address, pool_address, pool_type))] +#[diesel(table_name = current_delegator_balances)] +pub struct CurrentDelegatorBalanceQuery { + pub delegator_address: String, + pub pool_address: String, + pub pool_type: String, + pub table_handle: String, + pub last_transaction_version: i64, + pub inserted_at: chrono::NaiveDateTime, + pub shares: BigDecimal, + pub parent_table_handle: String, +} + +impl CurrentDelegatorBalance { + /// Getting active share balances. Only 1 active pool per staking pool tracked in a single table + pub async fn get_active_share_from_write_table_item( + write_table_item: &WriteTableItem, + txn_version: i64, + write_set_change_index: i64, + active_pool_to_staking_pool: &ShareToStakingPoolMapping, + ) -> anyhow::Result> { + let table_handle = standardize_address(&write_table_item.handle.to_string()); + // The mapping will tell us if the table item is an active share table + if let Some(pool_balance) = active_pool_to_staking_pool.get(&table_handle) { + let pool_address = pool_balance.staking_pool_address.clone(); + let delegator_address = standardize_address(&write_table_item.key.to_string()); + + // Convert to TableItem model. Some fields are just placeholders + let (table_item_model, _) = + TableItem::from_write_table_item(write_table_item, 0, txn_version, 0); + + let shares: BigDecimal = table_item_model + .decoded_value + .as_ref() + .unwrap() + .as_str() + .unwrap() + .parse::() + .context(format!( + "cannot parse string as u128: {:?}, version {}", + table_item_model.decoded_value.as_ref(), + txn_version + ))?; + let shares = shares / &pool_balance.scaling_factor; + Ok(Some(( + DelegatorBalance { + transaction_version: txn_version, + write_set_change_index, + delegator_address: delegator_address.clone(), + pool_address: pool_address.clone(), + pool_type: "active_shares".to_string(), + table_handle: table_handle.clone(), + shares: shares.clone(), + parent_table_handle: table_handle.clone(), + }, + Self { + delegator_address, + pool_address, + pool_type: "active_shares".to_string(), + table_handle: table_handle.clone(), + last_transaction_version: txn_version, + shares, + parent_table_handle: table_handle, + }, + ))) + } else { + Ok(None) + } + } + + /// Getting inactive share balances. 
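// Illustrative example, not part of the diff above: the active-share parsing stores
// raw_shares / scaling_factor. The numbers below are made up purely to show the arithmetic.
fn scaled_shares_example() -> bigdecimal::BigDecimal {
    use std::str::FromStr;
    // Decoded table item value, a u128 rendered as a decimal string.
    let raw_shares = bigdecimal::BigDecimal::from_str("1234500000000").unwrap();
    // Scaling factor taken from the pool's balance metadata.
    let scaling_factor = bigdecimal::BigDecimal::from_str("100000000").unwrap();
    raw_shares / scaling_factor // == 12345, the value written to the `shares` column
}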
There could be multiple inactive pool per staking pool so we have + /// 2 layers of mapping (table w/ all inactive pools -> staking pool, table w/ delegator inactive shares -> each inactive pool) + pub async fn get_inactive_share_from_write_table_item( + write_table_item: &WriteTableItem, + txn_version: i64, + write_set_change_index: i64, + inactive_pool_to_staking_pool: &ShareToStakingPoolMapping, + inactive_share_to_pool: &ShareToPoolMapping, + conn: &mut DbPoolConnection<'_>, + query_retries: u32, + query_retry_delay_ms: u64, + ) -> anyhow::Result> { + let table_handle = standardize_address(&write_table_item.handle.to_string()); + // The mapping will tell us if the table item belongs to an inactive pool + if let Some(pool_balance) = inactive_share_to_pool.get(&table_handle) { + // If it is, we need to get the inactive staking pool handle and use it to look up the staking pool + let inactive_pool_handle = pool_balance.parent_table_handle.clone(); + + let pool_address = match inactive_pool_to_staking_pool + .get(&inactive_pool_handle) + .map(|metadata| metadata.staking_pool_address.clone()) + { + Some(pool_address) => pool_address, + None => { + match Self::get_staking_pool_from_inactive_share_handle( + conn, + &inactive_pool_handle, + query_retries, + query_retry_delay_ms, + ) + .await + { + Ok(pool) => pool, + Err(_) => { + tracing::error!( + transaction_version = txn_version, + lookup_key = &inactive_pool_handle, + "Failed to get staking pool address from inactive share handle. You probably should backfill db.", + ); + return Ok(None); + }, + } + }, + }; + let delegator_address = standardize_address(&write_table_item.key.to_string()); + // Convert to TableItem model. Some fields are just placeholders + let (table_item_model, _) = + TableItem::from_write_table_item(write_table_item, 0, txn_version, 0); + + let shares: BigDecimal = table_item_model + .decoded_value + .as_ref() + .unwrap() + .as_str() + .unwrap() + .parse::() + .context(format!( + "cannot parse string as u128: {:?}, version {}", + table_item_model.decoded_value.as_ref(), + txn_version + ))?; + let shares = shares / &pool_balance.scaling_factor; + Ok(Some(( + DelegatorBalance { + transaction_version: txn_version, + write_set_change_index, + delegator_address: delegator_address.clone(), + pool_address: pool_address.clone(), + pool_type: "inactive_shares".to_string(), + table_handle: table_handle.clone(), + shares: shares.clone(), + parent_table_handle: inactive_pool_handle.clone(), + }, + Self { + delegator_address, + pool_address, + pool_type: "inactive_shares".to_string(), + table_handle: table_handle.clone(), + last_transaction_version: txn_version, + shares, + parent_table_handle: inactive_pool_handle, + }, + ))) + } else { + Ok(None) + } + } + + // Setting amount to 0 if table item is deleted + pub fn get_active_share_from_delete_table_item( + delete_table_item: &DeleteTableItem, + txn_version: i64, + write_set_change_index: i64, + active_pool_to_staking_pool: &ShareToStakingPoolMapping, + ) -> anyhow::Result> { + let table_handle = standardize_address(&delete_table_item.handle.to_string()); + // The mapping will tell us if the table item is an active share table + if let Some(pool_balance) = active_pool_to_staking_pool.get(&table_handle) { + let delegator_address = standardize_address(&delete_table_item.key.to_string()); + + return Ok(Some(( + DelegatorBalance { + transaction_version: txn_version, + write_set_change_index, + delegator_address: delegator_address.clone(), + pool_address: 
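// Note on the inactive-share lookup above: it is deliberately two-layered. The table item's handle is
// first resolved through `inactive_share_to_pool` (delegator shares table -> inactive pool metadata),
// and that metadata's parent handle is then resolved through `inactive_pool_to_staking_pool`
// (inactive pool -> staking pool address), falling back to a database query when the parent pool was
// written in an earlier transaction.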
pool_balance.staking_pool_address.clone(), + pool_type: "active_shares".to_string(), + table_handle: table_handle.clone(), + shares: BigDecimal::zero(), + parent_table_handle: table_handle.clone(), + }, + Self { + delegator_address, + pool_address: pool_balance.staking_pool_address.clone(), + pool_type: "active_shares".to_string(), + table_handle: table_handle.clone(), + last_transaction_version: txn_version, + shares: BigDecimal::zero(), + parent_table_handle: table_handle, + }, + ))); + } + Ok(None) + } + + // Setting amount to 0 if table item is deleted + pub async fn get_inactive_share_from_delete_table_item( + delete_table_item: &DeleteTableItem, + txn_version: i64, + write_set_change_index: i64, + inactive_pool_to_staking_pool: &ShareToStakingPoolMapping, + inactive_share_to_pool: &ShareToPoolMapping, + conn: &mut DbPoolConnection<'_>, + query_retries: u32, + query_retry_delay_ms: u64, + ) -> anyhow::Result> { + let table_handle = standardize_address(&delete_table_item.handle.to_string()); + // The mapping will tell us if the table item belongs to an inactive pool + if let Some(pool_balance) = inactive_share_to_pool.get(&table_handle) { + // If it is, we need to get the inactive staking pool handle and use it to look up the staking pool + let inactive_pool_handle = pool_balance.parent_table_handle.clone(); + + let pool_address = match inactive_pool_to_staking_pool + .get(&inactive_pool_handle) + .map(|metadata| metadata.staking_pool_address.clone()) + { + Some(pool_address) => pool_address, + None => Self::get_staking_pool_from_inactive_share_handle( + conn, + &inactive_pool_handle, + query_retries, + query_retry_delay_ms, + ) + .await + .context(format!( + "Failed to get staking pool from inactive share handle {}, txn version {}", + inactive_pool_handle, txn_version + ))?, + }; + let delegator_address = standardize_address(&delete_table_item.key.to_string()); + + return Ok(Some(( + DelegatorBalance { + transaction_version: txn_version, + write_set_change_index, + delegator_address: delegator_address.clone(), + pool_address: pool_address.clone(), + pool_type: "inactive_shares".to_string(), + table_handle: table_handle.clone(), + shares: BigDecimal::zero(), + parent_table_handle: inactive_pool_handle.clone(), + }, + Self { + delegator_address, + pool_address, + pool_type: "inactive_shares".to_string(), + table_handle: table_handle.clone(), + last_transaction_version: txn_version, + shares: BigDecimal::zero(), + parent_table_handle: table_handle, + }, + ))); + } + Ok(None) + } + + /// Key is the inactive share table handle obtained from 0x1::delegation_pool::DelegationPool + /// Value is the same metadata although it's not really used + pub fn get_active_pool_to_staking_pool_mapping( + write_resource: &WriteResource, + txn_version: i64, + ) -> anyhow::Result> { + if let Some(balance) = DelegatorPool::get_delegated_pool_metadata_from_write_resource( + write_resource, + txn_version, + )? { + Ok(Some(AHashMap::from([( + balance.active_share_table_handle.clone(), + balance, + )]))) + } else { + Ok(None) + } + } + + /// Key is the inactive share table handle obtained from 0x1::delegation_pool::DelegationPool + /// Value is the same metadata although it's not really used + pub fn get_inactive_pool_to_staking_pool_mapping( + write_resource: &WriteResource, + txn_version: i64, + ) -> anyhow::Result> { + if let Some(balance) = DelegatorPool::get_delegated_pool_metadata_from_write_resource( + write_resource, + txn_version, + )? 
{ + Ok(Some(AHashMap::from([( + balance.inactive_share_table_handle.clone(), + balance, + )]))) + } else { + Ok(None) + } + } + + /// Key is the inactive share table handle obtained from 0x1::pool_u64_unbound::Pool + /// Value is the 0x1::pool_u64_unbound::Pool metadata that will be used to populate a user's inactive balance + pub fn get_inactive_share_to_pool_mapping( + write_table_item: &WriteTableItem, + txn_version: i64, + ) -> anyhow::Result> { + if let Some(balance) = DelegatorPool::get_inactive_pool_metadata_from_write_table_item( + write_table_item, + txn_version, + )? { + Ok(Some(AHashMap::from([( + balance.shares_table_handle.clone(), + balance, + )]))) + } else { + Ok(None) + } + } + + pub async fn get_staking_pool_from_inactive_share_handle( + conn: &mut DbPoolConnection<'_>, + table_handle: &str, + query_retries: u32, + query_retry_delay_ms: u64, + ) -> anyhow::Result { + let mut tried = 0; + while tried < query_retries { + tried += 1; + match CurrentDelegatorBalanceQuery::get_by_inactive_share_handle(conn, table_handle) + .await + { + Ok(current_delegator_balance) => return Ok(current_delegator_balance.pool_address), + Err(_) => { + if tried < query_retries { + tokio::time::sleep(std::time::Duration::from_millis(query_retry_delay_ms)) + .await; + } + }, + } + } + Err(anyhow::anyhow!( + "Failed to get staking pool address from inactive share handle" + )) + } + + pub async fn from_transaction( + transaction: &Transaction, + active_pool_to_staking_pool: &ShareToStakingPoolMapping, + conn: &mut DbPoolConnection<'_>, + query_retries: u32, + query_retry_delay_ms: u64, + ) -> anyhow::Result<(Vec, CurrentDelegatorBalanceMap)> { + let mut inactive_pool_to_staking_pool: ShareToStakingPoolMapping = AHashMap::new(); + let mut inactive_share_to_pool: ShareToPoolMapping = AHashMap::new(); + let mut current_delegator_balances: CurrentDelegatorBalanceMap = AHashMap::new(); + let mut delegator_balances = vec![]; + let txn_version = transaction.version as i64; + + let changes = &transaction.info.as_ref().unwrap().changes; + // Do a first pass to get the mapping of active_share table handles to staking pool resource let txn_version = transaction.version as i64; + for wsc in changes { + if let Change::WriteResource(write_resource) = wsc.change.as_ref().unwrap() { + if let Some(map) = + Self::get_inactive_pool_to_staking_pool_mapping(write_resource, txn_version) + .unwrap() + { + inactive_pool_to_staking_pool.extend(map); + } + } + + if let Change::WriteTableItem(table_item) = wsc.change.as_ref().unwrap() { + if let Some(map) = + Self::get_inactive_share_to_pool_mapping(table_item, txn_version).unwrap() + { + inactive_share_to_pool.extend(map); + } + } + } + // Now make a pass through table items to get the actual delegator balances + for (index, wsc) in changes.iter().enumerate() { + let maybe_delegator_balance = match wsc.change.as_ref().unwrap() { + Change::DeleteTableItem(table_item) => { + if let Some((balance, current_balance)) = + Self::get_active_share_from_delete_table_item( + table_item, + txn_version, + index as i64, + active_pool_to_staking_pool, + ) + .unwrap() + { + Some((balance, current_balance)) + } else { + Self::get_inactive_share_from_delete_table_item( + table_item, + txn_version, + index as i64, + &inactive_pool_to_staking_pool, + &inactive_share_to_pool, + conn, + query_retries, + query_retry_delay_ms, + ) + .await + .unwrap() + } + }, + Change::WriteTableItem(table_item) => { + if let Some((balance, current_balance)) = + Self::get_active_share_from_write_table_item( + 
table_item, + txn_version, + index as i64, + active_pool_to_staking_pool, + ) + .await + .unwrap() + { + Some((balance, current_balance)) + } else { + Self::get_inactive_share_from_write_table_item( + table_item, + txn_version, + index as i64, + &inactive_pool_to_staking_pool, + &inactive_share_to_pool, + conn, + query_retries, + query_retry_delay_ms, + ) + .await + .unwrap() + } + }, + _ => None, + }; + if let Some((delegator_balance, current_delegator_balance)) = maybe_delegator_balance { + delegator_balances.push(delegator_balance); + current_delegator_balances.insert( + ( + current_delegator_balance.delegator_address.clone(), + current_delegator_balance.pool_address.clone(), + current_delegator_balance.pool_type.clone(), + ), + current_delegator_balance, + ); + } + } + Ok((delegator_balances, current_delegator_balances)) + } +} + +impl CurrentDelegatorBalanceQuery { + pub async fn get_by_inactive_share_handle( + conn: &mut DbPoolConnection<'_>, + table_handle: &str, + ) -> diesel::QueryResult { + current_delegator_balances::table + .filter(current_delegator_balances::parent_table_handle.eq(table_handle)) + .first::(conn) + .await + } +} diff --git a/rust/processor/src/db/common/models/stake_models/delegator_pools.rs b/rust/processor/src/db/common/models/stake_models/delegator_pools.rs new file mode 100644 index 000000000..6c5613990 --- /dev/null +++ b/rust/processor/src/db/common/models/stake_models/delegator_pools.rs @@ -0,0 +1,238 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +// This is required because a diesel macro makes clippy sad +#![allow(clippy::extra_unused_lifetimes)] + +use super::stake_utils::{StakeResource, StakeTableItem}; +use crate::{ + schema::{ + current_delegated_staking_pool_balances, delegated_staking_pool_balances, + delegated_staking_pools, + }, + utils::{counters::PROCESSOR_UNKNOWN_TYPE_COUNT, util::standardize_address}, +}; +use ahash::AHashMap; +use aptos_protos::transaction::v1::{ + transaction::TxnData, write_set_change::Change, Transaction, WriteResource, WriteTableItem, +}; +use bigdecimal::BigDecimal; +use field_count::FieldCount; +use serde::{Deserialize, Serialize}; + +type StakingPoolAddress = String; +pub type DelegatorPoolMap = AHashMap; +pub type DelegatorPoolBalanceMap = AHashMap; + +// All pools +#[derive(Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key(staking_pool_address))] +#[diesel(table_name = delegated_staking_pools)] +pub struct DelegatorPool { + pub staking_pool_address: String, + pub first_transaction_version: i64, +} + +// Metadata to fill pool balances and delegator balance +#[derive(Debug, Deserialize, Serialize)] +pub struct DelegatorPoolBalanceMetadata { + pub transaction_version: i64, + pub staking_pool_address: String, + pub total_coins: BigDecimal, + pub total_shares: BigDecimal, + pub scaling_factor: BigDecimal, + pub operator_commission_percentage: BigDecimal, + pub active_share_table_handle: String, + pub inactive_share_table_handle: String, +} + +// Similar metadata but specifically for 0x1::pool_u64_unbound::Pool +#[derive(Debug, Deserialize, Serialize)] +pub struct PoolBalanceMetadata { + pub transaction_version: i64, + pub total_coins: BigDecimal, + pub total_shares: BigDecimal, + pub scaling_factor: BigDecimal, + pub shares_table_handle: String, + pub parent_table_handle: String, +} + +// Pools balances +#[derive(Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key(transaction_version, 
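// Note on CurrentDelegatorBalance::from_transaction above: it is a two-pass walk over one
// transaction's write set changes. Pass one only builds the handle mappings (inactive pool ->
// staking pool, shares table -> inactive pool); pass two converts each table item write or delete
// into a (DelegatorBalance, CurrentDelegatorBalance) pair, consulting the database for handles that
// were created in earlier transactions.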
staking_pool_address))] +#[diesel(table_name = delegated_staking_pool_balances)] +pub struct DelegatorPoolBalance { + pub transaction_version: i64, + pub staking_pool_address: String, + pub total_coins: BigDecimal, + pub total_shares: BigDecimal, + pub operator_commission_percentage: BigDecimal, + pub inactive_table_handle: String, + pub active_table_handle: String, +} + +// All pools w latest balances (really a more comprehensive version than DelegatorPool) +#[derive(Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key(staking_pool_address))] +#[diesel(table_name = current_delegated_staking_pool_balances)] +pub struct CurrentDelegatorPoolBalance { + pub staking_pool_address: String, + pub total_coins: BigDecimal, + pub total_shares: BigDecimal, + pub last_transaction_version: i64, + pub operator_commission_percentage: BigDecimal, + pub inactive_table_handle: String, + pub active_table_handle: String, +} + +impl DelegatorPool { + pub fn from_transaction( + transaction: &Transaction, + ) -> anyhow::Result<( + DelegatorPoolMap, + Vec, + DelegatorPoolBalanceMap, + )> { + let mut delegator_pool_map = AHashMap::new(); + let mut delegator_pool_balances = vec![]; + let mut delegator_pool_balances_map = AHashMap::new(); + let txn_data = match transaction.txn_data.as_ref() { + Some(data) => data, + None => { + PROCESSOR_UNKNOWN_TYPE_COUNT + .with_label_values(&["DelegatorPool"]) + .inc(); + tracing::warn!( + transaction_version = transaction.version, + "Transaction data doesn't exist", + ); + return Ok(( + delegator_pool_map, + delegator_pool_balances, + delegator_pool_balances_map, + )); + }, + }; + let txn_version = transaction.version as i64; + + // Do a first pass to get the mapping of active_share table handles to staking pool addresses + if let TxnData::User(_) = txn_data { + let changes = &transaction + .info + .as_ref() + .expect("Transaction info doesn't exist!") + .changes; + for wsc in changes { + if let Change::WriteResource(write_resource) = wsc.change.as_ref().unwrap() { + let maybe_write_resource = + Self::from_write_resource(write_resource, txn_version)?; + if let Some((pool, pool_balances, current_pool_balances)) = maybe_write_resource + { + let staking_pool_address = pool.staking_pool_address.clone(); + delegator_pool_map.insert(staking_pool_address.clone(), pool); + delegator_pool_balances.push(pool_balances); + delegator_pool_balances_map + .insert(staking_pool_address.clone(), current_pool_balances); + } + } + } + } + Ok(( + delegator_pool_map, + delegator_pool_balances, + delegator_pool_balances_map, + )) + } + + pub fn get_delegated_pool_metadata_from_write_resource( + write_resource: &WriteResource, + txn_version: i64, + ) -> anyhow::Result> { + if let Some(StakeResource::DelegationPool(inner)) = + StakeResource::from_write_resource(write_resource, txn_version)? 
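// Illustrative sketch, not part of the diff above: the three collections returned by
// DelegatorPool::from_transaction. Variable names are assumptions made for the example only.
fn example_delegator_pools(
    txn: &aptos_protos::transaction::v1::Transaction,
) -> anyhow::Result<()> {
    let (pools, pool_balances, current_pool_balances) = DelegatorPool::from_transaction(txn)?;
    // pools:                  staking pool address -> DelegatorPool (first transaction seen)
    // pool_balances:          per-transaction DelegatorPoolBalance rows
    // current_pool_balances:  staking pool address -> latest CurrentDelegatorPoolBalance
    tracing::debug!(
        pool_count = pools.len(),
        balance_rows = pool_balances.len(),
        current_rows = current_pool_balances.len(),
        "parsed delegator pools"
    );
    Ok(())
}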
+ { + let staking_pool_address = standardize_address(&write_resource.address.to_string()); + let total_coins = inner.active_shares.total_coins; + let total_shares = + &inner.active_shares.total_shares / &inner.active_shares.scaling_factor; + Ok(Some(DelegatorPoolBalanceMetadata { + transaction_version: txn_version, + staking_pool_address, + total_coins, + total_shares, + scaling_factor: inner.active_shares.scaling_factor, + operator_commission_percentage: inner.operator_commission_percentage.clone(), + active_share_table_handle: inner.active_shares.shares.inner.get_handle(), + inactive_share_table_handle: inner.inactive_shares.get_handle(), + })) + } else { + Ok(None) + } + } + + pub fn get_inactive_pool_metadata_from_write_table_item( + write_table_item: &WriteTableItem, + txn_version: i64, + ) -> anyhow::Result> { + let table_item_data = write_table_item.data.as_ref().unwrap(); + + if let Some(StakeTableItem::Pool(inner)) = &StakeTableItem::from_table_item_type( + table_item_data.value_type.as_str(), + &table_item_data.value, + txn_version, + )? { + let total_coins = inner.total_coins.clone(); + let total_shares = &inner.total_shares / &inner.scaling_factor; + Ok(Some(PoolBalanceMetadata { + transaction_version: txn_version, + total_coins, + total_shares, + scaling_factor: inner.scaling_factor.clone(), + shares_table_handle: inner.shares.inner.get_handle(), + parent_table_handle: standardize_address(&write_table_item.handle.to_string()), + })) + } else { + Ok(None) + } + } + + pub fn from_write_resource( + write_resource: &WriteResource, + txn_version: i64, + ) -> anyhow::Result> { + if let Some(balance) = + &Self::get_delegated_pool_metadata_from_write_resource(write_resource, txn_version)? + { + let staking_pool_address = balance.staking_pool_address.clone(); + let total_coins = balance.total_coins.clone(); + let total_shares = balance.total_shares.clone(); + let transaction_version = balance.transaction_version; + Ok(Some(( + Self { + staking_pool_address: staking_pool_address.clone(), + first_transaction_version: transaction_version, + }, + DelegatorPoolBalance { + transaction_version, + staking_pool_address: staking_pool_address.clone(), + total_coins: total_coins.clone(), + total_shares: total_shares.clone(), + operator_commission_percentage: balance.operator_commission_percentage.clone(), + inactive_table_handle: balance.inactive_share_table_handle.clone(), + active_table_handle: balance.active_share_table_handle.clone(), + }, + CurrentDelegatorPoolBalance { + staking_pool_address, + total_coins, + total_shares, + last_transaction_version: transaction_version, + operator_commission_percentage: balance.operator_commission_percentage.clone(), + inactive_table_handle: balance.inactive_share_table_handle.clone(), + active_table_handle: balance.active_share_table_handle.clone(), + }, + ))) + } else { + Ok(None) + } + } +} diff --git a/rust/processor/src/db/common/models/stake_models/mod.rs b/rust/processor/src/db/common/models/stake_models/mod.rs new file mode 100644 index 000000000..75db7e273 --- /dev/null +++ b/rust/processor/src/db/common/models/stake_models/mod.rs @@ -0,0 +1,10 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +pub mod current_delegated_voter; +pub mod delegator_activities; +pub mod delegator_balances; +pub mod delegator_pools; +pub mod proposal_votes; +pub mod stake_utils; +pub mod staking_pool_voter; diff --git a/rust/processor/src/db/common/models/stake_models/proposal_votes.rs 
b/rust/processor/src/db/common/models/stake_models/proposal_votes.rs new file mode 100644 index 000000000..ebe473e0c --- /dev/null +++ b/rust/processor/src/db/common/models/stake_models/proposal_votes.rs @@ -0,0 +1,73 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +// This is required because a diesel macro makes clippy sad +#![allow(clippy::extra_unused_lifetimes)] + +use super::stake_utils::StakeEvent; +use crate::{ + schema::proposal_votes, + utils::{ + counters::PROCESSOR_UNKNOWN_TYPE_COUNT, + util::{parse_timestamp, standardize_address}, + }, +}; +use aptos_protos::transaction::v1::{transaction::TxnData, Transaction}; +use bigdecimal::BigDecimal; +use field_count::FieldCount; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key(transaction_version, proposal_id, voter_address))] +#[diesel(table_name = proposal_votes)] +pub struct ProposalVote { + pub transaction_version: i64, + pub proposal_id: i64, + pub voter_address: String, + pub staking_pool_address: String, + pub num_votes: BigDecimal, + pub should_pass: bool, + pub transaction_timestamp: chrono::NaiveDateTime, +} + +impl ProposalVote { + pub fn from_transaction(transaction: &Transaction) -> anyhow::Result> { + let mut proposal_votes = vec![]; + let txn_data = match transaction.txn_data.as_ref() { + Some(data) => data, + None => { + PROCESSOR_UNKNOWN_TYPE_COUNT + .with_label_values(&["ProposalVote"]) + .inc(); + tracing::warn!( + transaction_version = transaction.version, + "Transaction data doesn't exist", + ); + return Ok(proposal_votes); + }, + }; + let txn_version = transaction.version as i64; + + if let TxnData::User(user_txn) = txn_data { + for event in &user_txn.events { + if let Some(StakeEvent::GovernanceVoteEvent(ev)) = + StakeEvent::from_event(event.type_str.as_str(), &event.data, txn_version)? 
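// Note on the branch above: it only fires for 0x1::aptos_governance::VoteEvent payloads, whose
// numeric fields arrive as JSON strings (see GovernanceVoteEvent in stake_utils.rs below). A
// fabricated example of the shape being parsed, for illustration only:
//   {"proposal_id": "12", "voter": "0xa1", "stake_pool": "0xb2", "num_votes": "1000000", "should_pass": true}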
+ { + proposal_votes.push(Self { + transaction_version: txn_version, + proposal_id: ev.proposal_id as i64, + voter_address: standardize_address(&ev.voter), + staking_pool_address: standardize_address(&ev.stake_pool), + num_votes: ev.num_votes.clone(), + should_pass: ev.should_pass, + transaction_timestamp: parse_timestamp( + transaction.timestamp.as_ref().unwrap(), + txn_version, + ), + }); + } + } + } + Ok(proposal_votes) + } +} diff --git a/rust/processor/src/db/common/models/stake_models/stake_utils.rs b/rust/processor/src/db/common/models/stake_models/stake_utils.rs new file mode 100644 index 000000000..f623d3ffb --- /dev/null +++ b/rust/processor/src/db/common/models/stake_models/stake_utils.rs @@ -0,0 +1,337 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + db::common::models::{ + default_models::move_resources::MoveResource, token_models::token_utils::Table, + }, + utils::util::{deserialize_from_string, standardize_address}, +}; +use anyhow::{Context, Result}; +use aptos_protos::transaction::v1::WriteResource; +use bigdecimal::BigDecimal; +use serde::{Deserialize, Serialize}; + +const STAKE_ADDR: &str = "0x0000000000000000000000000000000000000000000000000000000000000001"; + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct StakePoolResource { + delegated_voter: String, + operator_address: String, +} + +impl StakePoolResource { + pub fn get_delegated_voter(&self) -> String { + standardize_address(&self.delegated_voter) + } + + pub fn get_operator_address(&self) -> String { + standardize_address(&self.operator_address) + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct DelegationPoolResource { + pub active_shares: PoolResource, + pub inactive_shares: Table, + #[serde(deserialize_with = "deserialize_from_string")] + pub operator_commission_percentage: BigDecimal, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct PoolResource { + pub shares: SharesInnerResource, + #[serde(deserialize_with = "deserialize_from_string")] + pub total_coins: BigDecimal, + #[serde(deserialize_with = "deserialize_from_string")] + pub total_shares: BigDecimal, + #[serde(deserialize_with = "deserialize_from_string")] + pub scaling_factor: BigDecimal, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct SharesInnerResource { + pub inner: Table, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct GovernanceVoteEvent { + #[serde(deserialize_with = "deserialize_from_string")] + pub proposal_id: u64, + pub voter: String, + pub stake_pool: String, + #[serde(deserialize_with = "deserialize_from_string")] + pub num_votes: BigDecimal, + pub should_pass: bool, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct DistributeRewardsEvent { + pub pool_address: String, + #[serde(deserialize_with = "deserialize_from_string")] + pub rewards_amount: u64, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct AddStakeEvent { + #[serde(deserialize_with = "deserialize_from_string")] + pub amount_added: u64, + pub delegator_address: String, + pub pool_address: String, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct UnlockStakeEvent { + #[serde(deserialize_with = "deserialize_from_string")] + pub amount_unlocked: u64, + pub delegator_address: String, + pub pool_address: String, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct WithdrawStakeEvent { + #[serde(deserialize_with = "deserialize_from_string")] + pub amount_withdrawn: u64, + pub delegator_address: 
String, + pub pool_address: String, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct ReactivateStakeEvent { + #[serde(deserialize_with = "deserialize_from_string")] + pub amount_reactivated: u64, + pub delegator_address: String, + pub pool_address: String, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub enum StakeTableItem { + Pool(PoolResource), +} + +impl StakeTableItem { + pub fn from_table_item_type( + data_type: &str, + data: &str, + txn_version: i64, + ) -> Result> { + match data_type { + "0x1::pool_u64_unbound::Pool" => { + serde_json::from_str(data).map(|inner| Some(StakeTableItem::Pool(inner))) + }, + _ => Ok(None), + } + .context(format!( + "version {} failed! failed to parse type {}, data {:?}", + txn_version, data_type, data + )) + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub enum StakeResource { + StakePool(StakePoolResource), + DelegationPool(DelegationPoolResource), +} + +impl StakeResource { + fn is_resource_supported(data_type: &str) -> bool { + [ + format!("{}::stake::StakePool", STAKE_ADDR), + format!("{}::delegation_pool::DelegationPool", STAKE_ADDR), + ] + .contains(&data_type.to_string()) + } + + fn from_resource(data_type: &str, data: &serde_json::Value, txn_version: i64) -> Result { + match data_type { + x if x == format!("{}::stake::StakePool", STAKE_ADDR) => { + serde_json::from_value(data.clone()) + .map(|inner| Some(StakeResource::StakePool(inner))) + }, + x if x == format!("{}::delegation_pool::DelegationPool", STAKE_ADDR) => { + serde_json::from_value(data.clone()) + .map(|inner| Some(StakeResource::DelegationPool(inner))) + }, + _ => Ok(None), + } + .context(format!( + "version {} failed! failed to parse type {}, data {:?}", + txn_version, data_type, data + ))? + .context(format!( + "Resource unsupported! Call is_resource_supported first. 
version {} type {}", + txn_version, data_type + )) + } + + pub fn from_write_resource( + write_resource: &WriteResource, + txn_version: i64, + ) -> Result> { + let type_str = MoveResource::get_outer_type_from_write_resource(write_resource); + if !Self::is_resource_supported(type_str.as_str()) { + return Ok(None); + } + let resource = MoveResource::from_write_resource( + write_resource, + 0, // Placeholder, this isn't used anyway + txn_version, + 0, // Placeholder, this isn't used anyway + ); + Ok(Some(Self::from_resource( + &type_str, + resource.data.as_ref().unwrap(), + txn_version, + )?)) + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub enum StakeEvent { + GovernanceVoteEvent(GovernanceVoteEvent), + DistributeRewardsEvent(DistributeRewardsEvent), + AddStakeEvent(AddStakeEvent), + UnlockStakeEvent(UnlockStakeEvent), + WithdrawStakeEvent(WithdrawStakeEvent), + ReactivateStakeEvent(ReactivateStakeEvent), +} + +impl StakeEvent { + pub fn from_event(data_type: &str, data: &str, txn_version: i64) -> Result> { + match data_type { + "0x1::aptos_governance::VoteEvent" => { + serde_json::from_str(data).map(|inner| Some(StakeEvent::GovernanceVoteEvent(inner))) + }, + "0x1::stake::DistributeRewardsEvent" => serde_json::from_str(data) + .map(|inner| Some(StakeEvent::DistributeRewardsEvent(inner))), + "0x1::delegation_pool::AddStakeEvent" => { + serde_json::from_str(data).map(|inner| Some(StakeEvent::AddStakeEvent(inner))) + }, + "0x1::delegation_pool::UnlockStakeEvent" => { + serde_json::from_str(data).map(|inner| Some(StakeEvent::UnlockStakeEvent(inner))) + }, + "0x1::delegation_pool::WithdrawStakeEvent" => { + serde_json::from_str(data).map(|inner| Some(StakeEvent::WithdrawStakeEvent(inner))) + }, + "0x1::delegation_pool::ReactivateStakeEvent" => serde_json::from_str(data) + .map(|inner| Some(StakeEvent::ReactivateStakeEvent(inner))), + _ => Ok(None), + } + .context(format!( + "version {} failed! failed to parse type {}, data {:?}", + txn_version, data_type, data + )) + } +} + +pub enum VoteDelegationTableItem { + VoteDelegationVector(Vec), +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct VoteDelegationVector { + key: String, + pub value: VoteDelegationResource, +} + +impl VoteDelegationVector { + pub fn get_delegator_address(&self) -> String { + standardize_address(&self.key) + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct VoteDelegationResource { + pub voter: String, + pub pending_voter: String, +} + +impl VoteDelegationResource { + pub fn get_voter(&self) -> String { + standardize_address(&self.voter) + } + + pub fn get_pending_voter(&self) -> String { + standardize_address(&self.pending_voter) + } +} + +impl VoteDelegationTableItem { + pub fn from_table_item_type( + data_type: &str, + data: &str, + txn_version: i64, + ) -> Result> { + match data_type { + "vector<0x1::smart_table::Entry>" => { + let vote_delegation_vector: Vec = serde_json::from_str(data) + .context(format!( + "version {} failed! 
failed to parse type {}, data {:?}", + txn_version, data_type, data + ))?; + Ok(Some(VoteDelegationTableItem::VoteDelegationVector( + vote_delegation_vector.clone(), + ))) + }, + _ => Ok(None), + } + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct GovernanceRecordsResource { + pub vote_delegation: VoteDelegationBucketsResource, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct VoteDelegationBucketsResource { + pub buckets: VoteDelegationInnerResource, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct VoteDelegationInnerResource { + pub inner: Table, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub enum DelegationVoteGovernanceRecordsResource { + GovernanceRecords(GovernanceRecordsResource), +} + +impl DelegationVoteGovernanceRecordsResource { + pub fn from_resource( + data_type: &str, + data: &serde_json::Value, + txn_version: i64, + ) -> Result> { + match data_type { + x if x == format!("{}::delegation_pool::GovernanceRecords", STAKE_ADDR) => { + serde_json::from_value(data.clone()).map(|inner| { + Some(DelegationVoteGovernanceRecordsResource::GovernanceRecords( + inner, + )) + }) + }, + _ => Ok(None), + } + .context(format!( + "version {} failed! failed to parse type {}, data {:?}", + txn_version, data_type, data + )) + } + + pub fn from_write_resource( + write_resource: &WriteResource, + txn_version: i64, + ) -> Result> { + let type_str = MoveResource::get_outer_type_from_write_resource(write_resource); + let resource = MoveResource::from_write_resource( + write_resource, + 0, // Placeholder, this isn't used anyway + txn_version, + 0, // Placeholder, this isn't used anyway + ); + Self::from_resource(&type_str, resource.data.as_ref().unwrap(), txn_version) + } +} diff --git a/rust/processor/src/db/common/models/stake_models/staking_pool_voter.rs b/rust/processor/src/db/common/models/stake_models/staking_pool_voter.rs new file mode 100644 index 000000000..67ecf4d45 --- /dev/null +++ b/rust/processor/src/db/common/models/stake_models/staking_pool_voter.rs @@ -0,0 +1,51 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +// This is required because a diesel macro makes clippy sad +#![allow(clippy::extra_unused_lifetimes)] + +use super::stake_utils::StakeResource; +use crate::{schema::current_staking_pool_voter, utils::util::standardize_address}; +use ahash::AHashMap; +use aptos_protos::transaction::v1::{write_set_change::Change, Transaction}; +use field_count::FieldCount; +use serde::{Deserialize, Serialize}; + +type StakingPoolAddress = String; +pub type StakingPoolVoterMap = AHashMap; + +#[derive(Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key(staking_pool_address))] +#[diesel(table_name = current_staking_pool_voter)] +pub struct CurrentStakingPoolVoter { + pub staking_pool_address: String, + pub voter_address: String, + pub last_transaction_version: i64, + pub operator_address: String, +} + +impl CurrentStakingPoolVoter { + pub fn from_transaction(transaction: &Transaction) -> anyhow::Result { + let mut staking_pool_voters = AHashMap::new(); + + let txn_version = transaction.version as i64; + for wsc in &transaction.info.as_ref().unwrap().changes { + if let Change::WriteResource(write_resource) = wsc.change.as_ref().unwrap() { + if let Some(StakeResource::StakePool(inner)) = + StakeResource::from_write_resource(write_resource, txn_version)? 
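// Illustrative sketch, not part of the diff above: exercising StakeEvent::from_event directly. The
// JSON payload is fabricated and mirrors the AddStakeEvent field names; the amount is assumed to be
// a string-encoded u64, matching the deserialize_from_string attribute on that struct.
fn example_parse_add_stake_event() -> anyhow::Result<()> {
    let data = r#"{"amount_added":"1000000","delegator_address":"0xd1","pool_address":"0xabc"}"#;
    match StakeEvent::from_event("0x1::delegation_pool::AddStakeEvent", data, 0)? {
        Some(StakeEvent::AddStakeEvent(ev)) => {
            assert_eq!(ev.amount_added, 1_000_000);
            assert_eq!(ev.pool_address, "0xabc");
        },
        other => anyhow::bail!("unexpected parse result: {:?}", other),
    }
    Ok(())
}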
+ { + let staking_pool_address = + standardize_address(&write_resource.address.to_string()); + staking_pool_voters.insert(staking_pool_address.clone(), Self { + staking_pool_address, + voter_address: inner.get_delegated_voter(), + last_transaction_version: txn_version, + operator_address: inner.get_operator_address(), + }); + } + } + } + + Ok(staking_pool_voters) + } +} diff --git a/rust/processor/src/db/common/models/token_models/collection_datas.rs b/rust/processor/src/db/common/models/token_models/collection_datas.rs new file mode 100644 index 000000000..977fa8fc8 --- /dev/null +++ b/rust/processor/src/db/common/models/token_models/collection_datas.rs @@ -0,0 +1,208 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +// This is required because a diesel macro makes clippy sad +#![allow(clippy::extra_unused_lifetimes)] +#![allow(clippy::unused_unit)] + +use super::{ + token_utils::{CollectionDataIdType, TokenWriteSet}, + tokens::TableHandleToOwner, +}; +use crate::{ + schema::{collection_datas, current_collection_datas}, + utils::{database::DbPoolConnection, util::standardize_address}, +}; +use aptos_protos::transaction::v1::WriteTableItem; +use bigdecimal::BigDecimal; +use diesel::prelude::*; +use diesel_async::RunQueryDsl; +use field_count::FieldCount; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key(collection_data_id_hash, transaction_version))] +#[diesel(table_name = collection_datas)] +pub struct CollectionData { + pub collection_data_id_hash: String, + pub transaction_version: i64, + pub creator_address: String, + pub collection_name: String, + pub description: String, + pub metadata_uri: String, + pub supply: BigDecimal, + pub maximum: BigDecimal, + pub maximum_mutable: bool, + pub uri_mutable: bool, + pub description_mutable: bool, + pub table_handle: String, + pub transaction_timestamp: chrono::NaiveDateTime, +} + +#[derive(Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key(collection_data_id_hash))] +#[diesel(table_name = current_collection_datas)] +pub struct CurrentCollectionData { + pub collection_data_id_hash: String, + pub creator_address: String, + pub collection_name: String, + pub description: String, + pub metadata_uri: String, + pub supply: BigDecimal, + pub maximum: BigDecimal, + pub maximum_mutable: bool, + pub uri_mutable: bool, + pub description_mutable: bool, + pub last_transaction_version: i64, + pub table_handle: String, + pub last_transaction_timestamp: chrono::NaiveDateTime, +} + +/// Need a separate struct for queryable because we don't want to define the inserted_at column (letting DB fill) +#[derive(Debug, Identifiable, Queryable)] +#[diesel(primary_key(collection_data_id_hash))] +#[diesel(table_name = current_collection_datas)] +pub struct CurrentCollectionDataQuery { + pub collection_data_id_hash: String, + pub creator_address: String, + pub collection_name: String, + pub description: String, + pub metadata_uri: String, + pub supply: BigDecimal, + pub maximum: BigDecimal, + pub maximum_mutable: bool, + pub uri_mutable: bool, + pub description_mutable: bool, + pub last_transaction_version: i64, + pub inserted_at: chrono::NaiveDateTime, + pub table_handle: String, + pub last_transaction_timestamp: chrono::NaiveDateTime, +} + +impl CollectionData { + pub async fn from_write_table_item( + table_item: &WriteTableItem, + txn_version: i64, + txn_timestamp: chrono::NaiveDateTime, + 
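// Illustrative sketch, not part of the diff above: batch-level use of
// CurrentStakingPoolVoter::from_transaction. Because the result is keyed by staking pool address,
// later transactions in a batch naturally overwrite earlier voter entries. The helper name is
// hypothetical.
fn example_latest_pool_voters(
    transactions: &[aptos_protos::transaction::v1::Transaction],
) -> anyhow::Result<StakingPoolVoterMap> {
    let mut latest = StakingPoolVoterMap::new();
    for txn in transactions {
        latest.extend(CurrentStakingPoolVoter::from_transaction(txn)?);
    }
    Ok(latest)
}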
table_handle_to_owner: &TableHandleToOwner, + conn: &mut DbPoolConnection<'_>, + query_retries: u32, + query_retry_delay_ms: u64, + ) -> anyhow::Result> { + let table_item_data = table_item.data.as_ref().unwrap(); + + let maybe_collection_data = match TokenWriteSet::from_table_item_type( + table_item_data.value_type.as_str(), + &table_item_data.value, + txn_version, + )? { + Some(TokenWriteSet::CollectionData(inner)) => Some(inner), + _ => None, + }; + if let Some(collection_data) = maybe_collection_data { + let table_handle = table_item.handle.to_string(); + let maybe_creator_address = table_handle_to_owner + .get(&standardize_address(&table_handle)) + .map(|table_metadata| table_metadata.get_owner_address()); + let mut creator_address = match maybe_creator_address { + Some(ca) => ca, + None => match Self::get_collection_creator( + conn, + &table_handle, + query_retries, + query_retry_delay_ms, + ) + .await + { + Ok(creator) => creator, + Err(_) => { + tracing::error!( + transaction_version = txn_version, + lookup_key = &table_handle, + "Failed to get collection creator for table handle. You probably should backfill db." + ); + return Ok(None); + }, + }, + }; + creator_address = standardize_address(&creator_address); + let collection_data_id = + CollectionDataIdType::new(creator_address, collection_data.get_name().to_string()); + let collection_data_id_hash = collection_data_id.to_hash(); + let collection_name = collection_data.get_name_trunc(); + let metadata_uri = collection_data.get_uri_trunc(); + + Ok(Some(( + Self { + collection_data_id_hash: collection_data_id_hash.clone(), + collection_name: collection_name.clone(), + creator_address: collection_data_id.creator.clone(), + description: collection_data.description.clone(), + transaction_version: txn_version, + metadata_uri: metadata_uri.clone(), + supply: collection_data.supply.clone(), + maximum: collection_data.maximum.clone(), + maximum_mutable: collection_data.mutability_config.maximum, + uri_mutable: collection_data.mutability_config.uri, + description_mutable: collection_data.mutability_config.description, + table_handle: table_handle.clone(), + transaction_timestamp: txn_timestamp, + }, + CurrentCollectionData { + collection_data_id_hash, + collection_name, + creator_address: collection_data_id.creator, + description: collection_data.description, + metadata_uri, + supply: collection_data.supply, + maximum: collection_data.maximum, + maximum_mutable: collection_data.mutability_config.maximum, + uri_mutable: collection_data.mutability_config.uri, + description_mutable: collection_data.mutability_config.description, + last_transaction_version: txn_version, + table_handle, + last_transaction_timestamp: txn_timestamp, + }, + ))) + } else { + Ok(None) + } + } + + /// If collection data is not in resources of the same transaction, then try looking for it in the database. Since collection owner + /// cannot change, we can just look in the current_collection_datas table. + /// Retrying a few times since this collection could've been written in a separate thread. 
+ pub async fn get_collection_creator( + conn: &mut DbPoolConnection<'_>, + table_handle: &str, + query_retries: u32, + query_retry_delay_ms: u64, + ) -> anyhow::Result { + let mut tried = 0; + while tried < query_retries { + tried += 1; + match CurrentCollectionDataQuery::get_by_table_handle(conn, table_handle).await { + Ok(current_collection_data) => return Ok(current_collection_data.creator_address), + Err(_) => { + if tried < query_retries { + tokio::time::sleep(std::time::Duration::from_millis(query_retry_delay_ms)) + .await; + } + }, + } + } + Err(anyhow::anyhow!("Failed to get collection creator")) + } +} + +impl CurrentCollectionDataQuery { + pub async fn get_by_table_handle( + conn: &mut DbPoolConnection<'_>, + table_handle: &str, + ) -> diesel::QueryResult { + current_collection_datas::table + .filter(current_collection_datas::table_handle.eq(table_handle)) + .first::(conn) + .await + } +} diff --git a/rust/processor/src/db/common/models/token_models/mod.rs b/rust/processor/src/db/common/models/token_models/mod.rs new file mode 100644 index 000000000..e97b317c1 --- /dev/null +++ b/rust/processor/src/db/common/models/token_models/mod.rs @@ -0,0 +1,11 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +pub mod collection_datas; +pub mod nft_points; +pub mod token_activities; +pub mod token_claims; +pub mod token_datas; +pub mod token_ownerships; +pub mod token_utils; +pub mod tokens; diff --git a/rust/processor/src/db/common/models/token_models/nft_points.rs b/rust/processor/src/db/common/models/token_models/nft_points.rs new file mode 100644 index 000000000..0224a0f1c --- /dev/null +++ b/rust/processor/src/db/common/models/token_models/nft_points.rs @@ -0,0 +1,138 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +// This is required because a diesel macro makes clippy sad +#![allow(clippy::extra_unused_lifetimes)] +#![allow(clippy::unused_unit)] + +use crate::{ + schema::nft_points, + utils::{ + counters::PROCESSOR_UNKNOWN_TYPE_COUNT, + util::{ + get_clean_payload, get_entry_function_from_user_request, parse_timestamp, + standardize_address, + }, + }, +}; +use aptos_protos::transaction::v1::{transaction::TxnData, Transaction}; +use bigdecimal::BigDecimal; +use diesel::prelude::*; +use field_count::FieldCount; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key(transaction_version))] +#[diesel(table_name = nft_points)] +pub struct NftPoints { + pub transaction_version: i64, + pub owner_address: String, + pub token_name: String, + pub point_type: String, + pub amount: BigDecimal, + pub transaction_timestamp: chrono::NaiveDateTime, +} + +impl NftPoints { + pub fn from_transaction( + transaction: &Transaction, + nft_points_contract: Option, + ) -> Option { + let txn_data = match transaction.txn_data.as_ref() { + Some(data) => data, + None => { + PROCESSOR_UNKNOWN_TYPE_COUNT + .with_label_values(&["NftPoints"]) + .inc(); + tracing::warn!( + transaction_version = transaction.version, + "Transaction data doesn't exist", + ); + return None; + }, + }; + let version = transaction.version as i64; + let timestamp = transaction + .timestamp + .as_ref() + .expect("Transaction timestamp doesn't exist!"); + let transaction_info = transaction + .info + .as_ref() + .expect("Transaction info doesn't exist!"); + if let Some(contract) = nft_points_contract { + if let TxnData::User(user_txn) = txn_data { + let user_request = user_txn + .request + 
.as_ref() + .expect("Sends is not present in user txn"); + let payload = user_txn + .request + .as_ref() + .expect("Getting user request failed.") + .payload + .as_ref() + .expect("Getting payload failed."); + let entry_function_id_str = + get_entry_function_from_user_request(user_request).unwrap_or_default(); + + // If failed transaction, end + if !transaction_info.success { + return None; + } + if entry_function_id_str == contract { + let payload_cleaned = get_clean_payload(payload, version).unwrap(); + let args = payload_cleaned["arguments"] + .as_array() + .unwrap_or_else(|| { + tracing::error!( + transaction_version = version, + payload = ?payload_cleaned, + "Failed to get arguments from nft_points transaction" + ); + panic!() + }) + .iter() + .map(|x| { + unescape::unescape(x.as_str().unwrap_or_else(|| { + tracing::error!( + transaction_version = version, + payload = ?payload_cleaned, + "Failed to parse arguments from nft_points transaction" + ); + panic!() + })) + .unwrap_or_else(|| { + tracing::error!( + transaction_version = version, + payload = ?payload_cleaned, + "Failed to escape arguments from nft_points transaction" + ); + panic!() + }) + }) + .collect::>(); + let owner_address = standardize_address(&args[0]); + let amount = args[2].parse().unwrap_or_else(|_| { + tracing::error!( + transaction_version = version, + argument = &args[2], + "Failed to parse amount from nft_points transaction" + ); + panic!() + }); + let transaction_timestamp = parse_timestamp(timestamp, version); + return Some(Self { + transaction_version: version, + owner_address, + token_name: args[1].clone(), + point_type: args[3].clone(), + amount, + transaction_timestamp, + }); + } + } + } + None + } +} diff --git a/rust/processor/src/db/common/models/token_models/token_activities.rs b/rust/processor/src/db/common/models/token_models/token_activities.rs new file mode 100644 index 000000000..638738f14 --- /dev/null +++ b/rust/processor/src/db/common/models/token_models/token_activities.rs @@ -0,0 +1,209 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +// This is required because a diesel macro makes clippy sad +#![allow(clippy::extra_unused_lifetimes)] +#![allow(clippy::unused_unit)] + +use super::token_utils::{TokenDataIdType, TokenEvent}; +use crate::{ + schema::token_activities, + utils::{ + counters::PROCESSOR_UNKNOWN_TYPE_COUNT, + util::{parse_timestamp, standardize_address}, + }, +}; +use aptos_protos::transaction::v1::{transaction::TxnData, Event, Transaction}; +use bigdecimal::{BigDecimal, Zero}; +use field_count::FieldCount; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key( + transaction_version, + event_account_address, + event_creation_number, + event_sequence_number +))] +#[diesel(table_name = token_activities)] +pub struct TokenActivity { + pub transaction_version: i64, + pub event_account_address: String, + pub event_creation_number: i64, + pub event_sequence_number: i64, + pub token_data_id_hash: String, + pub property_version: BigDecimal, + pub creator_address: String, + pub collection_name: String, + pub name: String, + pub transfer_type: String, + pub from_address: Option, + pub to_address: Option, + pub token_amount: BigDecimal, + pub coin_type: Option, + pub coin_amount: Option, + pub collection_data_id_hash: String, + pub transaction_timestamp: chrono::NaiveDateTime, + pub event_index: Option, +} + +/// A simplified TokenActivity (excluded common fields) to 
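// Note on the argument handling above: the configured nft_points entry function is assumed to take
// positional arguments in the order (owner address, token name, amount, point type), i.e. args[0]
// through args[3]. The amount arrives as a string and is parsed into BigDecimal, and a missing or
// malformed argument is treated as fatal (the processor panics rather than skipping the row).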
reduce code duplication +struct TokenActivityHelper<'a> { + pub token_data_id: &'a TokenDataIdType, + pub property_version: BigDecimal, + pub from_address: Option, + pub to_address: Option, + pub token_amount: BigDecimal, + pub coin_type: Option, + pub coin_amount: Option, +} + +impl TokenActivity { + pub fn from_transaction(transaction: &Transaction) -> Vec { + let mut token_activities = vec![]; + let txn_data = match transaction.txn_data.as_ref() { + Some(data) => data, + None => { + PROCESSOR_UNKNOWN_TYPE_COUNT + .with_label_values(&["TokenActivity"]) + .inc(); + tracing::warn!( + transaction_version = transaction.version, + "Transaction data doesn't exist", + ); + return token_activities; + }, + }; + if let TxnData::User(user_txn) = txn_data { + for (index, event) in user_txn.events.iter().enumerate() { + let txn_version = transaction.version as i64; + if let Some(token_event) = TokenEvent::from_event( + event.type_str.as_str(), + event.data.as_str(), + txn_version, + ) + .unwrap() + { + token_activities.push(Self::from_parsed_event( + event.type_str.as_str(), + event, + &token_event, + txn_version, + parse_timestamp(transaction.timestamp.as_ref().unwrap(), txn_version), + index as i64, + )) + } + } + } + token_activities + } + + pub fn from_parsed_event( + event_type: &str, + event: &Event, + token_event: &TokenEvent, + txn_version: i64, + txn_timestamp: chrono::NaiveDateTime, + event_index: i64, + ) -> Self { + let event_account_address = + standardize_address(event.key.as_ref().unwrap().account_address.as_str()); + let event_creation_number = event.key.as_ref().unwrap().creation_number as i64; + let event_sequence_number = event.sequence_number as i64; + let token_activity_helper = match token_event { + TokenEvent::MintTokenEvent(inner) => TokenActivityHelper { + token_data_id: &inner.id, + property_version: BigDecimal::zero(), + from_address: Some(event_account_address.clone()), + to_address: None, + token_amount: inner.amount.clone(), + coin_type: None, + coin_amount: None, + }, + TokenEvent::BurnTokenEvent(inner) => TokenActivityHelper { + token_data_id: &inner.id.token_data_id, + property_version: inner.id.property_version.clone(), + from_address: Some(event_account_address.clone()), + to_address: None, + token_amount: inner.amount.clone(), + coin_type: None, + coin_amount: None, + }, + TokenEvent::MutateTokenPropertyMapEvent(inner) => TokenActivityHelper { + token_data_id: &inner.new_id.token_data_id, + property_version: inner.new_id.property_version.clone(), + from_address: Some(event_account_address.clone()), + to_address: None, + token_amount: BigDecimal::zero(), + coin_type: None, + coin_amount: None, + }, + TokenEvent::WithdrawTokenEvent(inner) => TokenActivityHelper { + token_data_id: &inner.id.token_data_id, + property_version: inner.id.property_version.clone(), + from_address: Some(event_account_address.clone()), + to_address: None, + token_amount: inner.amount.clone(), + coin_type: None, + coin_amount: None, + }, + TokenEvent::DepositTokenEvent(inner) => TokenActivityHelper { + token_data_id: &inner.id.token_data_id, + property_version: inner.id.property_version.clone(), + from_address: None, + to_address: Some(standardize_address(&event_account_address)), + token_amount: inner.amount.clone(), + coin_type: None, + coin_amount: None, + }, + TokenEvent::OfferTokenEvent(inner) => TokenActivityHelper { + token_data_id: &inner.token_id.token_data_id, + property_version: inner.token_id.property_version.clone(), + from_address: Some(event_account_address.clone()), + 
to_address: Some(inner.get_to_address()), + token_amount: inner.amount.clone(), + coin_type: None, + coin_amount: None, + }, + TokenEvent::CancelTokenOfferEvent(inner) => TokenActivityHelper { + token_data_id: &inner.token_id.token_data_id, + property_version: inner.token_id.property_version.clone(), + from_address: Some(event_account_address.clone()), + to_address: Some(inner.get_to_address()), + token_amount: inner.amount.clone(), + coin_type: None, + coin_amount: None, + }, + TokenEvent::ClaimTokenEvent(inner) => TokenActivityHelper { + token_data_id: &inner.token_id.token_data_id, + property_version: inner.token_id.property_version.clone(), + from_address: Some(event_account_address.clone()), + to_address: Some(inner.get_to_address()), + token_amount: inner.amount.clone(), + coin_type: None, + coin_amount: None, + }, + }; + let token_data_id = token_activity_helper.token_data_id; + Self { + event_account_address, + event_creation_number, + event_sequence_number, + token_data_id_hash: token_data_id.to_hash(), + property_version: token_activity_helper.property_version, + collection_data_id_hash: token_data_id.get_collection_data_id_hash(), + creator_address: token_data_id.get_creator_address(), + collection_name: token_data_id.get_collection_trunc(), + name: token_data_id.get_name_trunc(), + transaction_version: txn_version, + transfer_type: event_type.to_string(), + from_address: token_activity_helper.from_address, + to_address: token_activity_helper.to_address, + token_amount: token_activity_helper.token_amount, + coin_type: token_activity_helper.coin_type, + coin_amount: token_activity_helper.coin_amount, + transaction_timestamp: txn_timestamp, + event_index: Some(event_index), + } + } +} diff --git a/rust/processor/src/db/common/models/token_models/token_claims.rs b/rust/processor/src/db/common/models/token_models/token_claims.rs new file mode 100644 index 000000000..458859d9b --- /dev/null +++ b/rust/processor/src/db/common/models/token_models/token_claims.rs @@ -0,0 +1,172 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +// This is required because a diesel macro makes clippy sad +#![allow(clippy::extra_unused_lifetimes)] +#![allow(clippy::unused_unit)] + +use super::{token_utils::TokenWriteSet, tokens::TableHandleToOwner}; +use crate::{schema::current_token_pending_claims, utils::util::standardize_address}; +use aptos_protos::transaction::v1::{DeleteTableItem, WriteTableItem}; +use bigdecimal::{BigDecimal, Zero}; +use field_count::FieldCount; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key(token_data_id_hash, property_version, from_address, to_address))] +#[diesel(table_name = current_token_pending_claims)] +pub struct CurrentTokenPendingClaim { + pub token_data_id_hash: String, + pub property_version: BigDecimal, + pub from_address: String, + pub to_address: String, + pub collection_data_id_hash: String, + pub creator_address: String, + pub collection_name: String, + pub name: String, + pub amount: BigDecimal, + pub table_handle: String, + pub last_transaction_version: i64, + pub last_transaction_timestamp: chrono::NaiveDateTime, + pub token_data_id: String, + pub collection_id: String, +} + +impl CurrentTokenPendingClaim { + /// Token claim is stored in a table in the offerer's account. 
The key is token_offer_id (token_id + to address) + /// and value is token (token_id + amount) + pub fn from_write_table_item( + table_item: &WriteTableItem, + txn_version: i64, + txn_timestamp: chrono::NaiveDateTime, + table_handle_to_owner: &TableHandleToOwner, + ) -> anyhow::Result> { + let table_item_data = table_item.data.as_ref().unwrap(); + + let maybe_offer = match TokenWriteSet::from_table_item_type( + table_item_data.key_type.as_str(), + &table_item_data.key, + txn_version, + )? { + Some(TokenWriteSet::TokenOfferId(inner)) => Some(inner), + _ => None, + }; + if let Some(offer) = &maybe_offer { + let maybe_token = match TokenWriteSet::from_table_item_type( + table_item_data.value_type.as_str(), + &table_item_data.value, + txn_version, + )? { + Some(TokenWriteSet::Token(inner)) => Some(inner), + _ => None, + }; + if let Some(token) = &maybe_token { + let table_handle = standardize_address(&table_item.handle.to_string()); + + let maybe_table_metadata = table_handle_to_owner.get(&table_handle); + + if let Some(table_metadata) = maybe_table_metadata { + let token_id = offer.token_id.clone(); + let token_data_id_struct = token_id.token_data_id; + let collection_data_id_hash = + token_data_id_struct.get_collection_data_id_hash(); + let token_data_id_hash = token_data_id_struct.to_hash(); + // Basically adding 0x prefix to the previous 2 lines. This is to be consistent with Token V2 + let collection_id = token_data_id_struct.get_collection_id(); + let token_data_id = token_data_id_struct.to_id(); + let collection_name = token_data_id_struct.get_collection_trunc(); + let name = token_data_id_struct.get_name_trunc(); + + return Ok(Some(Self { + token_data_id_hash, + property_version: token_id.property_version, + from_address: table_metadata.get_owner_address(), + to_address: offer.get_to_address(), + collection_data_id_hash, + creator_address: token_data_id_struct.get_creator_address(), + collection_name, + name, + amount: token.amount.clone(), + table_handle, + last_transaction_version: txn_version, + last_transaction_timestamp: txn_timestamp, + token_data_id, + collection_id, + })); + } else { + tracing::warn!( + transaction_version = txn_version, + table_handle = table_handle, + "Missing table handle metadata for TokenClaim. {:?}", + table_handle_to_owner + ); + } + } else { + tracing::warn!( + transaction_version = txn_version, + value_type = table_item_data.value_type, + value = table_item_data.value, + "Expecting token as value for key = token_offer_id", + ); + } + } + Ok(None) + } + + pub fn from_delete_table_item( + table_item: &DeleteTableItem, + txn_version: i64, + txn_timestamp: chrono::NaiveDateTime, + table_handle_to_owner: &TableHandleToOwner, + ) -> anyhow::Result> { + let table_item_data = table_item.data.as_ref().unwrap(); + + let maybe_offer = match TokenWriteSet::from_table_item_type( + table_item_data.key_type.as_str(), + &table_item_data.key, + txn_version, + )? { + Some(TokenWriteSet::TokenOfferId(inner)) => Some(inner), + _ => None, + }; + if let Some(offer) = &maybe_offer { + let table_handle = standardize_address(&table_item.handle.to_string()); + + let table_metadata = table_handle_to_owner.get(&table_handle).unwrap_or_else(|| { + panic!( + "Missing table handle metadata for claim. 
\ + Version: {}, table handle for PendingClaims: {}, all metadata: {:?}", + txn_version, table_handle, table_handle_to_owner + ) + }); + + let token_id = offer.token_id.clone(); + let token_data_id_struct = token_id.token_data_id; + let collection_data_id_hash = token_data_id_struct.get_collection_data_id_hash(); + let token_data_id_hash = token_data_id_struct.to_hash(); + // Basically adding 0x prefix to the previous 2 lines. This is to be consistent with Token V2 + let collection_id = token_data_id_struct.get_collection_id(); + let token_data_id = token_data_id_struct.to_id(); + let collection_name = token_data_id_struct.get_collection_trunc(); + let name = token_data_id_struct.get_name_trunc(); + + return Ok(Some(Self { + token_data_id_hash, + property_version: token_id.property_version, + from_address: table_metadata.get_owner_address(), + to_address: offer.get_to_address(), + collection_data_id_hash, + creator_address: token_data_id_struct.get_creator_address(), + collection_name, + name, + amount: BigDecimal::zero(), + table_handle, + last_transaction_version: txn_version, + last_transaction_timestamp: txn_timestamp, + token_data_id, + collection_id, + })); + } + Ok(None) + } +} diff --git a/rust/processor/src/db/common/models/token_models/token_datas.rs b/rust/processor/src/db/common/models/token_models/token_datas.rs new file mode 100644 index 000000000..65bc7d3b8 --- /dev/null +++ b/rust/processor/src/db/common/models/token_models/token_datas.rs @@ -0,0 +1,167 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +// This is required because a diesel macro makes clippy sad +#![allow(clippy::extra_unused_lifetimes)] +#![allow(clippy::unused_unit)] + +use super::token_utils::TokenWriteSet; +use crate::schema::{current_token_datas, token_datas}; +use aptos_protos::transaction::v1::WriteTableItem; +use bigdecimal::BigDecimal; +use field_count::FieldCount; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key(token_data_id_hash, transaction_version))] +#[diesel(table_name = token_datas)] +pub struct TokenData { + pub token_data_id_hash: String, + pub transaction_version: i64, + pub creator_address: String, + pub collection_name: String, + pub name: String, + pub maximum: BigDecimal, + pub supply: BigDecimal, + pub largest_property_version: BigDecimal, + pub metadata_uri: String, + pub payee_address: String, + pub royalty_points_numerator: BigDecimal, + pub royalty_points_denominator: BigDecimal, + pub maximum_mutable: bool, + pub uri_mutable: bool, + pub description_mutable: bool, + pub properties_mutable: bool, + pub royalty_mutable: bool, + pub default_properties: serde_json::Value, + pub collection_data_id_hash: String, + pub transaction_timestamp: chrono::NaiveDateTime, + pub description: String, +} + +#[derive(Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key(token_data_id_hash))] +#[diesel(table_name = current_token_datas)] +pub struct CurrentTokenData { + pub token_data_id_hash: String, + pub creator_address: String, + pub collection_name: String, + pub name: String, + pub maximum: bigdecimal::BigDecimal, + pub supply: bigdecimal::BigDecimal, + pub largest_property_version: bigdecimal::BigDecimal, + pub metadata_uri: String, + pub payee_address: String, + pub royalty_points_numerator: bigdecimal::BigDecimal, + pub royalty_points_denominator: bigdecimal::BigDecimal, + pub maximum_mutable: bool, + pub uri_mutable: 
bool, + pub description_mutable: bool, + pub properties_mutable: bool, + pub royalty_mutable: bool, + pub default_properties: serde_json::Value, + pub last_transaction_version: i64, + pub collection_data_id_hash: String, + pub last_transaction_timestamp: chrono::NaiveDateTime, + pub description: String, +} + +impl TokenData { + pub fn from_write_table_item( + table_item: &WriteTableItem, + txn_version: i64, + txn_timestamp: chrono::NaiveDateTime, + ) -> anyhow::Result> { + let table_item_data = table_item.data.as_ref().unwrap(); + + let maybe_token_data = match TokenWriteSet::from_table_item_type( + table_item_data.value_type.as_str(), + &table_item_data.value, + txn_version, + )? { + Some(TokenWriteSet::TokenData(inner)) => Some(inner), + _ => None, + }; + + if let Some(token_data) = maybe_token_data { + let maybe_token_data_id = match TokenWriteSet::from_table_item_type( + table_item_data.key_type.as_str(), + &table_item_data.key, + txn_version, + )? { + Some(TokenWriteSet::TokenDataId(inner)) => Some(inner), + _ => None, + }; + if let Some(token_data_id) = maybe_token_data_id { + let collection_data_id_hash = token_data_id.get_collection_data_id_hash(); + let token_data_id_hash = token_data_id.to_hash(); + let collection_name = token_data_id.get_collection_trunc(); + let name = token_data_id.get_name_trunc(); + let metadata_uri = token_data.get_uri_trunc(); + + return Ok(Some(( + Self { + collection_data_id_hash: collection_data_id_hash.clone(), + token_data_id_hash: token_data_id_hash.clone(), + creator_address: token_data_id.get_creator_address(), + collection_name: collection_name.clone(), + name: name.clone(), + transaction_version: txn_version, + maximum: token_data.maximum.clone(), + supply: token_data.supply.clone(), + largest_property_version: token_data.largest_property_version.clone(), + metadata_uri: metadata_uri.clone(), + payee_address: token_data.royalty.get_payee_address(), + royalty_points_numerator: token_data + .royalty + .royalty_points_numerator + .clone(), + royalty_points_denominator: token_data + .royalty + .royalty_points_denominator + .clone(), + maximum_mutable: token_data.mutability_config.maximum, + uri_mutable: token_data.mutability_config.uri, + description_mutable: token_data.mutability_config.description, + properties_mutable: token_data.mutability_config.properties, + royalty_mutable: token_data.mutability_config.royalty, + default_properties: token_data.default_properties.clone(), + transaction_timestamp: txn_timestamp, + description: token_data.description.clone(), + }, + CurrentTokenData { + collection_data_id_hash, + token_data_id_hash, + creator_address: token_data_id.get_creator_address(), + collection_name, + name, + maximum: token_data.maximum, + supply: token_data.supply, + largest_property_version: token_data.largest_property_version, + metadata_uri, + payee_address: token_data.royalty.get_payee_address(), + royalty_points_numerator: token_data.royalty.royalty_points_numerator, + royalty_points_denominator: token_data.royalty.royalty_points_denominator, + maximum_mutable: token_data.mutability_config.maximum, + uri_mutable: token_data.mutability_config.uri, + description_mutable: token_data.mutability_config.description, + properties_mutable: token_data.mutability_config.properties, + royalty_mutable: token_data.mutability_config.royalty, + default_properties: token_data.default_properties, + last_transaction_version: txn_version, + last_transaction_timestamp: txn_timestamp, + description: token_data.description, + }, + ))); + } else { + 
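+                // This branch is reached when the table item's value deserialized as TokenData but
+                // its key was not a TokenDataId; we only log a warning here and the function falls
+                // through to Ok(None).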
tracing::warn!( + transaction_version = txn_version, + key_type = table_item_data.key_type, + key = table_item_data.key, + "Expecting token_data_id as key for value = token_data" + ); + } + } + Ok(None) + } +} diff --git a/rust/processor/src/db/common/models/token_models/token_ownerships.rs b/rust/processor/src/db/common/models/token_models/token_ownerships.rs new file mode 100644 index 000000000..1011d2476 --- /dev/null +++ b/rust/processor/src/db/common/models/token_models/token_ownerships.rs @@ -0,0 +1,134 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +// This is required because a diesel macro makes clippy sad +#![allow(clippy::extra_unused_lifetimes)] +#![allow(clippy::unused_unit)] + +use super::{ + token_utils::TokenWriteSet, + tokens::{TableHandleToOwner, Token}, +}; +use crate::{ + schema::{current_token_ownerships, token_ownerships}, + utils::util::standardize_address, +}; +use bigdecimal::BigDecimal; +use field_count::FieldCount; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key( + token_data_id_hash, + property_version, + transaction_version, + table_handle +))] +#[diesel(table_name = token_ownerships)] +pub struct TokenOwnership { + pub token_data_id_hash: String, + pub property_version: BigDecimal, + pub transaction_version: i64, + pub table_handle: String, + pub creator_address: String, + pub collection_name: String, + pub name: String, + pub owner_address: Option, + pub amount: BigDecimal, + pub table_type: Option, + pub collection_data_id_hash: String, + pub transaction_timestamp: chrono::NaiveDateTime, +} + +#[derive(Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key(token_data_id_hash, property_version, owner_address))] +#[diesel(table_name = current_token_ownerships)] +pub struct CurrentTokenOwnership { + pub token_data_id_hash: String, + pub property_version: BigDecimal, + pub owner_address: String, + pub creator_address: String, + pub collection_name: String, + pub name: String, + pub amount: BigDecimal, + pub token_properties: serde_json::Value, + pub last_transaction_version: i64, + pub collection_data_id_hash: String, + pub table_type: String, + pub last_transaction_timestamp: chrono::NaiveDateTime, +} + +impl TokenOwnership { + /// We only want to track tokens in 0x1::token::TokenStore for now. This is because the table + /// schema doesn't have table type (i.e. token container) as primary key. TokenStore has token_id + /// as key and token as value. + pub fn from_token( + token: &Token, + table_item_key_type: &str, + table_item_key: &str, + amount: BigDecimal, + table_handle: String, + table_handle_to_owner: &TableHandleToOwner, + ) -> anyhow::Result)>> { + let txn_version = token.transaction_version; + let maybe_token_id = match TokenWriteSet::from_table_item_type( + table_item_key_type, + table_item_key, + txn_version, + )? 
{ + Some(TokenWriteSet::TokenId(inner)) => Some(inner), + _ => None, + }; + // Return early if table key is not token id + if maybe_token_id.is_none() { + return Ok(None); + } + let table_handle = standardize_address(&table_handle); + let maybe_table_metadata = table_handle_to_owner.get(&table_handle); + // Return early if table type is not tokenstore + if let Some(tm) = maybe_table_metadata { + if tm.table_type != "0x3::token::TokenStore" { + return Ok(None); + } + } + let (curr_token_ownership, owner_address, table_type) = match maybe_table_metadata { + Some(tm) => ( + Some(CurrentTokenOwnership { + collection_data_id_hash: token.collection_data_id_hash.clone(), + token_data_id_hash: token.token_data_id_hash.clone(), + property_version: token.property_version.clone(), + owner_address: tm.get_owner_address(), + creator_address: standardize_address(&token.creator_address.clone()), + collection_name: token.collection_name.clone(), + name: token.name.clone(), + amount: amount.clone(), + token_properties: token.token_properties.clone(), + last_transaction_version: txn_version, + table_type: tm.table_type.clone(), + last_transaction_timestamp: token.transaction_timestamp, + }), + Some(tm.get_owner_address()), + Some(tm.table_type.clone()), + ), + None => (None, None, None), + }; + + Ok(Some(( + Self { + collection_data_id_hash: token.collection_data_id_hash.clone(), + token_data_id_hash: token.token_data_id_hash.clone(), + property_version: token.property_version.clone(), + owner_address: owner_address.map(|s| standardize_address(&s)), + creator_address: standardize_address(&token.creator_address), + collection_name: token.collection_name.clone(), + name: token.name.clone(), + amount, + table_type, + transaction_version: token.transaction_version, + table_handle, + transaction_timestamp: token.transaction_timestamp, + }, + curr_token_ownership, + ))) + } +} diff --git a/rust/processor/src/db/common/models/token_models/token_utils.rs b/rust/processor/src/db/common/models/token_models/token_utils.rs new file mode 100644 index 000000000..aa2d74c18 --- /dev/null +++ b/rust/processor/src/db/common/models/token_models/token_utils.rs @@ -0,0 +1,475 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +// This is required because a diesel macro makes clippy sad +#![allow(clippy::extra_unused_lifetimes)] + +use crate::utils::util::{ + deserialize_from_string, deserialize_property_map_from_bcs_hexstring, + deserialize_string_from_hexstring, hash_str, standardize_address, truncate_str, +}; +use anyhow::{Context, Result}; +use bigdecimal::BigDecimal; +use serde::{Deserialize, Serialize}; +use std::fmt::{self, Formatter}; + +pub const TOKEN_ADDR: &str = "0x0000000000000000000000000000000000000000000000000000000000000003"; +pub const NAME_LENGTH: usize = 128; +pub const URI_LENGTH: usize = 512; + +/** + * This file defines deserialized move types as defined in our 0x3 contracts. 
+ */ +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct Table { + handle: String, +} + +impl Table { + pub fn get_handle(&self) -> String { + standardize_address(&self.handle) + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct TokenDataIdType { + creator: String, + collection: String, + name: String, +} + +impl TokenDataIdType { + pub fn to_id(&self) -> String { + format!("0x{}", self.to_hash()) + } + + pub fn to_hash(&self) -> String { + hash_str(&self.to_string()) + } + + pub fn get_collection_trunc(&self) -> String { + truncate_str(&self.collection, NAME_LENGTH) + } + + pub fn get_name_trunc(&self) -> String { + truncate_str(&self.name, NAME_LENGTH) + } + + pub fn get_collection_data_id_hash(&self) -> String { + CollectionDataIdType::new(self.creator.clone(), self.collection.clone()).to_hash() + } + + pub fn get_collection_id(&self) -> String { + CollectionDataIdType::new(self.creator.clone(), self.collection.clone()).to_id() + } + + pub fn get_creator_address(&self) -> String { + standardize_address(&self.creator) + } +} + +impl fmt::Display for TokenDataIdType { + fn fmt(&self, f: &mut Formatter) -> std::fmt::Result { + write!( + f, + "{}::{}::{}", + standardize_address(self.creator.as_str()), + self.collection, + self.name + ) + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct CollectionDataIdType { + pub creator: String, + pub name: String, +} + +impl CollectionDataIdType { + pub fn new(creator: String, name: String) -> Self { + Self { creator, name } + } + + pub fn to_hash(&self) -> String { + hash_str(&self.to_string()) + } + + pub fn to_id(&self) -> String { + format!("0x{}", self.to_hash()) + } +} + +impl fmt::Display for CollectionDataIdType { + fn fmt(&self, f: &mut Formatter) -> std::fmt::Result { + write!( + f, + "{}::{}", + standardize_address(self.creator.as_str()), + self.name + ) + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct TokenIdType { + pub token_data_id: TokenDataIdType, + #[serde(deserialize_with = "deserialize_from_string")] + pub property_version: BigDecimal, +} + +impl fmt::Display for TokenIdType { + fn fmt(&self, f: &mut Formatter) -> std::fmt::Result { + write!(f, "{}::{}", self.token_data_id, self.property_version) + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct TokenDataType { + #[serde(deserialize_with = "deserialize_property_map_from_bcs_hexstring")] + pub default_properties: serde_json::Value, + pub description: String, + #[serde(deserialize_with = "deserialize_from_string")] + pub largest_property_version: BigDecimal, + #[serde(deserialize_with = "deserialize_from_string")] + pub maximum: BigDecimal, + pub mutability_config: TokenDataMutabilityConfigType, + name: String, + pub royalty: RoyaltyType, + #[serde(deserialize_with = "deserialize_from_string")] + pub supply: BigDecimal, + uri: String, +} + +impl TokenDataType { + pub fn get_uri_trunc(&self) -> String { + truncate_str(&self.uri, URI_LENGTH) + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct TokenDataMutabilityConfigType { + pub description: bool, + pub maximum: bool, + pub properties: bool, + pub royalty: bool, + pub uri: bool, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct RoyaltyType { + payee_address: String, + #[serde(deserialize_with = "deserialize_from_string")] + pub royalty_points_denominator: BigDecimal, + #[serde(deserialize_with = "deserialize_from_string")] + pub royalty_points_numerator: BigDecimal, +} + +impl RoyaltyType { + pub fn 
get_payee_address(&self) -> String { + standardize_address(&self.payee_address) + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct TokenType { + #[serde(deserialize_with = "deserialize_from_string")] + pub amount: BigDecimal, + pub id: TokenIdType, + #[serde(deserialize_with = "deserialize_property_map_from_bcs_hexstring")] + pub token_properties: serde_json::Value, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct CollectionDataType { + pub description: String, + #[serde(deserialize_with = "deserialize_from_string")] + pub maximum: BigDecimal, + pub mutability_config: CollectionDataMutabilityConfigType, + name: String, + #[serde(deserialize_with = "deserialize_from_string")] + pub supply: BigDecimal, + uri: String, +} + +impl CollectionDataType { + pub fn get_name(&self) -> &str { + &self.name + } + + pub fn get_uri_trunc(&self) -> String { + truncate_str(&self.uri, URI_LENGTH) + } + + pub fn get_name_trunc(&self) -> String { + truncate_str(&self.name, NAME_LENGTH) + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct TokenOfferIdType { + to_addr: String, + pub token_id: TokenIdType, +} + +impl TokenOfferIdType { + pub fn get_to_address(&self) -> String { + standardize_address(&self.to_addr) + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct CollectionResourceType { + pub collection_data: Table, + pub token_data: Table, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct TokenStoreResourceType { + pub tokens: Table, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct PendingClaimsResourceType { + pub pending_claims: Table, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct CollectionDataMutabilityConfigType { + pub description: bool, + pub maximum: bool, + pub uri: bool, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct WithdrawTokenEventType { + #[serde(deserialize_with = "deserialize_from_string")] + pub amount: BigDecimal, + pub id: TokenIdType, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct DepositTokenEventType { + #[serde(deserialize_with = "deserialize_from_string")] + pub amount: BigDecimal, + pub id: TokenIdType, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct MintTokenEventType { + #[serde(deserialize_with = "deserialize_from_string")] + pub amount: BigDecimal, + pub id: TokenDataIdType, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct BurnTokenEventType { + #[serde(deserialize_with = "deserialize_from_string")] + pub amount: BigDecimal, + pub id: TokenIdType, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct MutateTokenPropertyMapEventType { + pub old_id: TokenIdType, + pub new_id: TokenIdType, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct OfferTokenEventType { + #[serde(deserialize_with = "deserialize_from_string")] + pub amount: BigDecimal, + to_address: String, + pub token_id: TokenIdType, +} + +impl OfferTokenEventType { + pub fn get_to_address(&self) -> String { + standardize_address(&self.to_address) + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct CancelTokenOfferEventType { + #[serde(deserialize_with = "deserialize_from_string")] + pub amount: BigDecimal, + to_address: String, + pub token_id: TokenIdType, +} + +impl CancelTokenOfferEventType { + pub fn get_to_address(&self) -> String { + standardize_address(&self.to_address) + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct ClaimTokenEventType { + 
#[serde(deserialize_with = "deserialize_from_string")] + pub amount: BigDecimal, + to_address: String, + pub token_id: TokenIdType, +} + +impl ClaimTokenEventType { + pub fn get_to_address(&self) -> String { + standardize_address(&self.to_address) + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct TypeInfo { + pub account_address: String, + #[serde(deserialize_with = "deserialize_string_from_hexstring")] + pub module_name: String, + #[serde(deserialize_with = "deserialize_string_from_hexstring")] + pub struct_name: String, +} + +impl fmt::Display for TypeInfo { + fn fmt(&self, f: &mut Formatter) -> std::fmt::Result { + write!( + f, + "{}::{}::{}", + self.account_address, self.module_name, self.struct_name + ) + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub enum TokenWriteSet { + TokenDataId(TokenDataIdType), + TokenId(TokenIdType), + TokenData(TokenDataType), + Token(TokenType), + CollectionData(CollectionDataType), + TokenOfferId(TokenOfferIdType), +} + +impl TokenWriteSet { + pub fn from_table_item_type( + data_type: &str, + data: &str, + txn_version: i64, + ) -> Result> { + match data_type { + "0x3::token::TokenDataId" => { + serde_json::from_str(data).map(|inner| Some(TokenWriteSet::TokenDataId(inner))) + }, + "0x3::token::TokenId" => { + serde_json::from_str(data).map(|inner| Some(TokenWriteSet::TokenId(inner))) + }, + "0x3::token::TokenData" => { + serde_json::from_str(data).map(|inner| Some(TokenWriteSet::TokenData(inner))) + }, + "0x3::token::Token" => { + serde_json::from_str(data).map(|inner| Some(TokenWriteSet::Token(inner))) + }, + "0x3::token::CollectionData" => { + serde_json::from_str(data).map(|inner| Some(TokenWriteSet::CollectionData(inner))) + }, + "0x3::token_transfers::TokenOfferId" => { + serde_json::from_str(data).map(|inner| Some(TokenWriteSet::TokenOfferId(inner))) + }, + _ => Ok(None), + } + .context(format!( + "version {} failed! 
failed to parse type {}, data {:?}", + txn_version, data_type, data + )) + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub enum TokenEvent { + MintTokenEvent(MintTokenEventType), + BurnTokenEvent(BurnTokenEventType), + MutateTokenPropertyMapEvent(MutateTokenPropertyMapEventType), + WithdrawTokenEvent(WithdrawTokenEventType), + DepositTokenEvent(DepositTokenEventType), + OfferTokenEvent(OfferTokenEventType), + CancelTokenOfferEvent(CancelTokenOfferEventType), + ClaimTokenEvent(ClaimTokenEventType), +} + +impl TokenEvent { + pub fn from_event(data_type: &str, data: &str, txn_version: i64) -> Result> { + match data_type { + "0x3::token::MintTokenEvent" => { + serde_json::from_str(data).map(|inner| Some(TokenEvent::MintTokenEvent(inner))) + }, + "0x3::token::BurnTokenEvent" => { + serde_json::from_str(data).map(|inner| Some(TokenEvent::BurnTokenEvent(inner))) + }, + "0x3::token::MutateTokenPropertyMapEvent" => serde_json::from_str(data) + .map(|inner| Some(TokenEvent::MutateTokenPropertyMapEvent(inner))), + "0x3::token::WithdrawEvent" => { + serde_json::from_str(data).map(|inner| Some(TokenEvent::WithdrawTokenEvent(inner))) + }, + "0x3::token::DepositEvent" => { + serde_json::from_str(data).map(|inner| Some(TokenEvent::DepositTokenEvent(inner))) + }, + "0x3::token_transfers::TokenOfferEvent" => { + serde_json::from_str(data).map(|inner| Some(TokenEvent::OfferTokenEvent(inner))) + }, + "0x3::token_transfers::TokenCancelOfferEvent" => serde_json::from_str(data) + .map(|inner| Some(TokenEvent::CancelTokenOfferEvent(inner))), + "0x3::token_transfers::TokenClaimEvent" => { + serde_json::from_str(data).map(|inner| Some(TokenEvent::ClaimTokenEvent(inner))) + }, + _ => Ok(None), + } + .context(format!( + "version {} failed! failed to parse type {}, data {:?}", + txn_version, data_type, data + )) + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub enum TokenResource { + CollectionResource(CollectionResourceType), + TokenStoreResource(TokenStoreResourceType), + PendingClaimsResource(PendingClaimsResourceType), +} + +impl TokenResource { + pub fn is_resource_supported(data_type: &str) -> bool { + [ + format!("{}::token::Collections", TOKEN_ADDR), + format!("{}::token::TokenStore", TOKEN_ADDR), + format!("{}::token_transfers::PendingClaims", TOKEN_ADDR), + ] + .contains(&data_type.to_string()) + } + + pub fn from_resource( + data_type: &str, + data: &serde_json::Value, + txn_version: i64, + ) -> Result { + match data_type { + x if x == format!("{}::token::Collections", TOKEN_ADDR) => { + serde_json::from_value(data.clone()) + .map(|inner| Some(TokenResource::CollectionResource(inner))) + }, + x if x == format!("{}::token::TokenStore", TOKEN_ADDR) => { + serde_json::from_value(data.clone()) + .map(|inner| Some(TokenResource::TokenStoreResource(inner))) + }, + x if x == format!("{}::token_transfers::PendingClaims", TOKEN_ADDR) => { + serde_json::from_value(data.clone()) + .map(|inner| Some(TokenResource::PendingClaimsResource(inner))) + }, + _ => Ok(None), + } + .context(format!( + "version {} failed! failed to parse type {}, data {:?}", + txn_version, data_type, data + ))? + .context(format!( + "Resource unsupported! Call is_resource_supported first. 
version {} type {}", + txn_version, data_type + )) + } +} diff --git a/rust/processor/src/db/common/models/token_models/tokens.rs b/rust/processor/src/db/common/models/token_models/tokens.rs new file mode 100644 index 000000000..f6d2e8fab --- /dev/null +++ b/rust/processor/src/db/common/models/token_models/tokens.rs @@ -0,0 +1,457 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +// This is required because a diesel macro makes clippy sad +#![allow(clippy::extra_unused_lifetimes)] +#![allow(clippy::unused_unit)] + +use super::{ + collection_datas::{CollectionData, CurrentCollectionData}, + token_claims::CurrentTokenPendingClaim, + token_datas::{CurrentTokenData, TokenData}, + token_ownerships::{CurrentTokenOwnership, TokenOwnership}, + token_utils::{TokenResource, TokenWriteSet}, +}; +use crate::{ + db::common::models::default_models::move_resources::MoveResource, + schema::tokens, + utils::{ + counters::PROCESSOR_UNKNOWN_TYPE_COUNT, + database::DbPoolConnection, + util::{ensure_not_negative, parse_timestamp, standardize_address}, + }, +}; +use ahash::AHashMap; +use aptos_protos::transaction::v1::{ + transaction::TxnData, write_set_change::Change as WriteSetChangeEnum, DeleteTableItem, + Transaction, WriteResource, WriteTableItem, +}; +use bigdecimal::{BigDecimal, Zero}; +use field_count::FieldCount; +use serde::{Deserialize, Serialize}; + +type TableHandle = String; +type Address = String; +type TableType = String; +pub type TableHandleToOwner = AHashMap; +pub type TokenDataIdHash = String; +// PK of current_token_ownerships, i.e. token_data_id_hash + property_version + owner_address, used to dedupe +pub type CurrentTokenOwnershipPK = (TokenDataIdHash, BigDecimal, Address); +// PK of current_token_pending_claims, i.e. token_data_id_hash + property_version + to/from_address, used to dedupe +pub type CurrentTokenPendingClaimPK = (TokenDataIdHash, BigDecimal, Address, Address); +// PK of tokens table, used to dedupe tokens +pub type TokenPK = (TokenDataIdHash, BigDecimal); + +#[derive(Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key(token_data_id_hash, property_version, transaction_version))] +#[diesel(table_name = tokens)] +pub struct Token { + pub token_data_id_hash: String, + pub property_version: BigDecimal, + pub transaction_version: i64, + pub creator_address: String, + pub collection_name: String, + pub name: String, + pub token_properties: serde_json::Value, + pub collection_data_id_hash: String, + pub transaction_timestamp: chrono::NaiveDateTime, +} + +#[derive(Debug)] +pub struct TableMetadataForToken { + owner_address: Address, + pub table_type: TableType, +} + +impl Token { + /// We can find token data from write sets in user transactions. Table items will contain metadata for collections + /// and tokens. 
To find ownership, we have to look in write resource write sets for who owns those table handles + /// + /// We also will compute current versions of the token tables which are at a higher granularity than the transactional tables (only + /// state at the last transaction will be tracked, hence using hashmap to dedupe) + pub async fn from_transaction( + transaction: &Transaction, + table_handle_to_owner: &TableHandleToOwner, + conn: &mut DbPoolConnection<'_>, + query_retries: u32, + query_retry_delay_ms: u64, + ) -> ( + Vec, + Vec, + Vec, + Vec, + AHashMap, + AHashMap, + AHashMap, + AHashMap, + ) { + let txn_data = match transaction.txn_data.as_ref() { + Some(data) => data, + None => { + PROCESSOR_UNKNOWN_TYPE_COUNT + .with_label_values(&["Token"]) + .inc(); + tracing::warn!( + transaction_version = transaction.version, + "Transaction data doesn't exist", + ); + return ( + vec![], + vec![], + vec![], + vec![], + AHashMap::new(), + AHashMap::new(), + AHashMap::new(), + AHashMap::new(), + ); + }, + }; + if let TxnData::User(_) = txn_data { + let mut token_ownerships = vec![]; + let mut token_datas = vec![]; + let mut collection_datas = vec![]; + + let mut tokens: AHashMap = AHashMap::new(); + let mut current_token_ownerships: AHashMap< + CurrentTokenOwnershipPK, + CurrentTokenOwnership, + > = AHashMap::new(); + let mut current_token_datas: AHashMap = + AHashMap::new(); + let mut current_collection_datas: AHashMap = + AHashMap::new(); + let mut current_token_claims: AHashMap< + CurrentTokenPendingClaimPK, + CurrentTokenPendingClaim, + > = AHashMap::new(); + + let txn_version = transaction.version as i64; + let txn_timestamp = + parse_timestamp(transaction.timestamp.as_ref().unwrap(), txn_version); + let transaction_info = transaction + .info + .as_ref() + .expect("Transaction info doesn't exist!"); + + for wsc in &transaction_info.changes { + // Basic token and ownership data + let (maybe_token_w_ownership, maybe_token_data, maybe_collection_data) = + match wsc.change.as_ref().unwrap() { + WriteSetChangeEnum::WriteTableItem(write_table_item) => ( + Self::from_write_table_item( + write_table_item, + txn_version, + txn_timestamp, + table_handle_to_owner, + ) + .unwrap(), + TokenData::from_write_table_item( + write_table_item, + txn_version, + txn_timestamp, + ) + .unwrap(), + CollectionData::from_write_table_item( + write_table_item, + txn_version, + txn_timestamp, + table_handle_to_owner, + conn, + query_retries, + query_retry_delay_ms, + ) + .await + .unwrap(), + ), + WriteSetChangeEnum::DeleteTableItem(delete_table_item) => ( + Self::from_delete_table_item( + delete_table_item, + txn_version, + txn_timestamp, + table_handle_to_owner, + ) + .unwrap(), + None, + None, + ), + _ => (None, None, None), + }; + // More advanced token contracts + let maybe_current_token_claim = match wsc.change.as_ref().unwrap() { + WriteSetChangeEnum::WriteTableItem(write_table_item) => { + CurrentTokenPendingClaim::from_write_table_item( + write_table_item, + txn_version, + txn_timestamp, + table_handle_to_owner, + ) + .unwrap() + }, + WriteSetChangeEnum::DeleteTableItem(delete_table_item) => { + CurrentTokenPendingClaim::from_delete_table_item( + delete_table_item, + txn_version, + txn_timestamp, + table_handle_to_owner, + ) + .unwrap() + }, + _ => None, + }; + + if let Some((token, maybe_token_ownership, maybe_current_token_ownership)) = + maybe_token_w_ownership + { + tokens.insert( + ( + token.token_data_id_hash.clone(), + token.property_version.clone(), + ), + token, + ); + if let Some(token_ownership) = 
maybe_token_ownership { + token_ownerships.push(token_ownership); + } + if let Some(current_token_ownership) = maybe_current_token_ownership { + current_token_ownerships.insert( + ( + current_token_ownership.token_data_id_hash.clone(), + current_token_ownership.property_version.clone(), + current_token_ownership.owner_address.clone(), + ), + current_token_ownership, + ); + } + } + if let Some((token_data, current_token_data)) = maybe_token_data { + token_datas.push(token_data); + current_token_datas.insert( + current_token_data.token_data_id_hash.clone(), + current_token_data, + ); + } + if let Some((collection_data, current_collection_data)) = maybe_collection_data { + collection_datas.push(collection_data); + current_collection_datas.insert( + current_collection_data.collection_data_id_hash.clone(), + current_collection_data, + ); + } + if let Some(claim) = maybe_current_token_claim { + current_token_claims.insert( + ( + claim.token_data_id_hash.clone(), + claim.property_version.clone(), + claim.from_address.clone(), + claim.to_address.clone(), + ), + claim, + ); + } + } + return ( + tokens.into_values().collect(), + token_ownerships, + token_datas, + collection_datas, + current_token_ownerships, + current_token_datas, + current_collection_datas, + current_token_claims, + ); + } + Default::default() + } + + /// Get token from write table item. Table items don't have address of the table so we need to look it up in the table_handle_to_owner mapping + /// We get the mapping from resource. + /// If the mapping is missing we'll just leave owner address as blank. This isn't great but at least helps us account for the token + pub fn from_write_table_item( + table_item: &WriteTableItem, + txn_version: i64, + txn_timestamp: chrono::NaiveDateTime, + table_handle_to_owner: &TableHandleToOwner, + ) -> anyhow::Result, Option)>> { + let table_item_data = table_item.data.as_ref().unwrap(); + + let maybe_token = match TokenWriteSet::from_table_item_type( + table_item_data.value_type.as_str(), + &table_item_data.value, + txn_version, + )? { + Some(TokenWriteSet::Token(inner)) => Some(inner), + _ => None, + }; + + if let Some(token) = maybe_token { + let token_id = token.id; + let token_data_id = token_id.token_data_id; + let collection_data_id_hash = token_data_id.get_collection_data_id_hash(); + let token_data_id_hash = token_data_id.to_hash(); + let collection_name = token_data_id.get_collection_trunc(); + let name = token_data_id.get_name_trunc(); + + let token_pg = Self { + collection_data_id_hash, + token_data_id_hash, + creator_address: token_data_id.get_creator_address(), + collection_name, + name, + property_version: token_id.property_version, + transaction_version: txn_version, + token_properties: token.token_properties, + transaction_timestamp: txn_timestamp, + }; + + let (token_ownership, current_token_ownership) = TokenOwnership::from_token( + &token_pg, + table_item_data.key_type.as_str(), + &table_item_data.key, + ensure_not_negative(token.amount), + table_item.handle.to_string(), + table_handle_to_owner, + )? + .map(|(token_ownership, current_token_ownership)| { + (Some(token_ownership), current_token_ownership) + }) + .unwrap_or((None, None)); + + Ok(Some((token_pg, token_ownership, current_token_ownership))) + } else { + Ok(None) + } + } + + /// Get token from delete table item. The difference from write table item is that value isn't there so + /// we'll set amount to 0 and token property to blank. 
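+    /// As a purely illustrative sketch (not a real on-chain value), the deleted table item's key is a
+    /// 0x3::token::TokenId that deserializes roughly as
+    /// `{"token_data_id": {"creator": "0x...", "collection": "...", "name": "..."}, "property_version": "0"}`,
+    /// which is enough to rebuild the Token/TokenOwnership rows with amount 0 and null token_properties.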
+ pub fn from_delete_table_item( + table_item: &DeleteTableItem, + txn_version: i64, + txn_timestamp: chrono::NaiveDateTime, + table_handle_to_owner: &TableHandleToOwner, + ) -> anyhow::Result, Option)>> { + let table_item_data = table_item.data.as_ref().unwrap(); + + let maybe_token_id = match TokenWriteSet::from_table_item_type( + table_item_data.key_type.as_str(), + &table_item_data.key, + txn_version, + )? { + Some(TokenWriteSet::TokenId(inner)) => Some(inner), + _ => None, + }; + + if let Some(token_id) = maybe_token_id { + let token_data_id = token_id.token_data_id; + let collection_data_id_hash = token_data_id.get_collection_data_id_hash(); + let token_data_id_hash = token_data_id.to_hash(); + let collection_name = token_data_id.get_collection_trunc(); + let name = token_data_id.get_name_trunc(); + + let token = Self { + collection_data_id_hash, + token_data_id_hash, + creator_address: token_data_id.get_creator_address(), + collection_name, + name, + property_version: token_id.property_version, + transaction_version: txn_version, + token_properties: serde_json::Value::Null, + transaction_timestamp: txn_timestamp, + }; + let (token_ownership, current_token_ownership) = TokenOwnership::from_token( + &token, + table_item_data.key_type.as_str(), + &table_item_data.key, + BigDecimal::zero(), + table_item.handle.to_string(), + table_handle_to_owner, + )? + .map(|(token_ownership, current_token_ownership)| { + (Some(token_ownership), current_token_ownership) + }) + .unwrap_or((None, None)); + Ok(Some((token, token_ownership, current_token_ownership))) + } else { + Ok(None) + } + } +} + +impl TableMetadataForToken { + /// Mapping from table handle to owner type, including type of the table (AKA resource type) + /// from user transactions in a batch of transactions + pub fn get_table_handle_to_owner_from_transactions( + transactions: &[Transaction], + ) -> TableHandleToOwner { + let mut table_handle_to_owner: TableHandleToOwner = AHashMap::new(); + // Do a first pass to get all the table metadata in the batch. + for transaction in transactions { + if let Some(TxnData::User(_)) = transaction.txn_data.as_ref() { + let txn_version = transaction.version as i64; + + let transaction_info = transaction + .info + .as_ref() + .expect("Transaction info doesn't exist!"); + for wsc in &transaction_info.changes { + if let WriteSetChangeEnum::WriteResource(write_resource) = + wsc.change.as_ref().unwrap() + { + let maybe_map = TableMetadataForToken::get_table_handle_to_owner( + write_resource, + txn_version, + ) + .unwrap(); + if let Some(map) = maybe_map { + table_handle_to_owner.extend(map); + } + } + } + } + } + table_handle_to_owner + } + + /// Mapping from table handle to owner type, including type of the table (AKA resource type) + fn get_table_handle_to_owner( + write_resource: &WriteResource, + txn_version: i64, + ) -> anyhow::Result> { + let type_str = MoveResource::get_outer_type_from_write_resource(write_resource); + if !TokenResource::is_resource_supported(type_str.as_str()) { + return Ok(None); + } + let resource = MoveResource::from_write_resource( + write_resource, + 0, // Placeholder, this isn't used anyway + txn_version, + 0, // Placeholder, this isn't used anyway + ); + + let value = TableMetadataForToken { + owner_address: resource.address.clone(), + table_type: write_resource.type_str.clone(), + }; + let table_handle: TableHandle = match TokenResource::from_resource( + &type_str, + resource.data.as_ref().unwrap(), + txn_version, + )? 
{ + TokenResource::CollectionResource(collection_resource) => { + collection_resource.collection_data.get_handle() + }, + TokenResource::TokenStoreResource(inner) => inner.tokens.get_handle(), + TokenResource::PendingClaimsResource(inner) => inner.pending_claims.get_handle(), + }; + Ok(Some(AHashMap::from([( + standardize_address(&table_handle), + value, + )]))) + } + + pub fn get_owner_address(&self) -> String { + standardize_address(&self.owner_address) + } +} diff --git a/rust/processor/src/db/common/models/token_v2_models/mod.rs b/rust/processor/src/db/common/models/token_v2_models/mod.rs new file mode 100644 index 000000000..49bd71da5 --- /dev/null +++ b/rust/processor/src/db/common/models/token_v2_models/mod.rs @@ -0,0 +1,10 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +pub mod v1_token_royalty; +pub mod v2_collections; +pub mod v2_token_activities; +pub mod v2_token_datas; +pub mod v2_token_metadata; +pub mod v2_token_ownerships; +pub mod v2_token_utils; diff --git a/rust/processor/src/db/common/models/token_v2_models/v1_token_royalty.rs b/rust/processor/src/db/common/models/token_v2_models/v1_token_royalty.rs new file mode 100644 index 000000000..f7e1cb124 --- /dev/null +++ b/rust/processor/src/db/common/models/token_v2_models/v1_token_royalty.rs @@ -0,0 +1,100 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +// This is required because a diesel macro makes clippy sad +#![allow(clippy::extra_unused_lifetimes)] +#![allow(clippy::unused_unit)] + +use crate::{ + db::common::models::token_models::token_utils::TokenWriteSet, schema::current_token_royalty_v1, +}; +use aptos_protos::transaction::v1::WriteTableItem; +use bigdecimal::BigDecimal; +use field_count::FieldCount; +use serde::{Deserialize, Serialize}; + +#[derive( + Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize, PartialEq, Eq, +)] +#[diesel(primary_key(token_data_id))] +#[diesel(table_name = current_token_royalty_v1)] +pub struct CurrentTokenRoyaltyV1 { + pub token_data_id: String, + pub payee_address: String, + pub royalty_points_numerator: BigDecimal, + pub royalty_points_denominator: BigDecimal, + pub last_transaction_version: i64, + pub last_transaction_timestamp: chrono::NaiveDateTime, +} + +impl Ord for CurrentTokenRoyaltyV1 { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.token_data_id.cmp(&other.token_data_id) + } +} +impl PartialOrd for CurrentTokenRoyaltyV1 { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl CurrentTokenRoyaltyV1 { + pub fn pk(&self) -> String { + self.token_data_id.clone() + } + + // Royalty for v2 token is more complicated and not supported yet. For token v2, royalty can be on the collection (default) or on + // the token (override). + pub fn get_v1_from_write_table_item( + write_table_item: &WriteTableItem, + transaction_version: i64, + transaction_timestamp: chrono::NaiveDateTime, + ) -> anyhow::Result> { + let table_item_data = write_table_item.data.as_ref().unwrap(); + + let maybe_token_data = match TokenWriteSet::from_table_item_type( + table_item_data.value_type.as_str(), + &table_item_data.value, + transaction_version, + )? { + Some(TokenWriteSet::TokenData(inner)) => Some(inner), + _ => None, + }; + + if let Some(token_data) = maybe_token_data { + let maybe_token_data_id = match TokenWriteSet::from_table_item_type( + table_item_data.key_type.as_str(), + &table_item_data.key, + transaction_version, + )? 
{ + Some(TokenWriteSet::TokenDataId(inner)) => Some(inner), + _ => None, + }; + if let Some(token_data_id_struct) = maybe_token_data_id { + // token data id is the 0x{hash} version of the creator, collection name, and token name + let token_data_id = token_data_id_struct.to_id(); + let payee_address = token_data.royalty.get_payee_address(); + let royalty_points_numerator = token_data.royalty.royalty_points_numerator.clone(); + let royalty_points_denominator = + token_data.royalty.royalty_points_denominator.clone(); + + return Ok(Some(Self { + token_data_id, + payee_address, + royalty_points_numerator, + royalty_points_denominator, + last_transaction_version: transaction_version, + last_transaction_timestamp: transaction_timestamp, + })); + } else { + tracing::warn!( + transaction_version, + key_type = table_item_data.key_type, + key = table_item_data.key, + "Expecting token_data_id as key for value = token_data" + ); + } + } + Ok(None) + } +} diff --git a/rust/processor/src/db/common/models/token_v2_models/v2_collections.rs b/rust/processor/src/db/common/models/token_v2_models/v2_collections.rs new file mode 100644 index 000000000..5d3a394a9 --- /dev/null +++ b/rust/processor/src/db/common/models/token_v2_models/v2_collections.rs @@ -0,0 +1,350 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +// This is required because a diesel macro makes clippy sad +#![allow(clippy::extra_unused_lifetimes)] +#![allow(clippy::unused_unit)] + +use super::v2_token_utils::{TokenStandard, V2TokenResource}; +use crate::{ + db::common::models::{ + default_models::move_resources::MoveResource, + object_models::v2_object_utils::ObjectAggregatedDataMapping, + token_models::{ + collection_datas::CollectionData, + token_utils::{CollectionDataIdType, TokenWriteSet}, + tokens::TableHandleToOwner, + }, + }, + schema::{collections_v2, current_collections_v2}, + utils::{database::DbPoolConnection, util::standardize_address}, +}; +use anyhow::Context; +use aptos_protos::transaction::v1::{WriteResource, WriteTableItem}; +use bigdecimal::{BigDecimal, Zero}; +use diesel::{prelude::*, sql_query, sql_types::Text}; +use diesel_async::RunQueryDsl; +use field_count::FieldCount; +use serde::{Deserialize, Serialize}; + +// PK of current_collections_v2, i.e. 
collection_id +pub type CurrentCollectionV2PK = String; + +#[derive(Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key(transaction_version, write_set_change_index))] +#[diesel(table_name = collections_v2)] +pub struct CollectionV2 { + pub transaction_version: i64, + pub write_set_change_index: i64, + pub collection_id: String, + pub creator_address: String, + pub collection_name: String, + pub description: String, + pub uri: String, + pub current_supply: BigDecimal, + pub max_supply: Option, + pub total_minted_v2: Option, + pub mutable_description: Option, + pub mutable_uri: Option, + pub table_handle_v1: Option, + pub token_standard: String, + pub transaction_timestamp: chrono::NaiveDateTime, +} + +#[derive(Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key(collection_id))] +#[diesel(table_name = current_collections_v2)] +pub struct CurrentCollectionV2 { + pub collection_id: String, + pub creator_address: String, + pub collection_name: String, + pub description: String, + pub uri: String, + pub current_supply: BigDecimal, + pub max_supply: Option, + pub total_minted_v2: Option, + pub mutable_description: Option, + pub mutable_uri: Option, + pub table_handle_v1: Option, + pub token_standard: String, + pub last_transaction_version: i64, + pub last_transaction_timestamp: chrono::NaiveDateTime, +} + +#[derive(Debug, QueryableByName)] +pub struct CreatorFromCollectionTableV1 { + #[diesel(sql_type = Text)] + pub creator_address: String, +} + +impl CollectionV2 { + pub fn get_v2_from_write_resource( + write_resource: &WriteResource, + txn_version: i64, + write_set_change_index: i64, + txn_timestamp: chrono::NaiveDateTime, + object_metadatas: &ObjectAggregatedDataMapping, + ) -> anyhow::Result> { + let type_str = MoveResource::get_outer_type_from_write_resource(write_resource); + if !V2TokenResource::is_resource_supported(type_str.as_str()) { + return Ok(None); + } + let resource = MoveResource::from_write_resource( + write_resource, + 0, // Placeholder, this isn't used anyway + txn_version, + 0, // Placeholder, this isn't used anyway + ); + + if let V2TokenResource::Collection(inner) = &V2TokenResource::from_resource( + &type_str, + resource.data.as_ref().unwrap(), + txn_version, + )? 
{ + let (mut current_supply, mut max_supply, mut total_minted_v2) = + (BigDecimal::zero(), None, None); + let (mut mutable_description, mut mutable_uri) = (None, None); + if let Some(object_data) = object_metadatas.get(&resource.address) { + // Getting supply data (prefer fixed supply over unlimited supply although they should never appear at the same time anyway) + let fixed_supply = object_data.fixed_supply.as_ref(); + let unlimited_supply = object_data.unlimited_supply.as_ref(); + if let Some(supply) = unlimited_supply { + (current_supply, max_supply, total_minted_v2) = ( + supply.current_supply.clone(), + None, + Some(supply.total_minted.clone()), + ); + } + if let Some(supply) = fixed_supply { + (current_supply, max_supply, total_minted_v2) = ( + supply.current_supply.clone(), + Some(supply.max_supply.clone()), + Some(supply.total_minted.clone()), + ); + } + + // Aggregator V2 enables a separate struct for supply + let concurrent_supply = object_data.concurrent_supply.as_ref(); + if let Some(supply) = concurrent_supply { + (current_supply, max_supply, total_minted_v2) = ( + supply.current_supply.value.clone(), + if supply.current_supply.max_value == u64::MAX.into() { + None + } else { + Some(supply.current_supply.max_value.clone()) + }, + Some(supply.total_minted.value.clone()), + ); + } + + // Getting collection mutability config from AptosCollection + let collection = object_data.aptos_collection.as_ref(); + if let Some(collection) = collection { + mutable_description = Some(collection.mutable_description); + mutable_uri = Some(collection.mutable_uri); + } + } else { + // ObjectCore should not be missing, returning from entire function early + return Ok(None); + } + + let collection_id = resource.address.clone(); + let creator_address = inner.get_creator_address(); + let collection_name = inner.get_name_trunc(); + let description = inner.description.clone(); + let uri = inner.get_uri_trunc(); + + Ok(Some(( + Self { + transaction_version: txn_version, + write_set_change_index, + collection_id: collection_id.clone(), + creator_address: creator_address.clone(), + collection_name: collection_name.clone(), + description: description.clone(), + uri: uri.clone(), + current_supply: current_supply.clone(), + max_supply: max_supply.clone(), + total_minted_v2: total_minted_v2.clone(), + mutable_description, + mutable_uri, + table_handle_v1: None, + token_standard: TokenStandard::V2.to_string(), + transaction_timestamp: txn_timestamp, + }, + CurrentCollectionV2 { + collection_id, + creator_address, + collection_name, + description, + uri, + current_supply, + max_supply, + total_minted_v2, + mutable_description, + mutable_uri, + table_handle_v1: None, + token_standard: TokenStandard::V2.to_string(), + last_transaction_version: txn_version, + last_transaction_timestamp: txn_timestamp, + }, + ))) + } else { + Ok(None) + } + } + + pub async fn get_v1_from_write_table_item( + table_item: &WriteTableItem, + txn_version: i64, + write_set_change_index: i64, + txn_timestamp: chrono::NaiveDateTime, + table_handle_to_owner: &TableHandleToOwner, + conn: &mut DbPoolConnection<'_>, + query_retries: u32, + query_retry_delay_ms: u64, + ) -> anyhow::Result> { + let table_item_data = table_item.data.as_ref().unwrap(); + + let maybe_collection_data = match TokenWriteSet::from_table_item_type( + table_item_data.value_type.as_str(), + &table_item_data.value, + txn_version, + )? 
{ + Some(TokenWriteSet::CollectionData(inner)) => Some(inner), + _ => None, + }; + if let Some(collection_data) = maybe_collection_data { + let table_handle = table_item.handle.to_string(); + let maybe_creator_address = table_handle_to_owner + .get(&standardize_address(&table_handle)) + .map(|table_metadata| table_metadata.get_owner_address()); + let mut creator_address = match maybe_creator_address { + Some(ca) => ca, + None => { + match Self::get_collection_creator_for_v1( + conn, + &table_handle, + query_retries, + query_retry_delay_ms, + ) + .await + .context(format!( + "Failed to get collection creator for table handle {}, txn version {}", + table_handle, txn_version + )) { + Ok(ca) => ca, + Err(_) => { + // Try our best by getting from the older collection data + match CollectionData::get_collection_creator( + conn, + &table_handle, + query_retries, + query_retry_delay_ms, + ) + .await + { + Ok(creator) => creator, + Err(_) => { + tracing::error!( + transaction_version = txn_version, + lookup_key = &table_handle, + "Failed to get collection v2 creator for table handle. You probably should backfill db." + ); + return Ok(None); + }, + } + }, + } + }, + }; + creator_address = standardize_address(&creator_address); + let collection_id_struct = + CollectionDataIdType::new(creator_address, collection_data.get_name().to_string()); + let collection_id = collection_id_struct.to_id(); + let collection_name = collection_data.get_name_trunc(); + let uri = collection_data.get_uri_trunc(); + + Ok(Some(( + Self { + transaction_version: txn_version, + write_set_change_index, + collection_id: collection_id.clone(), + creator_address: collection_id_struct.creator.clone(), + collection_name: collection_name.clone(), + description: collection_data.description.clone(), + uri: uri.clone(), + current_supply: collection_data.supply.clone(), + max_supply: Some(collection_data.maximum.clone()), + total_minted_v2: None, + mutable_uri: Some(collection_data.mutability_config.uri), + mutable_description: Some(collection_data.mutability_config.description), + table_handle_v1: Some(table_handle.clone()), + token_standard: TokenStandard::V1.to_string(), + transaction_timestamp: txn_timestamp, + }, + CurrentCollectionV2 { + collection_id, + creator_address: collection_id_struct.creator, + collection_name, + description: collection_data.description, + uri, + current_supply: collection_data.supply, + max_supply: Some(collection_data.maximum.clone()), + total_minted_v2: None, + mutable_uri: Some(collection_data.mutability_config.uri), + mutable_description: Some(collection_data.mutability_config.description), + table_handle_v1: Some(table_handle), + token_standard: TokenStandard::V1.to_string(), + last_transaction_version: txn_version, + last_transaction_timestamp: txn_timestamp, + }, + ))) + } else { + Ok(None) + } + } + + /// If collection data is not in resources of the same transaction, then try looking for it in the database. Since collection owner + /// cannot change, we can just look in the current_collection_datas table. + /// Retrying a few times since this collection could've been written in a separate thread. 
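+    /// For example, with query_retries = 5 and query_retry_delay_ms = 500 the loop below makes up to
+    /// five attempts roughly 500ms apart (about two seconds of sleeping in the worst case) before
+    /// giving up with an error.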
+ async fn get_collection_creator_for_v1( + conn: &mut DbPoolConnection<'_>, + table_handle: &str, + query_retries: u32, + query_retry_delay_ms: u64, + ) -> anyhow::Result { + let mut tried = 0; + while tried < query_retries { + tried += 1; + match Self::get_by_table_handle(conn, table_handle).await { + Ok(creator) => return Ok(creator), + Err(_) => { + if tried < query_retries { + tokio::time::sleep(std::time::Duration::from_millis(query_retry_delay_ms)) + .await; + } + }, + } + } + Err(anyhow::anyhow!("Failed to get collection creator")) + } + + /// TODO: Change this to a KV store + async fn get_by_table_handle( + conn: &mut DbPoolConnection<'_>, + table_handle: &str, + ) -> anyhow::Result { + let mut res: Vec> = sql_query( + "SELECT creator_address FROM current_collections_v2 WHERE table_handle_v1 = $1", + ) + .bind::(table_handle) + .get_results(conn) + .await?; + Ok(res + .pop() + .context("collection result empty")? + .context("collection result null")? + .creator_address) + } +} diff --git a/rust/processor/src/db/common/models/token_v2_models/v2_token_activities.rs b/rust/processor/src/db/common/models/token_v2_models/v2_token_activities.rs new file mode 100644 index 000000000..83ee38388 --- /dev/null +++ b/rust/processor/src/db/common/models/token_v2_models/v2_token_activities.rs @@ -0,0 +1,279 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +// This is required because a diesel macro makes clippy sad +#![allow(clippy::extra_unused_lifetimes)] +#![allow(clippy::unused_unit)] + +use super::v2_token_utils::{TokenStandard, V2TokenEvent}; +use crate::{ + db::common::models::{ + object_models::v2_object_utils::ObjectAggregatedDataMapping, + token_models::token_utils::{TokenDataIdType, TokenEvent}, + }, + schema::token_activities_v2, + utils::util::standardize_address, +}; +use aptos_protos::transaction::v1::Event; +use bigdecimal::{BigDecimal, One, Zero}; +use field_count::FieldCount; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key(transaction_version, event_index))] +#[diesel(table_name = token_activities_v2)] +pub struct TokenActivityV2 { + pub transaction_version: i64, + pub event_index: i64, + pub event_account_address: String, + pub token_data_id: String, + pub property_version_v1: BigDecimal, + pub type_: String, + pub from_address: Option, + pub to_address: Option, + pub token_amount: BigDecimal, + pub before_value: Option, + pub after_value: Option, + pub entry_function_id_str: Option, + pub token_standard: String, + pub is_fungible_v2: Option, + pub transaction_timestamp: chrono::NaiveDateTime, +} + +/// A simplified TokenActivity (excluded common fields) to reduce code duplication +struct TokenActivityHelperV1 { + pub token_data_id_struct: TokenDataIdType, + pub property_version: BigDecimal, + pub from_address: Option, + pub to_address: Option, + pub token_amount: BigDecimal, +} + +/// A simplified TokenActivity (excluded common fields) to reduce code duplication +struct TokenActivityHelperV2 { + pub from_address: Option, + pub to_address: Option, + pub token_amount: BigDecimal, + pub before_value: Option, + pub after_value: Option, + pub event_type: String, +} + +impl TokenActivityV2 { + pub async fn get_nft_v2_from_parsed_event( + event: &Event, + txn_version: i64, + txn_timestamp: chrono::NaiveDateTime, + event_index: i64, + entry_function_id_str: &Option, + token_v2_metadata: &ObjectAggregatedDataMapping, + ) -> anyhow::Result> { + let 
event_type = event.type_str.clone(); + if let Some(token_event) = + &V2TokenEvent::from_event(&event_type, event.data.as_str(), txn_version)? + { + let event_account_address = + standardize_address(&event.key.as_ref().unwrap().account_address); + // burn and mint events are attached to the collection. The rest should be attached to the token + let token_data_id = match token_event { + V2TokenEvent::MintEvent(inner) => inner.get_token_address(), + V2TokenEvent::Mint(inner) => inner.get_token_address(), + V2TokenEvent::BurnEvent(inner) => inner.get_token_address(), + V2TokenEvent::Burn(inner) => inner.get_token_address(), + V2TokenEvent::TransferEvent(inner) => inner.get_object_address(), + _ => event_account_address.clone(), + }; + + if let Some(metadata) = token_v2_metadata.get(&token_data_id) { + let object_core = &metadata.object.object_core; + let token_activity_helper = match token_event { + V2TokenEvent::MintEvent(_) => TokenActivityHelperV2 { + from_address: Some(object_core.get_owner_address()), + to_address: None, + token_amount: BigDecimal::one(), + before_value: None, + after_value: None, + event_type: event_type.clone(), + }, + V2TokenEvent::Mint(_) => TokenActivityHelperV2 { + from_address: Some(object_core.get_owner_address()), + to_address: None, + token_amount: BigDecimal::one(), + before_value: None, + after_value: None, + event_type: "0x4::collection::MintEvent".to_string(), + }, + V2TokenEvent::TokenMutationEvent(inner) => TokenActivityHelperV2 { + from_address: Some(object_core.get_owner_address()), + to_address: None, + token_amount: BigDecimal::zero(), + before_value: Some(inner.old_value.clone()), + after_value: Some(inner.new_value.clone()), + event_type: event_type.clone(), + }, + V2TokenEvent::BurnEvent(_) => TokenActivityHelperV2 { + from_address: Some(object_core.get_owner_address()), + to_address: None, + token_amount: BigDecimal::one(), + before_value: None, + after_value: None, + event_type: event_type.clone(), + }, + V2TokenEvent::Burn(_) => TokenActivityHelperV2 { + from_address: Some(object_core.get_owner_address()), + to_address: None, + token_amount: BigDecimal::one(), + before_value: None, + after_value: None, + event_type: "0x4::collection::BurnEvent".to_string(), + }, + V2TokenEvent::TransferEvent(inner) => TokenActivityHelperV2 { + from_address: Some(inner.get_from_address()), + to_address: Some(inner.get_to_address()), + token_amount: BigDecimal::one(), + before_value: None, + after_value: None, + event_type: event_type.clone(), + }, + }; + return Ok(Some(Self { + transaction_version: txn_version, + event_index, + event_account_address, + token_data_id, + property_version_v1: BigDecimal::zero(), + type_: token_activity_helper.event_type, + from_address: token_activity_helper.from_address, + to_address: token_activity_helper.to_address, + token_amount: token_activity_helper.token_amount, + before_value: token_activity_helper.before_value, + after_value: token_activity_helper.after_value, + entry_function_id_str: entry_function_id_str.clone(), + token_standard: TokenStandard::V2.to_string(), + is_fungible_v2: None, + transaction_timestamp: txn_timestamp, + })); + } else { + // If the object metadata isn't found in the transaction, then the token was burnt. + + // the new burn event has owner address now! 
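+                // The newer 0x4::collection::Burn event carries the previous owner in its payload,
+                // while the legacy 0x4::collection::BurnEvent does not, so owner_address falls back
+                // to None for old events (e.g. a token minted and burned in the same transaction).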
+ let owner_address = if let V2TokenEvent::Burn(inner) = token_event { + inner.get_previous_owner_address() + } else { + // To handle a case with the old burn events, when a token is minted and burnt in the same transaction + None + }; + + return Ok(Some(Self { + transaction_version: txn_version, + event_index, + event_account_address, + token_data_id, + property_version_v1: BigDecimal::zero(), + type_: event_type, + from_address: owner_address.clone(), + to_address: None, + token_amount: BigDecimal::one(), + before_value: None, + after_value: None, + entry_function_id_str: entry_function_id_str.clone(), + token_standard: TokenStandard::V2.to_string(), + is_fungible_v2: None, + transaction_timestamp: txn_timestamp, + })); + } + } + Ok(None) + } + + pub fn get_v1_from_parsed_event( + event: &Event, + txn_version: i64, + txn_timestamp: chrono::NaiveDateTime, + event_index: i64, + entry_function_id_str: &Option, + ) -> anyhow::Result> { + let event_type = event.type_str.clone(); + if let Some(token_event) = &TokenEvent::from_event(&event_type, &event.data, txn_version)? { + let event_account_address = + standardize_address(&event.key.as_ref().unwrap().account_address); + let token_activity_helper = match token_event { + TokenEvent::MintTokenEvent(inner) => TokenActivityHelperV1 { + token_data_id_struct: inner.id.clone(), + property_version: BigDecimal::zero(), + from_address: Some(event_account_address.clone()), + to_address: None, + token_amount: inner.amount.clone(), + }, + TokenEvent::BurnTokenEvent(inner) => TokenActivityHelperV1 { + token_data_id_struct: inner.id.token_data_id.clone(), + property_version: inner.id.property_version.clone(), + from_address: Some(event_account_address.clone()), + to_address: None, + token_amount: inner.amount.clone(), + }, + TokenEvent::MutateTokenPropertyMapEvent(inner) => TokenActivityHelperV1 { + token_data_id_struct: inner.new_id.token_data_id.clone(), + property_version: inner.new_id.property_version.clone(), + from_address: Some(event_account_address.clone()), + to_address: None, + token_amount: BigDecimal::zero(), + }, + TokenEvent::WithdrawTokenEvent(inner) => TokenActivityHelperV1 { + token_data_id_struct: inner.id.token_data_id.clone(), + property_version: inner.id.property_version.clone(), + from_address: Some(event_account_address.clone()), + to_address: None, + token_amount: inner.amount.clone(), + }, + TokenEvent::DepositTokenEvent(inner) => TokenActivityHelperV1 { + token_data_id_struct: inner.id.token_data_id.clone(), + property_version: inner.id.property_version.clone(), + from_address: None, + to_address: Some(standardize_address(&event_account_address)), + token_amount: inner.amount.clone(), + }, + TokenEvent::OfferTokenEvent(inner) => TokenActivityHelperV1 { + token_data_id_struct: inner.token_id.token_data_id.clone(), + property_version: inner.token_id.property_version.clone(), + from_address: Some(event_account_address.clone()), + to_address: Some(inner.get_to_address()), + token_amount: inner.amount.clone(), + }, + TokenEvent::CancelTokenOfferEvent(inner) => TokenActivityHelperV1 { + token_data_id_struct: inner.token_id.token_data_id.clone(), + property_version: inner.token_id.property_version.clone(), + from_address: Some(event_account_address.clone()), + to_address: Some(inner.get_to_address()), + token_amount: inner.amount.clone(), + }, + TokenEvent::ClaimTokenEvent(inner) => TokenActivityHelperV1 { + token_data_id_struct: inner.token_id.token_data_id.clone(), + property_version: inner.token_id.property_version.clone(), + 
from_address: Some(event_account_address.clone()), + to_address: Some(inner.get_to_address()), + token_amount: inner.amount.clone(), + }, + }; + let token_data_id_struct = token_activity_helper.token_data_id_struct; + return Ok(Some(Self { + transaction_version: txn_version, + event_index, + event_account_address, + token_data_id: token_data_id_struct.to_id(), + property_version_v1: token_activity_helper.property_version, + type_: event_type, + from_address: token_activity_helper.from_address, + to_address: token_activity_helper.to_address, + token_amount: token_activity_helper.token_amount, + before_value: None, + after_value: None, + entry_function_id_str: entry_function_id_str.clone(), + token_standard: TokenStandard::V1.to_string(), + is_fungible_v2: None, + transaction_timestamp: txn_timestamp, + })); + } + Ok(None) + } +} diff --git a/rust/processor/src/db/common/models/token_v2_models/v2_token_datas.rs b/rust/processor/src/db/common/models/token_v2_models/v2_token_datas.rs new file mode 100644 index 000000000..3be211eb0 --- /dev/null +++ b/rust/processor/src/db/common/models/token_v2_models/v2_token_datas.rs @@ -0,0 +1,298 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +// This is required because a diesel macro makes clippy sad +#![allow(clippy::extra_unused_lifetimes)] +#![allow(clippy::unused_unit)] + +use super::v2_token_utils::{TokenStandard, TokenV2, TokenV2Burned}; +use crate::{ + db::common::models::{ + object_models::v2_object_utils::ObjectAggregatedDataMapping, + token_models::token_utils::TokenWriteSet, + }, + schema::{current_token_datas_v2, token_datas_v2}, + utils::util::standardize_address, +}; +use aptos_protos::transaction::v1::{DeleteResource, WriteResource, WriteTableItem}; +use bigdecimal::BigDecimal; +use diesel::prelude::*; +use field_count::FieldCount; +use serde::{Deserialize, Serialize}; + +// PK of current_token_datas_v2, i.e. 
token_data_id +pub type CurrentTokenDataV2PK = String; + +#[derive(Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key(transaction_version, write_set_change_index))] +#[diesel(table_name = token_datas_v2)] +pub struct TokenDataV2 { + pub transaction_version: i64, + pub write_set_change_index: i64, + pub token_data_id: String, + pub collection_id: String, + pub token_name: String, + pub maximum: Option, + pub supply: Option, + pub largest_property_version_v1: Option, + pub token_uri: String, + pub token_properties: serde_json::Value, + pub description: String, + pub token_standard: String, + pub is_fungible_v2: Option, + pub transaction_timestamp: chrono::NaiveDateTime, + // Deprecated, but still here for backwards compatibility + pub decimals: Option, + // Here for consistency but we don't need to actually fill it + // pub is_deleted_v2: Option, +} + +#[derive(Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key(token_data_id))] +#[diesel(table_name = current_token_datas_v2)] +pub struct CurrentTokenDataV2 { + pub token_data_id: String, + pub collection_id: String, + pub token_name: String, + pub maximum: Option, + pub supply: Option, + pub largest_property_version_v1: Option, + pub token_uri: String, + pub token_properties: serde_json::Value, + pub description: String, + pub token_standard: String, + pub is_fungible_v2: Option, + pub last_transaction_version: i64, + pub last_transaction_timestamp: chrono::NaiveDateTime, + // Deprecated, but still here for backwards compatibility + pub decimals: Option, + pub is_deleted_v2: Option, +} + +impl TokenDataV2 { + // TODO: remove the useless_asref lint when new clippy nighly is released. + #[allow(clippy::useless_asref)] + pub fn get_v2_from_write_resource( + write_resource: &WriteResource, + txn_version: i64, + write_set_change_index: i64, + txn_timestamp: chrono::NaiveDateTime, + object_metadatas: &ObjectAggregatedDataMapping, + ) -> anyhow::Result> { + if let Some(inner) = &TokenV2::from_write_resource(write_resource, txn_version)? 
{ + let token_data_id = standardize_address(&write_resource.address.to_string()); + let mut token_name = inner.get_name_trunc(); + let is_fungible_v2; + // Get token properties from 0x4::property_map::PropertyMap + let mut token_properties = serde_json::Value::Null; + if let Some(object_metadata) = object_metadatas.get(&token_data_id) { + let fungible_asset_metadata = object_metadata.fungible_asset_metadata.as_ref(); + if fungible_asset_metadata.is_some() { + is_fungible_v2 = Some(true); + } else { + is_fungible_v2 = Some(false); + } + token_properties = object_metadata + .property_map + .as_ref() + .map(|m| m.inner.clone()) + .unwrap_or(token_properties); + // In aggregator V2 name is now derived from a separate struct + if let Some(token_identifier) = object_metadata.token_identifier.as_ref() { + token_name = token_identifier.get_name_trunc(); + } + } else { + // ObjectCore should not be missing, returning from entire function early + return Ok(None); + } + + let collection_id = inner.get_collection_address(); + let token_uri = inner.get_uri_trunc(); + + Ok(Some(( + Self { + transaction_version: txn_version, + write_set_change_index, + token_data_id: token_data_id.clone(), + collection_id: collection_id.clone(), + token_name: token_name.clone(), + maximum: None, + supply: None, + largest_property_version_v1: None, + token_uri: token_uri.clone(), + token_properties: token_properties.clone(), + description: inner.description.clone(), + token_standard: TokenStandard::V2.to_string(), + is_fungible_v2, + transaction_timestamp: txn_timestamp, + decimals: None, + }, + CurrentTokenDataV2 { + token_data_id, + collection_id, + token_name, + maximum: None, + supply: None, + largest_property_version_v1: None, + token_uri, + token_properties, + description: inner.description.clone(), + token_standard: TokenStandard::V2.to_string(), + is_fungible_v2, + last_transaction_version: txn_version, + last_transaction_timestamp: txn_timestamp, + decimals: None, + is_deleted_v2: Some(false), + }, + ))) + } else { + Ok(None) + } + } + + /// This handles the case where token is burned but objectCore is still there + pub async fn get_burned_nft_v2_from_write_resource( + write_resource: &WriteResource, + txn_version: i64, + txn_timestamp: chrono::NaiveDateTime, + tokens_burned: &TokenV2Burned, + ) -> anyhow::Result> { + let token_data_id = standardize_address(&write_resource.address.to_string()); + // reminder that v1 events won't get to this codepath + if let Some(burn_event_v2) = tokens_burned.get(&standardize_address(&token_data_id)) { + Ok(Some(CurrentTokenDataV2 { + token_data_id, + collection_id: burn_event_v2.get_collection_address(), + token_name: "".to_string(), + maximum: None, + supply: None, + largest_property_version_v1: None, + token_uri: "".to_string(), + token_properties: serde_json::Value::Null, + description: "".to_string(), + token_standard: TokenStandard::V2.to_string(), + is_fungible_v2: Some(false), + last_transaction_version: txn_version, + last_transaction_timestamp: txn_timestamp, + decimals: None, + is_deleted_v2: Some(true), + })) + } else { + Ok(None) + } + } + + /// This handles the case where token is burned and objectCore is deleted + pub async fn get_burned_nft_v2_from_delete_resource( + delete_resource: &DeleteResource, + txn_version: i64, + txn_timestamp: chrono::NaiveDateTime, + tokens_burned: &TokenV2Burned, + ) -> anyhow::Result> { + let token_data_id = standardize_address(&delete_resource.address.to_string()); + // reminder that v1 events won't get to this codepath + if let 
Some(burn_event_v2) = tokens_burned.get(&standardize_address(&token_data_id)) { + Ok(Some(CurrentTokenDataV2 { + token_data_id, + collection_id: burn_event_v2.get_collection_address(), + token_name: "".to_string(), + maximum: None, + supply: None, + largest_property_version_v1: None, + token_uri: "".to_string(), + token_properties: serde_json::Value::Null, + description: "".to_string(), + token_standard: TokenStandard::V2.to_string(), + is_fungible_v2: Some(false), + last_transaction_version: txn_version, + last_transaction_timestamp: txn_timestamp, + decimals: None, + is_deleted_v2: Some(true), + })) + } else { + Ok(None) + } + } + + pub fn get_v1_from_write_table_item( + table_item: &WriteTableItem, + txn_version: i64, + write_set_change_index: i64, + txn_timestamp: chrono::NaiveDateTime, + ) -> anyhow::Result> { + let table_item_data = table_item.data.as_ref().unwrap(); + + let maybe_token_data = match TokenWriteSet::from_table_item_type( + table_item_data.value_type.as_str(), + &table_item_data.value, + txn_version, + )? { + Some(TokenWriteSet::TokenData(inner)) => Some(inner), + _ => None, + }; + + if let Some(token_data) = maybe_token_data { + let maybe_token_data_id = match TokenWriteSet::from_table_item_type( + table_item_data.key_type.as_str(), + &table_item_data.key, + txn_version, + )? { + Some(TokenWriteSet::TokenDataId(inner)) => Some(inner), + _ => None, + }; + if let Some(token_data_id_struct) = maybe_token_data_id { + let collection_id = token_data_id_struct.get_collection_id(); + let token_data_id = token_data_id_struct.to_id(); + let token_name = token_data_id_struct.get_name_trunc(); + let token_uri = token_data.get_uri_trunc(); + + return Ok(Some(( + Self { + transaction_version: txn_version, + write_set_change_index, + token_data_id: token_data_id.clone(), + collection_id: collection_id.clone(), + token_name: token_name.clone(), + maximum: Some(token_data.maximum.clone()), + supply: Some(token_data.supply.clone()), + largest_property_version_v1: Some( + token_data.largest_property_version.clone(), + ), + token_uri: token_uri.clone(), + token_properties: token_data.default_properties.clone(), + description: token_data.description.clone(), + token_standard: TokenStandard::V1.to_string(), + is_fungible_v2: None, + transaction_timestamp: txn_timestamp, + decimals: None, + }, + CurrentTokenDataV2 { + token_data_id, + collection_id, + token_name, + maximum: Some(token_data.maximum), + supply: Some(token_data.supply), + largest_property_version_v1: Some(token_data.largest_property_version), + token_uri, + token_properties: token_data.default_properties, + description: token_data.description, + token_standard: TokenStandard::V1.to_string(), + is_fungible_v2: None, + last_transaction_version: txn_version, + last_transaction_timestamp: txn_timestamp, + decimals: None, + is_deleted_v2: None, + }, + ))); + } else { + tracing::warn!( + transaction_version = txn_version, + key_type = table_item_data.key_type, + key = table_item_data.key, + "Expecting token_data_id as key for value = token_data" + ); + } + } + Ok(None) + } +} diff --git a/rust/processor/src/db/common/models/token_v2_models/v2_token_metadata.rs b/rust/processor/src/db/common/models/token_v2_models/v2_token_metadata.rs new file mode 100644 index 000000000..d059a3da9 --- /dev/null +++ b/rust/processor/src/db/common/models/token_v2_models/v2_token_metadata.rs @@ -0,0 +1,81 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +// This is required because a diesel macro makes clippy sad 
+#![allow(clippy::extra_unused_lifetimes)] +#![allow(clippy::unused_unit)] + +use super::v2_token_utils::TOKEN_V2_ADDR; +use crate::{ + db::common::models::{ + coin_models::coin_utils::COIN_ADDR, + default_models::move_resources::MoveResource, + object_models::v2_object_utils::ObjectAggregatedDataMapping, + token_models::token_utils::{NAME_LENGTH, TOKEN_ADDR}, + }, + schema::current_token_v2_metadata, + utils::util::{standardize_address, truncate_str}, +}; +use anyhow::Context; +use aptos_protos::transaction::v1::WriteResource; +use field_count::FieldCount; +use serde::{Deserialize, Serialize}; +use serde_json::Value; + +// PK of current_objects, i.e. object_address, resource_type +pub type CurrentTokenV2MetadataPK = (String, String); + +#[derive(Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key(object_address, resource_type))] +#[diesel(table_name = current_token_v2_metadata)] +pub struct CurrentTokenV2Metadata { + pub object_address: String, + pub resource_type: String, + pub data: Value, + pub state_key_hash: String, + pub last_transaction_version: i64, +} + +impl CurrentTokenV2Metadata { + /// Parsing unknown resources with 0x4::token::Token + pub fn from_write_resource( + write_resource: &WriteResource, + txn_version: i64, + object_metadatas: &ObjectAggregatedDataMapping, + ) -> anyhow::Result> { + let object_address = standardize_address(&write_resource.address.to_string()); + if let Some(object_data) = object_metadatas.get(&object_address) { + // checking if token_v2 + if object_data.token.is_some() { + let move_tag = + MoveResource::convert_move_struct_tag(write_resource.r#type.as_ref().unwrap()); + let resource_type_addr = move_tag.get_address(); + if matches!( + resource_type_addr.as_str(), + COIN_ADDR | TOKEN_ADDR | TOKEN_V2_ADDR + ) { + return Ok(None); + } + + let resource = MoveResource::from_write_resource(write_resource, 0, txn_version, 0); + + let state_key_hash = object_data.object.get_state_key_hash(); + if state_key_hash != resource.state_key_hash { + return Ok(None); + } + + let resource_type = truncate_str(&resource.type_, NAME_LENGTH); + return Ok(Some(CurrentTokenV2Metadata { + object_address, + resource_type, + data: resource + .data + .context("data must be present in write resource")?, + state_key_hash: resource.state_key_hash, + last_transaction_version: txn_version, + })); + } + } + Ok(None) + } +} diff --git a/rust/processor/src/db/common/models/token_v2_models/v2_token_ownerships.rs b/rust/processor/src/db/common/models/token_v2_models/v2_token_ownerships.rs new file mode 100644 index 000000000..9dbc94533 --- /dev/null +++ b/rust/processor/src/db/common/models/token_v2_models/v2_token_ownerships.rs @@ -0,0 +1,635 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +// This is required because a diesel macro makes clippy sad +#![allow(clippy::extra_unused_lifetimes)] +#![allow(clippy::unused_unit)] + +use super::{ + v2_token_datas::TokenDataV2, + v2_token_utils::{TokenStandard, TokenV2Burned}, +}; +use crate::{ + db::common::models::{ + object_models::v2_object_utils::{ObjectAggregatedDataMapping, ObjectWithMetadata}, + token_models::{token_utils::TokenWriteSet, tokens::TableHandleToOwner}, + token_v2_models::v2_token_utils::DEFAULT_OWNER_ADDRESS, + }, + schema::{current_token_ownerships_v2, token_ownerships_v2}, + utils::{ + database::DbPoolConnection, + util::{ensure_not_negative, standardize_address}, + }, +}; +use ahash::AHashMap; +use anyhow::Context; +use 
aptos_protos::transaction::v1::{ + DeleteResource, DeleteTableItem, WriteResource, WriteTableItem, +}; +use bigdecimal::{BigDecimal, One, Zero}; +use diesel::prelude::*; +use diesel_async::RunQueryDsl; +use field_count::FieldCount; +use serde::{Deserialize, Serialize}; + +// PK of current_token_ownerships_v2, i.e. token_data_id, property_version_v1, owner_address, storage_id +pub type CurrentTokenOwnershipV2PK = (String, BigDecimal, String, String); + +#[derive(Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key(transaction_version, write_set_change_index))] +#[diesel(table_name = token_ownerships_v2)] +pub struct TokenOwnershipV2 { + pub transaction_version: i64, + pub write_set_change_index: i64, + pub token_data_id: String, + pub property_version_v1: BigDecimal, + pub owner_address: Option, + pub storage_id: String, + pub amount: BigDecimal, + pub table_type_v1: Option, + pub token_properties_mutated_v1: Option, + pub is_soulbound_v2: Option, + pub token_standard: String, + pub is_fungible_v2: Option, + pub transaction_timestamp: chrono::NaiveDateTime, + pub non_transferrable_by_owner: Option, +} + +#[derive(Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key(token_data_id, property_version_v1, owner_address, storage_id))] +#[diesel(table_name = current_token_ownerships_v2)] +pub struct CurrentTokenOwnershipV2 { + pub token_data_id: String, + pub property_version_v1: BigDecimal, + pub owner_address: String, + pub storage_id: String, + pub amount: BigDecimal, + pub table_type_v1: Option, + pub token_properties_mutated_v1: Option, + pub is_soulbound_v2: Option, + pub token_standard: String, + pub is_fungible_v2: Option, + pub last_transaction_version: i64, + pub last_transaction_timestamp: chrono::NaiveDateTime, + pub non_transferrable_by_owner: Option, +} + +// Facilitate tracking when a token is burned +#[derive(Clone, Debug)] +pub struct NFTOwnershipV2 { + pub token_data_id: String, + pub owner_address: String, + pub is_soulbound: Option, +} + +/// Need a separate struct for queryable because we don't want to define the inserted_at column (letting DB fill) +#[derive(Clone, Debug, Identifiable, Queryable)] +#[diesel(primary_key(token_data_id, property_version_v1, owner_address, storage_id))] +#[diesel(table_name = current_token_ownerships_v2)] +pub struct CurrentTokenOwnershipV2Query { + pub token_data_id: String, + pub property_version_v1: BigDecimal, + pub owner_address: String, + pub storage_id: String, + pub amount: BigDecimal, + pub table_type_v1: Option, + pub token_properties_mutated_v1: Option, + pub is_soulbound_v2: Option, + pub token_standard: String, + pub is_fungible_v2: Option, + pub last_transaction_version: i64, + pub last_transaction_timestamp: chrono::NaiveDateTime, + pub inserted_at: chrono::NaiveDateTime, + pub non_transferrable_by_owner: Option, +} + +impl TokenOwnershipV2 { + /// For nfts it's the same resources that we parse tokendatas from so we leverage the work done in there to get ownership data + /// Vecs are returned because there could be multiple transfers and we need to document each one here. 
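+    /// A sketch of the intended call shape (hypothetical variable names, not part of this change):
+    /// ```ignore
+    /// let (ownership_rows, current_ownerships) =
+    ///     TokenOwnershipV2::get_nft_v2_from_token_data(&token_data, &object_metadatas)?;
+    /// // `ownership_rows` has one entry for the current owner plus one per transfer event;
+    /// // `current_ownerships` is keyed by (token_data_id, property_version_v1, owner_address, storage_id).
+    /// ```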
+ pub fn get_nft_v2_from_token_data( + token_data: &TokenDataV2, + object_metadatas: &ObjectAggregatedDataMapping, + ) -> anyhow::Result<( + Vec, + AHashMap, + )> { + let mut ownerships = vec![]; + let mut current_ownerships = AHashMap::new(); + + let object_data = object_metadatas + .get(&token_data.token_data_id) + .context("If token data exists objectcore must exist")?; + let object_core = object_data.object.object_core.clone(); + let token_data_id = token_data.token_data_id.clone(); + let owner_address = object_core.get_owner_address(); + let storage_id = token_data_id.clone(); + + // is_soulbound currently means if an object is completely untransferrable + // OR if only admin can transfer. Only the former is true soulbound but + // people might already be using it with the latter meaning so let's include both. + let is_soulbound = if object_data.untransferable.as_ref().is_some() { + true + } else { + !object_core.allow_ungated_transfer + }; + let non_transferrable_by_owner = !object_core.allow_ungated_transfer; + + ownerships.push(Self { + transaction_version: token_data.transaction_version, + write_set_change_index: token_data.write_set_change_index, + token_data_id: token_data_id.clone(), + property_version_v1: BigDecimal::zero(), + owner_address: Some(owner_address.clone()), + storage_id: storage_id.clone(), + amount: BigDecimal::one(), + table_type_v1: None, + token_properties_mutated_v1: None, + is_soulbound_v2: Some(is_soulbound), + token_standard: TokenStandard::V2.to_string(), + is_fungible_v2: None, + transaction_timestamp: token_data.transaction_timestamp, + non_transferrable_by_owner: Some(non_transferrable_by_owner), + }); + current_ownerships.insert( + ( + token_data_id.clone(), + BigDecimal::zero(), + owner_address.clone(), + storage_id.clone(), + ), + CurrentTokenOwnershipV2 { + token_data_id: token_data_id.clone(), + property_version_v1: BigDecimal::zero(), + owner_address, + storage_id: storage_id.clone(), + amount: BigDecimal::one(), + table_type_v1: None, + token_properties_mutated_v1: None, + is_soulbound_v2: Some(is_soulbound), + token_standard: TokenStandard::V2.to_string(), + is_fungible_v2: None, + last_transaction_version: token_data.transaction_version, + last_transaction_timestamp: token_data.transaction_timestamp, + non_transferrable_by_owner: Some(non_transferrable_by_owner), + }, + ); + + // check if token was transferred + for (event_index, transfer_event) in &object_data.transfer_events { + // If it's a self transfer then skip + if transfer_event.get_to_address() == transfer_event.get_from_address() { + continue; + } + ownerships.push(Self { + transaction_version: token_data.transaction_version, + // set to negative of event index to avoid collison with write set index + write_set_change_index: -1 * event_index, + token_data_id: token_data_id.clone(), + property_version_v1: BigDecimal::zero(), + // previous owner + owner_address: Some(transfer_event.get_from_address()), + storage_id: storage_id.clone(), + // soft delete + amount: BigDecimal::zero(), + table_type_v1: None, + token_properties_mutated_v1: None, + is_soulbound_v2: Some(is_soulbound), + token_standard: TokenStandard::V2.to_string(), + is_fungible_v2: None, + transaction_timestamp: token_data.transaction_timestamp, + non_transferrable_by_owner: Some(is_soulbound), + }); + current_ownerships.insert( + ( + token_data_id.clone(), + BigDecimal::zero(), + transfer_event.get_from_address(), + storage_id.clone(), + ), + CurrentTokenOwnershipV2 { + token_data_id: token_data_id.clone(), + 
property_version_v1: BigDecimal::zero(), + // previous owner + owner_address: transfer_event.get_from_address(), + storage_id: storage_id.clone(), + // soft delete + amount: BigDecimal::zero(), + table_type_v1: None, + token_properties_mutated_v1: None, + is_soulbound_v2: Some(is_soulbound), + token_standard: TokenStandard::V2.to_string(), + is_fungible_v2: None, + last_transaction_version: token_data.transaction_version, + last_transaction_timestamp: token_data.transaction_timestamp, + non_transferrable_by_owner: Some(is_soulbound), + }, + ); + } + Ok((ownerships, current_ownerships)) + } + + /// This handles the case where token is burned but objectCore is still there + pub async fn get_burned_nft_v2_from_write_resource( + write_resource: &WriteResource, + txn_version: i64, + write_set_change_index: i64, + txn_timestamp: chrono::NaiveDateTime, + prior_nft_ownership: &AHashMap, + tokens_burned: &TokenV2Burned, + object_metadatas: &ObjectAggregatedDataMapping, + conn: &mut DbPoolConnection<'_>, + query_retries: u32, + query_retry_delay_ms: u64, + ) -> anyhow::Result> { + let token_data_id = standardize_address(&write_resource.address.to_string()); + if tokens_burned + .get(&standardize_address(&token_data_id)) + .is_some() + { + if let Some(object) = + &ObjectWithMetadata::from_write_resource(write_resource, txn_version)? + { + let object_core = &object.object_core; + let owner_address = object_core.get_owner_address(); + let storage_id = token_data_id.clone(); + + // is_soulbound currently means if an object is completely untransferrable + // OR if only admin can transfer. Only the former is true soulbound but + // people might already be using it with the latter meaning so let's include both. + let is_soulbound = if object_metadatas + .get(&token_data_id) + .map(|obj| obj.untransferable.as_ref()) + .is_some() + { + true + } else { + !object_core.allow_ungated_transfer + }; + let non_transferrable_by_owner = !object_core.allow_ungated_transfer; + + return Ok(Some(( + Self { + transaction_version: txn_version, + write_set_change_index, + token_data_id: token_data_id.clone(), + property_version_v1: BigDecimal::zero(), + owner_address: Some(owner_address.clone()), + storage_id: storage_id.clone(), + amount: BigDecimal::zero(), + table_type_v1: None, + token_properties_mutated_v1: None, + is_soulbound_v2: Some(is_soulbound), + token_standard: TokenStandard::V2.to_string(), + is_fungible_v2: Some(false), + transaction_timestamp: txn_timestamp, + non_transferrable_by_owner: Some(non_transferrable_by_owner), + }, + CurrentTokenOwnershipV2 { + token_data_id, + property_version_v1: BigDecimal::zero(), + owner_address, + storage_id, + amount: BigDecimal::zero(), + table_type_v1: None, + token_properties_mutated_v1: None, + is_soulbound_v2: Some(is_soulbound), + token_standard: TokenStandard::V2.to_string(), + is_fungible_v2: Some(false), + last_transaction_version: txn_version, + last_transaction_timestamp: txn_timestamp, + non_transferrable_by_owner: Some(non_transferrable_by_owner), + }, + ))); + } else { + return Self::get_burned_nft_v2_helper( + &token_data_id, + txn_version, + write_set_change_index, + txn_timestamp, + prior_nft_ownership, + tokens_burned, + conn, + query_retries, + query_retry_delay_ms, + ) + .await; + } + } + Ok(None) + } + + /// This handles the case where token is burned and objectCore is deleted + pub async fn get_burned_nft_v2_from_delete_resource( + delete_resource: &DeleteResource, + txn_version: i64, + write_set_change_index: i64, + txn_timestamp: 
chrono::NaiveDateTime, + prior_nft_ownership: &AHashMap, + tokens_burned: &TokenV2Burned, + conn: &mut DbPoolConnection<'_>, + query_retries: u32, + query_retry_delay_ms: u64, + ) -> anyhow::Result> { + let token_address = standardize_address(&delete_resource.address.to_string()); + Self::get_burned_nft_v2_helper( + &token_address, + txn_version, + write_set_change_index, + txn_timestamp, + prior_nft_ownership, + tokens_burned, + conn, + query_retries, + query_retry_delay_ms, + ) + .await + } + + async fn get_burned_nft_v2_helper( + token_address: &str, + txn_version: i64, + write_set_change_index: i64, + txn_timestamp: chrono::NaiveDateTime, + prior_nft_ownership: &AHashMap, + tokens_burned: &TokenV2Burned, + conn: &mut DbPoolConnection<'_>, + query_retries: u32, + query_retry_delay_ms: u64, + ) -> anyhow::Result> { + let token_address = standardize_address(token_address); + if let Some(burn_event) = tokens_burned.get(&token_address) { + // 1. Try to lookup token address in burn event mapping + let previous_owner = if let Some(previous_owner) = + burn_event.get_previous_owner_address() + { + previous_owner + } else { + // 2. If it doesn't exist in burn event mapping, then it must be an old burn event that doesn't contain previous_owner. + // Do a lookup to get previous owner. This is necessary because previous owner is part of current token ownerships primary key. + match prior_nft_ownership.get(&token_address) { + Some(inner) => inner.owner_address.clone(), + None => { + match CurrentTokenOwnershipV2Query::get_latest_owned_nft_by_token_data_id( + conn, + &token_address, + query_retries, + query_retry_delay_ms, + ) + .await + { + Ok(nft) => nft.owner_address.clone(), + Err(_) => { + tracing::error!( + transaction_version = txn_version, + lookup_key = &token_address, + "Failed to find current_token_ownership_v2 for burned token. You probably should backfill db." 
+ ); + DEFAULT_OWNER_ADDRESS.to_string() + }, + } + }, + } + }; + + let token_data_id = token_address.clone(); + let storage_id = token_data_id.clone(); + + return Ok(Some(( + Self { + transaction_version: txn_version, + write_set_change_index, + token_data_id: token_data_id.clone(), + property_version_v1: BigDecimal::zero(), + owner_address: Some(previous_owner.clone()), + storage_id: storage_id.clone(), + amount: BigDecimal::zero(), + table_type_v1: None, + token_properties_mutated_v1: None, + is_soulbound_v2: None, // default + token_standard: TokenStandard::V2.to_string(), + is_fungible_v2: None, // default + transaction_timestamp: txn_timestamp, + non_transferrable_by_owner: None, // default + }, + CurrentTokenOwnershipV2 { + token_data_id, + property_version_v1: BigDecimal::zero(), + owner_address: previous_owner, + storage_id, + amount: BigDecimal::zero(), + table_type_v1: None, + token_properties_mutated_v1: None, + is_soulbound_v2: None, // default + token_standard: TokenStandard::V2.to_string(), + is_fungible_v2: None, // default + last_transaction_version: txn_version, + last_transaction_timestamp: txn_timestamp, + non_transferrable_by_owner: None, // default + }, + ))); + } + Ok(None) + } + + /// We want to track tokens in any offer/claims and tokenstore + pub fn get_v1_from_write_table_item( + table_item: &WriteTableItem, + txn_version: i64, + write_set_change_index: i64, + txn_timestamp: chrono::NaiveDateTime, + table_handle_to_owner: &TableHandleToOwner, + ) -> anyhow::Result)>> { + let table_item_data = table_item.data.as_ref().unwrap(); + + let maybe_token = match TokenWriteSet::from_table_item_type( + table_item_data.value_type.as_str(), + &table_item_data.value, + txn_version, + )? { + Some(TokenWriteSet::Token(inner)) => Some(inner), + _ => None, + }; + + if let Some(token) = maybe_token { + let table_handle = standardize_address(&table_item.handle.to_string()); + let amount = ensure_not_negative(token.amount); + let token_id_struct = token.id; + let token_data_id_struct = token_id_struct.token_data_id; + let token_data_id = token_data_id_struct.to_id(); + + let maybe_table_metadata = table_handle_to_owner.get(&table_handle); + let (curr_token_ownership, owner_address, table_type) = match maybe_table_metadata { + Some(tm) => { + if tm.table_type != "0x3::token::TokenStore" { + return Ok(None); + } + let owner_address = tm.get_owner_address(); + ( + Some(CurrentTokenOwnershipV2 { + token_data_id: token_data_id.clone(), + property_version_v1: token_id_struct.property_version.clone(), + owner_address: owner_address.clone(), + storage_id: table_handle.clone(), + amount: amount.clone(), + table_type_v1: Some(tm.table_type.clone()), + token_properties_mutated_v1: Some(token.token_properties.clone()), + is_soulbound_v2: None, + token_standard: TokenStandard::V1.to_string(), + is_fungible_v2: None, + last_transaction_version: txn_version, + last_transaction_timestamp: txn_timestamp, + non_transferrable_by_owner: None, + }), + Some(owner_address), + Some(tm.table_type.clone()), + ) + }, + None => (None, None, None), + }; + + Ok(Some(( + Self { + transaction_version: txn_version, + write_set_change_index, + token_data_id, + property_version_v1: token_id_struct.property_version, + owner_address, + storage_id: table_handle, + amount, + table_type_v1: table_type, + token_properties_mutated_v1: Some(token.token_properties), + is_soulbound_v2: None, + token_standard: TokenStandard::V1.to_string(), + is_fungible_v2: None, + transaction_timestamp: txn_timestamp, + 
non_transferrable_by_owner: None, + }, + curr_token_ownership, + ))) + } else { + Ok(None) + } + } + + /// We want to track tokens in any offer/claims and tokenstore + pub fn get_v1_from_delete_table_item( + table_item: &DeleteTableItem, + txn_version: i64, + write_set_change_index: i64, + txn_timestamp: chrono::NaiveDateTime, + table_handle_to_owner: &TableHandleToOwner, + ) -> anyhow::Result)>> { + let table_item_data = table_item.data.as_ref().unwrap(); + + let maybe_token_id = match TokenWriteSet::from_table_item_type( + table_item_data.key_type.as_str(), + &table_item_data.key, + txn_version, + )? { + Some(TokenWriteSet::TokenId(inner)) => Some(inner), + _ => None, + }; + + if let Some(token_id_struct) = maybe_token_id { + let table_handle = standardize_address(&table_item.handle.to_string()); + let token_data_id_struct = token_id_struct.token_data_id; + let token_data_id = token_data_id_struct.to_id(); + + let maybe_table_metadata = table_handle_to_owner.get(&table_handle); + let (curr_token_ownership, owner_address, table_type) = match maybe_table_metadata { + Some(tm) => { + if tm.table_type != "0x3::token::TokenStore" { + return Ok(None); + } + let owner_address = tm.get_owner_address(); + ( + Some(CurrentTokenOwnershipV2 { + token_data_id: token_data_id.clone(), + property_version_v1: token_id_struct.property_version.clone(), + owner_address: owner_address.clone(), + storage_id: table_handle.clone(), + amount: BigDecimal::zero(), + table_type_v1: Some(tm.table_type.clone()), + token_properties_mutated_v1: None, + is_soulbound_v2: None, + token_standard: TokenStandard::V1.to_string(), + is_fungible_v2: None, + last_transaction_version: txn_version, + last_transaction_timestamp: txn_timestamp, + non_transferrable_by_owner: None, + }), + Some(owner_address), + Some(tm.table_type.clone()), + ) + }, + None => (None, None, None), + }; + + Ok(Some(( + Self { + transaction_version: txn_version, + write_set_change_index, + token_data_id, + property_version_v1: token_id_struct.property_version, + owner_address, + storage_id: table_handle, + amount: BigDecimal::zero(), + table_type_v1: table_type, + token_properties_mutated_v1: None, + is_soulbound_v2: None, + token_standard: TokenStandard::V1.to_string(), + is_fungible_v2: None, + transaction_timestamp: txn_timestamp, + non_transferrable_by_owner: None, + }, + curr_token_ownership, + ))) + } else { + Ok(None) + } + } +} + +impl CurrentTokenOwnershipV2Query { + pub async fn get_latest_owned_nft_by_token_data_id( + conn: &mut DbPoolConnection<'_>, + token_data_id: &str, + query_retries: u32, + query_retry_delay_ms: u64, + ) -> anyhow::Result { + let mut tried = 0; + while tried < query_retries { + tried += 1; + match Self::get_latest_owned_nft_by_token_data_id_impl(conn, token_data_id).await { + Ok(inner) => { + return Ok(NFTOwnershipV2 { + token_data_id: inner.token_data_id.clone(), + owner_address: inner.owner_address.clone(), + is_soulbound: inner.is_soulbound_v2, + }); + }, + Err(_) => { + if tried < query_retries { + tokio::time::sleep(std::time::Duration::from_millis(query_retry_delay_ms)) + .await; + } + }, + } + } + Err(anyhow::anyhow!( + "Failed to get nft by token data id: {}", + token_data_id + )) + } + + async fn get_latest_owned_nft_by_token_data_id_impl( + conn: &mut DbPoolConnection<'_>, + token_data_id: &str, + ) -> diesel::QueryResult { + current_token_ownerships_v2::table + .filter(current_token_ownerships_v2::token_data_id.eq(token_data_id)) + .filter(current_token_ownerships_v2::amount.gt(BigDecimal::zero())) + 
.first::(conn) + .await + } +} diff --git a/rust/processor/src/db/common/models/token_v2_models/v2_token_utils.rs b/rust/processor/src/db/common/models/token_v2_models/v2_token_utils.rs new file mode 100644 index 000000000..714d852f2 --- /dev/null +++ b/rust/processor/src/db/common/models/token_v2_models/v2_token_utils.rs @@ -0,0 +1,614 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +// This is required because a diesel macro makes clippy sad +#![allow(clippy::extra_unused_lifetimes)] + +use crate::{ + db::common::models::{ + coin_models::coin_utils::COIN_ADDR, + default_models::move_resources::MoveResource, + object_models::v2_object_utils::{CurrentObjectPK, ObjectCore, Untransferable}, + token_models::token_utils::{NAME_LENGTH, URI_LENGTH}, + }, + utils::util::{ + deserialize_from_string, deserialize_token_object_property_map_from_bcs_hexstring, + standardize_address, truncate_str, Aggregator, AggregatorSnapshot, DerivedStringSnapshot, + }, +}; +use ahash::{AHashMap, AHashSet}; +use anyhow::{Context, Result}; +use aptos_protos::transaction::v1::{Event, WriteResource}; +use bigdecimal::BigDecimal; +use lazy_static::lazy_static; +use serde::{Deserialize, Serialize}; +use std::fmt::{self, Formatter}; + +pub const TOKEN_V2_ADDR: &str = + "0x0000000000000000000000000000000000000000000000000000000000000004"; + +pub const DEFAULT_OWNER_ADDRESS: &str = "unknown"; + +lazy_static! { + pub static ref V2_STANDARD: String = TokenStandard::V2.to_string(); +} + +/// Tracks all token related data in a hashmap for quick access (keyed on address of the object core) +/// Maps address to burn event. If it's an old event previous_owner will be empty +pub type TokenV2Burned = AHashMap; +pub type TokenV2Minted = AHashSet; + +/// Tracks which token standard a token / collection is built upon +#[derive(Serialize)] +pub enum TokenStandard { + V1, + V2, +} + +impl fmt::Display for TokenStandard { + fn fmt(&self, f: &mut Formatter) -> std::fmt::Result { + let res = match self { + TokenStandard::V1 => "v1", + TokenStandard::V2 => "v2", + }; + write!(f, "{}", res) + } +} + +/* Section on Collection / Token */ +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct Collection { + creator: String, + pub description: String, + // These are set to private because we should never get name or uri directly + name: String, + uri: String, +} + +impl Collection { + pub fn get_creator_address(&self) -> String { + standardize_address(&self.creator) + } + + pub fn get_uri_trunc(&self) -> String { + truncate_str(&self.uri, URI_LENGTH) + } + + pub fn get_name_trunc(&self) -> String { + truncate_str(&self.name, NAME_LENGTH) + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct AptosCollection { + pub mutable_description: bool, + pub mutable_uri: bool, +} + +impl AptosCollection { + pub fn from_write_resource( + write_resource: &WriteResource, + txn_version: i64, + ) -> anyhow::Result> { + let type_str = MoveResource::get_outer_type_from_write_resource(write_resource); + if !V2TokenResource::is_resource_supported(type_str.as_str()) { + return Ok(None); + } + let resource = MoveResource::from_write_resource( + write_resource, + 0, // Placeholder, this isn't used anyway + txn_version, + 0, // Placeholder, this isn't used anyway + ); + + if let V2TokenResource::AptosCollection(inner) = + V2TokenResource::from_resource(&type_str, resource.data.as_ref().unwrap(), txn_version)? 
+ { + Ok(Some(inner)) + } else { + Ok(None) + } + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct TokenV2 { + collection: ResourceReference, + pub description: String, + // These are set to private because we should never get name or uri directly + name: String, + uri: String, +} + +impl TokenV2 { + pub fn get_collection_address(&self) -> String { + self.collection.get_reference_address() + } + + pub fn get_uri_trunc(&self) -> String { + truncate_str(&self.uri, URI_LENGTH) + } + + pub fn get_name_trunc(&self) -> String { + truncate_str(&self.name, NAME_LENGTH) + } + + pub fn from_write_resource( + write_resource: &WriteResource, + txn_version: i64, + ) -> anyhow::Result> { + let type_str = MoveResource::get_outer_type_from_write_resource(write_resource); + if !V2TokenResource::is_resource_supported(type_str.as_str()) { + return Ok(None); + } + let resource = MoveResource::from_write_resource( + write_resource, + 0, // Placeholder, this isn't used anyway + txn_version, + 0, // Placeholder, this isn't used anyway + ); + + if let V2TokenResource::TokenV2(inner) = + V2TokenResource::from_resource(&type_str, resource.data.as_ref().unwrap(), txn_version)? + { + if let Some(token_identifiers) = + TokenIdentifiers::from_write_resource(write_resource, txn_version).unwrap() + { + Ok(Some(TokenV2 { + name: token_identifiers.name.value, + ..inner + })) + } else { + Ok(Some(inner)) + } + } else { + Ok(None) + } + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct ResourceReference { + inner: String, +} + +impl ResourceReference { + pub fn get_reference_address(&self) -> String { + standardize_address(&self.inner) + } +} + +/* Section on Supply */ +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct FixedSupply { + #[serde(deserialize_with = "deserialize_from_string")] + pub current_supply: BigDecimal, + #[serde(deserialize_with = "deserialize_from_string")] + pub max_supply: BigDecimal, + #[serde(deserialize_with = "deserialize_from_string")] + pub total_minted: BigDecimal, +} + +impl FixedSupply { + pub fn from_write_resource( + write_resource: &WriteResource, + txn_version: i64, + ) -> anyhow::Result> { + let type_str = MoveResource::get_outer_type_from_write_resource(write_resource); + if !V2TokenResource::is_resource_supported(type_str.as_str()) { + return Ok(None); + } + let resource = MoveResource::from_write_resource( + write_resource, + 0, // Placeholder, this isn't used anyway + txn_version, + 0, // Placeholder, this isn't used anyway + ); + + if let V2TokenResource::FixedSupply(inner) = + V2TokenResource::from_resource(&type_str, resource.data.as_ref().unwrap(), txn_version)? 
+ { + Ok(Some(inner)) + } else { + Ok(None) + } + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct UnlimitedSupply { + #[serde(deserialize_with = "deserialize_from_string")] + pub current_supply: BigDecimal, + #[serde(deserialize_with = "deserialize_from_string")] + pub total_minted: BigDecimal, +} + +impl UnlimitedSupply { + pub fn from_write_resource( + write_resource: &WriteResource, + txn_version: i64, + ) -> anyhow::Result> { + let type_str = MoveResource::get_outer_type_from_write_resource(write_resource); + if !V2TokenResource::is_resource_supported(type_str.as_str()) { + return Ok(None); + } + let resource = MoveResource::from_write_resource( + write_resource, + 0, // Placeholder, this isn't used anyway + txn_version, + 0, // Placeholder, this isn't used anyway + ); + + if let V2TokenResource::UnlimitedSupply(inner) = + V2TokenResource::from_resource(&type_str, resource.data.as_ref().unwrap(), txn_version)? + { + Ok(Some(inner)) + } else { + Ok(None) + } + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct ConcurrentSupply { + pub current_supply: Aggregator, + pub total_minted: Aggregator, +} + +impl ConcurrentSupply { + pub fn from_write_resource( + write_resource: &WriteResource, + txn_version: i64, + ) -> anyhow::Result> { + let type_str = MoveResource::get_outer_type_from_write_resource(write_resource); + if !V2TokenResource::is_resource_supported(type_str.as_str()) { + return Ok(None); + } + let resource = MoveResource::from_write_resource( + write_resource, + 0, // Placeholder, this isn't used anyway + txn_version, + 0, // Placeholder, this isn't used anyway + ); + + if let V2TokenResource::ConcurrentSupply(inner) = + V2TokenResource::from_resource(&type_str, resource.data.as_ref().unwrap(), txn_version)? 
+ { + Ok(Some(inner)) + } else { + Ok(None) + } + } +} + +/* Section on Events */ +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct MintEvent { + #[serde(deserialize_with = "deserialize_from_string")] + pub index: BigDecimal, + token: String, +} + +impl MintEvent { + pub fn from_event(event: &Event, txn_version: i64) -> anyhow::Result> { + if let Some(V2TokenEvent::MintEvent(inner)) = + V2TokenEvent::from_event(event.type_str.as_str(), &event.data, txn_version).unwrap() + { + Ok(Some(inner)) + } else { + Ok(None) + } + } + + pub fn get_token_address(&self) -> String { + standardize_address(&self.token) + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct Mint { + collection: String, + pub index: AggregatorSnapshot, + token: String, +} + +impl Mint { + pub fn get_token_address(&self) -> String { + standardize_address(&self.token) + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct TokenMutationEvent { + pub mutated_field_name: String, + pub old_value: String, + pub new_value: String, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct BurnEvent { + #[serde(deserialize_with = "deserialize_from_string")] + pub index: BigDecimal, + token: String, +} + +impl BurnEvent { + pub fn from_event(event: &Event, txn_version: i64) -> anyhow::Result> { + if let Some(V2TokenEvent::BurnEvent(inner)) = + V2TokenEvent::from_event(event.type_str.as_str(), &event.data, txn_version).unwrap() + { + Ok(Some(inner)) + } else { + Ok(None) + } + } + + pub fn get_token_address(&self) -> String { + standardize_address(&self.token) + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct Burn { + collection: String, + token: String, + previous_owner: String, +} + +impl Burn { + pub fn new(collection: String, token: String, previous_owner: String) -> Self { + Burn { + collection, + token, + previous_owner, + } + } + + pub fn from_event(event: &Event, txn_version: i64) -> anyhow::Result> { + if let Some(V2TokenEvent::Burn(inner)) = + V2TokenEvent::from_event(event.type_str.as_str(), &event.data, txn_version).unwrap() + { + Ok(Some(inner)) + } else { + Ok(None) + } + } + + pub fn get_token_address(&self) -> String { + standardize_address(&self.token) + } + + pub fn get_previous_owner_address(&self) -> Option { + if self.previous_owner.is_empty() { + None + } else { + Some(standardize_address(&self.previous_owner)) + } + } + + pub fn get_collection_address(&self) -> String { + standardize_address(&self.collection) + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct TransferEvent { + from: String, + to: String, + object: String, +} + +impl TransferEvent { + pub fn from_event(event: &Event, txn_version: i64) -> anyhow::Result> { + if let Some(V2TokenEvent::TransferEvent(inner)) = + V2TokenEvent::from_event(event.type_str.as_str(), &event.data, txn_version).unwrap() + { + Ok(Some(inner)) + } else { + Ok(None) + } + } + + pub fn get_from_address(&self) -> String { + standardize_address(&self.from) + } + + pub fn get_to_address(&self) -> String { + standardize_address(&self.to) + } + + pub fn get_object_address(&self) -> String { + standardize_address(&self.object) + } +} + +/* Section on Property Maps */ +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct PropertyMapModel { + #[serde(deserialize_with = "deserialize_token_object_property_map_from_bcs_hexstring")] + pub inner: serde_json::Value, +} + +impl PropertyMapModel { + pub fn from_write_resource( + write_resource: &WriteResource, + txn_version: i64, + ) -> anyhow::Result> 
{ + let type_str = MoveResource::get_outer_type_from_write_resource(write_resource); + if !V2TokenResource::is_resource_supported(type_str.as_str()) { + return Ok(None); + } + let resource = MoveResource::from_write_resource( + write_resource, + 0, // Placeholder, this isn't used anyway + txn_version, + 0, // Placeholder, this isn't used anyway + ); + + if let V2TokenResource::PropertyMapModel(inner) = + V2TokenResource::from_resource(&type_str, resource.data.as_ref().unwrap(), txn_version)? + { + Ok(Some(inner)) + } else { + Ok(None) + } + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct TokenIdentifiers { + name: DerivedStringSnapshot, +} + +impl TokenIdentifiers { + pub fn from_write_resource( + write_resource: &WriteResource, + txn_version: i64, + ) -> anyhow::Result> { + let type_str = MoveResource::get_outer_type_from_write_resource(write_resource); + if !V2TokenResource::is_resource_supported(type_str.as_str()) { + return Ok(None); + } + let resource = MoveResource::from_write_resource( + write_resource, + 0, // Placeholder, this isn't used anyway + txn_version, + 0, // Placeholder, this isn't used anyway + ); + + if let V2TokenResource::TokenIdentifiers(inner) = + V2TokenResource::from_resource(&type_str, resource.data.as_ref().unwrap(), txn_version)? + { + Ok(Some(inner)) + } else { + Ok(None) + } + } + + pub fn get_name_trunc(&self) -> String { + truncate_str(&self.name.value, NAME_LENGTH) + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub enum V2TokenResource { + AptosCollection(AptosCollection), + Collection(Collection), + ConcurrentSupply(ConcurrentSupply), + FixedSupply(FixedSupply), + ObjectCore(ObjectCore), + UnlimitedSupply(UnlimitedSupply), + Untransferable(Untransferable), + TokenV2(TokenV2), + PropertyMapModel(PropertyMapModel), + TokenIdentifiers(TokenIdentifiers), +} + +impl V2TokenResource { + pub fn is_resource_supported(data_type: &str) -> bool { + [ + format!("{}::object::ObjectCore", COIN_ADDR), + format!("{}::object::Untransferable", COIN_ADDR), + format!("{}::collection::Collection", TOKEN_V2_ADDR), + format!("{}::collection::ConcurrentSupply", TOKEN_V2_ADDR), + format!("{}::collection::FixedSupply", TOKEN_V2_ADDR), + format!("{}::collection::UnlimitedSupply", TOKEN_V2_ADDR), + format!("{}::aptos_token::AptosCollection", TOKEN_V2_ADDR), + format!("{}::token::Token", TOKEN_V2_ADDR), + format!("{}::property_map::PropertyMap", TOKEN_V2_ADDR), + format!("{}::token::TokenIdentifiers", TOKEN_V2_ADDR), + ] + .contains(&data_type.to_string()) + } + + pub fn from_resource( + data_type: &str, + data: &serde_json::Value, + txn_version: i64, + ) -> Result { + match data_type { + x if x == format!("{}::object::ObjectCore", COIN_ADDR) => { + serde_json::from_value(data.clone()).map(|inner| Some(Self::ObjectCore(inner))) + }, + x if x == format!("{}::object::Untransferable", COIN_ADDR) => { + serde_json::from_value(data.clone()).map(|inner| Some(Self::Untransferable(inner))) + }, + x if x == format!("{}::collection::Collection", TOKEN_V2_ADDR) => { + serde_json::from_value(data.clone()).map(|inner| Some(Self::Collection(inner))) + }, + x if x == format!("{}::collection::ConcurrentSupply", TOKEN_V2_ADDR) => { + serde_json::from_value(data.clone()) + .map(|inner| Some(Self::ConcurrentSupply(inner))) + }, + x if x == format!("{}::collection::FixedSupply", TOKEN_V2_ADDR) => { + serde_json::from_value(data.clone()).map(|inner| Some(Self::FixedSupply(inner))) + }, + x if x == format!("{}::collection::UnlimitedSupply", TOKEN_V2_ADDR) => { + 
serde_json::from_value(data.clone()).map(|inner| Some(Self::UnlimitedSupply(inner))) + }, + x if x == format!("{}::aptos_token::AptosCollection", TOKEN_V2_ADDR) => { + serde_json::from_value(data.clone()).map(|inner| Some(Self::AptosCollection(inner))) + }, + x if x == format!("{}::token::Token", TOKEN_V2_ADDR) => { + serde_json::from_value(data.clone()).map(|inner| Some(Self::TokenV2(inner))) + }, + x if x == format!("{}::token::TokenIdentifiers", TOKEN_V2_ADDR) => { + serde_json::from_value(data.clone()) + .map(|inner| Some(Self::TokenIdentifiers(inner))) + }, + x if x == format!("{}::property_map::PropertyMap", TOKEN_V2_ADDR) => { + serde_json::from_value(data.clone()) + .map(|inner| Some(Self::PropertyMapModel(inner))) + }, + _ => Ok(None), + } + .context(format!( + "version {} failed! failed to parse type {}, data {:?}", + txn_version, data_type, data + ))? + .context(format!( + "Resource unsupported! Call is_resource_supported first. version {} type {}", + txn_version, data_type + )) + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub enum V2TokenEvent { + Mint(Mint), + MintEvent(MintEvent), + TokenMutationEvent(TokenMutationEvent), + Burn(Burn), + BurnEvent(BurnEvent), + TransferEvent(TransferEvent), +} + +impl V2TokenEvent { + pub fn from_event(data_type: &str, data: &str, txn_version: i64) -> Result> { + match data_type { + "0x4::collection::Mint" => { + serde_json::from_str(data).map(|inner| Some(Self::Mint(inner))) + }, + "0x4::collection::MintEvent" => { + serde_json::from_str(data).map(|inner| Some(Self::MintEvent(inner))) + }, + "0x4::token::MutationEvent" => { + serde_json::from_str(data).map(|inner| Some(Self::TokenMutationEvent(inner))) + }, + "0x4::collection::Burn" => { + serde_json::from_str(data).map(|inner| Some(Self::Burn(inner))) + }, + "0x4::collection::BurnEvent" => { + serde_json::from_str(data).map(|inner| Some(Self::BurnEvent(inner))) + }, + "0x1::object::TransferEvent" => { + serde_json::from_str(data).map(|inner| Some(Self::TransferEvent(inner))) + }, + _ => Ok(None), + } + .context(format!( + "version {} failed! 
failed to parse type {}, data {:?}", + txn_version, data_type, data + )) + } +} diff --git a/rust/processor/src/db/common/models/transaction_metadata_model/event_size_info.rs b/rust/processor/src/db/common/models/transaction_metadata_model/event_size_info.rs new file mode 100644 index 000000000..cb5aae060 --- /dev/null +++ b/rust/processor/src/db/common/models/transaction_metadata_model/event_size_info.rs @@ -0,0 +1,34 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +#![allow(clippy::extra_unused_lifetimes)] + +use crate::schema::event_size_info; +use aptos_protos::transaction::v1::EventSizeInfo; +use field_count::FieldCount; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key(transaction_version, index))] +#[diesel(table_name = event_size_info)] +pub struct EventSize { + pub transaction_version: i64, + pub index: i64, + pub type_tag_bytes: i64, + pub total_bytes: i64, +} + +impl EventSize { + pub fn from_event_size_info( + info: &EventSizeInfo, + transaction_version: i64, + index: i64, + ) -> Self { + EventSize { + transaction_version, + index, + type_tag_bytes: info.type_tag_bytes as i64, + total_bytes: info.total_bytes as i64, + } + } +} diff --git a/rust/processor/src/db/common/models/transaction_metadata_model/mod.rs b/rust/processor/src/db/common/models/transaction_metadata_model/mod.rs new file mode 100644 index 000000000..a04e5184d --- /dev/null +++ b/rust/processor/src/db/common/models/transaction_metadata_model/mod.rs @@ -0,0 +1,6 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +pub mod event_size_info; +pub mod transaction_size_info; +pub mod write_set_size_info; diff --git a/rust/processor/src/db/common/models/transaction_metadata_model/transaction_size_info.rs b/rust/processor/src/db/common/models/transaction_metadata_model/transaction_size_info.rs new file mode 100644 index 000000000..4af3934cc --- /dev/null +++ b/rust/processor/src/db/common/models/transaction_metadata_model/transaction_size_info.rs @@ -0,0 +1,26 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +#![allow(clippy::extra_unused_lifetimes)] + +use crate::schema::transaction_size_info; +use aptos_protos::transaction::v1::TransactionSizeInfo; +use field_count::FieldCount; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key(transaction_version))] +#[diesel(table_name = transaction_size_info)] +pub struct TransactionSize { + pub transaction_version: i64, + pub size_bytes: i64, +} + +impl TransactionSize { + pub fn from_transaction_info(info: &TransactionSizeInfo, transaction_version: i64) -> Self { + TransactionSize { + transaction_version, + size_bytes: info.transaction_bytes as i64, + } + } +} diff --git a/rust/processor/src/db/common/models/transaction_metadata_model/write_set_size_info.rs b/rust/processor/src/db/common/models/transaction_metadata_model/write_set_size_info.rs new file mode 100644 index 000000000..2ca5b579a --- /dev/null +++ b/rust/processor/src/db/common/models/transaction_metadata_model/write_set_size_info.rs @@ -0,0 +1,34 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +#![allow(clippy::extra_unused_lifetimes)] + +use crate::schema::write_set_size_info; +use aptos_protos::transaction::v1::WriteOpSizeInfo; +use field_count::FieldCount; +use serde::{Deserialize, Serialize}; + 
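The WriteSetSize model below completes the transaction-metadata trio started above (EventSize and TransactionSize). As a rough illustration of how a processor might fan one transaction's size metadata out into these row types, here is a minimal sketch; it is not part of this change, and the `event_size_info`/`write_op_size_info` field names on TransactionSizeInfo, as well as the module paths in the imports, are assumptions inferred from the per-row constructors rather than taken from this diff.

// Illustrative sketch only; the TransactionSizeInfo field names and module
// paths below are assumptions, not confirmed by this diff.
use aptos_protos::transaction::v1::TransactionSizeInfo;
use crate::db::common::models::transaction_metadata_model::{
    event_size_info::EventSize, transaction_size_info::TransactionSize,
    write_set_size_info::WriteSetSize,
};

fn size_rows_for_transaction(
    txn_version: i64,
    size_info: &TransactionSizeInfo,
) -> (TransactionSize, Vec<EventSize>, Vec<WriteSetSize>) {
    // One row per transaction.
    let txn_size = TransactionSize::from_transaction_info(size_info, txn_version);
    // One row per event, keyed by (transaction_version, index).
    let event_sizes: Vec<EventSize> = size_info
        .event_size_info
        .iter()
        .enumerate()
        .map(|(i, info)| EventSize::from_event_size_info(info, txn_version, i as i64))
        .collect();
    // One row per write op, keyed by (transaction_version, index).
    let write_set_sizes: Vec<WriteSetSize> = size_info
        .write_op_size_info
        .iter()
        .enumerate()
        .map(|(i, info)| WriteSetSize::from_transaction_info(info, txn_version, i as i64))
        .collect();
    (txn_size, event_sizes, write_set_sizes)
}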
+#[derive(Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key(transaction_version, index))] +#[diesel(table_name = write_set_size_info)] +pub struct WriteSetSize { + pub transaction_version: i64, + pub index: i64, + pub key_bytes: i64, + pub value_bytes: i64, +} + +impl WriteSetSize { + pub fn from_transaction_info( + info: &WriteOpSizeInfo, + transaction_version: i64, + index: i64, + ) -> Self { + WriteSetSize { + transaction_version, + index, + key_bytes: info.key_bytes as i64, + value_bytes: info.value_bytes as i64, + } + } +} diff --git a/rust/processor/src/db/common/models/user_transactions_models/mod.rs b/rust/processor/src/db/common/models/user_transactions_models/mod.rs new file mode 100644 index 000000000..fbb27686b --- /dev/null +++ b/rust/processor/src/db/common/models/user_transactions_models/mod.rs @@ -0,0 +1,5 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +pub mod signatures; +pub mod user_transactions; diff --git a/rust/processor/src/db/common/models/user_transactions_models/signatures.rs b/rust/processor/src/db/common/models/user_transactions_models/signatures.rs new file mode 100644 index 000000000..0ff2ea1c1 --- /dev/null +++ b/rust/processor/src/db/common/models/user_transactions_models/signatures.rs @@ -0,0 +1,573 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +#![allow(clippy::extra_unused_lifetimes)] + +use crate::{ + schema::signatures::{self}, + utils::{counters::PROCESSOR_UNKNOWN_TYPE_COUNT, util::standardize_address}, +}; +use anyhow::{Context, Result}; +use aptos_protos::transaction::v1::{ + account_signature::Signature as AccountSignatureEnum, + any_signature::{SignatureVariant, Type as AnySignatureTypeEnumPb}, + signature::Signature as SignatureEnum, + AccountSignature as ProtoAccountSignature, Ed25519Signature as Ed25519SignaturePB, + FeePayerSignature as ProtoFeePayerSignature, MultiAgentSignature as ProtoMultiAgentSignature, + MultiEd25519Signature as MultiEd25519SignaturePb, MultiKeySignature as MultiKeySignaturePb, + Signature as TransactionSignaturePb, SingleKeySignature as SingleKeySignaturePb, + SingleSender as SingleSenderPb, +}; +use field_count::FieldCount; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key( + transaction_version, + multi_agent_index, + multi_sig_index, + is_sender_primary +))] +#[diesel(table_name = signatures)] +pub struct Signature { + pub transaction_version: i64, + pub multi_agent_index: i64, + pub multi_sig_index: i64, + pub transaction_block_height: i64, + pub signer: String, + pub is_sender_primary: bool, + pub type_: String, + pub public_key: String, + pub signature: String, + pub threshold: i64, + pub public_key_indices: serde_json::Value, +} + +impl Signature { + /// Returns a flattened list of signatures. 
If signature is a Ed25519Signature, then return a vector of 1 signature + pub fn from_user_transaction( + s: &TransactionSignaturePb, + sender: &String, + transaction_version: i64, + transaction_block_height: i64, + ) -> Result> { + match s.signature.as_ref().unwrap() { + SignatureEnum::Ed25519(sig) => Ok(vec![Self::parse_ed25519_signature( + sig, + sender, + transaction_version, + transaction_block_height, + true, + 0, + None, + )]), + SignatureEnum::MultiEd25519(sig) => Ok(Self::parse_multi_ed25519_signature( + sig, + sender, + transaction_version, + transaction_block_height, + true, + 0, + None, + )), + SignatureEnum::MultiAgent(sig) => Self::parse_multi_agent_signature( + sig, + sender, + transaction_version, + transaction_block_height, + ), + SignatureEnum::FeePayer(sig) => Self::parse_fee_payer_signature( + sig, + sender, + transaction_version, + transaction_block_height, + ), + SignatureEnum::SingleSender(s) => Ok(Self::parse_single_sender( + s, + sender, + transaction_version, + transaction_block_height, + )), + } + } + + pub fn get_signature_type(t: &TransactionSignaturePb) -> String { + match t.signature.as_ref().unwrap() { + SignatureEnum::Ed25519(_) => String::from("ed25519_signature"), + SignatureEnum::MultiEd25519(_) => String::from("multi_ed25519_signature"), + SignatureEnum::MultiAgent(_) => String::from("multi_agent_signature"), + SignatureEnum::FeePayer(_) => String::from("fee_payer_signature"), + SignatureEnum::SingleSender(sender) => { + let account_signature = sender.sender.as_ref().unwrap(); + let signature = account_signature.signature.as_ref().unwrap(); + match signature { + AccountSignatureEnum::Ed25519(_) => String::from("ed25519_signature"), + AccountSignatureEnum::MultiEd25519(_) => { + String::from("multi_ed25519_signature") + }, + AccountSignatureEnum::SingleKeySignature(_) => { + String::from("single_key_signature") + }, + AccountSignatureEnum::MultiKeySignature(_) => { + String::from("multi_key_signature") + }, + } + }, + } + } + + pub fn get_fee_payer_address( + t: &TransactionSignaturePb, + transaction_version: i64, + ) -> Option { + let sig = t.signature.as_ref().unwrap_or_else(|| { + tracing::error!( + transaction_version = transaction_version, + "Transaction signature is missing" + ); + panic!("Transaction signature is missing"); + }); + match sig { + SignatureEnum::FeePayer(sig) => Some(standardize_address(&sig.fee_payer_address)), + _ => None, + } + } + + fn parse_ed25519_signature( + s: &Ed25519SignaturePB, + sender: &String, + transaction_version: i64, + transaction_block_height: i64, + is_sender_primary: bool, + multi_agent_index: i64, + override_address: Option<&String>, + ) -> Self { + let signer = standardize_address(override_address.unwrap_or(sender)); + Self { + transaction_version, + transaction_block_height, + signer, + is_sender_primary, + type_: String::from("ed25519_signature"), + public_key: format!("0x{}", hex::encode(s.public_key.as_slice())), + threshold: 1, + public_key_indices: serde_json::Value::Array(vec![]), + signature: format!("0x{}", hex::encode(s.signature.as_slice())), + multi_agent_index, + multi_sig_index: 0, + } + } + + fn parse_multi_ed25519_signature( + s: &MultiEd25519SignaturePb, + sender: &String, + transaction_version: i64, + transaction_block_height: i64, + is_sender_primary: bool, + multi_agent_index: i64, + override_address: Option<&String>, + ) -> Vec { + let mut signatures = Vec::default(); + let signer = standardize_address(override_address.unwrap_or(sender)); + + let public_key_indices: Vec = s + 
.public_key_indices + .iter() + .map(|index| *index as usize) + .collect(); + for (index, signature) in s.signatures.iter().enumerate() { + let public_key = s + .public_keys + .get(public_key_indices.clone()[index]) + .unwrap() + .clone(); + signatures.push(Self { + transaction_version, + transaction_block_height, + signer: signer.clone(), + is_sender_primary, + type_: String::from("multi_ed25519_signature"), + public_key: format!("0x{}", hex::encode(public_key.as_slice())), + threshold: s.threshold as i64, + signature: format!("0x{}", hex::encode(signature.as_slice())), + public_key_indices: serde_json::Value::Array( + public_key_indices + .iter() + .map(|index| { + serde_json::Value::Number(serde_json::Number::from(*index as i64)) + }) + .collect(), + ), + multi_agent_index, + multi_sig_index: index as i64, + }); + } + signatures + } + + fn parse_multi_agent_signature( + s: &ProtoMultiAgentSignature, + sender: &String, + transaction_version: i64, + transaction_block_height: i64, + ) -> Result> { + let mut signatures = Vec::default(); + // process sender signature + signatures.append(&mut Self::parse_multi_agent_signature_helper( + s.sender.as_ref().unwrap(), + sender, + transaction_version, + transaction_block_height, + true, + 0, + None, + )); + for (index, address) in s.secondary_signer_addresses.iter().enumerate() { + let secondary_sig = s.secondary_signers.get(index).context(format!( + "Failed to parse index {} for multi agent secondary signers", + index + ))?; + signatures.append(&mut Self::parse_multi_agent_signature_helper( + secondary_sig, + sender, + transaction_version, + transaction_block_height, + false, + index as i64, + Some(&address.to_string()), + )); + } + Ok(signatures) + } + + fn parse_fee_payer_signature( + s: &ProtoFeePayerSignature, + sender: &String, + transaction_version: i64, + transaction_block_height: i64, + ) -> Result> { + let mut signatures = Vec::default(); + // process sender signature + signatures.append(&mut Self::parse_multi_agent_signature_helper( + s.sender.as_ref().unwrap(), + sender, + transaction_version, + transaction_block_height, + true, + 0, + None, + )); + for (index, address) in s.secondary_signer_addresses.iter().enumerate() { + let secondary_sig = s.secondary_signers.get(index).context(format!( + "Failed to parse index {} for multi agent secondary signers", + index + ))?; + signatures.append(&mut Self::parse_multi_agent_signature_helper( + secondary_sig, + sender, + transaction_version, + transaction_block_height, + false, + index as i64, + Some(&address.to_string()), + )); + } + Ok(signatures) + } + + fn parse_multi_agent_signature_helper( + s: &ProtoAccountSignature, + sender: &String, + transaction_version: i64, + transaction_block_height: i64, + is_sender_primary: bool, + multi_agent_index: i64, + override_address: Option<&String>, + ) -> Vec { + let signature = s.signature.as_ref().unwrap(); + match signature { + AccountSignatureEnum::Ed25519(sig) => vec![Self::parse_ed25519_signature( + sig, + sender, + transaction_version, + transaction_block_height, + is_sender_primary, + multi_agent_index, + override_address, + )], + AccountSignatureEnum::MultiEd25519(sig) => Self::parse_multi_ed25519_signature( + sig, + sender, + transaction_version, + transaction_block_height, + is_sender_primary, + multi_agent_index, + override_address, + ), + AccountSignatureEnum::SingleKeySignature(sig) => { + vec![Self::parse_single_key_signature( + sig, + sender, + transaction_version, + transaction_block_height, + is_sender_primary, + multi_agent_index, + 
override_address, + )] + }, + AccountSignatureEnum::MultiKeySignature(sig) => Self::parse_multi_key_signature( + sig, + sender, + transaction_version, + transaction_block_height, + is_sender_primary, + multi_agent_index, + override_address, + ), + } + } + + #[allow(deprecated)] + fn parse_single_key_signature( + s: &SingleKeySignaturePb, + sender: &String, + transaction_version: i64, + transaction_block_height: i64, + is_sender_primary: bool, + multi_agent_index: i64, + override_address: Option<&String>, + ) -> Self { + let signer = standardize_address(override_address.unwrap_or(sender)); + let signature = s.signature.as_ref().unwrap(); + let signature_bytes = + Self::get_any_signature_bytes(&signature.signature_variant, transaction_version) + // old way of getting signature bytes prior to node 1.10 + .unwrap_or(signature.signature.clone()); + let type_ = if let Some(t) = + Self::get_any_signature_type(&signature.signature_variant, true, transaction_version) + { + t + } else { + // old way of getting signature type prior to node 1.10 + match AnySignatureTypeEnumPb::try_from(signature.r#type) { + Ok(AnySignatureTypeEnumPb::Ed25519) => String::from("single_key_ed25519_signature"), + Ok(AnySignatureTypeEnumPb::Secp256k1Ecdsa) => { + String::from("single_key_secp256k1_ecdsa_signature") + }, + wildcard => { + tracing::warn!( + transaction_version = transaction_version, + "Unspecified signature type or un-recognized type is not supported: {:?}", + wildcard + ); + PROCESSOR_UNKNOWN_TYPE_COUNT + .with_label_values(&["unspecified_signature_type"]) + .inc(); + "".to_string() + }, + } + }; + Self { + transaction_version, + transaction_block_height, + signer, + is_sender_primary, + type_, + public_key: format!( + "0x{}", + hex::encode(s.public_key.as_ref().unwrap().public_key.as_slice()) + ), + threshold: 1, + public_key_indices: serde_json::Value::Array(vec![]), + signature: format!("0x{}", hex::encode(signature_bytes.as_slice())), + multi_agent_index, + multi_sig_index: 0, + } + } + + #[allow(deprecated)] + fn parse_multi_key_signature( + s: &MultiKeySignaturePb, + sender: &String, + transaction_version: i64, + transaction_block_height: i64, + is_sender_primary: bool, + multi_agent_index: i64, + override_address: Option<&String>, + ) -> Vec { + let signer = standardize_address(override_address.unwrap_or(sender)); + let mut signatures = Vec::default(); + + let public_key_indices: Vec = + s.signatures.iter().map(|key| key.index as usize).collect(); + + for (index, signature) in s.signatures.iter().enumerate() { + let public_key = s + .public_keys + .as_slice() + .get(index) + .unwrap() + .public_key + .clone(); + let signature_bytes = Self::get_any_signature_bytes( + &signature.signature.as_ref().unwrap().signature_variant, + transaction_version, + ) + // old way of getting signature bytes prior to node 1.10 + .unwrap_or(signature.signature.as_ref().unwrap().signature.clone()); + + let type_ = if let Some(t) = Self::get_any_signature_type( + &signature.signature.as_ref().unwrap().signature_variant, + false, + transaction_version, + ) { + t + } else { + // old way of getting signature type prior to node 1.10 + match AnySignatureTypeEnumPb::try_from(signature.signature.as_ref().unwrap().r#type) + { + Ok(AnySignatureTypeEnumPb::Ed25519) => { + String::from("multi_key_ed25519_signature") + }, + Ok(AnySignatureTypeEnumPb::Secp256k1Ecdsa) => { + String::from("multi_key_secp256k1_ecdsa_signature") + }, + wildcard => { + tracing::warn!( + transaction_version = transaction_version, + "Unspecified signature 
type or un-recognized type is not supported: {:?}", + wildcard + ); + PROCESSOR_UNKNOWN_TYPE_COUNT + .with_label_values(&["unspecified_signature_type"]) + .inc(); + "unknown".to_string() + }, + } + }; + signatures.push(Self { + transaction_version, + transaction_block_height, + signer: signer.clone(), + is_sender_primary, + type_, + public_key: format!("0x{}", hex::encode(public_key.as_slice())), + threshold: s.signatures_required as i64, + signature: format!("0x{}", hex::encode(signature_bytes.as_slice())), + public_key_indices: serde_json::Value::Array( + public_key_indices + .iter() + .map(|index| { + serde_json::Value::Number(serde_json::Number::from(*index as i64)) + }) + .collect(), + ), + multi_agent_index, + multi_sig_index: index as i64, + }); + } + signatures + } + + fn get_any_signature_bytes( + signature_variant: &Option, + transaction_version: i64, + ) -> Option> { + match signature_variant { + Some(SignatureVariant::Ed25519(sig)) => Some(sig.signature.clone()), + Some(SignatureVariant::Keyless(sig)) => Some(sig.signature.clone()), + Some(SignatureVariant::Webauthn(sig)) => Some(sig.signature.clone()), + Some(SignatureVariant::Secp256k1Ecdsa(sig)) => Some(sig.signature.clone()), + None => { + PROCESSOR_UNKNOWN_TYPE_COUNT + .with_label_values(&["SignatureVariant"]) + .inc(); + tracing::warn!( + transaction_version = transaction_version, + "Signature variant doesn't exist", + ); + None + }, + } + } + + fn get_any_signature_type( + signature_variant: &Option, + is_single_sender: bool, + transaction_version: i64, + ) -> Option { + let prefix = if is_single_sender { + "single_sender" + } else { + "multi_key" + }; + match signature_variant { + Some(SignatureVariant::Ed25519(_)) => Some(format!("{}_ed25519_signature", prefix)), + Some(SignatureVariant::Keyless(_)) => Some(format!("{}_keyless_signature", prefix)), + Some(SignatureVariant::Webauthn(_)) => Some(format!("{}_webauthn_signature", prefix)), + Some(SignatureVariant::Secp256k1Ecdsa(_)) => { + Some(format!("{}_secp256k1_ecdsa_signature", prefix)) + }, + None => { + PROCESSOR_UNKNOWN_TYPE_COUNT + .with_label_values(&["SignatureVariant"]) + .inc(); + tracing::warn!( + transaction_version = transaction_version, + "Signature variant doesn't exist", + ); + None + }, + } + } + + fn parse_single_sender( + s: &SingleSenderPb, + sender: &String, + transaction_version: i64, + transaction_block_height: i64, + ) -> Vec { + let signature = s.sender.as_ref().unwrap(); + match signature.signature.as_ref() { + Some(AccountSignatureEnum::SingleKeySignature(s)) => { + vec![Self::parse_single_key_signature( + s, + sender, + transaction_version, + transaction_block_height, + true, + 0, + None, + )] + }, + Some(AccountSignatureEnum::MultiKeySignature(s)) => Self::parse_multi_key_signature( + s, + sender, + transaction_version, + transaction_block_height, + true, + 0, + None, + ), + Some(AccountSignatureEnum::Ed25519(s)) => vec![Self::parse_ed25519_signature( + s, + sender, + transaction_version, + transaction_block_height, + true, + 0, + None, + )], + Some(AccountSignatureEnum::MultiEd25519(s)) => Self::parse_multi_ed25519_signature( + s, + sender, + transaction_version, + transaction_block_height, + true, + 0, + None, + ), + None => vec![], + } + } +} diff --git a/rust/processor/src/db/common/models/user_transactions_models/user_transactions.rs b/rust/processor/src/db/common/models/user_transactions_models/user_transactions.rs new file mode 100644 index 000000000..e872e8a44 --- /dev/null +++ 
b/rust/processor/src/db/common/models/user_transactions_models/user_transactions.rs @@ -0,0 +1,105 @@ +// Copyright © Aptos Foundation + +// Copyright (c) Aptos +// SPDX-License-Identifier: Apache-2.0 + +// This is required because a diesel macro makes clippy sad +#![allow(clippy::extra_unused_lifetimes)] +#![allow(clippy::unused_unit)] + +use super::signatures::Signature; +use crate::{ + schema::user_transactions, + utils::util::{ + get_entry_function_from_user_request, parse_timestamp, standardize_address, + u64_to_bigdecimal, + }, +}; +use aptos_protos::{ + transaction::v1::{UserTransaction as UserTransactionPB, UserTransactionRequest}, + util::timestamp::Timestamp, +}; +use bigdecimal::BigDecimal; +use field_count::FieldCount; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Deserialize, Debug, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key(version))] +#[diesel(table_name = user_transactions)] +pub struct UserTransaction { + pub version: i64, + pub block_height: i64, + pub parent_signature_type: String, + pub sender: String, + pub sequence_number: i64, + pub max_gas_amount: BigDecimal, + pub expiration_timestamp_secs: chrono::NaiveDateTime, + pub gas_unit_price: BigDecimal, + pub timestamp: chrono::NaiveDateTime, + pub entry_function_id_str: String, + pub epoch: i64, +} + +impl UserTransaction { + pub fn from_transaction( + txn: &UserTransactionPB, + timestamp: &Timestamp, + block_height: i64, + epoch: i64, + version: i64, + ) -> (Self, Vec) { + let user_request = txn + .request + .as_ref() + .expect("Sends is not present in user txn"); + ( + Self { + version, + block_height, + parent_signature_type: txn + .request + .as_ref() + .unwrap() + .signature + .as_ref() + .map(Signature::get_signature_type) + .unwrap_or_default(), + sender: standardize_address(&user_request.sender), + sequence_number: user_request.sequence_number as i64, + max_gas_amount: u64_to_bigdecimal(user_request.max_gas_amount), + expiration_timestamp_secs: parse_timestamp( + user_request + .expiration_timestamp_secs + .as_ref() + .expect("Expiration timestamp is not present in user txn"), + version, + ), + gas_unit_price: u64_to_bigdecimal(user_request.gas_unit_price), + timestamp: parse_timestamp(timestamp, version), + entry_function_id_str: get_entry_function_from_user_request(user_request) + .unwrap_or_default(), + epoch, + }, + Self::get_signatures(user_request, version, block_height), + ) + } + + /// Empty vec if signature is None + pub fn get_signatures( + user_request: &UserTransactionRequest, + version: i64, + block_height: i64, + ) -> Vec { + user_request + .signature + .as_ref() + .map(|s| { + Signature::from_user_transaction(s, &user_request.sender, version, block_height) + .unwrap() + }) + .unwrap_or_default() + } +} + +// Prevent conflicts with other things named `Transaction` +pub type UserTransactionModel = UserTransaction; diff --git a/rust/processor/src/db/mod.rs b/rust/processor/src/db/mod.rs new file mode 100644 index 000000000..34994bf5a --- /dev/null +++ b/rust/processor/src/db/mod.rs @@ -0,0 +1 @@ +pub mod common; diff --git a/rust/processor/src/db/postgres/diesel.toml b/rust/processor/src/db/postgres/diesel.toml new file mode 100644 index 000000000..9a59970bb --- /dev/null +++ b/rust/processor/src/db/postgres/diesel.toml @@ -0,0 +1,8 @@ +# For documentation on how to configure this file, +# see https://diesel.rs/guides/configuring-diesel-cli + +[print_schema] +file = "schema.rs" + +[migrations_directory] +dir = "migrations" diff --git 
a/rust/processor/src/db/postgres/migrations/.keep b/rust/processor/src/db/postgres/migrations/.keep new file mode 100644 index 000000000..e69de29bb diff --git a/rust/processor/src/db/postgres/migrations/00000000000000_diesel_initial_setup/down.sql b/rust/processor/src/db/postgres/migrations/00000000000000_diesel_initial_setup/down.sql new file mode 100644 index 000000000..a9f526091 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/00000000000000_diesel_initial_setup/down.sql @@ -0,0 +1,6 @@ +-- This file was automatically created by Diesel to setup helper functions +-- and other internal bookkeeping. This file is safe to edit, any future +-- changes will be added to existing projects as new migrations. + +DROP FUNCTION IF EXISTS diesel_manage_updated_at(_tbl regclass); +DROP FUNCTION IF EXISTS diesel_set_updated_at(); diff --git a/rust/processor/src/db/postgres/migrations/00000000000000_diesel_initial_setup/up.sql b/rust/processor/src/db/postgres/migrations/00000000000000_diesel_initial_setup/up.sql new file mode 100644 index 000000000..d68895b1a --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/00000000000000_diesel_initial_setup/up.sql @@ -0,0 +1,36 @@ +-- This file was automatically created by Diesel to setup helper functions +-- and other internal bookkeeping. This file is safe to edit, any future +-- changes will be added to existing projects as new migrations. + + + + +-- Sets up a trigger for the given table to automatically set a column called +-- `updated_at` whenever the row is modified (unless `updated_at` was included +-- in the modified columns) +-- +-- # Example +-- +-- ```sql +-- CREATE TABLE users (id SERIAL PRIMARY KEY, updated_at TIMESTAMP NOT NULL DEFAULT NOW()); +-- +-- SELECT diesel_manage_updated_at('users'); +-- ``` +CREATE OR REPLACE FUNCTION diesel_manage_updated_at(_tbl regclass) RETURNS VOID AS $$ +BEGIN + EXECUTE format('CREATE TRIGGER set_updated_at BEFORE UPDATE ON %s + FOR EACH ROW EXECUTE PROCEDURE diesel_set_updated_at()', _tbl); +END; +$$ LANGUAGE plpgsql; + +CREATE OR REPLACE FUNCTION diesel_set_updated_at() RETURNS trigger AS $$ +BEGIN + IF ( + NEW IS DISTINCT FROM OLD AND + NEW.updated_at IS NOT DISTINCT FROM OLD.updated_at + ) THEN + NEW.updated_at := current_timestamp; + END IF; + RETURN NEW; +END; +$$ LANGUAGE plpgsql; diff --git a/rust/processor/src/db/postgres/migrations/2022-08-08-043603_core_tables/down.sql b/rust/processor/src/db/postgres/migrations/2022-08-08-043603_core_tables/down.sql new file mode 100644 index 000000000..fe8714894 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2022-08-08-043603_core_tables/down.sql @@ -0,0 +1,11 @@ +DROP TABLE IF EXISTS block_metadata_transactions; +DROP TABLE IF EXISTS user_transactions; +DROP TABLE IF EXISTS signatures; +DROP TABLE IF EXISTS events; +DROP TABLE IF EXISTS write_set_changes; +DROP TABLE IF EXISTS move_modules; +DROP TABLE IF EXISTS move_resources; +DROP TABLE IF EXISTS table_items; +DROP TABLE IF EXISTS table_metadatas; +DROP TABLE IF EXISTS ledger_infos; +DROP TABLE IF EXISTS transactions; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2022-08-08-043603_core_tables/up.sql b/rust/processor/src/db/postgres/migrations/2022-08-08-043603_core_tables/up.sql new file mode 100644 index 000000000..7bc2c6caa --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2022-08-08-043603_core_tables/up.sql @@ -0,0 +1,305 @@ +/* Genesis Tx (doesn't have an entry in user_transactions or block_metadata_transactions) Ex: + { + 
"type":"genesis_transaction", + "version":"0", + "hash":"0x12180a4bbccf48de4d1e23b498add134328669ffc7741c8d529c6b2e3629ac99", + "state_root_hash":"0xb50adef3662d77e528be9e1cb5637fe5b7afd13eea317b330799f0c559c918c1", + "event_root_hash":"0xcbdbb1b830d1016d45a828bb3171ea81826e8315f14140acfbd7886f49fbcb40", + "gas_used":"0", + "success":true, + "vm_status":"Executed successfully", + "accumulator_root_hash":"0x188ed588547d551e652f04fccd5434c2977d6cff9e7443eb8e7c3038408caad4", + "payload":{ + "type":"write_set_payload", + "write_set":{ + "type":"direct_write_set", + "changes":[], + "events":[] + } + }, + "events":[ + { + "key":"0x0400000000000000000000000000000000000000000000000000000000000000000000000a550c18", + "sequence_number":"0", + "type":"0x1::reconfiguration::NewEpochEvent", + "data":{ + "epoch":"1" + } + } + ] + } + */ +CREATE TABLE transactions ( + version BIGINT UNIQUE PRIMARY KEY NOT NULL, + block_height BIGINT NOT NULL, + hash VARCHAR(66) UNIQUE NOT NULL, + type VARCHAR(50) NOT NULL, + payload jsonb, + state_change_hash VARCHAR(66) NOT NULL, + event_root_hash VARCHAR(66) NOT NULL, + state_checkpoint_hash VARCHAR(66), + gas_used NUMERIC NOT NULL, + success BOOLEAN NOT NULL, + vm_status TEXT NOT NULL, + accumulator_root_hash VARCHAR(66) NOT NULL, + num_events BIGINT NOT NULL, + num_write_set_changes BIGINT NOT NULL, + -- Default time columns + inserted_at TIMESTAMP NOT NULL DEFAULT NOW() +); +CREATE INDEX txn_insat_index ON transactions (inserted_at); +/* Ex: + { + "type":"block_metadata_transaction", + "version":"69158", + "hash":"0x2b7c58ed8524d228f9d0543a82e2793d04e8871df322f976b0e7bb8c5ced4ff5", + "state_root_hash":"0x3ead9eb40582fbc7df5e02f72280931dc3e6f1aae45dc832966b4cd972dac4b8", + "event_root_hash":"0x2e481956dea9c59b6fc9f823fe5f4c45efce173e42c551c1fe073b5d76a65504", + "gas_used":"0", + "success":true, + "vm_status":"Executed successfully", + "accumulator_root_hash":"0xb0ad602f805eb20c398f0f29a3504a9ef38bcc52c9c451deb9ec4a2d18807b49", + "id":"0xeef99391a3fc681f16963a6c03415bc0b1b12b56c00429308fa8bf46ac9eddf0", + "round":"57600", + "previous_block_votes":[ + "0x992da26d46e6d515a070c7f6e52376a1e674e850cb4d116babc6f870da9c258", + "0xfb4d785594a018bd980b4a20556d120c53a3f50b1cff9d5aa2e26eee582a587", + "0x2b7bce01a6f55e4a863c4822b154021a25588250c762ee01169b6208d6169208", + "0x43a2c4cefc4725e710dadf423dd9142057208e640c623b27c6bba704380825ab", + "0x4c91f3949924e988144550ece1da1bd9335cbecdd1c3ce1893f80e55376d018f", + "0x61616c1208b6b3491496370e7783d48426c674bdd7d04ed1a96afe2e4d8a3930", + "0x66ccccae2058641f136b79792d4d884419437826342ba84dfbbf3e52d8b3fc7d", + "0x68f04222bd9f8846cda028ea5ba3846a806b04a47e1f1a4f0939f350d713b2eb", + "0x6bbf2564ea4a6968df450da786b40b3f56b533a7b700c681c31b3714fc30256b", + "0x735c0a1cb33689ecba65907ba05a485f98831ff610955a44abf0a986f2904612", + "0x784a9514644c8ab6235aaff425381f2ea2719315a51388bc1f1e1c5afa2daaa9", + "0x7a8cee78757dfe0cee3631208cc81f171d27ca6004c63ebae5814e1754a03c79", + "0x803160c3a2f8e025df5a6e1110163493293dc974cc8abd43d4c1896000f4a1ec", + "0xcece26ebddbadfcfbc541baddc989fa73b919b82915164bbf77ebd86c7edbc90", + "0xe7be8996cbdf7db0f64abd17aa0968074b32e4b0df6560328921470e09fd608b" + ], + "proposer":"0x68f04222bd9f8846cda028ea5ba3846a806b04a47e1f1a4f0939f350d713b2eb", + "timestamp":"1649395495746947" + } + */ +CREATE TABLE block_metadata_transactions ( + version BIGINT UNIQUE PRIMARY KEY NOT NULL, + block_height BIGINT UNIQUE NOT NULL, + id VARCHAR(66) NOT NULL, + round BIGINT NOT NULL, + epoch BIGINT NOT NULL, + previous_block_votes_bitvec 
jsonb NOT NULL, + proposer VARCHAR(66) NOT NULL, + failed_proposer_indices jsonb NOT NULL, + "timestamp" TIMESTAMP NOT NULL, + -- Default time columns + inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), + -- Constraints + CONSTRAINT fk_versions FOREIGN KEY (version) REFERENCES transactions (version) +); +CREATE INDEX bmt_insat_index ON block_metadata_transactions (inserted_at); +/* Ex: + { + "type":"user_transaction", + "version":"691595", + "hash":"0xefd4c865e00c240da0c426a37ceeda10d9b030d0e8a4fb4fb7ff452ad63401fb", + "state_root_hash":"0xebfe1eb7aa5321e7a7d741d927487163c34c821eaab60646ae0efd02b286c97c", + "event_root_hash":"0x414343554d554c41544f525f504c414345484f4c4445525f4841534800000000", + "gas_used":"43", + "success":true, + "vm_status":"Executed successfully", + "accumulator_root_hash":"0x97bfd5949d32f6c9a9efad93411924bfda658a8829de384d531ee73c2f740971", + "sender":"0xdfd557c68c6c12b8c65908b3d3c7b95d34bb12ae6eae5a43ee30aa67a4c12494", + "sequence_number":"21386", + "max_gas_amount":"1000", + "gas_unit_price":"1", + "expiration_timestamp_secs":"1649713172", + "payload":{ + "type":"entry_function_payload", + "function":"0x1::aptos_coin::mint", + "type_arguments":[ + + ], + "arguments":[ + "0x45b44793724a5ecc6ad85fa60949d0824cfc7f61d6bd74490b13598379313142", + "20000" + ] + }, + "signature":{ + "type":"ed25519_signature", + "public_key":"0x14ff6646855dad4a2dab30db773cdd4b22d6f9e6813f3e50142adf4f3efcf9f8", + "signature":"0x70781112e78cc8b54b86805c016cef2478bccdef21b721542af0323276ab906c989172adffed5bf2f475f2ec3a5b284a0ac46a6aef0d79f0dbb6b85bfca0080a" + }, + "events":[ + { + "key":"0x040000000000000000000000000000000000000000000000000000000000000000000000fefefefe", + "sequence_number":"0", + "type":"0x1::Whatever::FakeEvent1", + "data":{ + "amazing":"1" + } + }, + { + "key":"0x040000000000000000000000000000000000000000000000000000000000000000000000fefefefe", + "sequence_number":"1", + "type":"0x1::Whatever::FakeEvent2", + "data":{ + "amazing":"2" + } + } + ], + "timestamp":"1649713141723410" + } + */ +CREATE TABLE user_transactions ( + version BIGINT UNIQUE PRIMARY KEY NOT NULL, + block_height BIGINT NOT NULL, + parent_signature_type VARCHAR(50) NOT NULL, + sender VARCHAR(66) NOT NULL, + sequence_number BIGINT NOT NULL, + max_gas_amount NUMERIC NOT NULL, + expiration_timestamp_secs TIMESTAMP NOT NULL, + gas_unit_price NUMERIC NOT NULL, + -- from UserTransaction + "timestamp" TIMESTAMP NOT NULL, + entry_function_id_str text NOT NULL, + -- Default time columns + inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), + -- Constraints + CONSTRAINT fk_versions FOREIGN KEY (version) REFERENCES transactions (version), + UNIQUE (sender, sequence_number) +); +CREATE INDEX ut_sender_seq_index ON user_transactions (sender, sequence_number); +CREATE INDEX ut_insat_index ON user_transactions (inserted_at); +-- tracks signatures for user transactions +CREATE TABLE signatures ( + transaction_version BIGINT NOT NULL, + multi_agent_index BIGINT NOT NULL, + multi_sig_index BIGINT NOT NULL, + transaction_block_height BIGINT NOT NULL, + signer VARCHAR(66) NOT NULL, + is_sender_primary BOOLEAN NOT NULL, + type VARCHAR(50) NOT NULL, + public_key VARCHAR(66) NOT NULL, + signature VARCHAR(200) NOT NULL, + threshold BIGINT NOT NULL, + public_key_indices jsonb NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), + -- Constraints + PRIMARY KEY ( + transaction_version, + multi_agent_index, + multi_sig_index, + is_sender_primary + ), + CONSTRAINT fk_transaction_versions FOREIGN KEY (transaction_version) REFERENCES 
transactions (version) +); +CREATE INDEX sig_insat_index ON signatures (inserted_at); +/** Ex: + { + "key": "0x0400000000000000000000000000000000000000000000000000000000000000000000000a550c18", + "sequence_number": "0", + "type": "0x1::reconfiguration::NewEpochEvent", + "data": { + "epoch": "1" + } + } + */ +CREATE TABLE events ( + sequence_number BIGINT NOT NULL, + creation_number BIGINT NOT NULL, + account_address VARCHAR(66) NOT NULL, + transaction_version BIGINT NOT NULL, + transaction_block_height BIGINT NOT NULL, + type TEXT NOT NULL, + data jsonb NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), + -- Constraints + PRIMARY KEY ( + account_address, + creation_number, + sequence_number + ), + CONSTRAINT fk_transaction_versions FOREIGN KEY (transaction_version) REFERENCES transactions (version) +); +CREATE INDEX ev_addr_type_index ON events (account_address); +CREATE INDEX ev_insat_index ON events (inserted_at); +-- write set changes +CREATE TABLE write_set_changes ( + transaction_version BIGINT NOT NULL, + index BIGINT NOT NULL, + hash VARCHAR(66) NOT NULL, + transaction_block_height BIGINT NOT NULL, + type TEXT NOT NULL, + address VARCHAR(66) NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), + -- Constraints + PRIMARY KEY (transaction_version, index), + CONSTRAINT fk_transaction_versions FOREIGN KEY (transaction_version) REFERENCES transactions (version) +); +CREATE INDEX wsc_addr_type_ver_index ON write_set_changes (address, transaction_version DESC); +CREATE INDEX wsc_insat_index ON write_set_changes (inserted_at); +-- move modules in write set changes +CREATE TABLE move_modules ( + transaction_version BIGINT NOT NULL, + write_set_change_index BIGINT NOT NULL, + transaction_block_height BIGINT NOT NULL, + name TEXT NOT NULL, + address VARCHAR(66) NOT NULL, + bytecode bytea, + friends jsonb, + exposed_functions jsonb, + structs jsonb, + is_deleted BOOLEAN NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), + -- Constraints + PRIMARY KEY (transaction_version, write_set_change_index), + CONSTRAINT fk_transaction_versions FOREIGN KEY (transaction_version) REFERENCES transactions (version) +); +CREATE INDEX mm_addr_name_ver_index ON move_modules (address, name, transaction_version); +CREATE INDEX mm_insat_index ON move_modules (inserted_at); +-- move resources in write set changes +CREATE TABLE move_resources ( + transaction_version BIGINT NOT NULL, + write_set_change_index BIGINT NOT NULL, + transaction_block_height BIGINT NOT NULL, + name TEXT NOT NULL, + address VARCHAR(66) NOT NULL, + type TEXT NOT NULL, + module TEXT NOT NULL, + generic_type_params jsonb, + data jsonb, + is_deleted BOOLEAN NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), + -- Constraints + PRIMARY KEY (transaction_version, write_set_change_index), + CONSTRAINT fk_transaction_versions FOREIGN KEY (transaction_version) REFERENCES transactions (version) +); +CREATE INDEX mr_addr_mod_name_ver_index ON move_resources (address, module, name, transaction_version); +CREATE INDEX mr_insat_index ON move_resources (inserted_at); +-- table items in write set changes +CREATE TABLE table_items ( + key text NOT NULL, + transaction_version BIGINT NOT NULL, + write_set_change_index BIGINT NOT NULL, + transaction_block_height BIGINT NOT NULL, + table_handle VARCHAR(66) NOT NULL, + decoded_key jsonb NOT NULL, + decoded_value jsonb, + is_deleted BOOLEAN NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), + -- Constraints + PRIMARY KEY (transaction_version, write_set_change_index), + CONSTRAINT 
fk_transaction_versions FOREIGN KEY (transaction_version) REFERENCES transactions (version) +); +CREATE INDEX ti_hand_ver_key_index ON table_items (table_handle, transaction_version); +CREATE INDEX ti_insat_index ON table_items (inserted_at); +-- table metadatas from table items +CREATE TABLE table_metadatas ( + handle VARCHAR(66) UNIQUE PRIMARY KEY NOT NULL, + key_type text NOT NULL, + value_type text NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW() +); +CREATE INDEX tm_insat_index ON table_metadatas (inserted_at); +CREATE TABLE ledger_infos (chain_id BIGINT UNIQUE PRIMARY KEY NOT NULL); diff --git a/rust/processor/src/db/postgres/migrations/2022-09-04-194128_add_token_data/down.sql b/rust/processor/src/db/postgres/migrations/2022-09-04-194128_add_token_data/down.sql new file mode 100644 index 000000000..591c0a656 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2022-09-04-194128_add_token_data/down.sql @@ -0,0 +1,5 @@ +-- This file should undo anything in `up.sql` +DROP TABLE IF EXISTS tokens; +DROP TABLE IF EXISTS token_ownerships; +DROP TABLE IF EXISTS token_datas; +DROP TABLE IF EXISTS collection_datas; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2022-09-04-194128_add_token_data/up.sql b/rust/processor/src/db/postgres/migrations/2022-09-04-194128_add_token_data/up.sql new file mode 100644 index 000000000..22183c598 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2022-09-04-194128_add_token_data/up.sql @@ -0,0 +1,93 @@ +-- Your SQL goes here +-- tracks tokens per version +CREATE TABLE tokens ( + -- sha256 of creator + collection_name + name + token_data_id_hash VARCHAR(64) NOT NULL, + property_version NUMERIC NOT NULL, + transaction_version BIGINT NOT NULL, + creator_address VARCHAR(66) NOT NULL, + collection_name VARCHAR(128) NOT NULL, + name VARCHAR(128) NOT NULL, + token_properties jsonb NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), + -- Constraints + PRIMARY KEY ( + token_data_id_hash, + property_version, + transaction_version + ) +); +CREATE INDEX token_crea_cn_name_index ON tokens (creator_address, collection_name, name); +CREATE INDEX token_insat_index ON tokens (inserted_at); +-- tracks who owns tokens at certain version +CREATE TABLE token_ownerships ( + -- sha256 of creator + collection_name + name + token_data_id_hash VARCHAR(64) NOT NULL, + property_version NUMERIC NOT NULL, + transaction_version BIGINT NOT NULL, + table_handle VARCHAR(66) NOT NULL, + creator_address VARCHAR(66) NOT NULL, + collection_name VARCHAR(128) NOT NULL, + name VARCHAR(128) NOT NULL, + owner_address VARCHAR(66), + amount NUMERIC NOT NULL, + table_type TEXT, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), + -- Constraints + PRIMARY KEY ( + token_data_id_hash, + property_version, + transaction_version, + table_handle + ) +); +CREATE INDEX to_owner_index ON token_ownerships (owner_address); +CREATE INDEX to_crea_cn_name_index ON token_ownerships (creator_address, collection_name, name); +CREATE INDEX to_insat_index ON token_ownerships (inserted_at); +-- tracks token metadata +CREATE TABLE token_datas ( + -- sha256 of creator + collection_name + name + token_data_id_hash VARCHAR(64) NOT NULL, + transaction_version BIGINT NOT NULL, + creator_address VARCHAR(66) NOT NULL, + collection_name VARCHAR(128) NOT NULL, + name VARCHAR(128) NOT NULL, + maximum NUMERIC NOT NULL, + supply NUMERIC NOT NULL, + largest_property_version NUMERIC NOT NULL, + metadata_uri VARCHAR(512) NOT NULL, + payee_address VARCHAR(66) NOT NULL, + 
royalty_points_numerator NUMERIC NOT NULL, + royalty_points_denominator NUMERIC NOT NULL, + maximum_mutable BOOLEAN NOT NULL, + uri_mutable BOOLEAN NOT NULL, + description_mutable BOOLEAN NOT NULL, + properties_mutable BOOLEAN NOT NULL, + royalty_mutable BOOLEAN NOT NULL, + default_properties jsonb NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), + -- Constraints + PRIMARY KEY (token_data_id_hash, transaction_version) +); +CREATE INDEX td_crea_cn_name_index ON token_datas (creator_address, collection_name, name); +CREATE INDEX td_insat_index ON token_datas (inserted_at); +-- tracks collection metadata +CREATE TABLE collection_datas ( + -- sha256 of creator + collection_name + collection_data_id_hash VARCHAR(64) NOT NULL, + transaction_version BIGINT NOT NULL, + creator_address VARCHAR(66) NOT NULL, + collection_name VARCHAR(128) NOT NULL, + description TEXT NOT NULL, + metadata_uri VARCHAR(512) NOT NULL, + supply NUMERIC NOT NULL, + maximum NUMERIC NOT NULL, + maximum_mutable BOOLEAN NOT NULL, + uri_mutable BOOLEAN NOT NULL, + description_mutable BOOLEAN NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), + -- Constraints + PRIMARY KEY (collection_data_id_hash, transaction_version) +); +CREATE INDEX cd_crea_cn_index ON collection_datas (creator_address, collection_name); +CREATE INDEX cd_insat_index ON collection_datas (inserted_at); \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2022-09-20-055651_add_current_token_data/down.sql b/rust/processor/src/db/postgres/migrations/2022-09-20-055651_add_current_token_data/down.sql new file mode 100644 index 000000000..fac2b2c3d --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2022-09-20-055651_add_current_token_data/down.sql @@ -0,0 +1,4 @@ +-- This file should undo anything in `up.sql` +DROP TABLE IF EXISTS current_token_ownerships; +DROP TABLE IF EXISTS current_token_datas; +DROP TABLE IF EXISTS current_collection_datas; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2022-09-20-055651_add_current_token_data/up.sql b/rust/processor/src/db/postgres/migrations/2022-09-20-055651_add_current_token_data/up.sql new file mode 100644 index 000000000..10dd6e595 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2022-09-20-055651_add_current_token_data/up.sql @@ -0,0 +1,67 @@ +-- Your SQL goes here +-- tracks tokens in owner's tokenstore +CREATE TABLE current_token_ownerships ( + -- sha256 of creator + collection_name + name + token_data_id_hash VARCHAR(64) NOT NULL, + property_version NUMERIC NOT NULL, + owner_address VARCHAR(66) NOT NULL, + creator_address VARCHAR(66) NOT NULL, + collection_name VARCHAR(128) NOT NULL, + name VARCHAR(128) NOT NULL, + amount NUMERIC NOT NULL, + token_properties jsonb NOT NULL, + last_transaction_version BIGINT NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), + -- Constraints + PRIMARY KEY ( + token_data_id_hash, + property_version, + owner_address + ) +); +CREATE INDEX curr_to_crea_cn_name_index ON current_token_ownerships (creator_address, collection_name, name); +CREATE INDEX curr_to_owner_index ON current_token_ownerships (owner_address); +CREATE INDEX curr_to_insat_index ON current_token_ownerships (inserted_at); +-- tracks latest token metadata +CREATE TABLE current_token_datas ( + -- sha256 of creator + collection_name + name + token_data_id_hash VARCHAR(64) UNIQUE PRIMARY KEY NOT NULL, + creator_address VARCHAR(66) NOT NULL, + collection_name VARCHAR(128) NOT NULL, + name VARCHAR(128) NOT NULL, + 
maximum NUMERIC NOT NULL, + supply NUMERIC NOT NULL, + largest_property_version NUMERIC NOT NULL, + metadata_uri VARCHAR(512) NOT NULL, + payee_address VARCHAR(66) NOT NULL, + royalty_points_numerator NUMERIC NOT NULL, + royalty_points_denominator NUMERIC NOT NULL, + maximum_mutable BOOLEAN NOT NULL, + uri_mutable BOOLEAN NOT NULL, + description_mutable BOOLEAN NOT NULL, + properties_mutable BOOLEAN NOT NULL, + royalty_mutable BOOLEAN NOT NULL, + default_properties jsonb NOT NULL, + last_transaction_version BIGINT NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW() +); +CREATE INDEX curr_td_crea_cn_name_index ON current_token_datas (creator_address, collection_name, name); +CREATE INDEX curr_td_insat_index ON current_token_datas (inserted_at); +-- tracks latest collection metadata +CREATE TABLE current_collection_datas ( + -- sha256 of creator + collection_name + collection_data_id_hash VARCHAR(64) UNIQUE PRIMARY KEY NOT NULL, + creator_address VARCHAR(66) NOT NULL, + collection_name VARCHAR(128) NOT NULL, + description TEXT NOT NULL, + metadata_uri VARCHAR(512) NOT NULL, + supply NUMERIC NOT NULL, + maximum NUMERIC NOT NULL, + maximum_mutable BOOLEAN NOT NULL, + uri_mutable BOOLEAN NOT NULL, + description_mutable BOOLEAN NOT NULL, + last_transaction_version BIGINT NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW() +); +CREATE INDEX curr_cd_crea_cn_index ON current_collection_datas (creator_address, collection_name); +CREATE INDEX curr_cd_insat_index ON current_collection_datas (inserted_at); \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2022-09-22-185845_token_offers/down.sql b/rust/processor/src/db/postgres/migrations/2022-09-22-185845_token_offers/down.sql new file mode 100644 index 000000000..545b6fe1e --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2022-09-22-185845_token_offers/down.sql @@ -0,0 +1,10 @@ +-- This file should undo anything in `up.sql` +ALTER TABLE current_token_ownerships DROP COLUMN collection_data_id_hash, + DROP COLUMN table_type; +ALTER TABLE current_token_datas DROP COLUMN collection_data_id_hash; +ALTER TABLE token_datas DROP COLUMN collection_data_id_hash; +ALTER TABLE tokens DROP COLUMN collection_data_id_hash; +ALTER TABLE token_ownerships DROP COLUMN collection_data_id_hash; +DROP INDEX IF EXISTS curr_to_owner_tt_am_index; +DROP TABLE IF EXISTS token_activities; +DROP TABLE IF EXISTS current_token_pending_claims; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2022-09-22-185845_token_offers/up.sql b/rust/processor/src/db/postgres/migrations/2022-09-22-185845_token_offers/up.sql new file mode 100644 index 000000000..21e095a2c --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2022-09-22-185845_token_offers/up.sql @@ -0,0 +1,80 @@ +-- Your SQL goes here +ALTER TABLE current_token_ownerships +ADD COLUMN collection_data_id_hash VARCHAR(64) NOT NULL, + ADD COLUMN table_type TEXT NOT NULL; +ALTER TABLE current_token_datas +ADD COLUMN collection_data_id_hash VARCHAR(64) NOT NULL; +ALTER TABLE token_datas +ADD COLUMN collection_data_id_hash VARCHAR(64) NOT NULL; +ALTER TABLE tokens +ADD COLUMN collection_data_id_hash VARCHAR(64) NOT NULL; +ALTER TABLE token_ownerships +ADD COLUMN collection_data_id_hash VARCHAR(64) NOT NULL; +-- add indices for current ownership to speed up queries +CREATE INDEX curr_to_owner_tt_am_index ON current_token_ownerships (owner_address, table_type, amount); +-- tracks all token activities +CREATE TABLE token_activities ( + 
transaction_version BIGINT NOT NULL, + event_account_address VARCHAR(66) NOT NULL, + event_creation_number BIGINT NOT NULL, + event_sequence_number BIGINT NOT NULL, + collection_data_id_hash VARCHAR(64) NOT NULL, + token_data_id_hash VARCHAR(64) NOT NULL, + property_version NUMERIC NOT NULL, + creator_address VARCHAR(66) NOT NULL, + collection_name VARCHAR(128) NOT NULL, + name VARCHAR(128) NOT NULL, + transfer_type VARCHAR(50) NOT NULL, + from_address VARCHAR(66), + to_address VARCHAR(66), + token_amount NUMERIC NOT NULL, + coin_type TEXT, + coin_amount NUMERIC, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), + -- Constraints + PRIMARY KEY ( + transaction_version, + event_account_address, + event_creation_number, + event_sequence_number + ) +); +CREATE INDEX ta_from_ttyp_index ON token_activities (from_address, transfer_type); +CREATE INDEX ta_to_ttyp_index ON token_activities (to_address, transfer_type); +CREATE INDEX ta_addr_coll_name_pv_index ON token_activities ( + creator_address, + collection_name, + name, + property_version +); +CREATE INDEX ta_tdih_pv_index ON token_activities (token_data_id_hash, property_version); +CREATE INDEX ta_version_index ON token_activities (transaction_version); +CREATE INDEX ta_insat_index ON token_activities (inserted_at); +-- Tracks current pending claims +CREATE TABLE current_token_pending_claims ( + token_data_id_hash VARCHAR(64) NOT NULL, + property_version NUMERIC NOT NULL, + from_address VARCHAR(66) NOT NULL, + to_address VARCHAR(66) NOT NULL, + collection_data_id_hash VARCHAR(64) NOT NULL, + creator_address VARCHAR(66) NOT NULL, + collection_name VARCHAR(128) NOT NULL, + name VARCHAR(128) NOT NULL, + -- 0 means either claimed or canceled + amount NUMERIC NOT NULL, + table_handle VARCHAR(66) NOT NULL, + last_transaction_version BIGINT NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), + -- Constraints + PRIMARY KEY ( + -- This is basically the token offer id + token_data_id_hash, + property_version, + from_address, + to_address + ) +); +CREATE INDEX ctpc_th_index ON current_token_pending_claims (table_handle); +CREATE INDEX ctpc_from_am_index ON current_token_pending_claims (from_address, amount); +CREATE INDEX ctpc_to_am_index ON current_token_pending_claims (to_address, amount); +CREATE INDEX ctpc_insat_index ON current_token_pending_claims (inserted_at); \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2022-10-02-011015_add_table_handle_to_collection/down.sql b/rust/processor/src/db/postgres/migrations/2022-10-02-011015_add_table_handle_to_collection/down.sql new file mode 100644 index 000000000..c1cd8feec --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2022-10-02-011015_add_table_handle_to_collection/down.sql @@ -0,0 +1,5 @@ +-- This file should undo anything in `up.sql` +ALTER TABLE collection_datas +DROP COLUMN table_handle; +ALTER TABLE current_collection_datas +DROP COLUMN table_handle; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2022-10-02-011015_add_table_handle_to_collection/up.sql b/rust/processor/src/db/postgres/migrations/2022-10-02-011015_add_table_handle_to_collection/up.sql new file mode 100644 index 000000000..a2c110c13 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2022-10-02-011015_add_table_handle_to_collection/up.sql @@ -0,0 +1,6 @@ +-- Your SQL goes here +ALTER TABLE collection_datas +ADD COLUMN table_handle VARCHAR(66) NOT NULL; +ALTER TABLE current_collection_datas +ADD COLUMN table_handle VARCHAR(66) NOT NULL; 
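Illustrative aside, not part of this migration: the new table_handle column, together with the curr_cd_th_index index created just below, lets the processor resolve which collection a token-store table handle belongs to. A hedged Diesel sketch of such a lookup follows; the CollectionHandleRow struct, the synchronous PgConnection, and the schema path are assumptions made for illustration only.

// Hypothetical lookup; the row struct, connection handling, and schema path
// are assumed, not taken from this change.
use diesel::prelude::*;

#[derive(Queryable)]
struct CollectionHandleRow {
    collection_data_id_hash: String,
    creator_address: String,
    collection_name: String,
}

fn collection_for_table_handle(
    conn: &mut PgConnection,
    handle: &str,
) -> QueryResult<Option<CollectionHandleRow>> {
    use crate::schema::current_collection_datas::dsl::*;
    current_collection_datas
        // Select only the columns the row struct needs, in the same order.
        .select((collection_data_id_hash, creator_address, collection_name))
        .filter(table_handle.eq(handle))
        .first::<CollectionHandleRow>(conn)
        .optional()
}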
+CREATE INDEX curr_cd_th_index ON current_collection_datas (table_handle); \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2022-10-02-011020_ans_lookup_table/down.sql b/rust/processor/src/db/postgres/migrations/2022-10-02-011020_ans_lookup_table/down.sql new file mode 100644 index 000000000..ac543b764 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2022-10-02-011020_ans_lookup_table/down.sql @@ -0,0 +1,2 @@ +-- This file should undo anything in `up.sql` +DROP TABLE IF EXISTS current_ans_lookup; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2022-10-02-011020_ans_lookup_table/up.sql b/rust/processor/src/db/postgres/migrations/2022-10-02-011020_ans_lookup_table/up.sql new file mode 100644 index 000000000..37520b8b9 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2022-10-02-011020_ans_lookup_table/up.sql @@ -0,0 +1,29 @@ +-- Your SQL goes here +-- add indices for current ownership to speed up queries +CREATE INDEX curr_to_oa_tt_am_ltv_index ON current_token_ownerships ( + owner_address, + table_type, + amount, + last_transaction_version DESC +); +CREATE INDEX curr_to_oa_tt_ltv_index ON current_token_ownerships ( + owner_address, + table_type, + last_transaction_version DESC +); +-- allows quick lookup for aptos name services registered address +CREATE TABLE current_ans_lookup ( + domain VARCHAR(64) NOT NULL, + -- if subdomain is null set to empty string + subdomain VARCHAR(64) NOT NULL, + registered_address VARCHAR(66), + expiration_timestamp TIMESTAMP NOT NULL, + last_transaction_version BIGINT NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), + -- Constraints + PRIMARY KEY (domain, subdomain) +); +CREATE INDEX ans_et_index ON current_ans_lookup (expiration_timestamp); +CREATE INDEX ans_ra_et_index ON current_ans_lookup (registered_address, expiration_timestamp); +CREATE INDEX ans_d_s_et_index ON current_ans_lookup (domain, subdomain, expiration_timestamp); +CREATE INDEX ans_insat_index ON current_ans_lookup (inserted_at); \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2022-10-04-073529_add_coin_tables/down.sql b/rust/processor/src/db/postgres/migrations/2022-10-04-073529_add_coin_tables/down.sql new file mode 100644 index 000000000..2aadda38b --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2022-10-04-073529_add_coin_tables/down.sql @@ -0,0 +1,24 @@ +-- This file should undo anything in `up.sql` +DROP TABLE IF EXISTS coin_infos; +DROP TABLE IF EXISTS coin_balances; +DROP TABLE IF EXISTS current_coin_balances; +DROP TABLE IF EXISTS coin_activities; +ALTER TABLE token_activities +DROP COLUMN IF EXISTS transaction_timestamp; +ALTER TABLE current_token_pending_claims +DROP COLUMN IF EXISTS last_transaction_timestamp; +ALTER TABLE current_token_ownerships +DROP COLUMN IF EXISTS last_transaction_timestamp; +ALTER TABLE current_token_datas +DROP COLUMN IF EXISTS last_transaction_timestamp; +ALTER TABLE current_collection_datas +DROP COLUMN IF EXISTS last_transaction_timestamp; +ALTER TABLE tokens +DROP COLUMN IF EXISTS transaction_timestamp; +ALTER TABLE token_ownerships +DROP COLUMN IF EXISTS transaction_timestamp; +ALTER TABLE token_datas +DROP COLUMN IF EXISTS transaction_timestamp; +ALTER TABLE collection_datas +DROP COLUMN IF EXISTS transaction_timestamp; +DROP VIEW IF EXISTS move_resources_view; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2022-10-04-073529_add_coin_tables/up.sql 
b/rust/processor/src/db/postgres/migrations/2022-10-04-073529_add_coin_tables/up.sql new file mode 100644 index 000000000..1b21945d4 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2022-10-04-073529_add_coin_tables/up.sql @@ -0,0 +1,120 @@ +-- Your SQL goes here +CREATE VIEW move_resources_view AS +SELECT transaction_version, + write_set_change_index, + transaction_block_height, + name, + address, + "type", + "module", + generic_type_params, + data#>>'{}' as json_data, + is_deleted, + inserted_at +FROM move_resources; +-- adding timestamp to all token tables +ALTER TABLE token_activities +ADD COLUMN transaction_timestamp TIMESTAMP NOT NULL; +ALTER TABLE current_token_pending_claims +ADD COLUMN last_transaction_timestamp TIMESTAMP NOT NULL; +ALTER TABLE current_token_ownerships +ADD COLUMN last_transaction_timestamp TIMESTAMP NOT NULL; +ALTER TABLE current_token_datas +ADD COLUMN last_transaction_timestamp TIMESTAMP NOT NULL; +ALTER TABLE current_collection_datas +ADD COLUMN last_transaction_timestamp TIMESTAMP NOT NULL; +ALTER TABLE tokens +ADD COLUMN transaction_timestamp TIMESTAMP NOT NULL; +ALTER TABLE token_ownerships +ADD COLUMN transaction_timestamp TIMESTAMP NOT NULL; +ALTER TABLE token_datas +ADD COLUMN transaction_timestamp TIMESTAMP NOT NULL; +ALTER TABLE collection_datas +ADD COLUMN transaction_timestamp TIMESTAMP NOT NULL; +-- coin infos. Only first transaction matters +CREATE TABLE coin_infos ( + -- Hash of the non-truncated coin type + coin_type_hash VARCHAR(64) UNIQUE PRIMARY KEY NOT NULL, + -- creator_address::name::symbol + coin_type VARCHAR(5000) NOT NULL, + -- transaction version where coin info was first defined + transaction_version_created BIGINT NOT NULL, + creator_address VARCHAR(66) NOT NULL, + name VARCHAR(32) NOT NULL, + symbol VARCHAR(10) NOT NULL, + decimals INT NOT NULL, + transaction_created_timestamp TIMESTAMP NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW() +); +CREATE INDEX ci_ct_index on coin_infos (coin_type); +CREATE INDEX ci_ca_name_symbol_index on coin_infos (creator_address, name, symbol); +CREATE INDEX ci_insat_index ON coin_infos (inserted_at); +-- current coin owned by user +CREATE TABLE coin_balances ( + transaction_version BIGINT NOT NULL, + owner_address VARCHAR(66) NOT NULL, + -- Hash of the non-truncated coin type + coin_type_hash VARCHAR(64) NOT NULL, + -- creator_address::name::symbol + coin_type VARCHAR(5000) NOT NULL, + amount NUMERIC NOT NULL, + transaction_timestamp TIMESTAMP NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), + -- Constraints + PRIMARY KEY ( + transaction_version, + owner_address, + coin_type_hash + ) +); +CREATE INDEX cb_tv_oa_ct_index on coin_balances (transaction_version, owner_address, coin_type); +CREATE INDEX cb_oa_ct_index on coin_balances (owner_address, coin_type); +CREATE INDEX cb_ct_a_index on coin_balances (coin_type, amount); +CREATE INDEX cb_insat_index ON coin_balances (inserted_at); +-- current coin owned by user +CREATE TABLE current_coin_balances ( + owner_address VARCHAR(66) NOT NULL, + -- Hash of the non-truncated coin type + coin_type_hash VARCHAR(64) NOT NULL, + -- creator_address::name::symbol + coin_type VARCHAR(5000) NOT NULL, + amount NUMERIC NOT NULL, + last_transaction_version BIGINT NOT NULL, + last_transaction_timestamp TIMESTAMP NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), + -- Constraints + PRIMARY KEY (owner_address, coin_type_hash) +); +CREATE INDEX ccb_oa_ct_index on current_coin_balances (owner_address, coin_type); +CREATE INDEX ccb_ct_a_index 
on current_coin_balances (coin_type, amount); +CREATE INDEX ccb_insat_index on current_coin_balances (inserted_at); +-- coinstore activities (send, receive, gas fees). Mint/burn not supported because event missing +CREATE TABLE coin_activities ( + transaction_version BIGINT NOT NULL, + event_account_address VARCHAR(66) NOT NULL, + event_creation_number BIGINT NOT NULL, + event_sequence_number BIGINT NOT NULL, + owner_address VARCHAR(66) NOT NULL, + -- creator_address::name::symbol + coin_type VARCHAR(5000) NOT NULL, + amount NUMERIC NOT NULL, + activity_type VARCHAR(200) NOT NULL, + is_gas_fee BOOLEAN NOT NULL, + is_transaction_success BOOLEAN NOT NULL, + entry_function_id_str VARCHAR(100), + block_height BIGINT NOT NULL, + transaction_timestamp TIMESTAMP NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), + -- Constraints + PRIMARY KEY ( + transaction_version, + event_account_address, + event_creation_number, + event_sequence_number + ) +); +CREATE INDEX ca_oa_ct_at_index on coin_activities (owner_address, coin_type, activity_type, amount); +CREATE INDEX ca_oa_igf_index on coin_activities (owner_address, is_gas_fee); +CREATE INDEX ca_ct_at_a_index on coin_activities (coin_type, activity_type, amount); +CREATE INDEX ca_ct_a_index on coin_activities (coin_type, amount); +CREATE INDEX ca_insat_index on coin_activities (inserted_at); \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2022-10-06-193846_add_indexer_status/down.sql b/rust/processor/src/db/postgres/migrations/2022-10-06-193846_add_indexer_status/down.sql new file mode 100644 index 000000000..145a770fc --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2022-10-06-193846_add_indexer_status/down.sql @@ -0,0 +1,5 @@ +-- This file should undo anything in `up.sql` +DROP TABLE IF EXISTS indexer_status; +DROP VIEW IF EXISTS events_view; +DROP VIEW IF EXISTS table_items_view; +DROP VIEW IF EXISTS transactions_view; diff --git a/rust/processor/src/db/postgres/migrations/2022-10-06-193846_add_indexer_status/up.sql b/rust/processor/src/db/postgres/migrations/2022-10-06-193846_add_indexer_status/up.sql new file mode 100644 index 000000000..d0a1460fe --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2022-10-06-193846_add_indexer_status/up.sql @@ -0,0 +1,48 @@ +-- Your SQL goes here +-- manually toggle indexer status on/off +CREATE TABLE indexer_status ( + db VARCHAR(50) UNIQUE PRIMARY KEY NOT NULL, + is_indexer_up BOOLEAN NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW() +); +-- Create event view to avoid large jsonb +CREATE VIEW events_view AS +SELECT sequence_number, + creation_number, + account_address, + transaction_version, + transaction_block_height, + "type", + "data"#>>'{}' AS json_data, + inserted_at +FROM events; +-- Create table_items view to avoid large jsonb +CREATE VIEW table_items_view AS +SELECT "key", + transaction_version, + write_set_change_index, + transaction_block_height, + table_handle, + decoded_key#>>'{}' AS json_decoded_key, + decoded_value#>>'{}' AS json_decoded_value, + is_deleted, + inserted_at +FROM table_items; +-- Create transactions view to avoid large jsonb +CREATE VIEW transactions_view AS +SELECT "version", + block_height, + "hash", + "type", + payload#>>'{}' AS json_payload, + state_change_hash, + event_root_hash, + state_checkpoint_hash, + gas_used, + success, + vm_status, + accumulator_root_hash, + num_events, + num_write_set_changes, + inserted_at +FROM transactions; \ No newline at end of file diff --git 
a/rust/processor/src/db/postgres/migrations/2022-10-07-231825_add_coin_supply/down.sql b/rust/processor/src/db/postgres/migrations/2022-10-07-231825_add_coin_supply/down.sql new file mode 100644 index 000000000..453b6cdb4 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2022-10-07-231825_add_coin_supply/down.sql @@ -0,0 +1,12 @@ +-- This file should undo anything in `up.sql` +DROP TABLE IF EXISTS coin_supply; +DROP INDEX IF EXISTS cs_ct_tv_index; +DROP INDEX IF EXISTS cs_epoch_index; +ALTER TABLE coin_infos DROP COLUMN IF EXISTS supply_aggregator_table_handle, + DROP COLUMN IF EXISTS supply_aggregator_table_key; +ALTER TABLE token_datas DROP COLUMN IF EXISTS description; +ALTER TABLE current_token_datas DROP COLUMN IF EXISTS description; +ALTER TABLE user_transactions DROP COLUMN IF EXISTS epoch; +ALTER TABLE transactions DROP COLUMN IF EXISTS epoch; +DROP INDEX IF EXISTS ut_epoch_index; +DROP INDEX IF EXISTS txn_epoch_index; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2022-10-07-231825_add_coin_supply/up.sql b/rust/processor/src/db/postgres/migrations/2022-10-07-231825_add_coin_supply/up.sql new file mode 100644 index 000000000..ba200264e --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2022-10-07-231825_add_coin_supply/up.sql @@ -0,0 +1,33 @@ +-- Your SQL goes here +-- coin supply, currently aptos coin only +CREATE TABLE coin_supply ( + transaction_version BIGINT NOT NULL, + -- Hash of the non-truncated coin type + coin_type_hash VARCHAR(64) NOT NULL, + coin_type VARCHAR(5000) NOT NULL, + supply NUMERIC NOT NULL, + transaction_timestamp TIMESTAMP NOT NULL, + transaction_epoch BIGINT NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), + -- Constraints + PRIMARY KEY (transaction_version, coin_type_hash) +); +CREATE INDEX cs_ct_tv_index on coin_supply (coin_type, transaction_version desc); +CREATE INDEX cs_epoch_index on coin_supply (transaction_epoch); +-- Add coin supply aggregator handle to coin infos to be able to access total supply data +ALTER TABLE coin_infos +ADD COLUMN supply_aggregator_table_handle VARCHAR(66), + ADD COLUMN supply_aggregator_table_key TEXT; +-- Add description to token_datas and current_token_datas +ALTER TABLE token_datas +ADD COLUMN description TEXT NOT NULL; +ALTER TABLE current_token_datas +ADD COLUMN description TEXT NOT NULL; +-- Add epoch to user transactions and transactions +ALTER TABLE user_transactions +ADD COLUMN epoch BIGINT NOT NULL; +ALTER TABLE transactions +ADD COLUMN epoch BIGINT NOT NULL; +-- Create index on epoch for easy queries +CREATE INDEX ut_epoch_index ON user_transactions (epoch); +CREATE INDEX txn_epoch_index ON transactions (epoch); \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2022-10-15-185912_improve_processor_recovery/down.sql b/rust/processor/src/db/postgres/migrations/2022-10-15-185912_improve_processor_recovery/down.sql new file mode 100644 index 000000000..05524e94c --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2022-10-15-185912_improve_processor_recovery/down.sql @@ -0,0 +1,2 @@ +-- This file should undo anything in `up.sql` +DROP TABLE IF EXISTS processor_status; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2022-10-15-185912_improve_processor_recovery/up.sql b/rust/processor/src/db/postgres/migrations/2022-10-15-185912_improve_processor_recovery/up.sql new file mode 100644 index 000000000..0ad64122d --- /dev/null +++ 
b/rust/processor/src/db/postgres/migrations/2022-10-15-185912_improve_processor_recovery/up.sql @@ -0,0 +1,7 @@ +-- Your SQL goes here +-- Tracks latest processed version per processor +CREATE TABLE processor_status ( + processor VARCHAR(50) UNIQUE PRIMARY KEY NOT NULL, + last_success_version BIGINT NOT NULL, + last_updated TIMESTAMP NOT NULL DEFAULT NOW() +); \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2022-10-21-055518_stake_to_voter/down.sql b/rust/processor/src/db/postgres/migrations/2022-10-21-055518_stake_to_voter/down.sql new file mode 100644 index 000000000..cb3805945 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2022-10-21-055518_stake_to_voter/down.sql @@ -0,0 +1,2 @@ +-- This file should undo anything in `up.sql` +DROP TABLE IF EXISTS current_staking_pool_voter; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2022-10-21-055518_stake_to_voter/up.sql b/rust/processor/src/db/postgres/migrations/2022-10-21-055518_stake_to_voter/up.sql new file mode 100644 index 000000000..7a44aa574 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2022-10-21-055518_stake_to_voter/up.sql @@ -0,0 +1,11 @@ +-- Your SQL goes here +-- allows quick lookup of staking pool address to voter address and vice versa. Each staking pool +-- can only be mapped to one voter address at a time. +CREATE TABLE current_staking_pool_voter ( + staking_pool_address VARCHAR(66) UNIQUE PRIMARY KEY NOT NULL, + voter_address VARCHAR(66) NOT NULL, + last_transaction_version BIGINT NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW() +); +CREATE INDEX ctpv_va_index ON current_staking_pool_voter (voter_address); +CREATE INDEX ctpv_insat_index ON current_staking_pool_voter (inserted_at); \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2022-10-30-053525_add_vote_data/down.sql b/rust/processor/src/db/postgres/migrations/2022-10-30-053525_add_vote_data/down.sql new file mode 100644 index 000000000..929411b92 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2022-10-30-053525_add_vote_data/down.sql @@ -0,0 +1,8 @@ +-- This file should undo anything in `up.sql` +DROP INDEX IF EXISTS ans_tn_index; +ALTER TABLE current_ans_lookup DROP COLUMN IF EXISTS token_name; +DROP INDEX IF EXISTS pv_pi_va_index; +DROP INDEX IF EXISTS pv_va_index; +DROP INDEX IF EXISTS pv_spa_index; +DROP INDEX IF EXISTS pv_ia_index; +DROP TABLE IF EXISTS proposal_votes; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2022-10-30-053525_add_vote_data/up.sql b/rust/processor/src/db/postgres/migrations/2022-10-30-053525_add_vote_data/up.sql new file mode 100644 index 000000000..2f13904c1 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2022-10-30-053525_add_vote_data/up.sql @@ -0,0 +1,22 @@ +-- Your SQL goes here +-- Add token_name to join with token tables, {subdomain}.{domain}.apt +ALTER TABLE current_ans_lookup +ADD COLUMN token_name VARCHAR(140) NOT NULL DEFAULT ''; +CREATE INDEX ans_tn_index ON current_ans_lookup (token_name); +-- Add voting table +CREATE TABLE proposal_votes ( + transaction_version BIGINT NOT NULL, + proposal_id BIGINT NOT NULL, + voter_address VARCHAR(66) NOT NULL, + staking_pool_address VARCHAR(66) NOT NULL, + num_votes NUMERIC NOT NULL, + should_pass BOOLEAN NOT NULL, + transaction_timestamp TIMESTAMP NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), + -- Constraints + PRIMARY KEY (transaction_version, proposal_id, voter_address) +); 
+CREATE INDEX pv_pi_va_index ON proposal_votes (proposal_id, voter_address); +CREATE INDEX pv_va_index ON proposal_votes (voter_address); +CREATE INDEX pv_spa_index ON proposal_votes (staking_pool_address); +CREATE INDEX pv_ia_index ON proposal_votes (inserted_at); \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2022-12-29-222902_curr_table_items/down.sql b/rust/processor/src/db/postgres/migrations/2022-12-29-222902_curr_table_items/down.sql new file mode 100644 index 000000000..459971ea0 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2022-12-29-222902_curr_table_items/down.sql @@ -0,0 +1,7 @@ +-- This file should undo anything in `up.sql` +DROP VIEW IF EXISTS current_table_items_view; +DROP INDEX IF EXISTS cti_insat_index; +DROP TABLE IF EXISTS current_table_items; +ALTER TABLE events DROP COLUMN IF EXISTS event_index; +ALTER TABLE token_activities DROP COLUMN IF EXISTS event_index; +ALTER TABLE coin_activities DROP COLUMN IF EXISTS event_index; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2022-12-29-222902_curr_table_items/up.sql b/rust/processor/src/db/postgres/migrations/2022-12-29-222902_curr_table_items/up.sql new file mode 100644 index 000000000..b4c7de689 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2022-12-29-222902_curr_table_items/up.sql @@ -0,0 +1,35 @@ +-- Your SQL goes here +CREATE TABLE current_table_items ( + table_handle VARCHAR(66) NOT NULL, + -- Hash of the key for pk since key is unbounded + key_hash VARCHAR(64) NOT NULL, + key text NOT NULL, + decoded_key jsonb NOT NULL, + decoded_value jsonb, + is_deleted BOOLEAN NOT NULL, + last_transaction_version BIGINT NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), + -- Constraints + PRIMARY KEY ( + table_handle, + key_hash + ) +); +CREATE INDEX cti_insat_index ON current_table_items (inserted_at); +-- Create view to avoid large jsonb in bigquery +CREATE VIEW current_table_items_view AS +SELECT "key", + table_handle, + key_hash, + decoded_key#>>'{}' AS json_decoded_key, + decoded_value#>>'{}' AS json_decoded_value, + is_deleted, + last_transaction_version, + inserted_at +FROM current_table_items; +ALTER TABLE events +ADD COLUMN event_index BIGINT; +ALTER TABLE token_activities +ADD COLUMN event_index BIGINT; +ALTER TABLE coin_activities +ADD COLUMN event_index BIGINT; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2023-02-15-070116_stake_delegation/down.sql b/rust/processor/src/db/postgres/migrations/2023-02-15-070116_stake_delegation/down.sql new file mode 100644 index 000000000..8e9f8b0bc --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2023-02-15-070116_stake_delegation/down.sql @@ -0,0 +1,6 @@ +-- This file should undo anything in `up.sql` +DROP TABLE IF EXISTS delegated_staking_activities; +DROP INDEX IF EXISTS dsa_pa_da_index; +DROP INDEX IF EXISTS dsa_insat_index; +DROP TABLE IF EXISTS current_delegator_balances; +DROP INDEX IF EXISTS cdb_insat_index; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2023-02-15-070116_stake_delegation/up.sql b/rust/processor/src/db/postgres/migrations/2023-02-15-070116_stake_delegation/up.sql new file mode 100644 index 000000000..0a487d9bf --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2023-02-15-070116_stake_delegation/up.sql @@ -0,0 +1,33 @@ +-- Your SQL goes here +-- get delegated staking events such as withdraw , unlock, add stake, etc. 
+CREATE TABLE delegated_staking_activities ( + transaction_version BIGINT NOT NULL, + event_index BIGINT NOT NULL, + delegator_address VARCHAR(66) NOT NULL, + pool_address VARCHAR(66) NOT NULL, + event_type text NOT NULL, + amount NUMERIC NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), + -- Constraints + PRIMARY KEY (transaction_version, event_index) +); +CREATE INDEX dsa_pa_da_index ON delegated_staking_activities ( + pool_address, + delegator_address, + transaction_version asc, + event_index asc +); +CREATE INDEX dsa_insat_index ON delegated_staking_activities (inserted_at); +-- estimates how much delegator has staked in a pool (currently supports active only) +CREATE TABLE current_delegator_balances ( + delegator_address VARCHAR(66) NOT NULL, + pool_address VARCHAR(66) NOT NULL, + pool_type VARCHAR(100) NOT NULL, + table_handle VARCHAR(66) NOT NULL, + amount NUMERIC NOT NULL, + last_transaction_version BIGINT NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), + -- Constraints + PRIMARY KEY (delegator_address, pool_address, pool_type) +); +CREATE INDEX cdb_insat_index ON current_delegator_balances (inserted_at); \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2023-03-08-205402_nft_points/down.sql b/rust/processor/src/db/postgres/migrations/2023-03-08-205402_nft_points/down.sql new file mode 100644 index 000000000..5a0b650f1 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2023-03-08-205402_nft_points/down.sql @@ -0,0 +1,5 @@ +-- This file should undo anything in `up.sql`\ +DROP TABLE IF EXISTS nft_points; +DROP INDEX IF EXISTS np_oa_idx; +DROP INDEX IF EXISTS np_tt_oa_idx; +DROP INDEX IF EXISTS np_insat_idx; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2023-03-08-205402_nft_points/up.sql b/rust/processor/src/db/postgres/migrations/2023-03-08-205402_nft_points/up.sql new file mode 100644 index 000000000..5a458b847 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2023-03-08-205402_nft_points/up.sql @@ -0,0 +1,13 @@ +-- Your SQL goes here +CREATE TABLE nft_points ( + transaction_version BIGINT UNIQUE PRIMARY KEY NOT NULL, + owner_address VARCHAR(66) NOT NULL, + token_name TEXT NOT NULL, + point_type TEXT NOT NULL, + amount NUMERIC NOT NULL, + transaction_timestamp TIMESTAMP NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW() +); +CREATE INDEX np_oa_idx ON nft_points (owner_address); +CREATE INDEX np_tt_oa_idx ON nft_points (transaction_timestamp, owner_address); +CREATE INDEX np_insat_idx ON nft_points (inserted_at); diff --git a/rust/processor/src/db/postgres/migrations/2023-04-02-032121_delegator_pools/down.sql b/rust/processor/src/db/postgres/migrations/2023-04-02-032121_delegator_pools/down.sql new file mode 100644 index 000000000..271dec8e6 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2023-04-02-032121_delegator_pools/down.sql @@ -0,0 +1,6 @@ +-- This file should undo anything in `up.sql` +DROP TABLE IF EXISTS delegated_staking_pools; +DROP INDEX IF EXISTS dsp_oa_index; +DROP INDEX IF EXISTS dsp_insat_index; +ALTER TABLE current_staking_pool_voter +DROP COLUMN IF EXISTS operator_address; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2023-04-02-032121_delegator_pools/up.sql b/rust/processor/src/db/postgres/migrations/2023-04-02-032121_delegator_pools/up.sql new file mode 100644 index 000000000..9fca47c1f --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2023-04-02-032121_delegator_pools/up.sql @@ -0,0 
+1,9 @@ +-- Your SQL goes here +CREATE TABLE IF NOT EXISTS delegated_staking_pools ( + staking_pool_address VARCHAR(66) UNIQUE PRIMARY KEY NOT NULL, + first_transaction_version BIGINT NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW() +); +CREATE INDEX dsp_insat_index ON delegated_staking_pools (inserted_at); +ALTER TABLE current_staking_pool_voter +ADD COLUMN IF NOT EXISTS operator_address VARCHAR(66) NOT NULL; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2023-04-14-033932_optimize_queries/down.sql b/rust/processor/src/db/postgres/migrations/2023-04-14-033932_optimize_queries/down.sql new file mode 100644 index 000000000..fad4eba1a --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2023-04-14-033932_optimize_queries/down.sql @@ -0,0 +1,6 @@ +-- This file should undo anything in `up.sql` +DROP VIEW IF EXISTS address_version_from_events; +DROP VIEW IF EXISTS address_version_from_move_resources; +DROP VIEW IF EXISTS current_collection_ownership_view; +DROP VIEW IF EXISTS num_active_delegator_per_pool; +DROP INDEX IF EXISTS curr_to_collection_hash_owner_index; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2023-04-14-033932_optimize_queries/up.sql b/rust/processor/src/db/postgres/migrations/2023-04-14-033932_optimize_queries/up.sql new file mode 100644 index 000000000..286edb5e6 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2023-04-14-033932_optimize_queries/up.sql @@ -0,0 +1,38 @@ +-- Your SQL goes here +-- need this to query transactions that touch an account's events +CREATE OR REPLACE VIEW address_version_from_events AS +SELECT account_address, + transaction_version +FROM events +GROUP BY 1, + 2; +-- need this to query transactions that touch an account's move resources +CREATE OR REPLACE VIEW address_version_from_move_resources AS +SELECT address, + transaction_version +FROM move_resources +GROUP BY 1, + 2; +-- need this for getting NFTs grouped by collections +CREATE OR REPLACE VIEW current_collection_ownership_view AS +SELECT owner_address, + creator_address, + collection_name, + collection_data_id_hash, + MAX(last_transaction_version) AS last_transaction_version, + COUNT(DISTINCT name) AS distinct_tokens +FROM current_token_ownerships +WHERE amount > 0 +GROUP BY 1, + 2, + 3, + 4; +-- need this for delegation staking +CREATE OR REPLACE VIEW num_active_delegator_per_pool AS +SELECT pool_address, + COUNT(DISTINCT delegator_address) AS num_active_delegator +FROM current_delegator_balances +WHERE amount > 0 +GROUP BY 1; +-- indices +CREATE INDEX IF NOT EXISTS curr_to_collection_hash_owner_index ON current_token_ownerships (collection_data_id_hash, owner_address); \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2023-04-27-233343_delegation_pool_balances/down.sql b/rust/processor/src/db/postgres/migrations/2023-04-27-233343_delegation_pool_balances/down.sql new file mode 100644 index 000000000..6160df2ba --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2023-04-27-233343_delegation_pool_balances/down.sql @@ -0,0 +1,14 @@ +-- This file should undo anything in `up.sql` +DROP TABLE IF EXISTS delegated_staking_pool_balances; +DROP TABLE IF EXISTS current_delegated_staking_pool_balances; +DROP INDEX IF EXISTS dspb_insat_index; +ALTER TABLE current_delegator_balances +ADD COLUMN IF NOT EXISTS amount NUMERIC NOT NULL DEFAULT 0; +-- need this for delegation staking, changing to amount +CREATE OR REPLACE VIEW num_active_delegator_per_pool AS +SELECT 
pool_address, + COUNT(DISTINCT delegator_address) AS num_active_delegator +FROM current_delegator_balances +WHERE amount > 0 +GROUP BY 1; +ALTER TABLE current_delegator_balances DROP COLUMN IF EXISTS shares; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2023-04-27-233343_delegation_pool_balances/up.sql b/rust/processor/src/db/postgres/migrations/2023-04-27-233343_delegation_pool_balances/up.sql new file mode 100644 index 000000000..13d7ae079 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2023-04-27-233343_delegation_pool_balances/up.sql @@ -0,0 +1,29 @@ +-- Your SQL goes here +CREATE TABLE IF NOT EXISTS delegated_staking_pool_balances ( + transaction_version BIGINT NOT NULL, + staking_pool_address VARCHAR(66) NOT NULL, + total_coins NUMERIC NOT NULL, + total_shares NUMERIC NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), + -- Constraints + PRIMARY KEY (transaction_version, staking_pool_address) +); +CREATE INDEX dspb_insat_index ON delegated_staking_pool_balances (inserted_at); +CREATE TABLE IF NOT EXISTS current_delegated_staking_pool_balances ( + staking_pool_address VARCHAR(66) UNIQUE PRIMARY KEY NOT NULL, + total_coins NUMERIC NOT NULL, + total_shares NUMERIC NOT NULL, + last_transaction_version BIGINT NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW() +); +CREATE INDEX cdspb_insat_index ON current_delegated_staking_pool_balances (inserted_at); +ALTER TABLE current_delegator_balances +ADD COLUMN IF NOT EXISTS shares NUMERIC NOT NULL; +-- need this for delegation staking, changing to shares +CREATE OR REPLACE VIEW num_active_delegator_per_pool AS +SELECT pool_address, + COUNT(DISTINCT delegator_address) AS num_active_delegator +FROM current_delegator_balances +WHERE shares > 0 +GROUP BY 1; +ALTER TABLE current_delegator_balances DROP COLUMN IF EXISTS amount; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2023-04-28-053048_object_token_v2/down.sql b/rust/processor/src/db/postgres/migrations/2023-04-28-053048_object_token_v2/down.sql new file mode 100644 index 000000000..bce48fd6a --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2023-04-28-053048_object_token_v2/down.sql @@ -0,0 +1,36 @@ +-- This file should undo anything in `up.sql` +DROP TABLE IF EXISTS objects; +DROP INDEX IF EXISTS o_owner_idx; +DROP INDEX IF EXISTS o_object_skh_idx; +DROP INDEX IF EXISTS o_skh_idx; +DROP INDEX IF EXISTS o_insat_idx; +DROP TABLE IF EXISTS current_objects; +DROP INDEX IF EXISTS co_owner_idx; +DROP INDEX IF EXISTS co_object_skh_idx; +DROP INDEX IF EXISTS co_skh_idx; +DROP INDEX IF EXISTS co_insat_idx; +ALTER TABLE move_resources DROP COLUMN IF EXISTS state_key_hash; +DROP TABLE IF EXISTS token_ownerships_v2; +DROP INDEX IF EXISTS to2_id_index; +DROP INDEX IF EXISTS to2_owner_index; +DROP INDEX IF EXISTS to2_insat_index; +DROP TABLE IF EXISTS current_token_ownerships_v2; +DROP INDEX IF EXISTS curr_to2_owner_index; +DROP INDEX IF EXISTS curr_to2_wa_index; +DROP INDEX IF EXISTS curr_to2_insat_index; +DROP TABLE IF EXISTS collections_v2; +DROP INDEX IF EXISTS col2_id_index; +DROP INDEX IF EXISTS col2_crea_cn_index; +DROP INDEX IF EXISTS col2_insat_index; +DROP TABLE IF EXISTS current_collections_v2; +DROP INDEX IF EXISTS cur_col2_crea_cn_index; +DROP INDEX IF EXISTS cur_col2_insat_index; +DROP TABLE IF EXISTS token_datas_v2; +DROP INDEX IF EXISTS td2_id_index; +DROP INDEX IF EXISTS td2_cid_name_index; +DROP INDEX IF EXISTS td2_insat_index; +DROP TABLE IF EXISTS current_token_datas_v2; +DROP 
INDEX IF EXISTS cur_td2_cid_name_index; +DROP INDEX IF EXISTS cur_td2_insat_index; +ALTER TABLE current_token_pending_claims DROP COLUMN IF EXISTS token_data_id; +ALTER TABLE current_token_pending_claims DROP COLUMN IF EXISTS collection_id; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2023-04-28-053048_object_token_v2/up.sql b/rust/processor/src/db/postgres/migrations/2023-04-28-053048_object_token_v2/up.sql new file mode 100644 index 000000000..bf8f5f4d7 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2023-04-28-053048_object_token_v2/up.sql @@ -0,0 +1,170 @@ +-- Your SQL goes here +-- objects, basically normalizing ObjectCore +CREATE TABLE IF NOT EXISTS objects ( + transaction_version BIGINT NOT NULL, + write_set_change_index BIGINT NOT NULL, + object_address VARCHAR(66) NOT NULL, + owner_address VARCHAR(66), + state_key_hash VARCHAR(66) NOT NULL, + guid_creation_num NUMERIC, + allow_ungated_transfer BOOLEAN, + is_deleted BOOLEAN NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), + -- constraints + PRIMARY KEY (transaction_version, write_set_change_index) +); +CREATE INDEX IF NOT EXISTS o_owner_idx ON objects (owner_address); +CREATE INDEX IF NOT EXISTS o_object_skh_idx ON objects (object_address, state_key_hash); +CREATE INDEX IF NOT EXISTS o_skh_idx ON objects (state_key_hash); +CREATE INDEX IF NOT EXISTS o_insat_idx ON objects (inserted_at); +-- latest instance of objects +CREATE TABLE IF NOT EXISTS current_objects ( + object_address VARCHAR(66) UNIQUE PRIMARY KEY NOT NULL, + owner_address VARCHAR(66) NOT NULL, + state_key_hash VARCHAR(66) NOT NULL, + allow_ungated_transfer BOOLEAN NOT NULL, + last_guid_creation_num NUMERIC NOT NULL, + last_transaction_version BIGINT NOT NULL, + is_deleted BOOLEAN NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW() +); +CREATE INDEX IF NOT EXISTS co_owner_idx ON current_objects (owner_address); +CREATE INDEX IF NOT EXISTS co_object_skh_idx ON current_objects (object_address, state_key_hash); +CREATE INDEX IF NOT EXISTS co_skh_idx ON current_objects (state_key_hash); +CREATE INDEX IF NOT EXISTS co_insat_idx ON current_objects (inserted_at); +-- Add this so that we can find resource groups by their state_key_hash +ALTER TABLE move_resources +ADD COLUMN IF NOT EXISTS state_key_hash VARCHAR(66) NOT NULL DEFAULT ''; +-- NFT stuff +-- tracks who owns tokens +CREATE TABLE IF NOT EXISTS token_ownerships_v2 ( + transaction_version BIGINT NOT NULL, + write_set_change_index BIGINT NOT NULL, + token_data_id VARCHAR(66) NOT NULL, + property_version_v1 NUMERIC NOT NULL, + owner_address VARCHAR(66), + storage_id VARCHAR(66) NOT NULL, + amount NUMERIC NOT NULL, + table_type_v1 VARCHAR(66), + token_properties_mutated_v1 JSONB, + is_soulbound_v2 BOOLEAN, + token_standard VARCHAR(10) NOT NULL, + is_fungible_v2 BOOLEAN, + transaction_timestamp TIMESTAMP NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), + PRIMARY KEY (transaction_version, write_set_change_index) +); +CREATE INDEX IF NOT EXISTS to2_id_index ON token_ownerships_v2 (token_data_id); +CREATE INDEX IF NOT EXISTS to2_owner_index ON token_ownerships_v2 (owner_address); +CREATE INDEX IF NOT EXISTS to2_insat_index ON token_ownerships_v2 (inserted_at); +CREATE TABLE IF NOT EXISTS current_token_ownerships_v2 ( + token_data_id VARCHAR(66) NOT NULL, + property_version_v1 NUMERIC NOT NULL, + owner_address VARCHAR(66) NOT NULL, + storage_id VARCHAR(66) NOT NULL, + amount NUMERIC NOT NULL, + table_type_v1 VARCHAR(66), + token_properties_mutated_v1 
JSONB, + is_soulbound_v2 BOOLEAN, + token_standard VARCHAR(10) NOT NULL, + is_fungible_v2 BOOLEAN, + last_transaction_version BIGINT NOT NULL, + last_transaction_timestamp TIMESTAMP NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), + PRIMARY KEY ( + token_data_id, + property_version_v1, + owner_address, + storage_id + ) +); +CREATE INDEX IF NOT EXISTS curr_to2_owner_index ON current_token_ownerships_v2 (owner_address); +CREATE INDEX IF NOT EXISTS curr_to2_wa_index ON current_token_ownerships_v2 (storage_id); +CREATE INDEX IF NOT EXISTS curr_to2_insat_index ON current_token_ownerships_v2 (inserted_at); +-- tracks collections +CREATE TABLE IF NOT EXISTS collections_v2 ( + transaction_version BIGINT NOT NULL, + write_set_change_index BIGINT NOT NULL, + collection_id VARCHAR(66) NOT NULL, + creator_address VARCHAR(66) NOT NULL, + collection_name VARCHAR(128) NOT NULL, + description TEXT NOT NULL, + uri VARCHAR(512) NOT NULL, + current_supply NUMERIC NOT NULL, + max_supply NUMERIC, + total_minted_v2 NUMERIC, + mutable_description BOOLEAN, + mutable_uri BOOLEAN, + table_handle_v1 VARCHAR(66), + token_standard VARCHAR(10) NOT NULL, + transaction_timestamp TIMESTAMP NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), + PRIMARY KEY (transaction_version, write_set_change_index) +); +CREATE INDEX IF NOT EXISTS col2_id_index ON collections_v2 (collection_id); +CREATE INDEX IF NOT EXISTS col2_crea_cn_index ON collections_v2 (creator_address, collection_name); +CREATE INDEX IF NOT EXISTS col2_insat_index ON collections_v2 (inserted_at); +CREATE TABLE IF NOT EXISTS current_collections_v2 ( + collection_id VARCHAR(66) UNIQUE PRIMARY KEY NOT NULL, + creator_address VARCHAR(66) NOT NULL, + collection_name VARCHAR(128) NOT NULL, + description TEXT NOT NULL, + uri VARCHAR(512) NOT NULL, + current_supply NUMERIC NOT NULL, + max_supply NUMERIC, + total_minted_v2 NUMERIC, + mutable_description BOOLEAN, + mutable_uri BOOLEAN, + table_handle_v1 VARCHAR(66), + token_standard VARCHAR(10) NOT NULL, + last_transaction_version BIGINT NOT NULL, + last_transaction_timestamp TIMESTAMP NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW() +); +CREATE INDEX IF NOT EXISTS cur_col2_crea_cn_index ON current_collections_v2 (creator_address, collection_name); +CREATE INDEX IF NOT EXISTS cur_col2_insat_index ON current_collections_v2 (inserted_at); +-- tracks token metadata +CREATE TABLE IF NOT EXISTS token_datas_v2 ( + transaction_version BIGINT NOT NULL, + write_set_change_index BIGINT NOT NULL, + token_data_id VARCHAR(66) NOT NULL, + collection_id VARCHAR(66) NOT NULL, + token_name VARCHAR(128) NOT NULL, + maximum NUMERIC, + supply NUMERIC NOT NULL, + largest_property_version_v1 NUMERIC, + token_uri VARCHAR(512) NOT NULL, + token_properties JSONB NOT NULL, + description TEXT NOT NULL, + token_standard VARCHAR(10) NOT NULL, + is_fungible_v2 BOOLEAN, + transaction_timestamp TIMESTAMP NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), + PRIMARY KEY (transaction_version, write_set_change_index) +); +CREATE INDEX IF NOT EXISTS td2_id_index ON token_datas_v2 (token_data_id); +CREATE INDEX IF NOT EXISTS td2_cid_name_index ON token_datas_v2 (collection_id, token_name); +CREATE INDEX IF NOT EXISTS td2_insat_index ON token_datas_v2 (inserted_at); +CREATE TABLE IF NOT EXISTS current_token_datas_v2 ( + token_data_id VARCHAR(66) UNIQUE PRIMARY KEY NOT NULL, + collection_id VARCHAR(66) NOT NULL, + token_name VARCHAR(128) NOT NULL, + maximum NUMERIC, + supply NUMERIC NOT NULL, + largest_property_version_v1 NUMERIC, + 
token_uri VARCHAR(512) NOT NULL, + description TEXT NOT NULL, + token_properties JSONB NOT NULL, + token_standard VARCHAR(10) NOT NULL, + is_fungible_v2 BOOLEAN, + last_transaction_version BIGINT NOT NULL, + last_transaction_timestamp TIMESTAMP NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW() +); +CREATE INDEX IF NOT EXISTS cur_td2_cid_name_index ON current_token_datas_v2 (collection_id, token_name); +CREATE INDEX IF NOT EXISTS cur_td2_insat_index ON current_token_datas_v2 (inserted_at); +-- Add ID (with 0x prefix) +ALTER TABLE current_token_pending_claims +ADD COLUMN IF NOT EXISTS token_data_id VARCHAR(66) NOT NULL DEFAULT ''; +ALTER TABLE current_token_pending_claims +ADD COLUMN IF NOT EXISTS collection_id VARCHAR(66) NOT NULL DEFAULT ''; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2023-05-17-010107_activities_v2/down.sql b/rust/processor/src/db/postgres/migrations/2023-05-17-010107_activities_v2/down.sql new file mode 100644 index 000000000..eac13ae1d --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2023-05-17-010107_activities_v2/down.sql @@ -0,0 +1,8 @@ +-- This file should undo anything in `up.sql` +DROP TABLE IF EXISTS token_activities_v2; +DROP INDEX IF EXISTS ta2_owner_type_index; +DROP INDEX IF EXISTS ta2_from_type_index; +DROP INDEX IF EXISTS ta2_to_type_index; +DROP INDEX IF EXISTS ta2_tid_index; +DROP INDEX IF EXISTS ta2_cid_index; +DROP INDEX IF EXISTS ta2_insat_index; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2023-05-17-010107_activities_v2/up.sql b/rust/processor/src/db/postgres/migrations/2023-05-17-010107_activities_v2/up.sql new file mode 100644 index 000000000..6a753b82f --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2023-05-17-010107_activities_v2/up.sql @@ -0,0 +1,25 @@ +-- Your SQL goes here +CREATE TABLE IF NOT EXISTS token_activities_v2 ( + transaction_version BIGINT NOT NULL, + event_index BIGINT NOT NULL, + event_account_address VARCHAR(66) NOT NULL, + token_data_id VARCHAR(66) NOT NULL, + property_version_v1 NUMERIC NOT NULL, + type VARCHAR(50) NOT NULL, + from_address VARCHAR(66), + to_address VARCHAR(66), + token_amount NUMERIC NOT NULL, + before_value TEXT, + after_value TEXT, + entry_function_id_str VARCHAR(100), + token_standard VARCHAR(10) NOT NULL, + is_fungible_v2 BOOLEAN, + transaction_timestamp TIMESTAMP NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), + PRIMARY KEY (transaction_version, event_index) +); +CREATE INDEX IF NOT EXISTS ta2_owner_type_index ON token_activities_v2 (event_account_address, type); +CREATE INDEX IF NOT EXISTS ta2_from_type_index ON token_activities_v2 (from_address, type); +CREATE INDEX IF NOT EXISTS ta2_to_type_index ON token_activities_v2 (to_address, type); +CREATE INDEX IF NOT EXISTS ta2_tid_index ON token_activities_v2 (token_data_id); +CREATE INDEX IF NOT EXISTS ta2_insat_index ON token_activities_v2 (inserted_at); \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2023-05-22-234344_delegated_staking_improvements/down.sql b/rust/processor/src/db/postgres/migrations/2023-05-22-234344_delegated_staking_improvements/down.sql new file mode 100644 index 000000000..4337e764c --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2023-05-22-234344_delegated_staking_improvements/down.sql @@ -0,0 +1,24 @@ +-- This file should undo anything in `up.sql` +ALTER TABLE current_delegated_staking_pool_balances DROP COLUMN IF EXISTS operator_commission_percentage, + DROP COLUMN 
IF EXISTS inactive_table_handle, + DROP COLUMN IF EXISTS active_table_handle; +DROP INDEX IF EXISTS cdspb_inactive_index; +ALTER TABLE delegated_staking_pool_balances DROP COLUMN IF EXISTS operator_commission_percentage, + DROP COLUMN IF EXISTS inactive_table_handle, + DROP COLUMN IF EXISTS active_table_handle; +ALTER TABLE current_delegator_balances DROP COLUMN IF EXISTS parent_table_handle; +ALTER TABLE current_delegator_balances DROP CONSTRAINT current_delegator_balances_pkey; +ALTER TABLE current_delegator_balances +ADD CONSTRAINT current_delegator_balances_pkey PRIMARY KEY ( + delegator_address, + pool_address, + pool_type + ); +CREATE OR REPLACE VIEW num_active_delegator_per_pool AS +SELECT pool_address, + COUNT(DISTINCT delegator_address) AS num_active_delegator +FROM current_delegator_balances +WHERE shares > 0 +GROUP BY 1; +DROP VIEW IF EXISTS delegator_distinct_pool; +DROP VIEW IF EXISTS address_events_summary; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2023-05-22-234344_delegated_staking_improvements/up.sql b/rust/processor/src/db/postgres/migrations/2023-05-22-234344_delegated_staking_improvements/up.sql new file mode 100644 index 000000000..4b5c32ad9 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2023-05-22-234344_delegated_staking_improvements/up.sql @@ -0,0 +1,46 @@ +-- Your SQL goes here +-- adding new fields to staking pool balances for display and handling inactive pools +ALTER TABLE current_delegated_staking_pool_balances +ADD COLUMN IF NOT EXISTS operator_commission_percentage NUMERIC NOT NULL, + ADD COLUMN IF NOT EXISTS inactive_table_handle VARCHAR(66) NOT NULL, + ADD COLUMN IF NOT EXISTS active_table_handle VARCHAR(66) NOT NULL; +CREATE INDEX IF NOT EXISTS cdspb_inactive_index ON current_delegated_staking_pool_balances (inactive_table_handle); +-- adding new fields to staking pool balances for display and handling inactive pools +ALTER TABLE delegated_staking_pool_balances +ADD COLUMN IF NOT EXISTS operator_commission_percentage NUMERIC NOT NULL, + ADD COLUMN IF NOT EXISTS inactive_table_handle VARCHAR(66) NOT NULL, + ADD COLUMN IF NOT EXISTS active_table_handle VARCHAR(66) NOT NULL; +-- add new field to composite primary key because technically a user could have inactive pools +ALTER TABLE current_delegator_balances +ADD COLUMN IF NOT EXISTS parent_table_handle VARCHAR(66) NOT NULL; +ALTER TABLE current_delegator_balances DROP CONSTRAINT current_delegator_balances_pkey; +ALTER TABLE current_delegator_balances +ADD CONSTRAINT current_delegator_balances_pkey PRIMARY KEY ( + delegator_address, + pool_address, + pool_type, + table_handle + ); +-- need this for delegation staking +CREATE OR REPLACE VIEW num_active_delegator_per_pool AS +SELECT pool_address, + COUNT(DISTINCT delegator_address) AS num_active_delegator +FROM current_delegator_balances +WHERE shares > 0 + AND pool_type = 'active_shares' +GROUP BY 1; +-- need this for delegation staking +CREATE OR REPLACE VIEW delegator_distinct_pool AS +SELECT delegator_address, + pool_address +FROM current_delegator_balances +WHERE shares > 0 +GROUP BY 1, + 2; +-- new query for wallet +CREATE OR REPLACE VIEW address_events_summary AS +SELECT account_address, + min(transaction_block_height) AS min_block_height, + count(DISTINCT transaction_version) AS num_distinct_versions +FROM events +GROUP BY 1 \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2023-05-24-052435_token_properties_v2/down.sql 
b/rust/processor/src/db/postgres/migrations/2023-05-24-052435_token_properties_v2/down.sql new file mode 100644 index 000000000..759d57bc3 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2023-05-24-052435_token_properties_v2/down.sql @@ -0,0 +1,7 @@ +-- This file should undo anything in `up.sql` +DROP VIEW IF EXISTS current_collection_ownership_v2_view; +DROP TABLE IF EXISTS current_token_v2_metadata; +ALTER TABLE token_datas_v2 DROP COLUMN IF EXISTS decimals; +ALTER TABLE current_token_datas_v2 DROP COLUMN IF EXISTS decimals; +ALTER TABLE token_ownerships_v2 DROP COLUMN IF EXISTS non_transferrable_by_owner; +ALTER TABLE current_token_ownerships_v2 DROP COLUMN IF EXISTS non_transferrable_by_owner; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2023-05-24-052435_token_properties_v2/up.sql b/rust/processor/src/db/postgres/migrations/2023-05-24-052435_token_properties_v2/up.sql new file mode 100644 index 000000000..9703d8742 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2023-05-24-052435_token_properties_v2/up.sql @@ -0,0 +1,58 @@ +-- Your SQL goes here +-- need this for getting NFTs grouped by collections +create or replace view current_collection_ownership_v2_view as +select owner_address, + b.collection_id, + MAX(a.last_transaction_version) as last_transaction_version, + COUNT(distinct a.token_data_id) as distinct_tokens +from current_token_ownerships_v2 a + join current_token_datas_v2 b on a.token_data_id = b.token_data_id +where a.amount > 0 +group by 1, + 2; +-- create table for all structs in token object core +CREATE TABLE IF NOT EXISTS current_token_v2_metadata ( + object_address VARCHAR(66) NOT NULL, + resource_type VARCHAR(128) NOT NULL, + data jsonb NOT NULL, + state_key_hash VARCHAR(66) NOT NULL, + last_transaction_version BIGINT NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), + -- constraints + PRIMARY KEY (object_address, resource_type) +); +-- create table for all structs in token object core +ALTER TABLE token_datas_v2 +ADD COLUMN IF NOT EXISTS decimals BIGINT NOT NULL DEFAULT 0; +ALTER TABLE current_token_datas_v2 +ADD COLUMN IF NOT EXISTS decimals BIGINT NOT NULL DEFAULT 0; +ALTER TABLE token_ownerships_v2 +ADD COLUMN IF NOT EXISTS non_transferrable_by_owner BOOLEAN; +ALTER TABLE current_token_ownerships_v2 +ADD COLUMN IF NOT EXISTS non_transferrable_by_owner BOOLEAN; +-- These are needed b/c for some reason we're getting build errors when setting +-- type field with a length limit +ALTER TABLE signatures +ALTER COLUMN type TYPE VARCHAR; +ALTER TABLE token_activities_v2 +ALTER COLUMN type TYPE VARCHAR; +DROP VIEW IF EXISTS transactions_view; +ALTER TABLE transactions +ALTER COLUMN type TYPE VARCHAR; +CREATE VIEW transactions_view AS +SELECT "version", + block_height, + "hash", + "type", + payload#>>'{}' AS json_payload, + state_change_hash, + event_root_hash, + state_checkpoint_hash, + gas_used, + success, + vm_status, + accumulator_root_hash, + num_events, + num_write_set_changes, + inserted_at +FROM transactions; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2023-07-06-042159_minor_optimizations/down.sql b/rust/processor/src/db/postgres/migrations/2023-07-06-042159_minor_optimizations/down.sql new file mode 100644 index 000000000..81190795c --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2023-07-06-042159_minor_optimizations/down.sql @@ -0,0 +1,2 @@ +-- This file should undo anything in `up.sql` +DROP INDEX IF EXISTS mr_ver_index; \ No newline at 
end of file diff --git a/rust/processor/src/db/postgres/migrations/2023-07-06-042159_minor_optimizations/up.sql b/rust/processor/src/db/postgres/migrations/2023-07-06-042159_minor_optimizations/up.sql new file mode 100644 index 000000000..c40d650f0 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2023-07-06-042159_minor_optimizations/up.sql @@ -0,0 +1,29 @@ +-- Your SQL goes here +-- This is needed to improve performance when querying an account with a large number of transactions +CREATE INDEX IF NOT EXISTS mr_ver_index ON move_resources(transaction_version DESC); +-- These are needed b/c for some reason we're getting build errors when setting +-- type field with a length limit +ALTER TABLE signatures +ALTER COLUMN type TYPE VARCHAR; +ALTER TABLE token_activities_v2 +ALTER COLUMN type TYPE VARCHAR; +DROP VIEW IF EXISTS transactions_view; +ALTER TABLE transactions +ALTER COLUMN type TYPE VARCHAR; +CREATE VIEW transactions_view AS +SELECT "version", + block_height, + "hash", + "type", + payload#>>'{}' AS json_payload, + state_change_hash, + event_root_hash, + state_checkpoint_hash, + gas_used, + success, + vm_status, + accumulator_root_hash, + num_events, + num_write_set_changes, + inserted_at +FROM transactions; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2023-07-13-060328_transactions_by_address/down.sql b/rust/processor/src/db/postgres/migrations/2023-07-13-060328_transactions_by_address/down.sql new file mode 100644 index 000000000..4f1714472 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2023-07-13-060328_transactions_by_address/down.sql @@ -0,0 +1,10 @@ +-- This file should undo anything in `up.sql` +DROP INDEX IF EXISTS at_version_index; +DROP INDEX IF EXISTS at_insat_index; +DROP TABLE IF EXISTS account_transactions; +ALTER TABLE objects +ALTER COLUMN owner_address DROP NOT NULL; +ALTER TABLE objects +ALTER COLUMN guid_creation_num DROP NOT NULL; +ALTER TABLE objects +ALTER COLUMN allow_ungated_transfer DROP NOT NULL; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2023-07-13-060328_transactions_by_address/up.sql b/rust/processor/src/db/postgres/migrations/2023-07-13-060328_transactions_by_address/up.sql new file mode 100644 index 000000000..8672514c8 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2023-07-13-060328_transactions_by_address/up.sql @@ -0,0 +1,20 @@ +-- Your SQL goes here +-- Records transactions - account pairs. Account here can represent +-- user account, resource account, or object account. 
+CREATE TABLE IF NOT EXISTS account_transactions ( + transaction_version BIGINT NOT NULL, + account_address VARCHAR(66) NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), + PRIMARY KEY (account_address, transaction_version) +); +CREATE INDEX IF NOT EXISTS at_version_index ON account_transactions (transaction_version DESC); +CREATE INDEX IF NOT EXISTS at_insat_index ON account_transactions (inserted_at); +ALTER TABLE objects +ALTER COLUMN owner_address +SET NOT NULL; +ALTER TABLE objects +ALTER COLUMN guid_creation_num +SET NOT NULL; +ALTER TABLE objects +ALTER COLUMN allow_ungated_transfer +SET NOT NULL; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2023-07-28-053854_entry_function/down.sql b/rust/processor/src/db/postgres/migrations/2023-07-28-053854_entry_function/down.sql new file mode 100644 index 000000000..0b511f7df --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2023-07-28-053854_entry_function/down.sql @@ -0,0 +1,7 @@ +-- This file should undo anything in `up.sql` +ALTER TABLE user_transactions +ALTER COLUMN entry_function_id_str TYPE TEXT; +ALTER TABLE coin_activities +ALTER COLUMN entry_function_id_str TYPE VARCHAR(100); +ALTER TABLE token_activities_v2 +ALTER COLUMN entry_function_id_str TYPE VARCHAR(100); \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2023-07-28-053854_entry_function/up.sql b/rust/processor/src/db/postgres/migrations/2023-07-28-053854_entry_function/up.sql new file mode 100644 index 000000000..070933b5a --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2023-07-28-053854_entry_function/up.sql @@ -0,0 +1,7 @@ +-- Your SQL goes here +ALTER TABLE user_transactions +ALTER COLUMN entry_function_id_str TYPE VARCHAR(1000); +ALTER TABLE coin_activities +ALTER COLUMN entry_function_id_str TYPE VARCHAR(1000); +ALTER TABLE token_activities_v2 +ALTER COLUMN entry_function_id_str TYPE VARCHAR(1000); diff --git a/rust/processor/src/db/postgres/migrations/2023-08-01-042050_fungible_assets/down.sql b/rust/processor/src/db/postgres/migrations/2023-08-01-042050_fungible_assets/down.sql new file mode 100644 index 000000000..808d91b09 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2023-08-01-042050_fungible_assets/down.sql @@ -0,0 +1,29 @@ +-- This file should undo anything in `up.sql` +DROP VIEW IF EXISTS current_collection_ownership_v2_view; +create or replace view current_collection_ownership_v2_view as +select owner_address, + b.collection_id, + MAX(a.last_transaction_version) as last_transaction_version, + COUNT(distinct a.token_data_id) as distinct_tokens +from current_token_ownerships_v2 a + join current_token_datas_v2 b on a.token_data_id = b.token_data_id +where a.amount > 0 +group by 1, + 2; +DROP INDEX IF EXISTS faa_owner_type_index; +DROP INDEX IF EXISTS faa_si_index; +DROP INDEX IF EXISTS faa_at_index; +DROP INDEX IF EXISTS faa_gfpa_index; +DROP INDEX IF EXISTS faa_insat_idx; +DROP TABLE IF EXISTS fungible_asset_activities; +DROP INDEX IF EXISTS cfab_owner_at_index; +DROP INDEX IF EXISTS cfab_insat_index; +DROP TABLE IF EXISTS current_fungible_asset_balances; +DROP INDEX IF EXISTS fab_owner_at_index; +DROP INDEX IF EXISTS fab_insat_index; +DROP TABLE IF EXISTS fungible_asset_balances; +DROP INDEX IF EXISTS fam_creator_index; +DROP INDEX IF EXISTS fam_insat_index; +DROP TABLE IF EXISTS fungible_asset_metadata; +DROP INDEX IF EXISTS ca_gfpa_index; +ALTER TABLE coin_activities DROP COLUMN IF EXISTS gas_fee_payer_address; \ No newline at end of file diff 
--git a/rust/processor/src/db/postgres/migrations/2023-08-01-042050_fungible_assets/up.sql b/rust/processor/src/db/postgres/migrations/2023-08-01-042050_fungible_assets/up.sql new file mode 100644 index 000000000..6cab2614d --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2023-08-01-042050_fungible_assets/up.sql @@ -0,0 +1,101 @@ +-- Your SQL goes here +-- Redo the view for collection view +DROP VIEW IF EXISTS current_collection_ownership_v2_view; +CREATE OR REPLACE VIEW current_collection_ownership_v2_view as +select owner_address, + creator_address, + collection_name, + b.collection_id, + max(a.last_transaction_version) as last_transaction_version, + count(distinct a.token_data_id) as distinct_tokens, + min(c.uri) as collection_uri, + min(token_uri) as single_token_uri +from current_token_ownerships_v2 a + join token_datas_v2 b on a.token_data_id = b.token_data_id + join current_collections_v2 c on b.collection_id = c.collection_id +where amount > 0 +group by 1, + 2, + 3, + 4; +-- fungible asset activities +CREATE TABLE IF NOT EXISTS fungible_asset_activities ( + transaction_version BIGINT NOT NULL, + event_index BIGINT NOT NULL, + owner_address VARCHAR(66) NOT NULL, + storage_id VARCHAR(66) NOT NULL, + asset_type VARCHAR(1000) NOT NULL, + is_frozen BOOLEAN, + amount NUMERIC, + type VARCHAR NOT NULL, + is_gas_fee BOOLEAN NOT NULL, + gas_fee_payer_address VARCHAR(66), + is_transaction_success BOOLEAN NOT NULL, + entry_function_id_str VARCHAR(1000), + block_height BIGINT NOT NULL, + token_standard VARCHAR(10) NOT NULL, + transaction_timestamp TIMESTAMP NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), + -- constraints + PRIMARY KEY (transaction_version, event_index) +); +CREATE INDEX IF NOT EXISTS faa_owner_type_index ON fungible_asset_activities (owner_address, type); +CREATE INDEX IF NOT EXISTS faa_si_index ON fungible_asset_activities (storage_id); +CREATE INDEX IF NOT EXISTS faa_at_index ON fungible_asset_activities (asset_type); +CREATE INDEX IF NOT EXISTS faa_gfpa_index ON fungible_asset_activities (gas_fee_payer_address); +CREATE INDEX IF NOT EXISTS faa_insat_idx ON fungible_asset_activities (inserted_at); +-- current fungible asset balances +CREATE TABLE IF NOT EXISTS current_fungible_asset_balances ( + storage_id VARCHAR(66) UNIQUE PRIMARY KEY NOT NULL, + owner_address VARCHAR(66) NOT NULL, + asset_type VARCHAR(1000) NOT NULL, + is_primary BOOLEAN NOT NULL, + is_frozen BOOLEAN NOT NULL, + amount NUMERIC NOT NULL, + last_transaction_timestamp TIMESTAMP NOT NULL, + last_transaction_version BIGINT NOT NULL, + token_standard VARCHAR(10) NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW() +); +CREATE INDEX IF NOT EXISTS cfab_owner_at_index ON current_fungible_asset_balances (owner_address, asset_type); +CREATE INDEX IF NOT EXISTS cfab_insat_index ON current_fungible_asset_balances (inserted_at); +-- balances +CREATE TABLE IF NOT EXISTS fungible_asset_balances ( + transaction_version BIGINT NOT NULL, + write_set_change_index BIGINT NOT NULL, + storage_id VARCHAR(66) NOT NULL, + owner_address VARCHAR(66) NOT NULL, + asset_type VARCHAR(1000) NOT NULL, + is_primary BOOLEAN NOT NULL, + is_frozen BOOLEAN NOT NULL, + amount NUMERIC NOT NULL, + transaction_timestamp TIMESTAMP NOT NULL, + token_standard VARCHAR(10) NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), + -- constraints + PRIMARY KEY (transaction_version, write_set_change_index) +); +CREATE INDEX IF NOT EXISTS fab_owner_at_index ON fungible_asset_balances (owner_address, asset_type); +CREATE INDEX IF 
NOT EXISTS fab_insat_index ON fungible_asset_balances (inserted_at); +-- fungible asset metadata +CREATE TABLE IF NOT EXISTS fungible_asset_metadata ( + asset_type VARCHAR(1000) UNIQUE PRIMARY KEY NOT NULL, + creator_address VARCHAR(66) NOT NULL, + "name" VARCHAR(32) NOT NULL, + symbol VARCHAR(10) NOT NULL, + decimals INT NOT NULL, + icon_uri VARCHAR(512), + project_uri VARCHAR(512), + last_transaction_version BIGINT NOT NULL, + last_transaction_timestamp TIMESTAMP NOT NULL, + supply_aggregator_table_handle_v1 VARCHAR(66), + supply_aggregator_table_key_v1 text, + token_standard VARCHAR(10) NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW() +); +CREATE INDEX IF NOT EXISTS fam_creator_index ON fungible_asset_metadata (creator_address); +CREATE INDEX IF NOT EXISTS fam_insat_index ON fungible_asset_metadata (inserted_at); +-- adding fee payer handling to old coin activities +ALTER TABLE coin_activities +ADD COLUMN IF NOT EXISTS gas_fee_payer_address VARCHAR(66) DEFAULT NULL; +CREATE INDEX IF NOT EXISTS ca_gfpa_index ON coin_activities (gas_fee_payer_address); \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2023-08-12-190707_add_ans_is_primary/down.sql b/rust/processor/src/db/postgres/migrations/2023-08-12-190707_add_ans_is_primary/down.sql new file mode 100644 index 000000000..9242b9e2e --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2023-08-12-190707_add_ans_is_primary/down.sql @@ -0,0 +1,10 @@ +ALTER TABLE current_ans_lookup DROP COLUMN IF EXISTS is_deleted; +DROP INDEX IF EXISTS capn_tn_index; +DROP INDEX IF EXISTS capn_insat_index; +DROP INDEX IF EXISTS apn_tn_index; +DROP INDEX IF EXISTS apn_insat_index; +DROP INDEX IF EXISTS al_tn_index; +DROP INDEX IF EXISTS al_insat_index; +DROP TABLE IF EXISTS current_ans_primary_name; +DROP TABLE IF EXISTS ans_primary_name; +DROP TABLE IF EXISTS ans_lookup; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2023-08-12-190707_add_ans_is_primary/up.sql b/rust/processor/src/db/postgres/migrations/2023-08-12-190707_add_ans_is_primary/up.sql new file mode 100644 index 000000000..cc2eeb317 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2023-08-12-190707_add_ans_is_primary/up.sql @@ -0,0 +1,49 @@ +ALTER TABLE current_ans_lookup +ADD COLUMN IF NOT EXISTS is_deleted BOOLEAN NOT NULL DEFAULT FALSE; +-- Tracks current primary name, deleted means that address no longer has a primary name +CREATE TABLE IF NOT EXISTS current_ans_primary_name ( + registered_address VARCHAR(66) UNIQUE PRIMARY KEY NOT NULL, + domain VARCHAR(64), + subdomain VARCHAR(64), + token_name VARCHAR(140), + is_deleted BOOLEAN NOT NULL, + last_transaction_version BIGINT NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW() +); +CREATE INDEX IF NOT EXISTS capn_tn_index on current_ans_primary_name (token_name); +CREATE INDEX IF NOT EXISTS capn_insat_index on current_ans_primary_name (inserted_at); +-- Tracks primary name, deleted means that address no longer has a primary name +CREATE TABLE IF NOT EXISTS ans_primary_name ( + transaction_version BIGINT NOT NULL, + write_set_change_index BIGINT NOT NULL, + registered_address VARCHAR(66) NOT NULL, + domain VARCHAR(64), + subdomain VARCHAR(64), + token_name VARCHAR(140), + is_deleted BOOLEAN NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), + -- Constraints + PRIMARY KEY ( + transaction_version, + write_set_change_index + ) +); +CREATE INDEX IF NOT EXISTS apn_tn_index on ans_primary_name (token_name); +CREATE INDEX IF NOT EXISTS 
apn_insat_index on ans_primary_name (inserted_at); +-- Tracks full history of the ans records table +CREATE TABLE IF NOT EXISTS ans_lookup ( + transaction_version BIGINT NOT NULL, + write_set_change_index BIGINT NOT NULL, + domain VARCHAR(64) NOT NULL, + -- if subdomain is null set to empty string + subdomain VARCHAR(64) NOT NULL, + registered_address VARCHAR(66), + expiration_timestamp TIMESTAMP, + token_name VARCHAR(140) NOT NULL, + is_deleted BOOLEAN NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), + -- Constraints + PRIMARY KEY (transaction_version, write_set_change_index) +); +CREATE INDEX IF NOT EXISTS al_tn_index on ans_lookup (token_name); +CREATE INDEX IF NOT EXISTS al_insat_index on ans_lookup (inserted_at); \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2023-08-14-235438_add_current_delegated_voter_table/down.sql b/rust/processor/src/db/postgres/migrations/2023-08-14-235438_add_current_delegated_voter_table/down.sql new file mode 100644 index 000000000..4a84b31d5 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2023-08-14-235438_add_current_delegated_voter_table/down.sql @@ -0,0 +1,7 @@ +-- This file should undo anything in `up.sql` +DROP INDEX IF EXISTS cdv_da_index; +DROP INDEX IF EXISTS cdv_v_index; +DROP INDEX IF EXISTS cdv_th_index; +DROP INDEX IF EXISTS cdv_pv_index; +DROP INDEX IF EXISTS cdv_insat_index; +DROP TABLE IF EXISTS current_delegated_voter; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2023-08-14-235438_add_current_delegated_voter_table/up.sql b/rust/processor/src/db/postgres/migrations/2023-08-14-235438_add_current_delegated_voter_table/up.sql new file mode 100644 index 000000000..6f8bb2709 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2023-08-14-235438_add_current_delegated_voter_table/up.sql @@ -0,0 +1,18 @@ +-- Your SQL goes here +-- current delegated voters +CREATE TABLE IF NOT EXISTS current_delegated_voter ( + delegation_pool_address VARCHAR(66) NOT NULL, + delegator_address VARCHAR(66) NOT NULL, + table_handle VARCHAR(66), + voter VARCHAR(66), + pending_voter VARCHAR(66), + last_transaction_version BIGINT NOT NULL, + last_transaction_timestamp TIMESTAMP NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), + PRIMARY KEY (delegation_pool_address, delegator_address) +); +CREATE INDEX IF NOT EXISTS cdv_da_index ON current_delegated_voter (delegator_address); +CREATE INDEX IF NOT EXISTS cdv_v_index ON current_delegated_voter (voter); +CREATE INDEX IF NOT EXISTS cdv_th_index ON current_delegated_voter (table_handle); +CREATE INDEX IF NOT EXISTS cdv_pv_index ON current_delegated_voter (pending_voter); +CREATE INDEX IF NOT EXISTS cdv_insat_index ON current_delegated_voter (inserted_at); \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2023-08-22-232603_add_ans_view/down.sql b/rust/processor/src/db/postgres/migrations/2023-08-22-232603_add_ans_view/down.sql new file mode 100644 index 000000000..88a051ac3 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2023-08-22-232603_add_ans_view/down.sql @@ -0,0 +1,2 @@ +-- This file should undo anything in `up.sql` +DROP VIEW IF EXISTS current_aptos_names; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2023-08-22-232603_add_ans_view/up.sql b/rust/processor/src/db/postgres/migrations/2023-08-22-232603_add_ans_view/up.sql new file mode 100644 index 000000000..ac5b842bc --- /dev/null +++ 
b/rust/processor/src/db/postgres/migrations/2023-08-22-232603_add_ans_view/up.sql @@ -0,0 +1,12 @@ +CREATE OR REPLACE VIEW current_aptos_names AS +SELECT + current_ans_lookup.domain, + current_ans_lookup.subdomain, + current_ans_lookup.registered_address, + current_ans_lookup.expiration_timestamp, + current_ans_lookup.is_deleted, + COALESCE(NOT current_ans_primary_name.is_deleted, false) AS is_primary +FROM current_ans_lookup +LEFT JOIN current_ans_primary_name +ON current_ans_lookup.token_name = current_ans_primary_name.token_name +WHERE current_ans_lookup.expiration_timestamp > current_timestamp; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2023-08-23-192343_fix_ans_view/down.sql b/rust/processor/src/db/postgres/migrations/2023-08-23-192343_fix_ans_view/down.sql new file mode 100644 index 000000000..88a051ac3 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2023-08-23-192343_fix_ans_view/down.sql @@ -0,0 +1,2 @@ +-- This file should undo anything in `up.sql` +DROP VIEW IF EXISTS current_aptos_names; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2023-08-23-192343_fix_ans_view/up.sql b/rust/processor/src/db/postgres/migrations/2023-08-23-192343_fix_ans_view/up.sql new file mode 100644 index 000000000..fde222ef3 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2023-08-23-192343_fix_ans_view/up.sql @@ -0,0 +1,14 @@ +DROP VIEW IF EXISTS current_aptos_names; +CREATE OR REPLACE VIEW current_aptos_names AS +SELECT + current_ans_lookup.domain, + current_ans_lookup.subdomain, + current_ans_lookup.token_name, + current_ans_lookup.registered_address, + current_ans_lookup.expiration_timestamp, + GREATEST(current_ans_lookup.last_transaction_version, current_ans_primary_name.last_transaction_version) as last_transaction_version, + COALESCE(NOT current_ans_primary_name.is_deleted, false) AS is_primary +FROM current_ans_lookup +LEFT JOIN current_ans_primary_name +ON current_ans_lookup.token_name = current_ans_primary_name.token_name +WHERE current_ans_lookup.expiration_timestamp > current_timestamp and current_ans_lookup.is_deleted is false; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2023-09-01-231248_events_v2/down.sql b/rust/processor/src/db/postgres/migrations/2023-09-01-231248_events_v2/down.sql new file mode 100644 index 000000000..2ec8f5410 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2023-09-01-231248_events_v2/down.sql @@ -0,0 +1,8 @@ +-- This file should undo anything in `up.sql` +ALTER TABLE events DROP CONSTRAINT events_pkey; +ALTER TABLE events +ADD CONSTRAINT events_pkey PRIMARY KEY ( + account_address, + creation_number, + sequence_number + ); \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2023-09-01-231248_events_v2/up.sql b/rust/processor/src/db/postgres/migrations/2023-09-01-231248_events_v2/up.sql new file mode 100644 index 000000000..318ccc9ab --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2023-09-01-231248_events_v2/up.sql @@ -0,0 +1,5 @@ +-- Your SQL goes here +ALTER TABLE events DROP CONSTRAINT events_pkey; +ALTER TABLE events DROP CONSTRAINT IF EXISTS fk_transaction_versions; +ALTER TABLE events +ADD CONSTRAINT events_pkey PRIMARY KEY (transaction_version, event_index); \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2023-09-07-175640_storage_refund/down.sql b/rust/processor/src/db/postgres/migrations/2023-09-07-175640_storage_refund/down.sql 
new file mode 100644 index 000000000..744939022 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2023-09-07-175640_storage_refund/down.sql @@ -0,0 +1,3 @@ +-- This file should undo anything in `up.sql` +ALTER TABLE coin_activities DROP COLUMN IF EXISTS storage_refund_amount; +ALTER TABLE fungible_asset_activities DROP COLUMN IF EXISTS storage_refund_amount; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2023-09-07-175640_storage_refund/up.sql b/rust/processor/src/db/postgres/migrations/2023-09-07-175640_storage_refund/up.sql new file mode 100644 index 000000000..6eb15c924 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2023-09-07-175640_storage_refund/up.sql @@ -0,0 +1,5 @@ +-- Your SQL goes here +ALTER TABLE coin_activities +ADD COLUMN IF NOT EXISTS storage_refund_amount NUMERIC NOT NULL DEFAULT 0; +ALTER TABLE fungible_asset_activities +ADD COLUMN IF NOT EXISTS storage_refund_amount NUMERIC NOT NULL DEFAULT 0; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2023-09-11-164718_ut_remove_constraint/down.sql b/rust/processor/src/db/postgres/migrations/2023-09-11-164718_ut_remove_constraint/down.sql new file mode 100644 index 000000000..eef8a50d4 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2023-09-11-164718_ut_remove_constraint/down.sql @@ -0,0 +1,7 @@ +-- This file should undo anything in `up.sql` +ALTER TABLE user_transactions DROP CONSTRAINT IF EXISTS fk_versions; +ALTER TABLE signatures DROP CONSTRAINT IF EXISTS fk_transaction_versions; +ALTER TABLE user_transactions +ADD CONSTRAINT fk_versions FOREIGN KEY (version) REFERENCES transactions (version); +ALTER TABLE signatures +ADD CONSTRAINT fk_transaction_versions FOREIGN KEY (transaction_version) REFERENCES transactions (version); \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2023-09-11-164718_ut_remove_constraint/up.sql b/rust/processor/src/db/postgres/migrations/2023-09-11-164718_ut_remove_constraint/up.sql new file mode 100644 index 000000000..922996c6b --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2023-09-11-164718_ut_remove_constraint/up.sql @@ -0,0 +1,3 @@ +-- Your SQL goes here +ALTER TABLE user_transactions DROP CONSTRAINT IF EXISTS fk_versions; +ALTER TABLE signatures DROP CONSTRAINT IF EXISTS fk_transaction_versions; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2023-09-22-161603_add_ans_v2/down.sql b/rust/processor/src/db/postgres/migrations/2023-09-22-161603_add_ans_v2/down.sql new file mode 100644 index 000000000..18f7fb276 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2023-09-22-161603_add_ans_v2/down.sql @@ -0,0 +1,32 @@ +DROP VIEW IF EXISTS current_aptos_names; +CREATE OR REPLACE VIEW current_aptos_names AS +SELECT + current_ans_lookup.domain, + current_ans_lookup.subdomain, + current_ans_lookup.token_name, + current_ans_lookup.registered_address, + current_ans_lookup.expiration_timestamp, + GREATEST(current_ans_lookup.last_transaction_version, current_ans_primary_name.last_transaction_version) as last_transaction_version, + COALESCE(NOT current_ans_primary_name.is_deleted, false) AS is_primary +FROM current_ans_lookup +LEFT JOIN current_ans_primary_name +ON current_ans_lookup.token_name = current_ans_primary_name.token_name +WHERE current_ans_lookup.expiration_timestamp > current_timestamp and current_ans_lookup.is_deleted is false; + +DROP INDEX IF EXISTS ans_v2_ts_index; +DROP INDEX IF EXISTS 
ans_v2_tn_index; +DROP INDEX IF EXISTS ans_v2_et_index; +DROP INDEX IF EXISTS ans_v2_ra_index; +DROP INDEX IF EXISTS capn_v2_tn_index; +DROP INDEX IF EXISTS al_v2_ts_index; +DROP INDEX IF EXISTS al_v2_tn_index; +DROP INDEX IF EXISTS al_v2_ra_index; +DROP INDEX IF EXISTS al_v2_name_index; +DROP INDEX IF EXISTS apn_v2_ts_index; +DROP INDEX IF EXISTS apn_v2_tn_index; +DROP INDEX IF EXISTS apn_v2_name_index; +DROP INDEX IF EXISTS apn_v2_ra_index; +DROP TABLE IF EXISTS current_ans_lookup_v2; +DROP TABLE IF EXISTS current_ans_primary_name_v2; +DROP TABLE IF EXISTS ans_lookup_v2; +DROP TABLE IF EXISTS ans_primary_name_v2; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2023-09-22-161603_add_ans_v2/up.sql b/rust/processor/src/db/postgres/migrations/2023-09-22-161603_add_ans_v2/up.sql new file mode 100644 index 000000000..45ee765a5 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2023-09-22-161603_add_ans_v2/up.sql @@ -0,0 +1,118 @@ +-- Tracks ans v1 and v2 records +CREATE TABLE IF NOT EXISTS current_ans_lookup_v2 ( + domain VARCHAR(64) NOT NULL, + -- if subdomain is null set to empty string + subdomain VARCHAR(64) NOT NULL, + token_standard VARCHAR(10) NOT NULL, + token_name VARCHAR(140), + registered_address VARCHAR(66), + expiration_timestamp TIMESTAMP NOT NULL, + last_transaction_version BIGINT NOT NULL, + is_deleted BOOLEAN NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), + -- Constraints + PRIMARY KEY (domain, subdomain, token_standard) +); +CREATE INDEX IF NOT EXISTS ans_v2_tn_index on current_ans_lookup_v2 (token_name, token_standard); +CREATE INDEX IF NOT EXISTS ans_v2_et_index ON current_ans_lookup_v2 (expiration_timestamp); +CREATE INDEX IF NOT EXISTS ans_v2_ra_index ON current_ans_lookup_v2 (registered_address); +CREATE INDEX IF NOT EXISTS ans_v2_insat_index ON current_ans_lookup_v2 (inserted_at); + +-- Tracks current ans v1 and v2 primary names, +CREATE TABLE IF NOT EXISTS current_ans_primary_name_v2 ( + registered_address VARCHAR(66) NOT NULL, + token_standard VARCHAR(10) NOT NULL, + domain VARCHAR(64), + subdomain VARCHAR(64), + token_name VARCHAR(140), + -- Deleted means registered_address no longer has a primary name + is_deleted BOOLEAN NOT NULL, + last_transaction_version BIGINT NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), + -- Constraints + PRIMARY KEY (registered_address, token_standard) +); +CREATE INDEX IF NOT EXISTS capn_v2_tn_index on current_ans_primary_name_v2 (token_name, token_standard); +CREATE INDEX IF NOT EXISTS capn_v2_insat_index ON current_ans_primary_name_v2 (inserted_at); + +-- Tracks full history of the ans v1 and v2 records table +CREATE TABLE IF NOT EXISTS ans_lookup_v2 ( + transaction_version BIGINT NOT NULL, + write_set_change_index BIGINT NOT NULL, + domain VARCHAR(64) NOT NULL, + -- if subdomain is null set to empty string + subdomain VARCHAR(64) NOT NULL, + token_standard VARCHAR(10) NOT NULL, + registered_address VARCHAR(66), + expiration_timestamp TIMESTAMP, + token_name VARCHAR(140) NOT NULL, + is_deleted BOOLEAN NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), + -- Constraints + PRIMARY KEY (transaction_version, write_set_change_index) +); +CREATE INDEX IF NOT EXISTS al_v2_name_index on ans_lookup_v2 (domain, subdomain, token_standard); +CREATE INDEX IF NOT EXISTS al_v2_ra_index on ans_lookup_v2 (registered_address); +CREATE INDEX IF NOT EXISTS al_v2_insat_index on ans_lookup_v2 (inserted_at); + +-- Tracks full history of ans v1 and v2 primary names +CREATE TABLE IF NOT 
EXISTS ans_primary_name_v2 ( + transaction_version BIGINT NOT NULL, + write_set_change_index BIGINT NOT NULL, + registered_address VARCHAR(66) NOT NULL, + domain VARCHAR(64), + subdomain VARCHAR(64), + token_standard VARCHAR(10) NOT NULL, + token_name VARCHAR(140), + is_deleted BOOLEAN NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), + -- Constraints + PRIMARY KEY ( + transaction_version, + write_set_change_index + ) +); +CREATE INDEX IF NOT EXISTS apn_v2_name_index on ans_primary_name_v2 (domain, subdomain, token_standard); +CREATE INDEX IF NOT EXISTS apn_v2_ra_index on ans_primary_name_v2 (registered_address); +CREATE INDEX IF NOT EXISTS apn_v2_insat_index on ans_primary_name_v2 (inserted_at); + +DROP VIEW IF EXISTS current_aptos_names; +CREATE OR REPLACE VIEW current_aptos_names AS +SELECT + cal.domain, + cal.subdomain, + cal.token_name, + cal.token_standard, + cal.registered_address, + cal.expiration_timestamp, + greatest(cal.last_transaction_version, + capn.last_transaction_version) as last_transaction_version, + coalesce(not capn.is_deleted, + false) as is_primary, + concat(cal.domain, '.apt') as domain_with_suffix, + c.owner_address as owner_address, + cal.expiration_timestamp >= CURRENT_TIMESTAMP as is_active +FROM current_ans_lookup_v2 cal +LEFT JOIN current_ans_primary_name_v2 capn +ON + cal.token_name = capn.token_name + AND cal.token_standard = capn.token_standard +JOIN current_token_datas_v2 b +ON + cal.token_name = b.token_name + AND cal.token_standard = b.token_standard +JOIN current_token_ownerships_v2 c +ON + b.token_data_id = c.token_data_id + AND b.token_standard = c.token_standard +WHERE + cal.is_deleted IS false + AND c.amount > 0 + AND b.collection_id IN ( + '0x1c380887f0cfcc8a82c0df44b24116985a92c58e686a0ea4a441c9f423a72b47', -- Testnet ANS v1 domain collection + '0x56654f4bf4e528bfef33094d11a3475f0638e949b0976ec831ca0d66a2efb673', -- Testnet ANS v2 domain collection + '0x3a2c902067bb4f0e37a2a89675d5cbceb07cf1a27479229b269fb1afffa62230', -- Testnet ANS v2 subdomain collection + '0x09e63a48047b1c2bc51c0abc4b67ffcd9922e0adc99a6cc36532662172976a4b', -- Mainnet ANS v1 domain collection + '0x63d26a4e3a8aeececf9b878e46bad78997fb38e50936efeabb2c4453f4d7f746', -- Mainnet ANS v2 domain collection + '0x63d26a4e3a8aeececf9b878e46bad78997fb38e50936efeabb2c4453f4d7f746' -- Mainnet ANS v2 subdomain collection + ) \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2023-09-28-210956_nft_metadata/down.sql b/rust/processor/src/db/postgres/migrations/2023-09-28-210956_nft_metadata/down.sql new file mode 100644 index 000000000..6055dda8e --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2023-09-28-210956_nft_metadata/down.sql @@ -0,0 +1,4 @@ +-- This file should undo anything in `up.sql` +DROP TABLE IF EXISTS nft_metadata_crawler.parsed_asset_uris; +DROP TABLE IF EXISTS nft_metadata_crawler.ledger_infos; +DROP SCHEMA IF EXISTS nft_metadata_crawler CASCADE; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2023-09-28-210956_nft_metadata/up.sql b/rust/processor/src/db/postgres/migrations/2023-09-28-210956_nft_metadata/up.sql new file mode 100644 index 000000000..87c30e1c8 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2023-09-28-210956_nft_metadata/up.sql @@ -0,0 +1,19 @@ +-- Your SQL goes here +-- This already exists as part of the NFT metadata crawler. 
Adding these here for compatibility in case we introduce them in the API +CREATE SCHEMA IF NOT EXISTS nft_metadata_crawler; +CREATE TABLE IF NOT EXISTS nft_metadata_crawler.parsed_asset_uris ( + asset_uri VARCHAR UNIQUE PRIMARY KEY NOT NULL, + raw_image_uri VARCHAR, + raw_animation_uri VARCHAR, + cdn_json_uri VARCHAR, + cdn_image_uri VARCHAR, + cdn_animation_uri VARCHAR, + json_parser_retry_count INT NOT NULL, + image_optimizer_retry_count INT NOT NULL, + animation_optimizer_retry_count INT NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW() +); +CREATE TABLE IF NOT EXISTS nft_metadata_crawler.ledger_infos (chain_id BIGINT UNIQUE PRIMARY KEY NOT NULL); +CREATE INDEX IF NOT EXISTS nft_raw_image_uri ON nft_metadata_crawler.parsed_asset_uris (raw_image_uri); +CREATE INDEX IF NOT EXISTS nft_raw_animation_uri ON nft_metadata_crawler.parsed_asset_uris (raw_animation_uri); +CREATE INDEX IF NOT EXISTS nft_inserted_at ON nft_metadata_crawler.parsed_asset_uris (inserted_at); \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2023-10-16-002253_alter_the_pubkey_column_length/down.sql b/rust/processor/src/db/postgres/migrations/2023-10-16-002253_alter_the_pubkey_column_length/down.sql new file mode 100644 index 000000000..79b6d9ca3 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2023-10-16-002253_alter_the_pubkey_column_length/down.sql @@ -0,0 +1,2 @@ +-- This file should undo anything in `up.sql` +ALTER TABLE signatures ALTER COLUMN public_key TYPE VARCHAR(66); \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2023-10-16-002253_alter_the_pubkey_column_length/up.sql b/rust/processor/src/db/postgres/migrations/2023-10-16-002253_alter_the_pubkey_column_length/up.sql new file mode 100644 index 000000000..c99f8a369 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2023-10-16-002253_alter_the_pubkey_column_length/up.sql @@ -0,0 +1,4 @@ +-- Your SQL goes here +-- Secp256k1 public key, example: +-- `0x40d0d634e843b61339473b028105930ace022980708b2855954b977da09df84a770c0b68c29c8ca1b5409a5085b0ec263be80e433c83fcf6debb82f3447e71edca` +ALTER TABLE signatures ALTER COLUMN public_key TYPE VARCHAR(136); \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2023-10-27-030502_event_type/down.sql b/rust/processor/src/db/postgres/migrations/2023-10-27-030502_event_type/down.sql new file mode 100644 index 000000000..c957080e6 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2023-10-27-030502_event_type/down.sql @@ -0,0 +1,4 @@ +-- This file should undo anything in `up.sql` +DROP INDEX IF EXISTS ev_itype_index; +ALTER TABLE events DROP COLUMN IF EXISTS indexed_type; +DROP TABLE IF EXISTS spam_assets; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2023-10-27-030502_event_type/up.sql b/rust/processor/src/db/postgres/migrations/2023-10-27-030502_event_type/up.sql new file mode 100644 index 000000000..a442c5dd8 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2023-10-27-030502_event_type/up.sql @@ -0,0 +1,9 @@ +-- Your SQL goes here +-- p99 currently is 303 so using 300 as a safe max length +ALTER TABLE events ADD COLUMN IF NOT EXISTS indexed_type VARCHAR(300) NOT NULL DEFAULT ''; +CREATE INDEX IF NOT EXISTS ev_itype_index ON events (indexed_type); +CREATE TABLE IF NOT EXISTS spam_assets ( + asset VARCHAR(1100) PRIMARY KEY NOT NULL, + is_spam BOOLEAN NOT NULL DEFAULT TRUE, + last_updated TIMESTAMP NOT NULL DEFAULT NOW() +); \ No newline at 
end of file diff --git a/rust/processor/src/db/postgres/migrations/2023-11-09-234724_delegator_balances/down.sql b/rust/processor/src/db/postgres/migrations/2023-11-09-234724_delegator_balances/down.sql new file mode 100644 index 000000000..a88ba3d01 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2023-11-09-234724_delegator_balances/down.sql @@ -0,0 +1,5 @@ +-- This file should undo anything in `up.sql` +-- estimates how much delegator has staked in a pool (currently supports active only) +DROP INDEX IF EXISTS db_da_index; +DROP INDEX IF EXISTS db_insat_index; +DROP TABLE IF EXISTS delegator_balances; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2023-11-09-234724_delegator_balances/up.sql b/rust/processor/src/db/postgres/migrations/2023-11-09-234724_delegator_balances/up.sql new file mode 100644 index 000000000..ad10e6dbc --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2023-11-09-234724_delegator_balances/up.sql @@ -0,0 +1,20 @@ +-- Your SQL goes here +-- oops made a mistake here before +DROP INDEX IF EXISTS cdb_insat_index; +CREATE INDEX cdb_insat_index ON current_delegator_balances (inserted_at); +-- estimates how much delegator has staked in a pool (currently supports active only) +CREATE TABLE IF NOT EXISTS delegator_balances ( + transaction_version BIGINT NOT NULL, + write_set_change_index BIGINT NOT NULL, + delegator_address VARCHAR(66) NOT NULL, + pool_address VARCHAR(66) NOT NULL, + pool_type VARCHAR(100) NOT NULL, + table_handle VARCHAR(66) NOT NULL, + shares NUMERIC NOT NULL, + parent_table_handle VARCHAR(66) NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), + -- Constraints + PRIMARY KEY (transaction_version, write_set_change_index) +); +CREATE INDEX IF NOT EXISTS db_da_index ON delegator_balances (delegator_address); +CREATE INDEX IF NOT EXISTS db_insat_index ON delegator_balances (inserted_at); \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2023-12-15-221028_payload_type/down.sql b/rust/processor/src/db/postgres/migrations/2023-12-15-221028_payload_type/down.sql new file mode 100644 index 000000000..0e715da71 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2023-12-15-221028_payload_type/down.sql @@ -0,0 +1,2 @@ +-- This file should undo anything in `up.sql` +ALTER TABLE transactions DROP COLUMN IF EXISTS payload_type; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2023-12-15-221028_payload_type/up.sql b/rust/processor/src/db/postgres/migrations/2023-12-15-221028_payload_type/up.sql new file mode 100644 index 000000000..f54adc8ea --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2023-12-15-221028_payload_type/up.sql @@ -0,0 +1,3 @@ +-- Your SQL goes here +ALTER TABLE transactions +ADD COLUMN IF NOT EXISTS payload_type VARCHAR(50); \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2023-12-16-233224_add_objects_model/down.sql b/rust/processor/src/db/postgres/migrations/2023-12-16-233224_add_objects_model/down.sql new file mode 100644 index 000000000..d70123ae4 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2023-12-16-233224_add_objects_model/down.sql @@ -0,0 +1,5 @@ +-- This file should undo anything in `up.sql` +ALTER TABLE objects DROP COLUMN IF EXISTS is_token; +ALTER TABLE objects DROP COLUMN IF EXISTS is_fungible_asset; +ALTER TABLE current_objects DROP COLUMN IF EXISTS is_token; +ALTER TABLE current_objects DROP COLUMN IF EXISTS is_fungible_asset; \ No 
newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2023-12-16-233224_add_objects_model/up.sql b/rust/processor/src/db/postgres/migrations/2023-12-16-233224_add_objects_model/up.sql new file mode 100644 index 000000000..65fd8fb28 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2023-12-16-233224_add_objects_model/up.sql @@ -0,0 +1,14 @@ +-- Your SQL goes here +ALTER TABLE objects +ADD COLUMN IF NOT EXISTS is_token BOOLEAN; +ALTER TABLE objects +ADD COLUMN IF NOT EXISTS is_fungible_asset BOOLEAN; +ALTER TABLE current_objects +ADD COLUMN IF NOT EXISTS is_token BOOLEAN; +ALTER TABLE current_objects +ADD COLUMN IF NOT EXISTS is_fungible_asset BOOLEAN; +ALTER TABLE block_metadata_transactions DROP CONSTRAINT IF EXISTS fk_versions; +ALTER TABLE move_modules DROP CONSTRAINT IF EXISTS fk_transaction_versions; +ALTER TABLE move_resources DROP CONSTRAINT IF EXISTS fk_transaction_versions; +ALTER TABLE table_items DROP CONSTRAINT IF EXISTS fk_transaction_versions; +ALTER TABLE write_set_changes DROP CONSTRAINT IF EXISTS fk_transaction_versions; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2024-01-11-224315_update_process_status/down.sql b/rust/processor/src/db/postgres/migrations/2024-01-11-224315_update_process_status/down.sql new file mode 100644 index 000000000..7d28c69a7 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2024-01-11-224315_update_process_status/down.sql @@ -0,0 +1,2 @@ +-- This file should undo anything in `up.sql` +ALTER TABLE processor_status DROP COLUMN last_transaction_timestamp; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2024-01-11-224315_update_process_status/up.sql b/rust/processor/src/db/postgres/migrations/2024-01-11-224315_update_process_status/up.sql new file mode 100644 index 000000000..3cadf67bc --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2024-01-11-224315_update_process_status/up.sql @@ -0,0 +1,3 @@ +-- Your SQL goes here +ALTER TABLE processor_status +ADD COLUMN last_transaction_timestamp TIMESTAMP; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2024-02-16-234847_any_signature/down.sql b/rust/processor/src/db/postgres/migrations/2024-02-16-234847_any_signature/down.sql new file mode 100644 index 000000000..2d335ce26 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2024-02-16-234847_any_signature/down.sql @@ -0,0 +1,3 @@ +-- This file should undo anything in `up.sql` +ALTER TABLE signatures +ALTER COLUMN signature TYPE VARCHAR(200); \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2024-02-16-234847_any_signature/up.sql b/rust/processor/src/db/postgres/migrations/2024-02-16-234847_any_signature/up.sql new file mode 100644 index 000000000..10592d04e --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2024-02-16-234847_any_signature/up.sql @@ -0,0 +1,3 @@ +-- Your SQL goes here +ALTER TABLE signatures +ALTER COLUMN signature TYPE text; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2024-02-29-210322_transaction_metadata/down.sql b/rust/processor/src/db/postgres/migrations/2024-02-29-210322_transaction_metadata/down.sql new file mode 100644 index 000000000..0cff4649c --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2024-02-29-210322_transaction_metadata/down.sql @@ -0,0 +1,7 @@ +-- This file should undo anything in `up.sql` +DROP INDEX IF EXISTS tsi_insat_index; +DROP INDEX IF EXISTS 
esi_insat_index; +DROP INDEX IF EXISTS wsi_insat_index; +DROP TABLE IF EXISTS transaction_size_info; +DROP TABLE IF EXISTS event_size_info; +DROP TABLE IF EXISTS write_set_size_info; diff --git a/rust/processor/src/db/postgres/migrations/2024-02-29-210322_transaction_metadata/up.sql b/rust/processor/src/db/postgres/migrations/2024-02-29-210322_transaction_metadata/up.sql new file mode 100644 index 000000000..10c3bf5c6 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2024-02-29-210322_transaction_metadata/up.sql @@ -0,0 +1,25 @@ +-- Your SQL goes here +CREATE TABLE IF NOT EXISTS transaction_size_info ( + transaction_version BIGINT UNIQUE PRIMARY KEY NOT NULL, + size_bytes BIGINT NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW() +); +CREATE TABLE IF NOT EXISTS event_size_info ( + transaction_version BIGINT NOT NULL, + index BIGINT NOT NULL, + type_tag_bytes BIGINT NOT NULL, + total_bytes BIGINT NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), + PRIMARY KEY (transaction_version, index) +); +CREATE TABLE IF NOT EXISTS write_set_size_info ( + transaction_version BIGINT NOT NULL, + index BIGINT NOT NULL, + key_bytes BIGINT NOT NULL, + value_bytes BIGINT NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), + PRIMARY KEY (transaction_version, index) +); +CREATE INDEX IF NOT EXISTS tsi_insat_index ON transaction_size_info (inserted_at); +CREATE INDEX IF NOT EXISTS esi_insat_index ON event_size_info (inserted_at); +CREATE INDEX IF NOT EXISTS wsi_insat_index ON write_set_size_info (inserted_at); \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2024-03-07-224504_fungible_asset_metadata_is_token_v2/down.sql b/rust/processor/src/db/postgres/migrations/2024-03-07-224504_fungible_asset_metadata_is_token_v2/down.sql new file mode 100644 index 000000000..c92803930 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2024-03-07-224504_fungible_asset_metadata_is_token_v2/down.sql @@ -0,0 +1,9 @@ +-- This file should undo anything in `up.sql` +ALTER TABLE fungible_asset_metadata +DROP COLUMN IF EXISTS is_token_v2; +ALTER TABLE objects +ADD COLUMN IF NOT EXISTS is_token BOOLEAN, +ADD COLUMN IF NOT EXISTS is_fungible_asset BOOLEAN; +ALTER TABLE current_objects +ADD COLUMN IF NOT EXISTS is_token BOOLEAN, +ADD COLUMN IF NOT EXISTS is_fungible_asset BOOLEAN; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2024-03-07-224504_fungible_asset_metadata_is_token_v2/up.sql b/rust/processor/src/db/postgres/migrations/2024-03-07-224504_fungible_asset_metadata_is_token_v2/up.sql new file mode 100644 index 000000000..9899f4f3d --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2024-03-07-224504_fungible_asset_metadata_is_token_v2/up.sql @@ -0,0 +1,9 @@ +-- Your SQL goes here +ALTER TABLE fungible_asset_metadata +ADD COLUMN IF NOT EXISTS is_token_v2 BOOLEAN; +ALTER TABLE objects +DROP COLUMN IF EXISTS is_fungible_asset, +DROP COLUMN IF EXISTS is_token; +ALTER TABLE current_objects +DROP COLUMN IF EXISTS is_fungible_asset, +DROP COLUMN IF EXISTS is_token; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2024-04-09-204519_ans_expiration_policy/down.sql b/rust/processor/src/db/postgres/migrations/2024-04-09-204519_ans_expiration_policy/down.sql new file mode 100644 index 000000000..29d6c0618 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2024-04-09-204519_ans_expiration_policy/down.sql @@ -0,0 +1,40 @@ +-- This file should undo anything in `up.sql` +DROP VIEW IF 
EXISTS current_aptos_names; +CREATE OR REPLACE VIEW current_aptos_names AS +SELECT cal.domain, + cal.subdomain, + cal.token_name, + cal.token_standard, + cal.registered_address, + cal.expiration_timestamp, + greatest( + cal.last_transaction_version, + capn.last_transaction_version + ) as last_transaction_version, + coalesce(not capn.is_deleted, false) as is_primary, + concat(cal.domain, '.apt') as domain_with_suffix, + c.owner_address as owner_address, + cal.expiration_timestamp >= CURRENT_TIMESTAMP as is_active +FROM current_ans_lookup_v2 cal + LEFT JOIN current_ans_primary_name_v2 capn ON cal.token_name = capn.token_name + AND cal.token_standard = capn.token_standard + JOIN current_token_datas_v2 b ON cal.token_name = b.token_name + AND cal.token_standard = b.token_standard + JOIN current_token_ownerships_v2 c ON b.token_data_id = c.token_data_id + AND b.token_standard = c.token_standard +WHERE cal.is_deleted IS false + AND c.amount > 0 + AND b.collection_id IN ( + '0x1c380887f0cfcc8a82c0df44b24116985a92c58e686a0ea4a441c9f423a72b47', + -- Testnet ANS v1 domain collection + '0x56654f4bf4e528bfef33094d11a3475f0638e949b0976ec831ca0d66a2efb673', + -- Testnet ANS v2 domain collection + '0x3a2c902067bb4f0e37a2a89675d5cbceb07cf1a27479229b269fb1afffa62230', + -- Testnet ANS v2 subdomain collection + '0x09e63a48047b1c2bc51c0abc4b67ffcd9922e0adc99a6cc36532662172976a4b', + -- Mainnet ANS v1 domain collection + '0x63d26a4e3a8aeececf9b878e46bad78997fb38e50936efeabb2c4453f4d7f746', + -- Mainnet ANS v2 domain collection + '0x63d26a4e3a8aeececf9b878e46bad78997fb38e50936efeabb2c4453f4d7f746' -- Mainnet ANS v2 subdomain collection + ); +ALTER TABLE current_ans_lookup_v2 DROP COLUMN IF EXISTS subdomain_expiration_policy; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2024-04-09-204519_ans_expiration_policy/up.sql b/rust/processor/src/db/postgres/migrations/2024-04-09-204519_ans_expiration_policy/up.sql new file mode 100644 index 000000000..7933cc972 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2024-04-09-204519_ans_expiration_policy/up.sql @@ -0,0 +1,47 @@ +ALTER TABLE current_ans_lookup_v2 +ADD COLUMN IF NOT EXISTS subdomain_expiration_policy BIGINT; +ALTER TABLE ans_lookup_v2 +ADD COLUMN IF NOT EXISTS subdomain_expiration_policy BIGINT; +CREATE OR REPLACE VIEW current_aptos_names AS +SELECT cal.domain, + cal.subdomain, + cal.token_name, + cal.token_standard, + cal.registered_address, + cal.expiration_timestamp, + greatest( + cal.last_transaction_version, + capn.last_transaction_version + ) as last_transaction_version, + coalesce(not capn.is_deleted, false) as is_primary, + concat(cal.domain, '.apt') as domain_with_suffix, + c.owner_address as owner_address, + cal.expiration_timestamp >= CURRENT_TIMESTAMP as is_active, + cal2.expiration_timestamp as domain_expiration_timestamp, + b.token_data_id as token_data_id, + cal.subdomain_expiration_policy as subdomain_expiration_policy +FROM current_ans_lookup_v2 cal + LEFT JOIN current_ans_primary_name_v2 capn ON cal.token_name = capn.token_name + AND cal.token_standard = capn.token_standard + JOIN current_token_datas_v2 b ON cal.token_name = b.token_name + AND cal.token_standard = b.token_standard + JOIN current_token_ownerships_v2 c ON b.token_data_id = c.token_data_id + AND b.token_standard = c.token_standard + LEFT JOIN current_ans_lookup_v2 cal2 ON cal.domain = cal2.domain + AND cal2.subdomain = '' + AND cal.token_standard = cal2.token_standard +WHERE cal.is_deleted IS false + AND c.amount > 0 + AND 
b.collection_id IN ( + '0x1c380887f0cfcc8a82c0df44b24116985a92c58e686a0ea4a441c9f423a72b47', + -- Testnet ANS v1 domain collection + '0x56654f4bf4e528bfef33094d11a3475f0638e949b0976ec831ca0d66a2efb673', + -- Testnet ANS v2 domain collection + '0x3a2c902067bb4f0e37a2a89675d5cbceb07cf1a27479229b269fb1afffa62230', + -- Testnet ANS v2 subdomain collection + '0x09e63a48047b1c2bc51c0abc4b67ffcd9922e0adc99a6cc36532662172976a4b', + -- Mainnet ANS v1 domain collection + '0x63d26a4e3a8aeececf9b878e46bad78997fb38e50936efeabb2c4453f4d7f746', + -- Mainnet ANS v2 domain collection + '0x30fbc956f0f38db2d314bd9c018d34be3e047a804a71e30a4e5d43d8b7c539eb' -- Mainnet ANS v2 subdomain collection + ); \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2024-04-18-173631_fungible_token/down.sql b/rust/processor/src/db/postgres/migrations/2024-04-18-173631_fungible_token/down.sql new file mode 100644 index 000000000..a97ee07d9 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2024-04-18-173631_fungible_token/down.sql @@ -0,0 +1,4 @@ +-- This file should undo anything in `up.sql` +ALTER TABLE fungible_asset_metadata +DROP COLUMN supply_v2, +DROP COLUMN maximum_v2; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2024-04-18-173631_fungible_token/up.sql b/rust/processor/src/db/postgres/migrations/2024-04-18-173631_fungible_token/up.sql new file mode 100644 index 000000000..186d5fa15 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2024-04-18-173631_fungible_token/up.sql @@ -0,0 +1,12 @@ +-- Your SQL goes here +ALTER TABLE fungible_asset_metadata +ADD COLUMN supply_v2 NUMERIC, +ADD COLUMN maximum_v2 NUMERIC; + +ALTER TABLE current_token_datas_v2 +ALTER COLUMN supply DROP NOT NULL, +ALTER COLUMN decimals DROP NOT NULL; + +ALTER TABLE token_datas_v2 +ALTER COLUMN supply DROP NOT NULL, +ALTER COLUMN decimals DROP NOT NULL; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2024-04-29-215042_token_datas_burn/down.sql b/rust/processor/src/db/postgres/migrations/2024-04-29-215042_token_datas_burn/down.sql new file mode 100644 index 000000000..cd15c8946 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2024-04-29-215042_token_datas_burn/down.sql @@ -0,0 +1,2 @@ +-- This file should undo anything in `up.sql` +ALTER TABLE IF EXISTS current_token_datas_v2 DROP COLUMN is_deleted_v2; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2024-04-29-215042_token_datas_burn/up.sql b/rust/processor/src/db/postgres/migrations/2024-04-29-215042_token_datas_burn/up.sql new file mode 100644 index 000000000..f7fac2f87 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2024-04-29-215042_token_datas_burn/up.sql @@ -0,0 +1,3 @@ +-- Your SQL goes here +ALTER TABLE current_token_datas_v2 +ADD COLUMN IF NOT EXISTS is_deleted_v2 BOOLEAN; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2024-05-04-025823_current_unified_fungible_asset_balance/down.sql b/rust/processor/src/db/postgres/migrations/2024-05-04-025823_current_unified_fungible_asset_balance/down.sql new file mode 100644 index 000000000..425c1d6da --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2024-05-04-025823_current_unified_fungible_asset_balance/down.sql @@ -0,0 +1,4 @@ +-- This file should undo anything in `up.sql` +DROP TABLE IF EXISTS current_unified_fungible_asset_balances; +DROP INDEX IF EXISTS cufab_owner_at_index; +DROP INDEX IF EXISTS cufab_insat_index; 
diff --git a/rust/processor/src/db/postgres/migrations/2024-05-04-025823_current_unified_fungible_asset_balance/up.sql b/rust/processor/src/db/postgres/migrations/2024-05-04-025823_current_unified_fungible_asset_balance/up.sql new file mode 100644 index 000000000..073cf8014 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2024-05-04-025823_current_unified_fungible_asset_balance/up.sql @@ -0,0 +1,21 @@ +-- current fungible asset balances +CREATE TABLE IF NOT EXISTS current_unified_fungible_asset_balances ( + storage_id VARCHAR(66) PRIMARY KEY NOT NULL, + owner_address VARCHAR(66) NOT NULL, + asset_type VARCHAR(66) NOT NULL, + coin_type VARCHAR(1000), + is_primary BOOLEAN, + is_frozen BOOLEAN NOT NULL, + amount_v1 NUMERIC, + amount_v2 NUMERIC, + amount NUMERIC GENERATED ALWAYS AS (COALESCE(amount_v1, 0) + COALESCE(amount_v2, 0)) STORED, + last_transaction_version_v1 BIGINT, + last_transaction_version_v2 BIGINT, + last_transaction_version BIGINT GENERATED ALWAYS AS (GREATEST(last_transaction_version_v1, last_transaction_version_v2)) STORED, + last_transaction_timestamp_v1 TIMESTAMP, + last_transaction_timestamp_v2 TIMESTAMP, + last_transaction_timestamp TIMESTAMP GENERATED ALWAYS AS (GREATEST(last_transaction_timestamp_v1, last_transaction_timestamp_v2)) STORED, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW() +); +CREATE INDEX IF NOT EXISTS cufab_owner_at_index ON current_unified_fungible_asset_balances (owner_address, asset_type); +CREATE INDEX IF NOT EXISTS cufab_insat_index ON current_unified_fungible_asset_balances (inserted_at); diff --git a/rust/processor/src/db/postgres/migrations/2024-05-17-215042_token_datas_burn_2/down.sql b/rust/processor/src/db/postgres/migrations/2024-05-17-215042_token_datas_burn_2/down.sql new file mode 100644 index 000000000..6ec036407 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2024-05-17-215042_token_datas_burn_2/down.sql @@ -0,0 +1,2 @@ +-- This file should undo anything in `up.sql` +ALTER TABLE IF EXISTS token_datas_v2 DROP COLUMN is_deleted_v2; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2024-05-17-215042_token_datas_burn_2/up.sql b/rust/processor/src/db/postgres/migrations/2024-05-17-215042_token_datas_burn_2/up.sql new file mode 100644 index 000000000..266d4375c --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2024-05-17-215042_token_datas_burn_2/up.sql @@ -0,0 +1,3 @@ +-- Your SQL goes here +ALTER TABLE token_datas_v2 +ADD COLUMN IF NOT EXISTS is_deleted_v2 BOOLEAN; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2024-05-21-221101_add_royalty_v1/down.sql b/rust/processor/src/db/postgres/migrations/2024-05-21-221101_add_royalty_v1/down.sql new file mode 100644 index 000000000..dadb0f6d6 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2024-05-21-221101_add_royalty_v1/down.sql @@ -0,0 +1,2 @@ +-- This file should undo anything in `up.sql` +DROP TABLE IF EXISTS current_token_royalty_v1; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2024-05-21-221101_add_royalty_v1/up.sql b/rust/processor/src/db/postgres/migrations/2024-05-21-221101_add_royalty_v1/up.sql new file mode 100644 index 000000000..d7b57adc9 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2024-05-21-221101_add_royalty_v1/up.sql @@ -0,0 +1,11 @@ +-- Your SQL goes here +-- This'll only work with royalty v1 because royalty_v2 requires collection id +CREATE TABLE IF NOT EXISTS current_token_royalty_v1 ( + token_data_id 
VARCHAR(66) UNIQUE PRIMARY KEY NOT NULL, + payee_address VARCHAR(66) NOT NULL, + royalty_points_numerator NUMERIC NOT NULL, + royalty_points_denominator NUMERIC NOT NULL, + last_transaction_version BIGINT NOT NULL, + last_transaction_timestamp TIMESTAMP NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW() +); \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2024-05-22-200847_add_v1_migration_views/down.sql b/rust/processor/src/db/postgres/migrations/2024-05-22-200847_add_v1_migration_views/down.sql new file mode 100644 index 000000000..af3ba489d --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2024-05-22-200847_add_v1_migration_views/down.sql @@ -0,0 +1,30 @@ +-- This file should undo anything in `up.sql` +DROP VIEW IF EXISTS legacy_migration_v1.move_resources; +DROP VIEW IF EXISTS legacy_migration_v1.address_version_from_move_resources; +DROP VIEW IF EXISTS legacy_migration_v1.coin_activities; +DROP VIEW IF EXISTS legacy_migration_v1.coin_balances; +DROP VIEW IF EXISTS legacy_migration_v1.coin_infos; +DROP VIEW IF EXISTS legacy_migration_v1.current_coin_balances; +DROP VIEW IF EXISTS legacy_migration_v1.token_activities; +DROP VIEW IF EXISTS legacy_migration_v1.token_ownerships; +DROP VIEW IF EXISTS legacy_migration_v1.current_token_ownerships; +DROP VIEW IF EXISTS legacy_migration_v1.tokens; +DROP VIEW IF EXISTS legacy_migration_v1.token_datas; +DROP VIEW IF EXISTS legacy_migration_v1.current_token_datas; +DROP VIEW IF EXISTS legacy_migration_v1.collection_datas; +DROP VIEW IF EXISTS legacy_migration_v1.current_ans_primary_name; +DROP VIEW IF EXISTS legacy_migration_v1.current_ans_lookup; +DROP INDEX IF EXISTS lm1_cv_ci_tv_index; +DROP INDEX IF EXISTS lm1_ta_tdih_pv_index; +DROP INDEX IF EXISTS lm1_cb_tv_oa_ct_index; +DROP INDEX IF EXISTS lm1_curr_to_oa_tt_ltv_index; +DROP INDEX IF EXISTS lm1_ccb_ct_a_index; +DROP INDEX IF EXISTS lm1_tdv_tdi_tv_index; +DROP INDEX IF EXISTS lm1_curr_to_oa_tt_am_ltv_index; +DROP INDEX IF EXISTS lm1_ca_ct_a_index; +DROP INDEX IF EXISTS lm1_ca_ct_at_a_index; +DROP INDEX IF EXISTS lm1_ca_oa_ct_at_index; +DROP INDEX IF EXISTS lm1_ca_oa_igf_index; +DROP INDEX IF EXISTS lm1_ans_d_s_et_index; +DROP INDEX IF EXISTS lm1_ans_ra_et_index; +DROP SCHEMA IF EXISTS legacy_migration_v1; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2024-05-22-200847_add_v1_migration_views/up.sql b/rust/processor/src/db/postgres/migrations/2024-05-22-200847_add_v1_migration_views/up.sql new file mode 100644 index 000000000..4c8864c1c --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2024-05-22-200847_add_v1_migration_views/up.sql @@ -0,0 +1,285 @@ +-- Your SQL goes here +-- Create the schema +CREATE SCHEMA IF NOT EXISTS legacy_migration_v1; +-- Replace `move_resources` with account transactions +CREATE OR REPLACE VIEW legacy_migration_v1.move_resources AS +SELECT transaction_version, + account_address as address +FROM account_transactions at2; +CREATE OR REPLACE VIEW legacy_migration_v1.address_version_from_move_resources AS +SELECT transaction_version, + account_address as address +FROM account_transactions at2; +-- replace `coin_activities` with `fungible_asset_activities` +CREATE OR REPLACE VIEW legacy_migration_v1.coin_activities AS +SELECT transaction_version, + owner_address as event_account_address, + -- these two below are mildly concerning + 0 as event_creation_number, + 0 as event_sequence_number, + owner_address, + asset_type AS coin_type, + amount, + "type" AS activity_type, + 
is_gas_fee, + is_transaction_success, + entry_function_id_str, + block_height, + transaction_timestamp, + inserted_at, + event_index, + gas_fee_payer_address, + storage_refund_amount +FROM public.fungible_asset_activities +WHERE token_standard = 'v1'; +-- replace `coin_balances` with `fungible_asset_balances` +CREATE OR REPLACE VIEW legacy_migration_v1.coin_balances AS +SELECT transaction_version, + owner_address, + -- this is mainly for hashing the coin type for primary key + encode(sha256(asset_type::bytea), 'hex') as coin_type_hash, + asset_type as coin_type, + amount, + transaction_timestamp, + inserted_at +FROM public.fungible_asset_balances +WHERE token_standard = 'v1'; +-- replace `coin_infos` with `fungible_asset_metadata` +CREATE OR REPLACE VIEW legacy_migration_v1.coin_infos AS +SELECT encode(sha256(asset_type::bytea), 'hex') as coin_type_hash, + asset_type as coin_type, + last_transaction_version as transaction_version_created, + creator_address, + name, + symbol, + decimals, + last_transaction_timestamp as transaction_created_timestamp, + inserted_at, + supply_aggregator_table_handle_v1 as supply_aggregator_table_handle, + supply_aggregator_table_key_v1 as supply_aggregator_table_key +FROM public.fungible_asset_metadata +WHERE token_standard = 'v1'; +-- replace `current_coin_balances` with `current_fungible_asset_balances` +CREATE OR REPLACE VIEW legacy_migration_v1.current_coin_balances AS +SELECT owner_address, + encode(sha256(asset_type::bytea), 'hex') as coin_type_hash, + asset_type as coin_type, + amount, + last_transaction_version, + last_transaction_timestamp, + inserted_at +FROM public.current_fungible_asset_balances +WHERE token_standard = 'v1'; +-- replace `token_activities` with `token_activities_v2` +-- token_activities_v2.token_data_id is 0x prefixed, but token_activities.token_data_id is not. 
We need to create an index on the substring +CREATE OR REPLACE VIEW legacy_migration_v1.token_activities AS +SELECT tav.transaction_version, + event_account_address, + -- These were only used for hashing pk in v1 table + 0 as event_creation_number, + 0 as event_sequence_number, + tdv.collection_id as collection_data_id_hash, + ltrim(tav.token_data_id, '0x') as token_data_id_hash, + property_version_v1 AS property_version, + cv.creator_address, + cv.collection_name, + tdv.token_name AS "name", + "type" AS transfer_type, + from_address, + to_address, + token_amount, + -- These are not columns in v2 + NULL AS coin_type, + NULL AS coin_amount, + tav.inserted_at, + tav.transaction_timestamp, + event_index +FROM public.token_activities_v2 tav + JOIN token_datas_v2 tdv ON tav.token_data_id = tdv.token_data_id + AND tav.transaction_version = tdv.transaction_version + JOIN collections_v2 cv ON tdv.collection_id = cv.collection_id + AND tdv.transaction_version = cv.transaction_version +WHERE tav.token_standard = 'v1'; +-- replace `token_ownerships` with `token_ownerships_v2` +CREATE OR REPLACE VIEW legacy_migration_v1.token_ownerships AS +SELECT tov.token_data_id AS token_data_id_hash, + property_version_v1 AS property_version, + tov.transaction_version, + -- this is a bit concerning + '' AS table_handle, + creator_address, + collection_name, + tdv.token_name AS name, + owner_address, + amount, + table_type_v1 AS table_type, + tov.inserted_at, + tdv.collection_id AS collection_data_id_hash, + tov.transaction_timestamp +FROM public.token_ownerships_v2 tov + JOIN public.token_datas_v2 tdv ON tov.token_data_id = tdv.token_data_id + AND tov.transaction_version = tdv.transaction_version + JOIN public.collections_v2 cv ON tdv.collection_id = cv.collection_id + AND tdv.transaction_version = cv.transaction_version +WHERE tov.token_standard = 'v1'; +-- replace `current_token_ownerships` with `current_token_ownerships_v2` +CREATE OR REPLACE VIEW legacy_migration_v1.current_token_ownerships AS +SELECT ctov.token_data_id AS token_data_id_hash, + ctov.property_version_v1 AS property_version, + ctov.owner_address, + ccv.creator_address, + ccv.collection_name, + ctdv.token_name AS "name", + ctov.amount, + ctov.token_properties_mutated_v1 AS token_properties, + ctov.last_transaction_version, + ctov.inserted_at, + ctdv.collection_id AS collection_data_id_hash, + ctov.table_type_v1 AS table_type, + ctov.last_transaction_timestamp +FROM current_token_ownerships_v2 ctov + JOIN current_token_datas_v2 ctdv ON ctov.token_data_id = ctdv.token_data_id + JOIN current_collections_v2 ccv ON ctdv.collection_id = ccv.collection_id +WHERE ctov.token_standard = 'v1'; +-- replace `tokens` with `current_token_datas_v2` +CREATE OR REPLACE VIEW legacy_migration_v1.tokens AS +SELECT tdv.token_data_id AS token_data_id_hash, + tdv.largest_property_version_v1 AS property_version, + tdv.transaction_version, + ccv.creator_address, + ccv.collection_name, + tdv.token_name AS "name", + tdv.token_properties, + tdv.inserted_at, + tdv.collection_id AS collection_data_id_hash, + tdv.transaction_timestamp +FROM token_datas_v2 tdv + JOIN current_collections_v2 ccv ON tdv.collection_id = ccv.collection_id +WHERE tdv.token_standard = 'v1'; +-- replace `token_datas` with `token_datas_v2` +CREATE OR REPLACE VIEW legacy_migration_v1.token_datas AS +SELECT token_data_id AS token_data_id_hash, + tdv.transaction_version, + creator_address, + collection_name, + token_name AS "name", + maximum, + supply, + largest_property_version_v1 AS 
largest_property_version, + token_uri AS metadata_uri, + -- Null b/c we're not tracking royalty on transaction level + '' as payee_address, + null as royalty_points_numerator, + null as royalty_points_denominator, + -- Validated this is fine, since most are true anyway + TRUE AS maximum_mutable, + TRUE AS uri_mutable, + TRUE AS description_mutable, + TRUE AS properties_mutable, + TRUE AS royalty_mutable, + token_properties AS default_properties, + tdv.inserted_at, + tdv.collection_id AS collection_data_id_hash, + tdv.transaction_timestamp, + tdv.description +FROM token_datas_v2 tdv + JOIN collections_v2 cv ON tdv.collection_id = cv.collection_id + AND tdv.transaction_version = cv.transaction_version +WHERE tdv.token_standard = 'v1'; +-- replace `current_token_datas` with `current_token_datas_v2` +CREATE OR REPLACE VIEW legacy_migration_v1.current_token_datas AS +SELECT ctdv.token_data_id AS token_data_id_hash, + creator_address, + collection_name, + token_name AS "name", + COALESCE(maximum, 0) AS maximum, + COALESCE(supply, 0) AS supply, + largest_property_version_v1 AS largest_property_version, + token_uri AS metadata_uri, + COALESCE(payee_address, '') as payee_address, + royalty_points_numerator, + royalty_points_denominator, + -- Validated this is fine, since most are true anyway + TRUE AS maximum_mutable, + TRUE AS uri_mutable, + TRUE AS description_mutable, + TRUE AS properties_mutable, + TRUE AS royalty_mutable, + token_properties AS default_properties, + ctdv.last_transaction_version, + ctdv.inserted_at, + ctdv.collection_id AS collection_data_id_hash, + ctdv.last_transaction_timestamp, + ctdv."description" AS "description" +FROM current_token_datas_v2 ctdv + JOIN current_collections_v2 ccv ON ctdv.collection_id = ccv.collection_id + LEFT JOIN current_token_royalty_v1 ctrv on ctdv.token_data_id = ctrv.token_data_id +WHERE ctdv.token_standard = 'v1'; +-- replace `collection_datas` with `collection_v2` +CREATE OR REPLACE VIEW legacy_migration_v1.collection_datas AS +SELECT collection_id AS collection_data_id_hash, + transaction_version, + creator_address, + collection_name, + description, + uri AS metadata_uri, + current_supply AS supply, + max_supply AS maximum, + -- Validated this is fine, since most are true anyway + TRUE AS maximum_mutable, + TRUE AS uri_mutable, + TRUE AS description_mutable, + inserted_at, + table_handle_v1 AS table_handle, + transaction_timestamp +FROM collections_v2 +WHERE token_standard = 'v1'; +-- replace `current_ans_primary_name` with `current_ans_primary_name_v2` +CREATE OR REPLACE VIEW legacy_migration_v1.current_ans_primary_name AS +SELECT registered_address, + domain, + subdomain, + token_name, + is_deleted, + last_transaction_version, + 0 AS last_transaction_timestamp +FROM current_ans_primary_name_v2 +WHERE token_standard = 'v1'; +-- replace `current_ans_lookup` with `current_ans_lookup_v2` +CREATE OR REPLACE VIEW legacy_migration_v1.current_ans_lookup AS +SELECT domain, + subdomain, + registered_address, + expiration_timestamp, + last_transaction_version, + inserted_at, + token_name, + is_deleted +FROM current_ans_lookup_v2 +WHERE token_standard = 'v1'; +----- +----- +----- +-- If you would like to run these indices, please do it outside of diesel migration since it will be blocking processing +-- CREATE INDEX CONCURRENTLY IF NOT EXISTS lm1_ca_ct_a_index ON public.fungible_asset_activities USING btree (asset_type, amount); +-- CREATE INDEX CONCURRENTLY IF NOT EXISTS lm1_ca_ct_at_a_index ON public.fungible_asset_activities USING btree (asset_type, 
"type", amount); +-- CREATE INDEX CONCURRENTLY IF NOT EXISTS lm1_ca_oa_ct_at_index ON public.fungible_asset_activities USING btree (owner_address, asset_type, "type", amount); +-- CREATE INDEX CONCURRENTLY IF NOT EXISTS lm1_ca_oa_igf_index ON public.fungible_asset_activities USING btree (owner_address, is_gas_fee); +-- CREATE INDEX CONCURRENTLY IF NOT EXISTS lm1_cb_tv_oa_ct_index ON public.fungible_asset_balances USING btree (transaction_version, owner_address, asset_type); +-- CREATE INDEX CONCURRENTLY IF NOT EXISTS lm1_ccb_ct_a_index ON public.current_fungible_asset_balances USING btree (asset_type, amount); +-- CREATE INDEX CONCURRENTLY IF NOT EXISTS lm1_tdv_tdi_tv_index ON public.token_datas_v2 USING btree (token_data_id, transaction_version); +-- CREATE INDEX CONCURRENTLY IF NOT EXISTS lm1_cv_ci_tv_index ON public.collections_v2 USING btree (collection_id, transaction_version); +-- CREATE INDEX CONCURRENTLY IF NOT EXISTS lm1_ta_tdih_pv_index ON public.token_activities_v2 USING btree (token_data_id, property_version_v1); +-- CREATE INDEX CONCURRENTLY IF NOT EXISTS lm1_ans_d_s_et_index ON public.current_ans_lookup_v2 USING btree (domain, subdomain, expiration_timestamp); +-- CREATE INDEX CONCURRENTLY IF NOT EXISTS lm1_ans_ra_et_index ON public.current_ans_lookup_v2 USING btree (registered_address, expiration_timestamp); +-- CREATE INDEX CONCURRENTLY IF NOT EXISTS lm1_curr_to_oa_tt_am_ltv_index ON current_token_ownerships_v2 USING btree ( +-- owner_address, +-- table_type_v1, +-- amount, +-- last_transaction_version DESC +-- ); +-- CREATE INDEX CONCURRENTLY IF NOT EXISTS lm1_curr_to_oa_tt_ltv_index ON current_token_ownerships_v2 USING btree ( +-- owner_address, +-- table_type_v1, +-- last_transaction_version DESC +-- ); \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2024-06-13-061711_untransferrable/down.sql b/rust/processor/src/db/postgres/migrations/2024-06-13-061711_untransferrable/down.sql new file mode 100644 index 000000000..ed4fe2e46 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2024-06-13-061711_untransferrable/down.sql @@ -0,0 +1,3 @@ +-- This file should undo anything in `up.sql` +ALTER TABLE public.current_objects DROP COLUMN IF EXISTS untransferrable; +ALTER TABLE public.objects DROP COLUMN IF EXISTS untransferrable; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2024-06-13-061711_untransferrable/up.sql b/rust/processor/src/db/postgres/migrations/2024-06-13-061711_untransferrable/up.sql new file mode 100644 index 000000000..77d13bd34 --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2024-06-13-061711_untransferrable/up.sql @@ -0,0 +1,5 @@ +-- Your SQL goes here +ALTER TABLE public.current_objects +ADD COLUMN IF NOT EXISTS untransferrable BOOLEAN NOT NULL DEFAULT FALSE; +ALTER TABLE public.objects +ADD COLUMN IF NOT EXISTS untransferrable BOOLEAN NOT NULL DEFAULT FALSE; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2024-06-13-065302_current_unified_fungible_asset_balance_edit/down.sql b/rust/processor/src/db/postgres/migrations/2024-06-13-065302_current_unified_fungible_asset_balance_edit/down.sql new file mode 100644 index 000000000..671e83e4d --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2024-06-13-065302_current_unified_fungible_asset_balance_edit/down.sql @@ -0,0 +1,13 @@ +-- This file should undo anything in `up.sql` +ALTER TABLE current_unified_fungible_asset_balances_to_be_renamed + RENAME TO 
current_unified_fungible_asset_balances; +DROP INDEX IF EXISTS cufab_owner_at_index; +ALTER TABLE current_unified_fungible_asset_balances DROP COLUMN asset_type; +ALTER TABLE current_unified_fungible_asset_balances + RENAME COLUMN asset_type_v2 TO asset_type; +ALTER TABLE current_unified_fungible_asset_balances + RENAME COLUMN asset_type_v1 TO coin_type; +ALTER TABLE current_unified_fungible_asset_balances +ALTER COLUMN asset_type +SET NOT NULL; +CREATE INDEX IF NOT EXISTS cufab_owner_at_index ON current_unified_fungible_asset_balances (owner_address, asset_type); \ No newline at end of file diff --git a/rust/processor/src/db/postgres/migrations/2024-06-13-065302_current_unified_fungible_asset_balance_edit/up.sql b/rust/processor/src/db/postgres/migrations/2024-06-13-065302_current_unified_fungible_asset_balance_edit/up.sql new file mode 100644 index 000000000..6aea4a3cc --- /dev/null +++ b/rust/processor/src/db/postgres/migrations/2024-06-13-065302_current_unified_fungible_asset_balance_edit/up.sql @@ -0,0 +1,15 @@ +-- Your SQL goes here +-- Rename asset_type and coin_type to v1 and v2, and make a generated asset_type to be v2 if exists, else v1. +DROP INDEX IF EXISTS cufab_owner_at_index; +ALTER TABLE current_unified_fungible_asset_balances +ALTER COLUMN asset_type DROP NOT NULL; +ALTER TABLE current_unified_fungible_asset_balances + RENAME COLUMN asset_type TO asset_type_v2; +ALTER TABLE current_unified_fungible_asset_balances + RENAME COLUMN coin_type TO asset_type_v1; +ALTER TABLE current_unified_fungible_asset_balances +ADD COLUMN asset_type VARCHAR(1000) GENERATED ALWAYS AS (COALESCE(asset_type_v2, asset_type_v1)) STORED; +CREATE INDEX IF NOT EXISTS cufab_owner_at_index ON current_unified_fungible_asset_balances (owner_address, asset_type); +-- Rename table to set expectation that we'll rename this table to current_fungible_asset_balances after testing +ALTER TABLE current_unified_fungible_asset_balances + RENAME TO current_unified_fungible_asset_balances_to_be_renamed; \ No newline at end of file diff --git a/rust/processor/src/db/postgres/schema.rs b/rust/processor/src/db/postgres/schema.rs new file mode 100644 index 000000000..5e23ad755 --- /dev/null +++ b/rust/processor/src/db/postgres/schema.rs @@ -0,0 +1,1347 @@ +// @generated automatically by Diesel CLI. + +diesel::table! { + account_transactions (account_address, transaction_version) { + transaction_version -> Int8, + #[max_length = 66] + account_address -> Varchar, + inserted_at -> Timestamp, + } +} + +diesel::table! { + ans_lookup (transaction_version, write_set_change_index) { + transaction_version -> Int8, + write_set_change_index -> Int8, + #[max_length = 64] + domain -> Varchar, + #[max_length = 64] + subdomain -> Varchar, + #[max_length = 66] + registered_address -> Nullable, + expiration_timestamp -> Nullable, + #[max_length = 140] + token_name -> Varchar, + is_deleted -> Bool, + inserted_at -> Timestamp, + } +} + +diesel::table! { + ans_lookup_v2 (transaction_version, write_set_change_index) { + transaction_version -> Int8, + write_set_change_index -> Int8, + #[max_length = 64] + domain -> Varchar, + #[max_length = 64] + subdomain -> Varchar, + #[max_length = 10] + token_standard -> Varchar, + #[max_length = 66] + registered_address -> Nullable, + expiration_timestamp -> Nullable, + #[max_length = 140] + token_name -> Varchar, + is_deleted -> Bool, + inserted_at -> Timestamp, + subdomain_expiration_policy -> Nullable, + } +} + +diesel::table! 
{ + ans_primary_name (transaction_version, write_set_change_index) { + transaction_version -> Int8, + write_set_change_index -> Int8, + #[max_length = 66] + registered_address -> Varchar, + #[max_length = 64] + domain -> Nullable, + #[max_length = 64] + subdomain -> Nullable, + #[max_length = 140] + token_name -> Nullable, + is_deleted -> Bool, + inserted_at -> Timestamp, + } +} + +diesel::table! { + ans_primary_name_v2 (transaction_version, write_set_change_index) { + transaction_version -> Int8, + write_set_change_index -> Int8, + #[max_length = 66] + registered_address -> Varchar, + #[max_length = 64] + domain -> Nullable, + #[max_length = 64] + subdomain -> Nullable, + #[max_length = 10] + token_standard -> Varchar, + #[max_length = 140] + token_name -> Nullable, + is_deleted -> Bool, + inserted_at -> Timestamp, + } +} + +diesel::table! { + block_metadata_transactions (version) { + version -> Int8, + block_height -> Int8, + #[max_length = 66] + id -> Varchar, + round -> Int8, + epoch -> Int8, + previous_block_votes_bitvec -> Jsonb, + #[max_length = 66] + proposer -> Varchar, + failed_proposer_indices -> Jsonb, + timestamp -> Timestamp, + inserted_at -> Timestamp, + } +} + +diesel::table! { + coin_activities (transaction_version, event_account_address, event_creation_number, event_sequence_number) { + transaction_version -> Int8, + #[max_length = 66] + event_account_address -> Varchar, + event_creation_number -> Int8, + event_sequence_number -> Int8, + #[max_length = 66] + owner_address -> Varchar, + #[max_length = 5000] + coin_type -> Varchar, + amount -> Numeric, + #[max_length = 200] + activity_type -> Varchar, + is_gas_fee -> Bool, + is_transaction_success -> Bool, + #[max_length = 1000] + entry_function_id_str -> Nullable, + block_height -> Int8, + transaction_timestamp -> Timestamp, + inserted_at -> Timestamp, + event_index -> Nullable, + #[max_length = 66] + gas_fee_payer_address -> Nullable, + storage_refund_amount -> Numeric, + } +} + +diesel::table! { + coin_balances (transaction_version, owner_address, coin_type_hash) { + transaction_version -> Int8, + #[max_length = 66] + owner_address -> Varchar, + #[max_length = 64] + coin_type_hash -> Varchar, + #[max_length = 5000] + coin_type -> Varchar, + amount -> Numeric, + transaction_timestamp -> Timestamp, + inserted_at -> Timestamp, + } +} + +diesel::table! { + coin_infos (coin_type_hash) { + #[max_length = 64] + coin_type_hash -> Varchar, + #[max_length = 5000] + coin_type -> Varchar, + transaction_version_created -> Int8, + #[max_length = 66] + creator_address -> Varchar, + #[max_length = 32] + name -> Varchar, + #[max_length = 10] + symbol -> Varchar, + decimals -> Int4, + transaction_created_timestamp -> Timestamp, + inserted_at -> Timestamp, + #[max_length = 66] + supply_aggregator_table_handle -> Nullable, + supply_aggregator_table_key -> Nullable, + } +} + +diesel::table! { + coin_supply (transaction_version, coin_type_hash) { + transaction_version -> Int8, + #[max_length = 64] + coin_type_hash -> Varchar, + #[max_length = 5000] + coin_type -> Varchar, + supply -> Numeric, + transaction_timestamp -> Timestamp, + transaction_epoch -> Int8, + inserted_at -> Timestamp, + } +} + +diesel::table! 
{ + collection_datas (collection_data_id_hash, transaction_version) { + #[max_length = 64] + collection_data_id_hash -> Varchar, + transaction_version -> Int8, + #[max_length = 66] + creator_address -> Varchar, + #[max_length = 128] + collection_name -> Varchar, + description -> Text, + #[max_length = 512] + metadata_uri -> Varchar, + supply -> Numeric, + maximum -> Numeric, + maximum_mutable -> Bool, + uri_mutable -> Bool, + description_mutable -> Bool, + inserted_at -> Timestamp, + #[max_length = 66] + table_handle -> Varchar, + transaction_timestamp -> Timestamp, + } +} + +diesel::table! { + collections_v2 (transaction_version, write_set_change_index) { + transaction_version -> Int8, + write_set_change_index -> Int8, + #[max_length = 66] + collection_id -> Varchar, + #[max_length = 66] + creator_address -> Varchar, + #[max_length = 128] + collection_name -> Varchar, + description -> Text, + #[max_length = 512] + uri -> Varchar, + current_supply -> Numeric, + max_supply -> Nullable, + total_minted_v2 -> Nullable, + mutable_description -> Nullable, + mutable_uri -> Nullable, + #[max_length = 66] + table_handle_v1 -> Nullable, + #[max_length = 10] + token_standard -> Varchar, + transaction_timestamp -> Timestamp, + inserted_at -> Timestamp, + } +} + +diesel::table! { + current_ans_lookup (domain, subdomain) { + #[max_length = 64] + domain -> Varchar, + #[max_length = 64] + subdomain -> Varchar, + #[max_length = 66] + registered_address -> Nullable, + expiration_timestamp -> Timestamp, + last_transaction_version -> Int8, + inserted_at -> Timestamp, + #[max_length = 140] + token_name -> Varchar, + is_deleted -> Bool, + } +} + +diesel::table! { + current_ans_lookup_v2 (domain, subdomain, token_standard) { + #[max_length = 64] + domain -> Varchar, + #[max_length = 64] + subdomain -> Varchar, + #[max_length = 10] + token_standard -> Varchar, + #[max_length = 140] + token_name -> Nullable, + #[max_length = 66] + registered_address -> Nullable, + expiration_timestamp -> Timestamp, + last_transaction_version -> Int8, + is_deleted -> Bool, + inserted_at -> Timestamp, + subdomain_expiration_policy -> Nullable, + } +} + +diesel::table! { + current_ans_primary_name (registered_address) { + #[max_length = 66] + registered_address -> Varchar, + #[max_length = 64] + domain -> Nullable, + #[max_length = 64] + subdomain -> Nullable, + #[max_length = 140] + token_name -> Nullable, + is_deleted -> Bool, + last_transaction_version -> Int8, + inserted_at -> Timestamp, + } +} + +diesel::table! { + current_ans_primary_name_v2 (registered_address, token_standard) { + #[max_length = 66] + registered_address -> Varchar, + #[max_length = 10] + token_standard -> Varchar, + #[max_length = 64] + domain -> Nullable, + #[max_length = 64] + subdomain -> Nullable, + #[max_length = 140] + token_name -> Nullable, + is_deleted -> Bool, + last_transaction_version -> Int8, + inserted_at -> Timestamp, + } +} + +diesel::table! { + current_coin_balances (owner_address, coin_type_hash) { + #[max_length = 66] + owner_address -> Varchar, + #[max_length = 64] + coin_type_hash -> Varchar, + #[max_length = 5000] + coin_type -> Varchar, + amount -> Numeric, + last_transaction_version -> Int8, + last_transaction_timestamp -> Timestamp, + inserted_at -> Timestamp, + } +} + +diesel::table! 
{ + current_collection_datas (collection_data_id_hash) { + #[max_length = 64] + collection_data_id_hash -> Varchar, + #[max_length = 66] + creator_address -> Varchar, + #[max_length = 128] + collection_name -> Varchar, + description -> Text, + #[max_length = 512] + metadata_uri -> Varchar, + supply -> Numeric, + maximum -> Numeric, + maximum_mutable -> Bool, + uri_mutable -> Bool, + description_mutable -> Bool, + last_transaction_version -> Int8, + inserted_at -> Timestamp, + #[max_length = 66] + table_handle -> Varchar, + last_transaction_timestamp -> Timestamp, + } +} + +diesel::table! { + current_collections_v2 (collection_id) { + #[max_length = 66] + collection_id -> Varchar, + #[max_length = 66] + creator_address -> Varchar, + #[max_length = 128] + collection_name -> Varchar, + description -> Text, + #[max_length = 512] + uri -> Varchar, + current_supply -> Numeric, + max_supply -> Nullable, + total_minted_v2 -> Nullable, + mutable_description -> Nullable, + mutable_uri -> Nullable, + #[max_length = 66] + table_handle_v1 -> Nullable, + #[max_length = 10] + token_standard -> Varchar, + last_transaction_version -> Int8, + last_transaction_timestamp -> Timestamp, + inserted_at -> Timestamp, + } +} + +diesel::table! { + current_delegated_staking_pool_balances (staking_pool_address) { + #[max_length = 66] + staking_pool_address -> Varchar, + total_coins -> Numeric, + total_shares -> Numeric, + last_transaction_version -> Int8, + inserted_at -> Timestamp, + operator_commission_percentage -> Numeric, + #[max_length = 66] + inactive_table_handle -> Varchar, + #[max_length = 66] + active_table_handle -> Varchar, + } +} + +diesel::table! { + current_delegated_voter (delegation_pool_address, delegator_address) { + #[max_length = 66] + delegation_pool_address -> Varchar, + #[max_length = 66] + delegator_address -> Varchar, + #[max_length = 66] + table_handle -> Nullable, + #[max_length = 66] + voter -> Nullable, + #[max_length = 66] + pending_voter -> Nullable, + last_transaction_version -> Int8, + last_transaction_timestamp -> Timestamp, + inserted_at -> Timestamp, + } +} + +diesel::table! { + current_delegator_balances (delegator_address, pool_address, pool_type, table_handle) { + #[max_length = 66] + delegator_address -> Varchar, + #[max_length = 66] + pool_address -> Varchar, + #[max_length = 100] + pool_type -> Varchar, + #[max_length = 66] + table_handle -> Varchar, + last_transaction_version -> Int8, + inserted_at -> Timestamp, + shares -> Numeric, + #[max_length = 66] + parent_table_handle -> Varchar, + } +} + +diesel::table! { + current_fungible_asset_balances (storage_id) { + #[max_length = 66] + storage_id -> Varchar, + #[max_length = 66] + owner_address -> Varchar, + #[max_length = 1000] + asset_type -> Varchar, + is_primary -> Bool, + is_frozen -> Bool, + amount -> Numeric, + last_transaction_timestamp -> Timestamp, + last_transaction_version -> Int8, + #[max_length = 10] + token_standard -> Varchar, + inserted_at -> Timestamp, + } +} + +diesel::table! { + current_objects (object_address) { + #[max_length = 66] + object_address -> Varchar, + #[max_length = 66] + owner_address -> Varchar, + #[max_length = 66] + state_key_hash -> Varchar, + allow_ungated_transfer -> Bool, + last_guid_creation_num -> Numeric, + last_transaction_version -> Int8, + is_deleted -> Bool, + inserted_at -> Timestamp, + untransferrable -> Bool, + } +} + +diesel::table! 
{ + current_staking_pool_voter (staking_pool_address) { + #[max_length = 66] + staking_pool_address -> Varchar, + #[max_length = 66] + voter_address -> Varchar, + last_transaction_version -> Int8, + inserted_at -> Timestamp, + #[max_length = 66] + operator_address -> Varchar, + } +} + +diesel::table! { + current_table_items (table_handle, key_hash) { + #[max_length = 66] + table_handle -> Varchar, + #[max_length = 64] + key_hash -> Varchar, + key -> Text, + decoded_key -> Jsonb, + decoded_value -> Nullable, + is_deleted -> Bool, + last_transaction_version -> Int8, + inserted_at -> Timestamp, + } +} + +diesel::table! { + current_token_datas (token_data_id_hash) { + #[max_length = 64] + token_data_id_hash -> Varchar, + #[max_length = 66] + creator_address -> Varchar, + #[max_length = 128] + collection_name -> Varchar, + #[max_length = 128] + name -> Varchar, + maximum -> Numeric, + supply -> Numeric, + largest_property_version -> Numeric, + #[max_length = 512] + metadata_uri -> Varchar, + #[max_length = 66] + payee_address -> Varchar, + royalty_points_numerator -> Numeric, + royalty_points_denominator -> Numeric, + maximum_mutable -> Bool, + uri_mutable -> Bool, + description_mutable -> Bool, + properties_mutable -> Bool, + royalty_mutable -> Bool, + default_properties -> Jsonb, + last_transaction_version -> Int8, + inserted_at -> Timestamp, + #[max_length = 64] + collection_data_id_hash -> Varchar, + last_transaction_timestamp -> Timestamp, + description -> Text, + } +} + +diesel::table! { + current_token_datas_v2 (token_data_id) { + #[max_length = 66] + token_data_id -> Varchar, + #[max_length = 66] + collection_id -> Varchar, + #[max_length = 128] + token_name -> Varchar, + maximum -> Nullable, + supply -> Nullable, + largest_property_version_v1 -> Nullable, + #[max_length = 512] + token_uri -> Varchar, + description -> Text, + token_properties -> Jsonb, + #[max_length = 10] + token_standard -> Varchar, + is_fungible_v2 -> Nullable, + last_transaction_version -> Int8, + last_transaction_timestamp -> Timestamp, + inserted_at -> Timestamp, + decimals -> Nullable, + is_deleted_v2 -> Nullable, + } +} + +diesel::table! { + current_token_ownerships (token_data_id_hash, property_version, owner_address) { + #[max_length = 64] + token_data_id_hash -> Varchar, + property_version -> Numeric, + #[max_length = 66] + owner_address -> Varchar, + #[max_length = 66] + creator_address -> Varchar, + #[max_length = 128] + collection_name -> Varchar, + #[max_length = 128] + name -> Varchar, + amount -> Numeric, + token_properties -> Jsonb, + last_transaction_version -> Int8, + inserted_at -> Timestamp, + #[max_length = 64] + collection_data_id_hash -> Varchar, + table_type -> Text, + last_transaction_timestamp -> Timestamp, + } +} + +diesel::table! { + current_token_ownerships_v2 (token_data_id, property_version_v1, owner_address, storage_id) { + #[max_length = 66] + token_data_id -> Varchar, + property_version_v1 -> Numeric, + #[max_length = 66] + owner_address -> Varchar, + #[max_length = 66] + storage_id -> Varchar, + amount -> Numeric, + #[max_length = 66] + table_type_v1 -> Nullable, + token_properties_mutated_v1 -> Nullable, + is_soulbound_v2 -> Nullable, + #[max_length = 10] + token_standard -> Varchar, + is_fungible_v2 -> Nullable, + last_transaction_version -> Int8, + last_transaction_timestamp -> Timestamp, + inserted_at -> Timestamp, + non_transferrable_by_owner -> Nullable, + } +} + +diesel::table! 
{ + current_token_pending_claims (token_data_id_hash, property_version, from_address, to_address) { + #[max_length = 64] + token_data_id_hash -> Varchar, + property_version -> Numeric, + #[max_length = 66] + from_address -> Varchar, + #[max_length = 66] + to_address -> Varchar, + #[max_length = 64] + collection_data_id_hash -> Varchar, + #[max_length = 66] + creator_address -> Varchar, + #[max_length = 128] + collection_name -> Varchar, + #[max_length = 128] + name -> Varchar, + amount -> Numeric, + #[max_length = 66] + table_handle -> Varchar, + last_transaction_version -> Int8, + inserted_at -> Timestamp, + last_transaction_timestamp -> Timestamp, + #[max_length = 66] + token_data_id -> Varchar, + #[max_length = 66] + collection_id -> Varchar, + } +} + +diesel::table! { + current_token_royalty_v1 (token_data_id) { + #[max_length = 66] + token_data_id -> Varchar, + #[max_length = 66] + payee_address -> Varchar, + royalty_points_numerator -> Numeric, + royalty_points_denominator -> Numeric, + last_transaction_version -> Int8, + last_transaction_timestamp -> Timestamp, + inserted_at -> Timestamp, + } +} + +diesel::table! { + current_token_v2_metadata (object_address, resource_type) { + #[max_length = 66] + object_address -> Varchar, + #[max_length = 128] + resource_type -> Varchar, + data -> Jsonb, + #[max_length = 66] + state_key_hash -> Varchar, + last_transaction_version -> Int8, + inserted_at -> Timestamp, + } +} + +diesel::table! { + current_unified_fungible_asset_balances_to_be_renamed (storage_id) { + #[max_length = 66] + storage_id -> Varchar, + #[max_length = 66] + owner_address -> Varchar, + #[max_length = 66] + asset_type_v2 -> Nullable, + #[max_length = 1000] + asset_type_v1 -> Nullable, + is_primary -> Nullable, + is_frozen -> Bool, + amount_v1 -> Nullable, + amount_v2 -> Nullable, + amount -> Nullable, + last_transaction_version_v1 -> Nullable, + last_transaction_version_v2 -> Nullable, + last_transaction_version -> Nullable, + last_transaction_timestamp_v1 -> Nullable, + last_transaction_timestamp_v2 -> Nullable, + last_transaction_timestamp -> Nullable, + inserted_at -> Timestamp, + #[max_length = 1000] + asset_type -> Nullable, + } +} + +diesel::table! { + delegated_staking_activities (transaction_version, event_index) { + transaction_version -> Int8, + event_index -> Int8, + #[max_length = 66] + delegator_address -> Varchar, + #[max_length = 66] + pool_address -> Varchar, + event_type -> Text, + amount -> Numeric, + inserted_at -> Timestamp, + } +} + +diesel::table! { + delegated_staking_pool_balances (transaction_version, staking_pool_address) { + transaction_version -> Int8, + #[max_length = 66] + staking_pool_address -> Varchar, + total_coins -> Numeric, + total_shares -> Numeric, + inserted_at -> Timestamp, + operator_commission_percentage -> Numeric, + #[max_length = 66] + inactive_table_handle -> Varchar, + #[max_length = 66] + active_table_handle -> Varchar, + } +} + +diesel::table! { + delegated_staking_pools (staking_pool_address) { + #[max_length = 66] + staking_pool_address -> Varchar, + first_transaction_version -> Int8, + inserted_at -> Timestamp, + } +} + +diesel::table! 
{ + delegator_balances (transaction_version, write_set_change_index) { + transaction_version -> Int8, + write_set_change_index -> Int8, + #[max_length = 66] + delegator_address -> Varchar, + #[max_length = 66] + pool_address -> Varchar, + #[max_length = 100] + pool_type -> Varchar, + #[max_length = 66] + table_handle -> Varchar, + shares -> Numeric, + #[max_length = 66] + parent_table_handle -> Varchar, + inserted_at -> Timestamp, + } +} + +diesel::table! { + event_size_info (transaction_version, index) { + transaction_version -> Int8, + index -> Int8, + type_tag_bytes -> Int8, + total_bytes -> Int8, + inserted_at -> Timestamp, + } +} + +diesel::table! { + events (transaction_version, event_index) { + sequence_number -> Int8, + creation_number -> Int8, + #[max_length = 66] + account_address -> Varchar, + transaction_version -> Int8, + transaction_block_height -> Int8, + #[sql_name = "type"] + type_ -> Text, + data -> Jsonb, + inserted_at -> Timestamp, + event_index -> Int8, + #[max_length = 300] + indexed_type -> Varchar, + } +} + +diesel::table! { + fungible_asset_activities (transaction_version, event_index) { + transaction_version -> Int8, + event_index -> Int8, + #[max_length = 66] + owner_address -> Varchar, + #[max_length = 66] + storage_id -> Varchar, + #[max_length = 1000] + asset_type -> Varchar, + is_frozen -> Nullable, + amount -> Nullable, + #[sql_name = "type"] + type_ -> Varchar, + is_gas_fee -> Bool, + #[max_length = 66] + gas_fee_payer_address -> Nullable, + is_transaction_success -> Bool, + #[max_length = 1000] + entry_function_id_str -> Nullable, + block_height -> Int8, + #[max_length = 10] + token_standard -> Varchar, + transaction_timestamp -> Timestamp, + inserted_at -> Timestamp, + storage_refund_amount -> Numeric, + } +} + +diesel::table! { + fungible_asset_balances (transaction_version, write_set_change_index) { + transaction_version -> Int8, + write_set_change_index -> Int8, + #[max_length = 66] + storage_id -> Varchar, + #[max_length = 66] + owner_address -> Varchar, + #[max_length = 1000] + asset_type -> Varchar, + is_primary -> Bool, + is_frozen -> Bool, + amount -> Numeric, + transaction_timestamp -> Timestamp, + #[max_length = 10] + token_standard -> Varchar, + inserted_at -> Timestamp, + } +} + +diesel::table! { + fungible_asset_metadata (asset_type) { + #[max_length = 1000] + asset_type -> Varchar, + #[max_length = 66] + creator_address -> Varchar, + #[max_length = 32] + name -> Varchar, + #[max_length = 10] + symbol -> Varchar, + decimals -> Int4, + #[max_length = 512] + icon_uri -> Nullable, + #[max_length = 512] + project_uri -> Nullable, + last_transaction_version -> Int8, + last_transaction_timestamp -> Timestamp, + #[max_length = 66] + supply_aggregator_table_handle_v1 -> Nullable, + supply_aggregator_table_key_v1 -> Nullable, + #[max_length = 10] + token_standard -> Varchar, + inserted_at -> Timestamp, + is_token_v2 -> Nullable, + supply_v2 -> Nullable, + maximum_v2 -> Nullable, + } +} + +diesel::table! { + indexer_status (db) { + #[max_length = 50] + db -> Varchar, + is_indexer_up -> Bool, + inserted_at -> Timestamp, + } +} + +diesel::table! { + ledger_infos (chain_id) { + chain_id -> Int8, + } +} + +diesel::table! 
{ + move_modules (transaction_version, write_set_change_index) { + transaction_version -> Int8, + write_set_change_index -> Int8, + transaction_block_height -> Int8, + name -> Text, + #[max_length = 66] + address -> Varchar, + bytecode -> Nullable, + friends -> Nullable, + exposed_functions -> Nullable, + structs -> Nullable, + is_deleted -> Bool, + inserted_at -> Timestamp, + } +} + +diesel::table! { + move_resources (transaction_version, write_set_change_index) { + transaction_version -> Int8, + write_set_change_index -> Int8, + transaction_block_height -> Int8, + name -> Text, + #[max_length = 66] + address -> Varchar, + #[sql_name = "type"] + type_ -> Text, + module -> Text, + generic_type_params -> Nullable, + data -> Nullable, + is_deleted -> Bool, + inserted_at -> Timestamp, + #[max_length = 66] + state_key_hash -> Varchar, + } +} + +diesel::table! { + nft_points (transaction_version) { + transaction_version -> Int8, + #[max_length = 66] + owner_address -> Varchar, + token_name -> Text, + point_type -> Text, + amount -> Numeric, + transaction_timestamp -> Timestamp, + inserted_at -> Timestamp, + } +} + +diesel::table! { + objects (transaction_version, write_set_change_index) { + transaction_version -> Int8, + write_set_change_index -> Int8, + #[max_length = 66] + object_address -> Varchar, + #[max_length = 66] + owner_address -> Varchar, + #[max_length = 66] + state_key_hash -> Varchar, + guid_creation_num -> Numeric, + allow_ungated_transfer -> Bool, + is_deleted -> Bool, + inserted_at -> Timestamp, + untransferrable -> Bool, + } +} + +diesel::table! { + processor_status (processor) { + #[max_length = 50] + processor -> Varchar, + last_success_version -> Int8, + last_updated -> Timestamp, + last_transaction_timestamp -> Nullable, + } +} + +diesel::table! { + proposal_votes (transaction_version, proposal_id, voter_address) { + transaction_version -> Int8, + proposal_id -> Int8, + #[max_length = 66] + voter_address -> Varchar, + #[max_length = 66] + staking_pool_address -> Varchar, + num_votes -> Numeric, + should_pass -> Bool, + transaction_timestamp -> Timestamp, + inserted_at -> Timestamp, + } +} + +diesel::table! { + signatures (transaction_version, multi_agent_index, multi_sig_index, is_sender_primary) { + transaction_version -> Int8, + multi_agent_index -> Int8, + multi_sig_index -> Int8, + transaction_block_height -> Int8, + #[max_length = 66] + signer -> Varchar, + is_sender_primary -> Bool, + #[sql_name = "type"] + type_ -> Varchar, + #[max_length = 136] + public_key -> Varchar, + signature -> Text, + threshold -> Int8, + public_key_indices -> Jsonb, + inserted_at -> Timestamp, + } +} + +diesel::table! { + spam_assets (asset) { + #[max_length = 1100] + asset -> Varchar, + is_spam -> Bool, + last_updated -> Timestamp, + } +} + +diesel::table! { + table_items (transaction_version, write_set_change_index) { + key -> Text, + transaction_version -> Int8, + write_set_change_index -> Int8, + transaction_block_height -> Int8, + #[max_length = 66] + table_handle -> Varchar, + decoded_key -> Jsonb, + decoded_value -> Nullable, + is_deleted -> Bool, + inserted_at -> Timestamp, + } +} + +diesel::table! { + table_metadatas (handle) { + #[max_length = 66] + handle -> Varchar, + key_type -> Text, + value_type -> Text, + inserted_at -> Timestamp, + } +} + +diesel::table! 
{ + token_activities (transaction_version, event_account_address, event_creation_number, event_sequence_number) { + transaction_version -> Int8, + #[max_length = 66] + event_account_address -> Varchar, + event_creation_number -> Int8, + event_sequence_number -> Int8, + #[max_length = 64] + collection_data_id_hash -> Varchar, + #[max_length = 64] + token_data_id_hash -> Varchar, + property_version -> Numeric, + #[max_length = 66] + creator_address -> Varchar, + #[max_length = 128] + collection_name -> Varchar, + #[max_length = 128] + name -> Varchar, + #[max_length = 50] + transfer_type -> Varchar, + #[max_length = 66] + from_address -> Nullable, + #[max_length = 66] + to_address -> Nullable, + token_amount -> Numeric, + coin_type -> Nullable, + coin_amount -> Nullable, + inserted_at -> Timestamp, + transaction_timestamp -> Timestamp, + event_index -> Nullable, + } +} + +diesel::table! { + token_activities_v2 (transaction_version, event_index) { + transaction_version -> Int8, + event_index -> Int8, + #[max_length = 66] + event_account_address -> Varchar, + #[max_length = 66] + token_data_id -> Varchar, + property_version_v1 -> Numeric, + #[sql_name = "type"] + type_ -> Varchar, + #[max_length = 66] + from_address -> Nullable, + #[max_length = 66] + to_address -> Nullable, + token_amount -> Numeric, + before_value -> Nullable, + after_value -> Nullable, + #[max_length = 1000] + entry_function_id_str -> Nullable, + #[max_length = 10] + token_standard -> Varchar, + is_fungible_v2 -> Nullable, + transaction_timestamp -> Timestamp, + inserted_at -> Timestamp, + } +} + +diesel::table! { + token_datas (token_data_id_hash, transaction_version) { + #[max_length = 64] + token_data_id_hash -> Varchar, + transaction_version -> Int8, + #[max_length = 66] + creator_address -> Varchar, + #[max_length = 128] + collection_name -> Varchar, + #[max_length = 128] + name -> Varchar, + maximum -> Numeric, + supply -> Numeric, + largest_property_version -> Numeric, + #[max_length = 512] + metadata_uri -> Varchar, + #[max_length = 66] + payee_address -> Varchar, + royalty_points_numerator -> Numeric, + royalty_points_denominator -> Numeric, + maximum_mutable -> Bool, + uri_mutable -> Bool, + description_mutable -> Bool, + properties_mutable -> Bool, + royalty_mutable -> Bool, + default_properties -> Jsonb, + inserted_at -> Timestamp, + #[max_length = 64] + collection_data_id_hash -> Varchar, + transaction_timestamp -> Timestamp, + description -> Text, + } +} + +diesel::table! { + token_datas_v2 (transaction_version, write_set_change_index) { + transaction_version -> Int8, + write_set_change_index -> Int8, + #[max_length = 66] + token_data_id -> Varchar, + #[max_length = 66] + collection_id -> Varchar, + #[max_length = 128] + token_name -> Varchar, + maximum -> Nullable, + supply -> Nullable, + largest_property_version_v1 -> Nullable, + #[max_length = 512] + token_uri -> Varchar, + token_properties -> Jsonb, + description -> Text, + #[max_length = 10] + token_standard -> Varchar, + is_fungible_v2 -> Nullable, + transaction_timestamp -> Timestamp, + inserted_at -> Timestamp, + decimals -> Nullable, + is_deleted_v2 -> Nullable, + } +} + +diesel::table! 
{ + token_ownerships (token_data_id_hash, property_version, transaction_version, table_handle) { + #[max_length = 64] + token_data_id_hash -> Varchar, + property_version -> Numeric, + transaction_version -> Int8, + #[max_length = 66] + table_handle -> Varchar, + #[max_length = 66] + creator_address -> Varchar, + #[max_length = 128] + collection_name -> Varchar, + #[max_length = 128] + name -> Varchar, + #[max_length = 66] + owner_address -> Nullable, + amount -> Numeric, + table_type -> Nullable, + inserted_at -> Timestamp, + #[max_length = 64] + collection_data_id_hash -> Varchar, + transaction_timestamp -> Timestamp, + } +} + +diesel::table! { + token_ownerships_v2 (transaction_version, write_set_change_index) { + transaction_version -> Int8, + write_set_change_index -> Int8, + #[max_length = 66] + token_data_id -> Varchar, + property_version_v1 -> Numeric, + #[max_length = 66] + owner_address -> Nullable, + #[max_length = 66] + storage_id -> Varchar, + amount -> Numeric, + #[max_length = 66] + table_type_v1 -> Nullable, + token_properties_mutated_v1 -> Nullable, + is_soulbound_v2 -> Nullable, + #[max_length = 10] + token_standard -> Varchar, + is_fungible_v2 -> Nullable, + transaction_timestamp -> Timestamp, + inserted_at -> Timestamp, + non_transferrable_by_owner -> Nullable, + } +} + +diesel::table! { + tokens (token_data_id_hash, property_version, transaction_version) { + #[max_length = 64] + token_data_id_hash -> Varchar, + property_version -> Numeric, + transaction_version -> Int8, + #[max_length = 66] + creator_address -> Varchar, + #[max_length = 128] + collection_name -> Varchar, + #[max_length = 128] + name -> Varchar, + token_properties -> Jsonb, + inserted_at -> Timestamp, + #[max_length = 64] + collection_data_id_hash -> Varchar, + transaction_timestamp -> Timestamp, + } +} + +diesel::table! { + transaction_size_info (transaction_version) { + transaction_version -> Int8, + size_bytes -> Int8, + inserted_at -> Timestamp, + } +} + +diesel::table! { + transactions (version) { + version -> Int8, + block_height -> Int8, + #[max_length = 66] + hash -> Varchar, + #[sql_name = "type"] + type_ -> Varchar, + payload -> Nullable, + #[max_length = 66] + state_change_hash -> Varchar, + #[max_length = 66] + event_root_hash -> Varchar, + #[max_length = 66] + state_checkpoint_hash -> Nullable, + gas_used -> Numeric, + success -> Bool, + vm_status -> Text, + #[max_length = 66] + accumulator_root_hash -> Varchar, + num_events -> Int8, + num_write_set_changes -> Int8, + inserted_at -> Timestamp, + epoch -> Int8, + #[max_length = 50] + payload_type -> Nullable, + } +} + +diesel::table! { + user_transactions (version) { + version -> Int8, + block_height -> Int8, + #[max_length = 50] + parent_signature_type -> Varchar, + #[max_length = 66] + sender -> Varchar, + sequence_number -> Int8, + max_gas_amount -> Numeric, + expiration_timestamp_secs -> Timestamp, + gas_unit_price -> Numeric, + timestamp -> Timestamp, + #[max_length = 1000] + entry_function_id_str -> Varchar, + inserted_at -> Timestamp, + epoch -> Int8, + } +} + +diesel::table! { + write_set_changes (transaction_version, index) { + transaction_version -> Int8, + index -> Int8, + #[max_length = 66] + hash -> Varchar, + transaction_block_height -> Int8, + #[sql_name = "type"] + type_ -> Text, + #[max_length = 66] + address -> Varchar, + inserted_at -> Timestamp, + } +} + +diesel::table! 
{ + write_set_size_info (transaction_version, index) { + transaction_version -> Int8, + index -> Int8, + key_bytes -> Int8, + value_bytes -> Int8, + inserted_at -> Timestamp, + } +} + +diesel::allow_tables_to_appear_in_same_query!( + account_transactions, + ans_lookup, + ans_lookup_v2, + ans_primary_name, + ans_primary_name_v2, + block_metadata_transactions, + coin_activities, + coin_balances, + coin_infos, + coin_supply, + collection_datas, + collections_v2, + current_ans_lookup, + current_ans_lookup_v2, + current_ans_primary_name, + current_ans_primary_name_v2, + current_coin_balances, + current_collection_datas, + current_collections_v2, + current_delegated_staking_pool_balances, + current_delegated_voter, + current_delegator_balances, + current_fungible_asset_balances, + current_objects, + current_staking_pool_voter, + current_table_items, + current_token_datas, + current_token_datas_v2, + current_token_ownerships, + current_token_ownerships_v2, + current_token_pending_claims, + current_token_royalty_v1, + current_token_v2_metadata, + current_unified_fungible_asset_balances_to_be_renamed, + delegated_staking_activities, + delegated_staking_pool_balances, + delegated_staking_pools, + delegator_balances, + event_size_info, + events, + fungible_asset_activities, + fungible_asset_balances, + fungible_asset_metadata, + indexer_status, + ledger_infos, + move_modules, + move_resources, + nft_points, + objects, + processor_status, + proposal_votes, + signatures, + spam_assets, + table_items, + table_metadatas, + token_activities, + token_activities_v2, + token_datas, + token_datas_v2, + token_ownerships, + token_ownerships_v2, + tokens, + transaction_size_info, + transactions, + user_transactions, + write_set_changes, + write_set_size_info, +); diff --git a/rust/processor/src/gap_detectors/gap_detector.rs b/rust/processor/src/gap_detectors/gap_detector.rs new file mode 100644 index 000000000..c5707f798 --- /dev/null +++ b/rust/processor/src/gap_detectors/gap_detector.rs @@ -0,0 +1,144 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + gap_detectors::{GapDetectorResult, ProcessingResult}, + processors::DefaultProcessingResult, +}; +use ahash::AHashMap; + +pub trait GapDetectorTrait { + fn process_versions(&mut self, result: ProcessingResult) -> anyhow::Result; +} + +pub struct DefaultGapDetector { + next_version_to_process: u64, + seen_versions: AHashMap, + last_success_batch: Option, +} + +pub struct DefaultGapDetectorResult { + pub next_version_to_process: u64, + pub num_gaps: u64, + pub last_success_batch: Option, +} + +impl GapDetectorTrait for DefaultGapDetector { + fn process_versions(&mut self, result: ProcessingResult) -> anyhow::Result { + match result { + ProcessingResult::DefaultProcessingResult(result) => { + // Check for gaps + if self.next_version_to_process != result.start_version { + self.seen_versions.insert(result.start_version, result); + tracing::debug!("Gap detected"); + } else { + // If no gap is detected, find the latest processed batch without gaps + self.update_prev_batch(result); + tracing::debug!("No gap detected"); + } + + Ok(GapDetectorResult::DefaultGapDetectorResult( + DefaultGapDetectorResult { + next_version_to_process: self.next_version_to_process, + num_gaps: self.seen_versions.len() as u64, + last_success_batch: self.last_success_batch.clone(), + }, + )) + }, + _ => { + panic!("Invalid result type"); + }, + } + } +} + +impl DefaultGapDetector { + pub fn new(starting_version: u64) -> Self { + Self { + 
next_version_to_process: starting_version, + seen_versions: AHashMap::new(), + last_success_batch: None, + } + } + + fn update_prev_batch(&mut self, result: DefaultProcessingResult) { + let mut new_prev_batch = result; + while let Some(next_version) = self.seen_versions.remove(&(new_prev_batch.end_version + 1)) + { + new_prev_batch = next_version; + } + self.next_version_to_process = new_prev_batch.end_version + 1; + self.last_success_batch = Some(new_prev_batch); + } +} + +#[cfg(test)] +mod test { + use super::*; + use crate::gap_detectors::DEFAULT_GAP_DETECTION_BATCH_SIZE; + + #[tokio::test] + async fn detect_gap_test() { + let starting_version = 0; + let mut default_gap_detector = DefaultGapDetector::new(starting_version); + + // Processing batches with gaps + for i in 0..DEFAULT_GAP_DETECTION_BATCH_SIZE { + let result = DefaultProcessingResult { + start_version: 100 + i * 100, + end_version: 199 + i * 100, + last_transaction_timestamp: None, + processing_duration_in_secs: 0.0, + db_insertion_duration_in_secs: 0.0, + }; + let default_gap_detector_result = default_gap_detector + .process_versions(ProcessingResult::DefaultProcessingResult(result)) + .unwrap(); + let default_gap_detector_result = match default_gap_detector_result { + GapDetectorResult::DefaultGapDetectorResult(res) => res, + _ => panic!("Invalid result type"), + }; + + assert_eq!(default_gap_detector_result.num_gaps, i + 1); + assert_eq!(default_gap_detector_result.next_version_to_process, 0); + assert_eq!(default_gap_detector_result.last_success_batch, None); + } + + // Process a batch without a gap + let default_gap_detector_result = default_gap_detector + .process_versions(ProcessingResult::DefaultProcessingResult( + DefaultProcessingResult { + start_version: 0, + end_version: 99, + last_transaction_timestamp: None, + processing_duration_in_secs: 0.0, + db_insertion_duration_in_secs: 0.0, + }, + )) + .unwrap(); + let default_gap_detector_result = match default_gap_detector_result { + GapDetectorResult::DefaultGapDetectorResult(res) => res, + _ => panic!("Invalid result type"), + }; + assert_eq!(default_gap_detector_result.num_gaps, 0); + assert_eq!( + default_gap_detector_result.next_version_to_process, + 100 + (DEFAULT_GAP_DETECTION_BATCH_SIZE) * 100 + ); + assert_eq!( + default_gap_detector_result + .last_success_batch + .clone() + .unwrap() + .start_version, + 100 + (DEFAULT_GAP_DETECTION_BATCH_SIZE - 1) * 100 + ); + assert_eq!( + default_gap_detector_result + .last_success_batch + .unwrap() + .end_version, + 199 + (DEFAULT_GAP_DETECTION_BATCH_SIZE - 1) * 100 + ); + } +} diff --git a/rust/processor/src/gap_detectors/mod.rs b/rust/processor/src/gap_detectors/mod.rs new file mode 100644 index 000000000..022c56812 --- /dev/null +++ b/rust/processor/src/gap_detectors/mod.rs @@ -0,0 +1,178 @@ +use crate::{ + gap_detectors::{ + gap_detector::{DefaultGapDetector, DefaultGapDetectorResult, GapDetectorTrait}, + parquet_gap_detector::{ParquetFileGapDetector, ParquetFileGapDetectorResult}, + }, + parquet_processors::ParquetProcessingResult, + processors::{DefaultProcessingResult, Processor, ProcessorTrait}, + utils::counters::{PARQUET_PROCESSOR_DATA_GAP_COUNT, PROCESSOR_DATA_GAP_COUNT}, + worker::PROCESSOR_SERVICE_TYPE, +}; +use enum_dispatch::enum_dispatch; +use kanal::AsyncReceiver; +use tracing::{error, info}; + +pub mod gap_detector; +pub mod parquet_gap_detector; + +// Size of a gap (in txn version) before gap detected +pub const DEFAULT_GAP_DETECTION_BATCH_SIZE: u64 = 500; +// Number of seconds between each processor 
status update +const UPDATE_PROCESSOR_STATUS_SECS: u64 = 1; + +#[enum_dispatch(GapDetectorTrait)] +pub enum GapDetector { + DefaultGapDetector, + ParquetFileGapDetector, +} + +#[enum_dispatch(GapDetectorTrait)] +pub enum GapDetectorResult { + DefaultGapDetectorResult, + ParquetFileGapDetectorResult, +} +pub enum ProcessingResult { + DefaultProcessingResult(DefaultProcessingResult), + ParquetProcessingResult(ParquetProcessingResult), +} + +pub async fn create_gap_detector_status_tracker_loop( + gap_detector_receiver: AsyncReceiver, + processor: Processor, + starting_version: u64, + gap_detection_batch_size: u64, +) { + let processor_name = processor.name(); + info!( + processor_name = processor_name, + service_type = PROCESSOR_SERVICE_TYPE, + "[Parser] Starting gap detector task", + ); + + loop { + match gap_detector_receiver.recv().await { + Ok(ProcessingResult::DefaultProcessingResult(result)) => { + let mut gap_detector = DefaultGapDetector::new(starting_version); + let last_update_time = std::time::Instant::now(); + match gap_detector + .process_versions(ProcessingResult::DefaultProcessingResult(result)) + { + Ok(res) => { + match res { + GapDetectorResult::DefaultGapDetectorResult(res) => { + PROCESSOR_DATA_GAP_COUNT + .with_label_values(&[processor_name]) + .set(res.num_gaps as i64); + if res.num_gaps >= gap_detection_batch_size { + tracing::debug!( + processor_name, + gap_start_version = res.next_version_to_process, + num_gaps = res.num_gaps, + "[Parser] Processed {gap_detection_batch_size} batches with a gap", + ); + // We don't panic as everything downstream will panic if it doesn't work/receive + } + if let Some(res_last_success_batch) = res.last_success_batch { + if last_update_time.elapsed().as_secs() + >= UPDATE_PROCESSOR_STATUS_SECS + { + processor + .update_last_processed_version( + res_last_success_batch.end_version, + res_last_success_batch + .last_transaction_timestamp + .clone(), + ) + .await + .unwrap(); + } + } + }, + _ => { + panic!("Invalid result type"); + }, + } + }, + Err(e) => { + error!( + processor_name, + service_type = PROCESSOR_SERVICE_TYPE, + error = ?e, + "[Parser] Gap detector task has panicked" + ); + panic!("[Parser] Gap detector task has panicked: {:?}", e); + }, + } + }, + Ok(ProcessingResult::ParquetProcessingResult(result)) => { + info!( + processor_name, + service_type = PROCESSOR_SERVICE_TYPE, + "[ParquetGapDetector] received parquet gap detector task", + ); + let mut parquet_gap_detector = ParquetFileGapDetector::new(starting_version); + let last_update_time = std::time::Instant::now(); + match parquet_gap_detector + .process_versions(ProcessingResult::ParquetProcessingResult(result)) + { + Ok(res) => { + match res { + GapDetectorResult::ParquetFileGapDetectorResult(res) => { + PARQUET_PROCESSOR_DATA_GAP_COUNT + .with_label_values(&[processor_name]) + .set(res.num_gaps as i64); + // we need a new gap detection batch size + if res.num_gaps >= gap_detection_batch_size { + tracing::debug!( + processor_name, + gap_start_version = res.next_version_to_process, + num_gaps = res.num_gaps, + "[Parser] Processed {gap_detection_batch_size} batches with a gap", + ); + // We don't panic as everything downstream will panic if it doesn't work/receive + } + + if let Some(res_last_success_batch) = res.last_success_batch { + if last_update_time.elapsed().as_secs() + >= UPDATE_PROCESSOR_STATUS_SECS + { + processor + .update_last_processed_version( + res_last_success_batch.end_version as u64, + res_last_success_batch + .last_transaction_timestamp + .clone(), + ) + 
.await + .unwrap(); + } + } + }, + _ => { + panic!("Invalid result type"); + }, + } + }, + Err(e) => { + error!( + processor_name, + service_type = PROCESSOR_SERVICE_TYPE, + error = ?e, + "[Parser] Gap detector task has panicked" + ); + panic!("[Parser] Gap detector task has panicked: {:?}", e); + }, + } + }, + Err(e) => { + info!( + processor_name, + service_type = PROCESSOR_SERVICE_TYPE, + error = ?e, + "[Parser] Gap detector channel has been closed", + ); + return; + }, + }; + } +} diff --git a/rust/processor/src/gap_detectors/parquet_gap_detector.rs b/rust/processor/src/gap_detectors/parquet_gap_detector.rs new file mode 100644 index 000000000..808deb892 --- /dev/null +++ b/rust/processor/src/gap_detectors/parquet_gap_detector.rs @@ -0,0 +1,95 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + gap_detectors::{gap_detector::GapDetectorTrait, GapDetectorResult, ProcessingResult}, + parquet_processors::ParquetProcessingResult, +}; +use ahash::AHashMap; +use tracing::{debug, info}; + +pub struct ParquetFileGapDetector { + next_version_to_process: i64, + seen_versions: AHashMap<i64, ParquetProcessingResult>, + last_success_batch: Option<ParquetProcessingResult>, + version_counters: AHashMap<i64, i64>, +} + +pub struct ParquetFileGapDetectorResult { + pub next_version_to_process: u64, + pub num_gaps: u64, + pub last_success_batch: Option<ParquetProcessingResult>, +} + +impl ParquetFileGapDetector { + pub fn new(starting_version: u64) -> Self { + Self { + next_version_to_process: starting_version as i64, + seen_versions: AHashMap::new(), + last_success_batch: None, + version_counters: AHashMap::new(), + } + } +} +impl GapDetectorTrait for ParquetFileGapDetector { + fn process_versions(&mut self, result: ProcessingResult) -> anyhow::Result<GapDetectorResult> { + // Update counts of structures for each transaction version + let result = match result { + ProcessingResult::ParquetProcessingResult(r) => r, + _ => panic!("Invalid result type"), + }; + for (version, count) in result.txn_version_to_struct_count.iter() { + if !self.version_counters.contains_key(version) { + // info!("Inserting version {} with count {} into parquet gap detector", version, count); + self.version_counters.insert(*version, *count); + } + + *self.version_counters.entry(*version).or_default() -= 1; + } + + // Update next version to process and move forward + let mut current_version = result.start_version; + while current_version <= result.end_version { + match self.version_counters.get_mut(&current_version) { + Some(count) => { + if *count == 0 && current_version == self.next_version_to_process { + while let Some(&count) = + self.version_counters.get(&self.next_version_to_process) + { + if count == 0 { + self.version_counters.remove(&self.next_version_to_process); // Remove the fully processed version + self.next_version_to_process += 1; // Increment to the next version + info!("Version {} fully processed. Next version to process updated to {}", self.next_version_to_process - 1, self.next_version_to_process); + } else { + break; + } + } + } + }, + None => { + // TODO: validate that we shouldn't reach this arm, since we already added the default count above; + // alternatively, it could mean that we have duplicates. 
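+ // Note: this arm is also hit in the normal flow, since the inner loop above removes entries from `version_counters` once they are fully processed,
+ // so later iterations of this outer loop no longer find them; it can also indicate versions in the batch range that never appeared in `txn_version_to_struct_count`.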
+ debug!("No struct count found for version {}", current_version); + }, + } + current_version += 1; // Move to the next version in sequence + } + + if current_version == result.end_version { + debug!("No gap detected"); + } else { + self.seen_versions.insert(current_version, result); + debug!("Gap detected"); + } + + Ok(GapDetectorResult::ParquetFileGapDetectorResult( + ParquetFileGapDetectorResult { + next_version_to_process: self.next_version_to_process as u64, + num_gaps: self.seen_versions.len() as u64, + last_success_batch: self.last_success_batch.clone(), + }, + )) + } +} + +// TODO: add tests diff --git a/rust/processor/src/grpc_stream.rs b/rust/processor/src/grpc_stream.rs new file mode 100644 index 000000000..e1674dcd0 --- /dev/null +++ b/rust/processor/src/grpc_stream.rs @@ -0,0 +1,694 @@ +use crate::utils::{ + counters::{ + ProcessorStep, FETCHER_THREAD_CHANNEL_SIZE, LATEST_PROCESSED_VERSION, + NUM_TRANSACTIONS_FILTERED_OUT_COUNT, NUM_TRANSACTIONS_PROCESSED_COUNT, + PROCESSED_BYTES_COUNT, TRANSACTION_UNIX_TIMESTAMP, + }, + util::{timestamp_to_iso, timestamp_to_unixtime}, +}; +use aptos_moving_average::MovingAverage; +use aptos_protos::{ + indexer::v1::{raw_data_client::RawDataClient, GetTransactionsRequest, TransactionsResponse}, + transaction::v1::Transaction, + util::timestamp::Timestamp, +}; +use bigdecimal::Zero; +use futures_util::StreamExt; +use itertools::Itertools; +use kanal::AsyncSender; +use prost::Message; +use std::time::Duration; +use tokio::time::timeout; +use tonic::{Response, Streaming}; +use tracing::{debug, error, info}; +use url::Url; + +/// GRPC request metadata key for the token ID. +const GRPC_API_GATEWAY_API_KEY_HEADER: &str = "authorization"; +/// GRPC request metadata key for the request name. This is used to identify the +/// data destination. 
+const GRPC_REQUEST_NAME_HEADER: &str = "x-aptos-request-name"; +/// GRPC connection id +const GRPC_CONNECTION_ID: &str = "x-aptos-connection-id"; +/// We will try to reconnect to GRPC 5 times in case upstream connection is being updated +pub const RECONNECTION_MAX_RETRIES: u64 = 5; +/// 256MB +pub const MAX_RESPONSE_SIZE: usize = 1024 * 1024 * 256; + +#[derive(Clone)] +pub struct TransactionsPBResponse { + pub transactions: Vec, + pub chain_id: u64, + // We put start/end versions here as filtering means there are potential "gaps" here now + pub start_version: u64, + pub end_version: u64, + pub start_txn_timestamp: Option, + pub end_txn_timestamp: Option, + pub size_in_bytes: u64, +} + +pub fn grpc_request_builder( + starting_version: u64, + transactions_count: Option, + grpc_auth_token: String, + processor_name: String, +) -> tonic::Request { + let mut request = tonic::Request::new(GetTransactionsRequest { + starting_version: Some(starting_version), + transactions_count, + ..GetTransactionsRequest::default() + }); + request.metadata_mut().insert( + GRPC_API_GATEWAY_API_KEY_HEADER, + format!("Bearer {}", grpc_auth_token.clone()) + .parse() + .unwrap(), + ); + request + .metadata_mut() + .insert(GRPC_REQUEST_NAME_HEADER, processor_name.parse().unwrap()); + request +} + +pub async fn get_stream( + indexer_grpc_data_service_address: Url, + indexer_grpc_http2_ping_interval: Duration, + indexer_grpc_http2_ping_timeout: Duration, + indexer_grpc_reconnection_timeout_secs: Duration, + starting_version: u64, + ending_version: Option, + auth_token: String, + processor_name: String, +) -> Response> { + info!( + processor_name = processor_name, + service_type = crate::worker::PROCESSOR_SERVICE_TYPE, + stream_address = indexer_grpc_data_service_address.to_string(), + start_version = starting_version, + end_version = ending_version, + "[Parser] Setting up rpc channel" + ); + + let channel = tonic::transport::Channel::from_shared( + indexer_grpc_data_service_address.to_string(), + ) + .expect( + "[Parser] Failed to build GRPC channel, perhaps because the data service URL is invalid", + ) + .http2_keep_alive_interval(indexer_grpc_http2_ping_interval) + .keep_alive_timeout(indexer_grpc_http2_ping_timeout); + + // If the scheme is https, add a TLS config. 
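+ // This uses the default ClientTlsConfig (no custom CA certificate or client identity is configured); for non-https addresses the channel stays plaintext.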
+ let channel = if indexer_grpc_data_service_address.scheme() == "https" { + let config = tonic::transport::channel::ClientTlsConfig::new(); + channel + .tls_config(config) + .expect("[Parser] Failed to create TLS config") + } else { + channel + }; + + info!( + processor_name = processor_name, + service_type = crate::worker::PROCESSOR_SERVICE_TYPE, + stream_address = indexer_grpc_data_service_address.to_string(), + start_version = starting_version, + end_version = ending_version, + "[Parser] Setting up GRPC client" + ); + + // TODO: move this to a config file + // Retry this connection a few times before giving up + let mut connect_retries = 0; + let connect_res = loop { + let res = timeout( + indexer_grpc_reconnection_timeout_secs, + RawDataClient::connect(channel.clone()), + ) + .await; + match res { + Ok(client) => break Ok(client), + Err(e) => { + error!( + processor_name = processor_name, + service_type = crate::worker::PROCESSOR_SERVICE_TYPE, + stream_address = indexer_grpc_data_service_address.to_string(), + start_version = starting_version, + end_version = ending_version, + retries = connect_retries, + error = ?e, + "[Parser] Error connecting to GRPC client" + ); + connect_retries += 1; + if connect_retries >= RECONNECTION_MAX_RETRIES { + break Err(e); + } + }, + } + } + .expect("[Parser] Timeout connecting to GRPC server"); + + let mut rpc_client = match connect_res { + Ok(client) => client + .accept_compressed(tonic::codec::CompressionEncoding::Gzip) + .accept_compressed(tonic::codec::CompressionEncoding::Zstd) + .send_compressed(tonic::codec::CompressionEncoding::Zstd) + .max_decoding_message_size(MAX_RESPONSE_SIZE) + .max_encoding_message_size(MAX_RESPONSE_SIZE), + Err(e) => { + error!( + processor_name = processor_name, + service_type = crate::worker::PROCESSOR_SERVICE_TYPE, + stream_address = indexer_grpc_data_service_address.to_string(), + start_version = starting_version, + ending_version = ending_version, + error = ?e, + "[Parser] Error connecting to GRPC client" + ); + panic!("[Parser] Error connecting to GRPC client"); + }, + }; + let count = ending_version.map(|v| (v as i64 - starting_version as i64 + 1) as u64); + info!( + processor_name = processor_name, + service_type = crate::worker::PROCESSOR_SERVICE_TYPE, + stream_address = indexer_grpc_data_service_address.to_string(), + start_version = starting_version, + end_version = ending_version, + num_of_transactions = ?count, + "[Parser] Setting up GRPC stream", + ); + + // TODO: move this to a config file + // Retry this connection a few times before giving up + let mut connect_retries = 0; + let stream_res = loop { + let timeout_res = timeout(indexer_grpc_reconnection_timeout_secs, async { + let request = grpc_request_builder( + starting_version, + count, + auth_token.clone(), + processor_name.clone(), + ); + rpc_client.get_transactions(request).await + }) + .await; + match timeout_res { + Ok(client) => break Ok(client), + Err(e) => { + error!( + processor_name = processor_name, + service_type = crate::worker::PROCESSOR_SERVICE_TYPE, + stream_address = indexer_grpc_data_service_address.to_string(), + start_version = starting_version, + end_version = ending_version, + retries = connect_retries, + error = ?e, + "[Parser] Timeout making grpc request. 
Retrying...", + ); + connect_retries += 1; + if connect_retries >= RECONNECTION_MAX_RETRIES { + break Err(e); + } + }, + } + } + .expect("[Parser] Timed out making grpc request after max retries."); + + match stream_res { + Ok(stream) => stream, + Err(e) => { + error!( + processor_name = processor_name, + service_type = crate::worker::PROCESSOR_SERVICE_TYPE, + stream_address = indexer_grpc_data_service_address.to_string(), + start_version = starting_version, + ending_version = ending_version, + error = ?e, + "[Parser] Failed to get grpc response. Is the server running?" + ); + panic!("[Parser] Failed to get grpc response. Is the server running?"); + }, + } +} + +pub async fn get_chain_id( + indexer_grpc_data_service_address: Url, + indexer_grpc_http2_ping_interval: Duration, + indexer_grpc_http2_ping_timeout: Duration, + indexer_grpc_reconnection_timeout_secs: Duration, + auth_token: String, + processor_name: String, +) -> u64 { + info!( + processor_name = processor_name, + service_type = crate::worker::PROCESSOR_SERVICE_TYPE, + stream_address = indexer_grpc_data_service_address.to_string(), + "[Parser] Connecting to GRPC stream to get chain id", + ); + let response = get_stream( + indexer_grpc_data_service_address.clone(), + indexer_grpc_http2_ping_interval, + indexer_grpc_http2_ping_timeout, + indexer_grpc_reconnection_timeout_secs, + 1, + Some(2), + auth_token.clone(), + processor_name.to_string(), + ) + .await; + let connection_id = match response.metadata().get(GRPC_CONNECTION_ID) { + Some(connection_id) => connection_id.to_str().unwrap().to_string(), + None => "".to_string(), + }; + let mut resp_stream = response.into_inner(); + info!( + processor_name = processor_name, + service_type = crate::worker::PROCESSOR_SERVICE_TYPE, + stream_address = indexer_grpc_data_service_address.to_string(), + connection_id, + "[Parser] Successfully connected to GRPC stream to get chain id", + ); + + match resp_stream.next().await { + Some(Ok(r)) => r.chain_id.expect("[Parser] Chain Id doesn't exist."), + Some(Err(rpc_error)) => { + error!( + processor_name = processor_name, + service_type = crate::worker::PROCESSOR_SERVICE_TYPE, + stream_address = indexer_grpc_data_service_address.to_string(), + connection_id, + error = ?rpc_error, + "[Parser] Error receiving datastream response for chain id" + ); + panic!("[Parser] Error receiving datastream response for chain id"); + }, + None => { + error!( + processor_name = processor_name, + service_type = crate::worker::PROCESSOR_SERVICE_TYPE, + stream_address = indexer_grpc_data_service_address.to_string(), + connection_id, + "[Parser] Stream ended before getting response for chain id" + ); + panic!("[Parser] Stream ended before getting response for chain id"); + }, + } +} + +/// Gets a batch of transactions from the stream. Batch size is set in the grpc server. +/// The number of batches depends on our config. +/// There could be several special scenarios: +/// 1. If we lose the connection, we will try reconnecting X times within Y seconds before crashing. +/// 2. 
If we specified an end version and we hit that, we will stop fetching, but we will make sure that +/// all existing transactions are processed +pub async fn create_fetcher_loop( + txn_sender: AsyncSender, + indexer_grpc_data_service_address: Url, + indexer_grpc_http2_ping_interval: Duration, + indexer_grpc_http2_ping_timeout: Duration, + indexer_grpc_reconnection_timeout_secs: Duration, + indexer_grpc_response_item_timeout_secs: Duration, + starting_version: u64, + request_ending_version: Option, + auth_token: String, + processor_name: String, + transaction_filter: crate::transaction_filter::TransactionFilter, + // The number of transactions per protobuf batch + pb_channel_txn_chunk_size: usize, +) { + info!( + processor_name = processor_name, + service_type = crate::worker::PROCESSOR_SERVICE_TYPE, + stream_address = indexer_grpc_data_service_address.to_string(), + start_version = starting_version, + end_version = request_ending_version, + "[Parser] Connecting to GRPC stream", + ); + let mut response = get_stream( + indexer_grpc_data_service_address.clone(), + indexer_grpc_http2_ping_interval, + indexer_grpc_http2_ping_timeout, + indexer_grpc_reconnection_timeout_secs, + starting_version, + request_ending_version, + auth_token.clone(), + processor_name.to_string(), + ) + .await; + let mut connection_id = match response.metadata().get(GRPC_CONNECTION_ID) { + Some(connection_id) => connection_id.to_str().unwrap().to_string(), + None => "".to_string(), + }; + let mut resp_stream = response.into_inner(); + info!( + processor_name = processor_name, + service_type = crate::worker::PROCESSOR_SERVICE_TYPE, + stream_address = indexer_grpc_data_service_address.to_string(), + connection_id, + start_version = starting_version, + end_version = request_ending_version, + "[Parser] Successfully connected to GRPC stream", + ); + + let mut grpc_channel_recv_latency = std::time::Instant::now(); + let mut next_version_to_fetch = starting_version; + let mut reconnection_retries = 0; + let mut last_fetched_version = starting_version as i64 - 1; + let mut fetch_ma = MovingAverage::new(3000); + let mut send_ma = MovingAverage::new(3000); + + loop { + let is_success = match tokio::time::timeout( + indexer_grpc_response_item_timeout_secs, + resp_stream.next(), + ) + .await + { + // Received datastream response + Ok(response) => { + match response { + Some(Ok(mut r)) => { + reconnection_retries = 0; + let start_version = r.transactions.as_slice().first().unwrap().version; + let start_txn_timestamp = + r.transactions.as_slice().first().unwrap().timestamp.clone(); + let end_version = r.transactions.as_slice().last().unwrap().version; + let end_txn_timestamp = + r.transactions.as_slice().last().unwrap().timestamp.clone(); + + next_version_to_fetch = end_version + 1; + + let size_in_bytes = r.encoded_len() as u64; + let chain_id: u64 = r.chain_id.expect("[Parser] Chain Id doesn't exist."); + let num_txns = r.transactions.len(); + let duration_in_secs = grpc_channel_recv_latency.elapsed().as_secs_f64(); + fetch_ma.tick_now(num_txns as u64); + + let num_txns = r.transactions.len(); + + // Filter out the txns we don't care about + r.transactions.retain(|txn| transaction_filter.include(txn)); + + let num_txn_post_filter = r.transactions.len(); + let num_filtered_txns = num_txns - num_txn_post_filter; + let step = ProcessorStep::ReceivedTxnsFromGrpc.get_step(); + let label = ProcessorStep::ReceivedTxnsFromGrpc.get_label(); + + info!( + processor_name = processor_name, + service_type = crate::worker::PROCESSOR_SERVICE_TYPE, + 
stream_address = indexer_grpc_data_service_address.to_string(), + connection_id, + start_version, + end_version, + start_txn_timestamp_iso = start_txn_timestamp + .as_ref() + .map(timestamp_to_iso) + .unwrap_or_default(), + end_txn_timestamp_iso = end_txn_timestamp + .as_ref() + .map(timestamp_to_iso) + .unwrap_or_default(), + num_of_transactions = end_version - start_version + 1, + num_filtered_txns, + channel_size = txn_sender.len(), + size_in_bytes, + duration_in_secs, + tps = fetch_ma.avg().ceil() as u64, + bytes_per_sec = size_in_bytes as f64 / duration_in_secs, + step, + "{}", + label, + ); + + if last_fetched_version + 1 != start_version as i64 { + error!( + batch_start_version = last_fetched_version + 1, + last_fetched_version, + current_fetched_version = start_version, + "[Parser] Received batch with gap from GRPC stream" + ); + panic!("[Parser] Received batch with gap from GRPC stream"); + } + last_fetched_version = end_version as i64; + + LATEST_PROCESSED_VERSION + .with_label_values(&[&processor_name, step, label, "-"]) + .set(end_version as i64); + TRANSACTION_UNIX_TIMESTAMP + .with_label_values(&[&processor_name, step, label, "-"]) + .set( + start_txn_timestamp + .as_ref() + .map(timestamp_to_unixtime) + .unwrap_or_default(), + ); + PROCESSED_BYTES_COUNT + .with_label_values(&[&processor_name, step, label, "-"]) + .inc_by(size_in_bytes); + NUM_TRANSACTIONS_PROCESSED_COUNT + .with_label_values(&[&processor_name, step, label, "-"]) + .inc_by(end_version - start_version + 1); + + let txn_channel_send_latency = std::time::Instant::now(); + + //potentially break txn_pb into many `TransactionsPBResponse` that are each `pb_channel_txn_chunk_size` txns max in size + if num_txn_post_filter < pb_channel_txn_chunk_size { + // We only need to send one; avoid the chunk/clone + let txn_pb = TransactionsPBResponse { + transactions: r.transactions, + chain_id, + start_version, + end_version, + start_txn_timestamp, + end_txn_timestamp, + size_in_bytes, + }; + + match txn_sender.send(txn_pb).await { + Ok(()) => {}, + Err(e) => { + error!( + processor_name = processor_name, + stream_address = indexer_grpc_data_service_address.to_string(), + connection_id, + error = ?e, + "[Parser] Error sending GRPC response to channel." + ); + panic!("[Parser] Error sending GRPC response to channel.") + }, + } + } else { + // We are breaking down a big batch into small batches; this involves an iterator + let average_size_in_bytes = size_in_bytes / num_txns as u64; + + let pb_txn_chunks: Vec> = r + .transactions + .into_iter() + .chunks(pb_channel_txn_chunk_size) + .into_iter() + .map(|chunk| chunk.collect()) + .collect(); + for txns in pb_txn_chunks { + let size_in_bytes = average_size_in_bytes * txns.len() as u64; + let txn_pb = TransactionsPBResponse { + transactions: txns, + chain_id, + start_version, + end_version, + // TODO: this is only for gap checker + filtered txns, but this is wrong + start_txn_timestamp: start_txn_timestamp.clone(), + end_txn_timestamp: end_txn_timestamp.clone(), + size_in_bytes, + }; + + match txn_sender.send(txn_pb).await { + Ok(()) => {}, + Err(e) => { + error!( + processor_name = processor_name, + stream_address = indexer_grpc_data_service_address.to_string(), + connection_id, + error = ?e, + "[Parser] Error sending GRPC response to channel." 
+ ); + panic!("[Parser] Error sending GRPC response to channel.") + }, + } + } + } + + let duration_in_secs = txn_channel_send_latency.elapsed().as_secs_f64(); + send_ma.tick_now(num_txns as u64); + let tps = send_ma.avg().ceil() as u64; + let bytes_per_sec = size_in_bytes as f64 / duration_in_secs; + + let channel_size = txn_sender.len(); + debug!( + processor_name = processor_name, + service_type = crate::worker::PROCESSOR_SERVICE_TYPE, + stream_address = indexer_grpc_data_service_address.to_string(), + connection_id, + start_version, + end_version, + channel_size, + size_in_bytes, + duration_in_secs, + bytes_per_sec, + tps, + num_filtered_txns, + "[Parser] Successfully sent transactions to channel." + ); + FETCHER_THREAD_CHANNEL_SIZE + .with_label_values(&[&processor_name]) + .set(channel_size as i64); + grpc_channel_recv_latency = std::time::Instant::now(); + + NUM_TRANSACTIONS_FILTERED_OUT_COUNT + .with_label_values(&[&processor_name]) + .inc_by(num_filtered_txns as u64); + true + }, + // Error receiving datastream response + Some(Err(rpc_error)) => { + tracing::warn!( + processor_name = processor_name, + service_type = crate::worker::PROCESSOR_SERVICE_TYPE, + stream_address = indexer_grpc_data_service_address.to_string(), + connection_id, + start_version = starting_version, + end_version = request_ending_version, + error = ?rpc_error, + "[Parser] Error receiving datastream response." + ); + false + }, + // Stream is finished + None => { + tracing::warn!( + processor_name = processor_name, + service_type = crate::worker::PROCESSOR_SERVICE_TYPE, + stream_address = indexer_grpc_data_service_address.to_string(), + connection_id, + start_version = starting_version, + end_version = request_ending_version, + "[Parser] Stream ended." + ); + false + }, + } + }, + // Timeout receiving datastream response + Err(e) => { + tracing::warn!( + processor_name = processor_name, + service_type = crate::worker::PROCESSOR_SERVICE_TYPE, + stream_address = indexer_grpc_data_service_address.to_string(), + connection_id, + start_version = starting_version, + end_version = request_ending_version, + error = ?e, + "[Parser] Timeout receiving datastream response." + ); + false + }, + }; + // Check if we're at the end of the stream + let is_end = if let Some(ending_version) = request_ending_version { + next_version_to_fetch > ending_version + } else { + false + }; + if is_end { + info!( + processor_name = processor_name, + service_type = crate::worker::PROCESSOR_SERVICE_TYPE, + stream_address = indexer_grpc_data_service_address.to_string(), + connection_id, + ending_version = request_ending_version, + next_version_to_fetch = next_version_to_fetch, + "[Parser] Reached ending version.", + ); + // Wait for the fetched transactions to finish processing before closing the channel + loop { + let channel_size = txn_sender.len(); + info!( + processor_name = processor_name, + service_type = crate::worker::PROCESSOR_SERVICE_TYPE, + stream_address = indexer_grpc_data_service_address.to_string(), + connection_id, + channel_size, + "[Parser] Waiting for channel to be empty" + ); + if channel_size.is_zero() { + break; + } + tokio::time::sleep(Duration::from_millis(100)).await; + } + info!( + processor_name = processor_name, + service_type = crate::worker::PROCESSOR_SERVICE_TYPE, + stream_address = indexer_grpc_data_service_address.to_string(), + connection_id, + "[Parser] Transaction fetcher send channel is closed." 
+ );
+ break;
+ } else {
+ // The rest is to see if we need to reconnect
+ if is_success {
+ continue;
+ }
+
+ // Sleep for 100ms between reconnect tries
+ // TODO: Turn this into exponential backoff
+ tokio::time::sleep(Duration::from_millis(100)).await;
+
+ if reconnection_retries >= RECONNECTION_MAX_RETRIES {
+ error!(
+ processor_name = processor_name,
+ service_type = crate::worker::PROCESSOR_SERVICE_TYPE,
+ stream_address = indexer_grpc_data_service_address.to_string(),
+ "[Parser] Reconnected more than {RECONNECTION_MAX_RETRIES} times. Will not retry.",
+ );
+ panic!("[Parser] Reconnected more than {RECONNECTION_MAX_RETRIES} times. Will not retry.")
+ }
+ reconnection_retries += 1;
+ info!(
+ processor_name = processor_name,
+ service_type = crate::worker::PROCESSOR_SERVICE_TYPE,
+ stream_address = indexer_grpc_data_service_address.to_string(),
+ starting_version = next_version_to_fetch,
+ ending_version = request_ending_version,
+ reconnection_retries = reconnection_retries,
+ "[Parser] Reconnecting to GRPC stream"
+ );
+ response = get_stream(
+ indexer_grpc_data_service_address.clone(),
+ indexer_grpc_http2_ping_interval,
+ indexer_grpc_http2_ping_timeout,
+ indexer_grpc_reconnection_timeout_secs,
+ next_version_to_fetch,
+ request_ending_version,
+ auth_token.clone(),
+ processor_name.to_string(),
+ )
+ .await;
+ connection_id = match response.metadata().get(GRPC_CONNECTION_ID) {
+ Some(connection_id) => connection_id.to_str().unwrap().to_string(),
+ None => "".to_string(),
+ };
+ resp_stream = response.into_inner();
+ info!(
+ processor_name = processor_name,
+ service_type = crate::worker::PROCESSOR_SERVICE_TYPE,
+ stream_address = indexer_grpc_data_service_address.to_string(),
+ connection_id,
+ starting_version = next_version_to_fetch,
+ ending_version = request_ending_version,
+ reconnection_retries = reconnection_retries,
+ "[Parser] Successfully reconnected to GRPC stream"
+ );
+ }
+ }
+}
diff --git a/rust/processor/src/lib.rs b/rust/processor/src/lib.rs
new file mode 100644
index 000000000..6c05d7596
--- /dev/null
+++ b/rust/processor/src/lib.rs
@@ -0,0 +1,32 @@
+// Copyright © Aptos Foundation
+// SPDX-License-Identifier: Apache-2.0
+
+// Increase recursion limit for `serde_json::json!` macro parsing
+#![recursion_limit = "256"]
+
+// #[macro_use]
+// extern crate diesel_migrations;
+
+// Need to use this because schema.rs uses the macros and is autogenerated
+#[macro_use]
+extern crate diesel;
+
+// for parquet_derive
+extern crate canonical_json;
+extern crate parquet;
+extern crate parquet_derive;
+
+pub use config::IndexerGrpcProcessorConfig;
+
+mod config;
+mod db;
+pub mod gap_detectors;
+pub mod grpc_stream;
+pub mod parquet_handler;
+pub mod parquet_processors;
+pub mod processors;
+#[path = "db/postgres/schema.rs"]
+pub mod schema;
+pub mod transaction_filter;
+pub mod utils;
+pub mod worker;
diff --git a/rust/processor/src/main.rs b/rust/processor/src/main.rs
new file mode 100644
index 000000000..f26edb921
--- /dev/null
+++ b/rust/processor/src/main.rs
@@ -0,0 +1,35 @@
+// Copyright © Aptos Foundation
+// SPDX-License-Identifier: Apache-2.0
+
+use anyhow::Result;
+use clap::Parser;
+use processor::IndexerGrpcProcessorConfig;
+use server_framework::ServerArgs;
+
+#[cfg(unix)]
+#[global_allocator]
+static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc;
+
+const RUNTIME_WORKER_MULTIPLIER: usize = 2;
+
+fn main() -> Result<()> {
+ let num_cpus = num_cpus::get();
+ let worker_threads = (num_cpus * RUNTIME_WORKER_MULTIPLIER).max(16);
+ println!(
"[Processor] Starting processor tokio runtime: num_cpus={}, worker_threads={}", + num_cpus, worker_threads + ); + + let mut builder = tokio::runtime::Builder::new_multi_thread(); + builder + .disable_lifo_slot() + .enable_all() + .worker_threads(worker_threads) + .build() + .unwrap() + .block_on(async { + let args = ServerArgs::parse(); + args.run::(tokio::runtime::Handle::current()) + .await + }) +} diff --git a/rust/processor/src/parquet_handler.rs b/rust/processor/src/parquet_handler.rs new file mode 100644 index 000000000..cb4077306 --- /dev/null +++ b/rust/processor/src/parquet_handler.rs @@ -0,0 +1,78 @@ +use crate::{ + gap_detectors::ProcessingResult, + parquet_processors::generic_parquet_processor::{ + HasParquetSchema, HasVersion, NamedTable, ParquetDataGeneric, + ParquetHandler as GenericParquetHandler, + }, + worker::PROCESSOR_SERVICE_TYPE, +}; +use allocative::Allocative; +use google_cloud_storage::client::{Client as GCSClient, ClientConfig as GcsClientConfig}; +use kanal::AsyncSender; +use parquet::record::RecordWriter; +use std::sync::Arc; +use tracing::{debug, error, info}; + +pub fn create_parquet_handler_loop( + new_gap_detector_sender: AsyncSender, + processor_name: &str, + bucket_name: String, + parquet_handler_response_channel_size: usize, + max_buffer_size: usize, +) -> AsyncSender> +where + ParquetType: NamedTable + HasVersion + HasParquetSchema + Send + Sync + 'static + Allocative, + for<'a> &'a [ParquetType]: RecordWriter, +{ + let processor_name = processor_name.to_owned(); + + let (parquet_sender, parquet_receiver) = kanal::bounded_async::>( + parquet_handler_response_channel_size, + ); + + debug!( + processor_name = processor_name.clone(), + service_type = PROCESSOR_SERVICE_TYPE, + "[Parquet Handler] Starting parquet handler loop", + ); + + let mut parquet_manager = GenericParquetHandler::new( + bucket_name.clone(), + new_gap_detector_sender.clone(), + ParquetType::schema(), + ) + .expect("Failed to create parquet manager"); + + tokio::spawn(async move { + let gcs_config = GcsClientConfig::default() + .with_auth() + .await + .expect("Failed to create GCS client config"); + let gcs_client = Arc::new(GCSClient::new(gcs_config)); + + loop { + let txn_pb_res = parquet_receiver.recv().await.unwrap(); // handle error properly + + let result = parquet_manager.handle(&gcs_client, txn_pb_res, max_buffer_size).await; + match result { + Ok(_) => { + info!( + processor_name = processor_name.clone(), + service_type = PROCESSOR_SERVICE_TYPE, + "[Parquet Handler] Successfully processed parquet files", + ); + }, + Err(e) => { + error!( + processor_name = processor_name.clone(), + service_type = PROCESSOR_SERVICE_TYPE, + "[Parquet Handler] Error processing parquet files: {:?}", + e + ); + }, + } + } + }); + + parquet_sender +} diff --git a/rust/processor/src/parquet_processors/generic_parquet_processor.rs b/rust/processor/src/parquet_processors/generic_parquet_processor.rs new file mode 100644 index 000000000..cdaab7e55 --- /dev/null +++ b/rust/processor/src/parquet_processors/generic_parquet_processor.rs @@ -0,0 +1,235 @@ +use super::ParquetProcessingResult; +use crate::{ + gap_detectors::ProcessingResult, + parquet_processors::upload_parquet_to_gcs, + utils::counters::{PARQUET_HANDLER_BUFFER_SIZE, PARQUET_STRUCT_SIZE}, +}; +use ahash::AHashMap; +use allocative::Allocative; +use anyhow::{anyhow, Result}; +use google_cloud_storage::client::Client as GCSClient; +use parquet::{ + file::{properties::WriterProperties, writer::SerializedFileWriter}, + record::RecordWriter, + 
schema::types::Type, +}; +use std::{ + fs::{remove_file, rename, File}, + path::PathBuf, + sync::Arc, +}; +use tracing::{debug, error}; +use uuid::Uuid; + +#[derive(Debug, Default, Clone)] +pub struct ParquetDataGeneric { + pub data: Vec, + pub first_txn_version: u64, + pub last_txn_version: u64, + pub last_transaction_timestamp: Option, + pub transaction_version_to_struct_count: AHashMap, +} + +pub trait NamedTable { + const TABLE_NAME: &'static str; +} + +pub trait HasVersion { + fn version(&self) -> i64; +} + +pub trait HasParquetSchema { + fn schema() -> Arc; +} + +/// Auto-implement this for all types that implement `Default` and `RecordWriter` +impl HasParquetSchema for ParquetType +where + ParquetType: std::fmt::Debug + Default + Sync + Send, + for<'a> &'a [ParquetType]: RecordWriter, +{ + fn schema() -> Arc { + let example: Self = Default::default(); + [example].as_slice().schema().unwrap() + } +} + +pub struct ParquetHandler +where + ParquetType: NamedTable + HasVersion + HasParquetSchema + 'static + Allocative, + for<'a> &'a [ParquetType]: RecordWriter, +{ + pub schema: Arc, + pub writer: SerializedFileWriter, + pub buffer: Vec, + pub buffer_size_bytes: usize, + + pub transaction_version_to_struct_count: AHashMap, + pub bucket_name: String, + pub gap_detector_sender: kanal::AsyncSender, + pub file_path: String, +} + +fn create_new_writer( + file_path: &str, + schema: Arc, +) -> Result> { + let props = WriterProperties::builder() + .set_compression(parquet::basic::Compression::LZ4) + .build(); + let props_arc = Arc::new(props); + let file: File = File::options().create(true).write(true).open(file_path)?; + + Ok(SerializedFileWriter::new( + file.try_clone()?, + schema, + props_arc, + )?) +} + +impl ParquetHandler +where + ParquetType: NamedTable + HasVersion + HasParquetSchema + 'static + Allocative, + for<'a> &'a [ParquetType]: RecordWriter, +{ + fn create_new_writer(&self) -> Result> { + let file_path = &self.file_path; + create_new_writer(file_path, self.schema.clone()) + } + + fn close_writer(&mut self) -> Result<()> { + let mut writer = self.create_new_writer()?; + std::mem::swap(&mut self.writer, &mut writer); + writer.close()?; + Ok(()) + } + + pub fn new( + bucket_name: String, + gap_detector_sender: kanal::AsyncSender, + schema: Arc, + ) -> Result { + // had to append unique id to avoid concurrent write issues + let file_path = format!("{}_{}.parquet", ParquetType::TABLE_NAME, Uuid::new_v4()); + let writer = create_new_writer(&file_path, schema.clone())?; + + Ok(Self { + writer, + buffer: Vec::new(), + buffer_size_bytes: 0, + transaction_version_to_struct_count: AHashMap::new(), + bucket_name, + gap_detector_sender, + schema, + file_path, + }) + } + + pub async fn handle( + &mut self, + gcs_client: &GCSClient, + changes: ParquetDataGeneric, + max_buffer_size: usize, + ) -> Result<()> { + let last_transaction_timestamp = changes.last_transaction_timestamp; + let parquet_structs = changes.data; + self.transaction_version_to_struct_count + .extend(changes.transaction_version_to_struct_count); + + for parquet_struct in parquet_structs { + let size_of_struct = allocative::size_of_unique(&parquet_struct); + PARQUET_STRUCT_SIZE + .with_label_values(&[ParquetType::TABLE_NAME]) + .set(size_of_struct as i64); + self.buffer_size_bytes += size_of_struct; + self.buffer.push(parquet_struct); + + if self.buffer_size_bytes >= max_buffer_size { + let start_version = self.buffer.first().unwrap().version(); + let end_version = self.buffer.last().unwrap().version(); + + let 
txn_version_to_struct_count = process_struct_count_map( + &self.buffer, + &self.transaction_version_to_struct_count, + ); + + let new_file_path: PathBuf = PathBuf::from(format!( + "{}_{}.parquet", + ParquetType::TABLE_NAME, + Uuid::new_v4() + )); + rename(&self.file_path, &new_file_path)?; // this fixes an issue with concurrent file access issues + + let struct_buffer = std::mem::take(&mut self.buffer); + + let mut row_group_writer = self.writer.next_row_group()?; + struct_buffer + .as_slice() + .write_to_row_group(&mut row_group_writer) + .unwrap(); + row_group_writer.close()?; + self.close_writer()?; + + debug!( + table_name = ParquetType::TABLE_NAME, + start_version = start_version, + end_version = end_version, + "Max buffer size reached, uploading to GCS." + ); + let upload_result = upload_parquet_to_gcs( + gcs_client, + &new_file_path, + ParquetType::TABLE_NAME, + &self.bucket_name, + ) + .await; + self.buffer_size_bytes = 0; + remove_file(&new_file_path)?; + + return match upload_result { + Ok(_) => { + let parquet_processing_result = ParquetProcessingResult { + start_version, + end_version, + last_transaction_timestamp: last_transaction_timestamp.clone(), + txn_version_to_struct_count, + }; + + self.gap_detector_sender + .send(ProcessingResult::ParquetProcessingResult( + parquet_processing_result, + )) + .await + .expect("[Parser] Failed to send versions to gap detector"); + Ok(()) + }, + Err(e) => { + error!("Failed to upload file to GCS: {}", e); + Err(anyhow!("Failed to upload file to GCS: {}", e)) + }, + }; + } + } + + PARQUET_HANDLER_BUFFER_SIZE + .with_label_values(&[ParquetType::TABLE_NAME]) + .set(self.buffer.len() as i64); + Ok(()) + } +} + +fn process_struct_count_map( + buffer: &[ParquetType], + txn_version_to_struct_count: &AHashMap, +) -> AHashMap { + let mut txn_version_to_struct_count_for_gap_detector = AHashMap::new(); + + for item in buffer.iter() { + let version = item.version(); + + if let Some(count) = txn_version_to_struct_count.get(&(version)) { + txn_version_to_struct_count_for_gap_detector.insert(version, *count); + } + } + txn_version_to_struct_count_for_gap_detector +} diff --git a/rust/processor/src/parquet_processors/mod.rs b/rust/processor/src/parquet_processors/mod.rs new file mode 100644 index 000000000..6dc332e6b --- /dev/null +++ b/rust/processor/src/parquet_processors/mod.rs @@ -0,0 +1,191 @@ +pub mod generic_parquet_processor; + +use ahash::AHashMap; +use anyhow::{anyhow, Result}; +use chrono::{Datelike, Timelike}; +use google_cloud_storage::{ + client::Client as GCSClient, + http::{ + objects::upload::{Media, UploadObjectRequest, UploadType}, + Error as StorageError, + }, +}; +use hyper::Body; +use serde::{Deserialize, Serialize}; +use std::{ + fmt::{Debug, Display, Formatter, Result as FormatResult}, + path::PathBuf, +}; +use tokio::io::AsyncReadExt; // for read_to_end() +use tokio::{ + fs::File as TokioFile, + io, + time::{sleep, timeout, Duration}, +}; +use tracing::{debug, error, info}; + +// TODO: make it configurable, write now there is no difference between running parquet for backfill and regular traffic +const BUCKET_REGULAR_TRAFFIC: &str = "devnet-airflow-continue"; + +#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)] +pub struct ParquetProcessingResult { + pub start_version: i64, + pub end_version: i64, + pub last_transaction_timestamp: Option, + pub txn_version_to_struct_count: AHashMap, +} + +#[derive(Debug)] +pub enum ParquetProcessorError { + ParquetError(parquet::errors::ParquetError), + StorageError(StorageError), + 
TimeoutError(tokio::time::error::Elapsed),
+ IoError(io::Error),
+ Other(String),
+}
+
+impl std::error::Error for ParquetProcessorError {
+ fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
+ match *self {
+ ParquetProcessorError::ParquetError(ref err) => Some(err),
+ ParquetProcessorError::StorageError(ref err) => Some(err),
+ ParquetProcessorError::TimeoutError(ref err) => Some(err),
+ ParquetProcessorError::IoError(ref err) => Some(err),
+ ParquetProcessorError::Other(_) => None,
+ }
+ }
+}
+
+impl Display for ParquetProcessorError {
+ fn fmt(&self, f: &mut Formatter<'_>) -> FormatResult {
+ match *self {
+ ParquetProcessorError::ParquetError(ref err) => write!(f, "Parquet error: {}", err),
+ ParquetProcessorError::StorageError(ref err) => write!(f, "Storage error: {}", err),
+ ParquetProcessorError::TimeoutError(ref err) => write!(f, "Timeout error: {}", err),
+ ParquetProcessorError::IoError(ref err) => write!(f, "IO error: {}", err),
+ ParquetProcessorError::Other(ref desc) => write!(f, "Error: {}", desc),
+ }
+ }
+}
+
+impl From<std::io::Error> for ParquetProcessorError {
+ fn from(err: std::io::Error) -> Self {
+ ParquetProcessorError::IoError(err)
+ }
+}
+
+impl From<anyhow::Error> for ParquetProcessorError {
+ fn from(err: anyhow::Error) -> Self {
+ ParquetProcessorError::Other(err.to_string())
+ }
+}
+
+impl From<parquet::errors::ParquetError> for ParquetProcessorError {
+ fn from(err: parquet::errors::ParquetError) -> Self {
+ ParquetProcessorError::ParquetError(err)
+ }
+}
+
+pub async fn upload_parquet_to_gcs(
+ client: &GCSClient,
+ file_path: &PathBuf,
+ table_name: &str,
+ bucket_name: &str,
+) -> Result<(), ParquetProcessorError> {
+ let mut file = TokioFile::open(&file_path)
+ .await
+ .map_err(|e| anyhow!("Failed to open file for reading: {}", e))?;
+
+ let mut buffer = Vec::new();
+ file.read_to_end(&mut buffer)
+ .await
+ .map_err(|e| anyhow!("Failed to read file: {}", e))?;
+
+ if buffer.is_empty() {
+ error!("The file is empty and has no data to upload.",);
+ return Err(ParquetProcessorError::Other(
+ "The file is empty and has no data to upload.".to_string(),
+ ));
+ }
+
+ let now = chrono::Utc::now();
+ let start_of_month = now
+ .with_day(1)
+ .unwrap()
+ .with_hour(0)
+ .unwrap()
+ .with_minute(0)
+ .unwrap()
+ .with_second(0)
+ .unwrap()
+ .with_nanosecond(0)
+ .unwrap();
+ let highwater_s = start_of_month.timestamp_millis();
+ let highwater_ms = now.timestamp_millis();
+ let counter = 0; // THIS NEEDS TO BE REPLACED OR REIMPLEMENTED WITH ACTUAL LOGIC TO ENSURE FILE UNIQUENESS.
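+ // Note: the object path built below has the shape {bucket_root}/{table}/{highwater_s}/{highwater_ms}_{counter}.parquet
+ // (see generate_parquet_file_path at the bottom of this module).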
+ let object_name: PathBuf = generate_parquet_file_path( + BUCKET_REGULAR_TRAFFIC, + table_name, + highwater_s, + highwater_ms, + counter, + ); + + let file_name = object_name.to_str().unwrap().to_owned(); + let upload_type: UploadType = UploadType::Simple(Media::new(file_name.clone())); + + let upload_request = UploadObjectRequest { + bucket: bucket_name.to_string(), + ..Default::default() + }; + + let max_retries = 3; + let mut retry_count = 0; + let mut delay = 500; + + loop { + let data = Body::from(buffer.clone()); + let upload_result = timeout( + Duration::from_secs(300), + client.upload_object(&upload_request, data, &upload_type), + ) + .await; + + match upload_result { + Ok(Ok(result)) => { + info!("File uploaded successfully to GCS: {}", result.name); + return Ok(()); + }, + Ok(Err(e)) => { + error!("Failed to upload file to GCS: {}", e); + if retry_count >= max_retries { + return Err(ParquetProcessorError::StorageError(e)); + } + }, + Err(e) => { + error!("Upload timed out: {}", e); + if retry_count >= max_retries { + return Err(ParquetProcessorError::TimeoutError(e)); + } + }, + } + + retry_count += 1; + sleep(Duration::from_millis(delay)).await; + delay *= 2; + debug!("Retrying upload operation. Retry count: {}", retry_count); + } +} + +fn generate_parquet_file_path( + gcs_bucket_root: &str, + table: &str, + highwater_s: i64, + highwater_ms: i64, + counter: u32, +) -> PathBuf { + PathBuf::from(format!( + "{}/{}/{}/{}_{}.parquet", + gcs_bucket_root, table, highwater_s, highwater_ms, counter + )) +} diff --git a/rust/processor/src/processors/account_transactions_processor.rs b/rust/processor/src/processors/account_transactions_processor.rs new file mode 100644 index 000000000..f7ef88344 --- /dev/null +++ b/rust/processor/src/processors/account_transactions_processor.rs @@ -0,0 +1,158 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use super::{DefaultProcessingResult, ProcessorName, ProcessorTrait}; +use crate::{ + db::common::models::account_transaction_models::account_transactions::AccountTransaction, + gap_detectors::ProcessingResult, + schema, + utils::database::{execute_in_chunks, get_config_table_chunk_size, ArcDbPool}, +}; +use ahash::AHashMap; +use anyhow::bail; +use aptos_protos::transaction::v1::Transaction; +use async_trait::async_trait; +use diesel::{pg::Pg, query_builder::QueryFragment}; +use std::fmt::Debug; +use tracing::error; + +pub struct AccountTransactionsProcessor { + connection_pool: ArcDbPool, + per_table_chunk_sizes: AHashMap, +} + +impl AccountTransactionsProcessor { + pub fn new(connection_pool: ArcDbPool, per_table_chunk_sizes: AHashMap) -> Self { + Self { + connection_pool, + per_table_chunk_sizes, + } + } +} + +impl Debug for AccountTransactionsProcessor { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let state = &self.connection_pool.state(); + write!( + f, + "AccountTransactionsProcessor {{ connections: {:?} idle_connections: {:?} }}", + state.connections, state.idle_connections + ) + } +} + +async fn insert_to_db( + conn: ArcDbPool, + name: &'static str, + start_version: u64, + end_version: u64, + account_transactions: &[AccountTransaction], + per_table_chunk_sizes: &AHashMap, +) -> Result<(), diesel::result::Error> { + tracing::trace!( + name = name, + start_version = start_version, + end_version = end_version, + "Inserting to db", + ); + execute_in_chunks( + conn.clone(), + insert_account_transactions_query, + account_transactions, + get_config_table_chunk_size::( + "account_transactions", 
+ per_table_chunk_sizes, + ), + ) + .await?; + Ok(()) +} + +fn insert_account_transactions_query( + item_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::account_transactions::dsl::*; + + ( + diesel::insert_into(schema::account_transactions::table) + .values(item_to_insert) + .on_conflict((transaction_version, account_address)) + .do_nothing(), + None, + ) +} + +#[async_trait] +impl ProcessorTrait for AccountTransactionsProcessor { + fn name(&self) -> &'static str { + ProcessorName::AccountTransactionsProcessor.into() + } + + async fn process_transactions( + &self, + transactions: Vec, + start_version: u64, + end_version: u64, + _db_chain_id: Option, + ) -> anyhow::Result { + let processing_start = std::time::Instant::now(); + let last_transaction_timestamp = transactions.last().unwrap().timestamp.clone(); + + let mut account_transactions = AHashMap::new(); + + for txn in &transactions { + account_transactions.extend(AccountTransaction::from_transaction(txn)); + } + let mut account_transactions = account_transactions + .into_values() + .collect::>(); + + // Sort by PK + account_transactions.sort_by(|a, b| { + (&a.transaction_version, &a.account_address) + .cmp(&(&b.transaction_version, &b.account_address)) + }); + + let processing_duration_in_secs = processing_start.elapsed().as_secs_f64(); + let db_insertion_start = std::time::Instant::now(); + let tx_result = insert_to_db( + self.get_pool(), + self.name(), + start_version, + end_version, + &account_transactions, + &self.per_table_chunk_sizes, + ) + .await; + + let db_insertion_duration_in_secs = db_insertion_start.elapsed().as_secs_f64(); + match tx_result { + Ok(_) => Ok(ProcessingResult::DefaultProcessingResult( + DefaultProcessingResult { + start_version, + end_version, + processing_duration_in_secs, + db_insertion_duration_in_secs, + last_transaction_timestamp, + }, + )), + Err(err) => { + error!( + start_version = start_version, + end_version = end_version, + processor_name = self.name(), + "[Parser] Error inserting transactions to db: {:?}", + err + ); + bail!(format!("Error inserting transactions to db. Processor {}. Start {}. End {}. 
Error {:?}", self.name(), start_version, end_version, err)) + }, + } + } + + fn connection_pool(&self) -> &ArcDbPool { + &self.connection_pool + } +} diff --git a/rust/processor/src/processors/ans_processor.rs b/rust/processor/src/processors/ans_processor.rs new file mode 100644 index 000000000..a984c8de7 --- /dev/null +++ b/rust/processor/src/processors/ans_processor.rs @@ -0,0 +1,709 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use super::{DefaultProcessingResult, ProcessorName, ProcessorTrait}; +use crate::{ + db::common::models::ans_models::{ + ans_lookup::{AnsLookup, AnsPrimaryName, CurrentAnsLookup, CurrentAnsPrimaryName}, + ans_lookup_v2::{ + AnsLookupV2, AnsPrimaryNameV2, CurrentAnsLookupV2, CurrentAnsPrimaryNameV2, + }, + ans_utils::{RenewNameEvent, SubdomainExtV2}, + }, + gap_detectors::ProcessingResult, + schema, + utils::{ + counters::PROCESSOR_UNKNOWN_TYPE_COUNT, + database::{execute_in_chunks, get_config_table_chunk_size, ArcDbPool}, + util::standardize_address, + }, +}; +use ahash::AHashMap; +use anyhow::bail; +use aptos_protos::transaction::v1::{ + transaction::TxnData, write_set_change::Change as WriteSetChange, Transaction, +}; +use async_trait::async_trait; +use diesel::{ + pg::{upsert::excluded, Pg}, + query_builder::QueryFragment, + ExpressionMethods, +}; +use serde::{Deserialize, Serialize}; +use std::fmt::Debug; +use tracing::error; + +#[derive(Clone, Debug, Deserialize, Serialize)] +#[serde(deny_unknown_fields)] +pub struct AnsProcessorConfig { + pub ans_v1_primary_names_table_handle: String, + pub ans_v1_name_records_table_handle: String, + pub ans_v2_contract_address: String, +} + +pub struct AnsProcessor { + connection_pool: ArcDbPool, + config: AnsProcessorConfig, + per_table_chunk_sizes: AHashMap, +} + +impl AnsProcessor { + pub fn new( + connection_pool: ArcDbPool, + config: AnsProcessorConfig, + per_table_chunk_sizes: AHashMap, + ) -> Self { + tracing::info!( + ans_v1_primary_names_table_handle = config.ans_v1_primary_names_table_handle, + ans_v1_name_records_table_handle = config.ans_v1_name_records_table_handle, + ans_v2_contract_address = config.ans_v2_contract_address, + "init AnsProcessor" + ); + Self { + connection_pool, + config, + per_table_chunk_sizes, + } + } +} + +impl Debug for AnsProcessor { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let state = &self.connection_pool.state(); + write!( + f, + "AnsProcessor {{ connections: {:?} idle_connections: {:?} }}", + state.connections, state.idle_connections + ) + } +} + +async fn insert_to_db( + conn: ArcDbPool, + name: &'static str, + start_version: u64, + end_version: u64, + current_ans_lookups: &[CurrentAnsLookup], + ans_lookups: &[AnsLookup], + current_ans_primary_names: &[CurrentAnsPrimaryName], + ans_primary_names: &[AnsPrimaryName], + current_ans_lookups_v2: &[CurrentAnsLookupV2], + ans_lookups_v2: &[AnsLookupV2], + current_ans_primary_names_v2: &[CurrentAnsPrimaryNameV2], + ans_primary_names_v2: &[AnsPrimaryNameV2], + per_table_chunk_sizes: &AHashMap, +) -> Result<(), diesel::result::Error> { + tracing::trace!( + name = name, + start_version = start_version, + end_version = end_version, + "Inserting to db", + ); + let cal = execute_in_chunks( + conn.clone(), + insert_current_ans_lookups_query, + current_ans_lookups, + get_config_table_chunk_size::( + "current_ans_lookup", + per_table_chunk_sizes, + ), + ); + let al = execute_in_chunks( + conn.clone(), + insert_ans_lookups_query, + ans_lookups, + 
get_config_table_chunk_size::("ans_lookup", per_table_chunk_sizes), + ); + let capn = execute_in_chunks( + conn.clone(), + insert_current_ans_primary_names_query, + current_ans_primary_names, + get_config_table_chunk_size::( + "current_ans_primary_name", + per_table_chunk_sizes, + ), + ); + let apn = execute_in_chunks( + conn.clone(), + insert_ans_primary_names_query, + ans_primary_names, + get_config_table_chunk_size::("ans_primary_name", per_table_chunk_sizes), + ); + let cal_v2 = execute_in_chunks( + conn.clone(), + insert_current_ans_lookups_v2_query, + current_ans_lookups_v2, + get_config_table_chunk_size::( + "current_ans_lookup_v2", + per_table_chunk_sizes, + ), + ); + let al_v2 = execute_in_chunks( + conn.clone(), + insert_ans_lookups_v2_query, + ans_lookups_v2, + get_config_table_chunk_size::("ans_lookup_v2", per_table_chunk_sizes), + ); + let capn_v2 = execute_in_chunks( + conn.clone(), + insert_current_ans_primary_names_v2_query, + current_ans_primary_names_v2, + get_config_table_chunk_size::( + "current_ans_primary_name_v2", + per_table_chunk_sizes, + ), + ); + let apn_v2 = execute_in_chunks( + conn, + insert_ans_primary_names_v2_query, + ans_primary_names_v2, + get_config_table_chunk_size::( + "ans_primary_name_v2", + per_table_chunk_sizes, + ), + ); + + let (cal_res, al_res, capn_res, apn_res, cal_v2_res, al_v2_res, capn_v2_res, apn_v2_res) = + tokio::join!(cal, al, capn, apn, cal_v2, al_v2, capn_v2, apn_v2); + + for res in vec![ + cal_res, + al_res, + capn_res, + apn_res, + cal_v2_res, + al_v2_res, + capn_v2_res, + apn_v2_res, + ] { + res?; + } + + Ok(()) +} + +fn insert_current_ans_lookups_query( + item_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::current_ans_lookup::dsl::*; + + ( + diesel::insert_into(schema::current_ans_lookup::table) + .values(item_to_insert) + .on_conflict((domain, subdomain)) + .do_update() + .set(( + registered_address.eq(excluded(registered_address)), + expiration_timestamp.eq(excluded(expiration_timestamp)), + last_transaction_version.eq(excluded(last_transaction_version)), + token_name.eq(excluded(token_name)), + is_deleted.eq(excluded(is_deleted)), + inserted_at.eq(excluded(inserted_at)), + )), + Some(" WHERE current_ans_lookup.last_transaction_version <= excluded.last_transaction_version "), + ) +} + +fn insert_ans_lookups_query( + item_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::ans_lookup::dsl::*; + + ( + diesel::insert_into(schema::ans_lookup::table) + .values(item_to_insert) + .on_conflict((transaction_version, write_set_change_index)) + .do_nothing(), + None, + ) +} + +fn insert_current_ans_primary_names_query( + item_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::current_ans_primary_name::dsl::*; + + ( + diesel::insert_into(schema::current_ans_primary_name::table) + .values(item_to_insert) + .on_conflict(registered_address) + .do_update() + .set(( + domain.eq(excluded(domain)), + subdomain.eq(excluded(subdomain)), + token_name.eq(excluded(token_name)), + is_deleted.eq(excluded(is_deleted)), + last_transaction_version.eq(excluded(last_transaction_version)), + inserted_at.eq(excluded(inserted_at)), + )), + Some(" WHERE current_ans_primary_name.last_transaction_version <= excluded.last_transaction_version "), + ) +} + +fn insert_ans_primary_names_query( + item_to_insert: Vec, +) -> ( + impl 
QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::ans_primary_name::dsl::*; + + ( + diesel::insert_into(schema::ans_primary_name::table) + .values(item_to_insert) + .on_conflict((transaction_version, write_set_change_index)) + .do_nothing(), + None, + ) +} + +fn insert_current_ans_lookups_v2_query( + item_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::current_ans_lookup_v2::dsl::*; + + ( + diesel::insert_into(schema::current_ans_lookup_v2::table) + .values(item_to_insert) + .on_conflict((domain, subdomain, token_standard)) + .do_update() + .set(( + registered_address.eq(excluded(registered_address)), + expiration_timestamp.eq(excluded(expiration_timestamp)), + last_transaction_version.eq(excluded(last_transaction_version)), + token_name.eq(excluded(token_name)), + is_deleted.eq(excluded(is_deleted)), + inserted_at.eq(excluded(inserted_at)), + subdomain_expiration_policy.eq(excluded(subdomain_expiration_policy)), + )), + Some(" WHERE current_ans_lookup_v2.last_transaction_version <= excluded.last_transaction_version "), + ) +} + +fn insert_ans_lookups_v2_query( + item_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::ans_lookup_v2::dsl::*; + + ( + diesel::insert_into(schema::ans_lookup_v2::table) + .values(item_to_insert) + .on_conflict((transaction_version, write_set_change_index)) + .do_update() + .set(( + inserted_at.eq(excluded(inserted_at)), + subdomain_expiration_policy.eq(excluded(subdomain_expiration_policy)), + )), + None, + ) +} + +fn insert_current_ans_primary_names_v2_query( + item_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::current_ans_primary_name_v2::dsl::*; + + ( + diesel::insert_into(schema::current_ans_primary_name_v2::table) + .values(item_to_insert) + .on_conflict((registered_address, token_standard)) + .do_update() + .set(( + domain.eq(excluded(domain)), + subdomain.eq(excluded(subdomain)), + token_name.eq(excluded(token_name)), + is_deleted.eq(excluded(is_deleted)), + last_transaction_version.eq(excluded(last_transaction_version)), + inserted_at.eq(excluded(inserted_at)), + )), + Some(" WHERE current_ans_primary_name_v2.last_transaction_version <= excluded.last_transaction_version "), + ) +} + +fn insert_ans_primary_names_v2_query( + items_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::ans_primary_name_v2::dsl::*; + + ( + diesel::insert_into(schema::ans_primary_name_v2::table) + .values(items_to_insert) + .on_conflict((transaction_version, write_set_change_index)) + .do_nothing(), + None, + ) +} + +#[async_trait] +impl ProcessorTrait for AnsProcessor { + fn name(&self) -> &'static str { + ProcessorName::AnsProcessor.into() + } + + async fn process_transactions( + &self, + transactions: Vec, + start_version: u64, + end_version: u64, + _db_chain_id: Option, + ) -> anyhow::Result { + let processing_start = std::time::Instant::now(); + let last_transaction_timestamp = transactions.last().unwrap().timestamp.clone(); + + let ( + all_current_ans_lookups, + all_ans_lookups, + all_current_ans_primary_names, + all_ans_primary_names, + all_current_ans_lookups_v2, + all_ans_lookups_v2, + all_current_ans_primary_names_v2, + all_ans_primary_names_v2, + ) = parse_ans( + &transactions, + 
self.config.ans_v1_primary_names_table_handle.clone(), + self.config.ans_v1_name_records_table_handle.clone(), + self.config.ans_v2_contract_address.clone(), + ); + + let processing_duration_in_secs = processing_start.elapsed().as_secs_f64(); + let db_insertion_start = std::time::Instant::now(); + + // Insert values to db + let tx_result = insert_to_db( + self.get_pool(), + self.name(), + start_version, + end_version, + &all_current_ans_lookups, + &all_ans_lookups, + &all_current_ans_primary_names, + &all_ans_primary_names, + &all_current_ans_lookups_v2, + &all_ans_lookups_v2, + &all_current_ans_primary_names_v2, + &all_ans_primary_names_v2, + &self.per_table_chunk_sizes, + ) + .await; + + let db_insertion_duration_in_secs = db_insertion_start.elapsed().as_secs_f64(); + + match tx_result { + Ok(_) => Ok(ProcessingResult::DefaultProcessingResult( + DefaultProcessingResult { + start_version, + end_version, + processing_duration_in_secs, + db_insertion_duration_in_secs, + last_transaction_timestamp, + }, + )), + Err(e) => { + error!( + start_version = start_version, + end_version = end_version, + processor_name = self.name(), + error = ?e, + "[Parser] Error inserting transactions to db", + ); + bail!(e) + }, + } + } + + fn connection_pool(&self) -> &ArcDbPool { + &self.connection_pool + } +} + +fn parse_ans( + transactions: &[Transaction], + ans_v1_primary_names_table_handle: String, + ans_v1_name_records_table_handle: String, + ans_v2_contract_address: String, +) -> ( + Vec, + Vec, + Vec, + Vec, + Vec, + Vec, + Vec, + Vec, +) { + let mut all_current_ans_lookups = AHashMap::new(); + let mut all_ans_lookups = vec![]; + let mut all_current_ans_primary_names = AHashMap::new(); + let mut all_ans_primary_names = vec![]; + let mut all_current_ans_lookups_v2 = AHashMap::new(); + let mut all_ans_lookups_v2 = vec![]; + let mut all_current_ans_primary_names_v2 = AHashMap::new(); + let mut all_ans_primary_names_v2 = vec![]; + + for transaction in transactions { + let txn_version = transaction.version as i64; + let txn_data = match transaction.txn_data.as_ref() { + Some(data) => data, + None => { + PROCESSOR_UNKNOWN_TYPE_COUNT + .with_label_values(&["AnsProcessor"]) + .inc(); + tracing::warn!( + transaction_version = txn_version, + "Transaction data doesn't exist", + ); + continue; + }, + }; + let transaction_info = transaction + .info + .as_ref() + .expect("Transaction info doesn't exist!"); + + // Extracts from user transactions. Other transactions won't have any ANS changes + + if let TxnData::User(user_txn) = txn_data { + // TODO: Use the v2_renew_name_events to preserve metadata once we switch to a single ANS table to store everything + let mut v2_renew_name_events = vec![]; + let mut v2_address_to_subdomain_ext = AHashMap::new(); + + // Parse V2 ANS Events. We only care about the following events: + // 1. RenewNameEvents: helps to fill in metadata for name records with updated expiration time + // 2. 
SetReverseLookupEvents: parse to get current_ans_primary_names + for (event_index, event) in user_txn.events.iter().enumerate() { + if let Some(renew_name_event) = + RenewNameEvent::from_event(event, &ans_v2_contract_address, txn_version) + .unwrap() + { + v2_renew_name_events.push(renew_name_event); + } + if let Some((current_ans_lookup_v2, ans_lookup_v2)) = + CurrentAnsPrimaryNameV2::parse_v2_primary_name_record_from_event( + event, + txn_version, + event_index as i64, + &ans_v2_contract_address, + ) + .unwrap() + { + all_current_ans_primary_names_v2 + .insert(current_ans_lookup_v2.pk(), current_ans_lookup_v2); + all_ans_primary_names_v2.push(ans_lookup_v2); + } + } + + // Parse V2 ANS subdomain exts + for wsc in transaction_info.changes.iter() { + match wsc.change.as_ref().unwrap() { + WriteSetChange::WriteResource(write_resource) => { + if let Some(subdomain_ext) = SubdomainExtV2::from_write_resource( + write_resource, + &ans_v2_contract_address, + txn_version, + ) + .unwrap() + { + // Track resource account -> SubdomainExt to create the full subdomain ANS later + v2_address_to_subdomain_ext.insert( + standardize_address(write_resource.address.as_str()), + subdomain_ext, + ); + } + }, + _ => continue, + } + } + + // Parse V1 ANS write set changes + for (wsc_index, wsc) in transaction_info.changes.iter().enumerate() { + match wsc.change.as_ref().unwrap() { + WriteSetChange::WriteTableItem(table_item) => { + if let Some((current_ans_lookup, ans_lookup)) = + CurrentAnsLookup::parse_name_record_from_write_table_item_v1( + table_item, + &ans_v1_name_records_table_handle, + txn_version, + wsc_index as i64, + ) + .unwrap_or_else(|e| { + error!( + error = ?e, + "Error parsing ANS v1 name record from write table item" + ); + panic!(); + }) + { + all_current_ans_lookups + .insert(current_ans_lookup.pk(), current_ans_lookup.clone()); + all_ans_lookups.push(ans_lookup.clone()); + + // Include all v1 lookups in v2 data + let (current_ans_lookup_v2, ans_lookup_v2) = + CurrentAnsLookupV2::get_v2_from_v1(current_ans_lookup, ans_lookup); + all_current_ans_lookups_v2 + .insert(current_ans_lookup_v2.pk(), current_ans_lookup_v2); + all_ans_lookups_v2.push(ans_lookup_v2); + } + if let Some((current_primary_name, primary_name)) = + CurrentAnsPrimaryName::parse_primary_name_record_from_write_table_item_v1( + table_item, + &ans_v1_primary_names_table_handle, + txn_version, + wsc_index as i64, + ) + .unwrap_or_else(|e| { + error!( + error = ?e, + "Error parsing ANS v1 primary name from write table item" + ); + panic!(); + }) + { + all_current_ans_primary_names + .insert(current_primary_name.pk(), current_primary_name.clone()); + all_ans_primary_names.push(primary_name.clone()); + + // Include all v1 primary names in v2 data + let (current_primary_name_v2, primary_name_v2) = + CurrentAnsPrimaryNameV2::get_v2_from_v1(current_primary_name.clone(), primary_name.clone()); + all_current_ans_primary_names_v2 + .insert(current_primary_name_v2.pk(), current_primary_name_v2); + all_ans_primary_names_v2.push(primary_name_v2); + } + }, + WriteSetChange::DeleteTableItem(table_item) => { + if let Some((current_ans_lookup, ans_lookup)) = + CurrentAnsLookup::parse_name_record_from_delete_table_item_v1( + table_item, + &ans_v1_name_records_table_handle, + txn_version, + wsc_index as i64, + ) + .unwrap_or_else(|e| { + error!( + error = ?e, + "Error parsing ANS v1 name record from delete table item" + ); + panic!(); + }) + { + all_current_ans_lookups + .insert(current_ans_lookup.pk(), current_ans_lookup.clone()); + 
all_ans_lookups.push(ans_lookup.clone()); + + // Include all v1 lookups in v2 data + let (current_ans_lookup_v2, ans_lookup_v2) = + CurrentAnsLookupV2::get_v2_from_v1(current_ans_lookup, ans_lookup); + all_current_ans_lookups_v2 + .insert(current_ans_lookup_v2.pk(), current_ans_lookup_v2); + all_ans_lookups_v2.push(ans_lookup_v2); + } + if let Some((current_primary_name, primary_name)) = + CurrentAnsPrimaryName::parse_primary_name_record_from_delete_table_item_v1( + table_item, + &ans_v1_primary_names_table_handle, + txn_version, + wsc_index as i64, + ) + .unwrap_or_else(|e| { + error!( + error = ?e, + "Error parsing ANS v1 primary name from delete table item" + ); + panic!(); + }) + { + all_current_ans_primary_names + .insert(current_primary_name.pk(), current_primary_name.clone()); + all_ans_primary_names.push(primary_name.clone()); + + // Include all v1 primary names in v2 data + let (current_primary_name_v2, primary_name_v2) = + CurrentAnsPrimaryNameV2::get_v2_from_v1(current_primary_name, primary_name); + all_current_ans_primary_names_v2 + .insert(current_primary_name_v2.pk(), current_primary_name_v2); + all_ans_primary_names_v2.push(primary_name_v2); + } + }, + WriteSetChange::WriteResource(write_resource) => { + if let Some((current_ans_lookup_v2, ans_lookup_v2)) = + CurrentAnsLookupV2::parse_name_record_from_write_resource_v2( + write_resource, + &ans_v2_contract_address, + txn_version, + wsc_index as i64, + &v2_address_to_subdomain_ext, + ) + .unwrap_or_else(|e| { + error!( + error = ?e, + "Error parsing ANS v2 name record from write resource" + ); + panic!(); + }) + { + all_current_ans_lookups_v2 + .insert(current_ans_lookup_v2.pk(), current_ans_lookup_v2); + all_ans_lookups_v2.push(ans_lookup_v2); + } + }, + // For ANS V2, there are no delete resource changes + // 1. Unsetting a primary name will show up as a ReverseRecord write resource with empty fields + // 2. 
Name record v2 tokens are never deleted + _ => continue, + } + } + } + } + // Boilerplate after this for diesel + // Sort ans lookup values for postgres insert + let mut all_current_ans_lookups = all_current_ans_lookups + .into_values() + .collect::>(); + let mut all_current_ans_primary_names = all_current_ans_primary_names + .into_values() + .collect::>(); + let mut all_current_ans_lookups_v2 = all_current_ans_lookups_v2 + .into_values() + .collect::>(); + let mut all_current_ans_primary_names_v2 = all_current_ans_primary_names_v2 + .into_values() + .collect::>(); + + all_current_ans_lookups.sort(); + all_current_ans_primary_names.sort(); + all_current_ans_lookups_v2.sort(); + all_current_ans_primary_names_v2.sort(); + ( + all_current_ans_lookups, + all_ans_lookups, + all_current_ans_primary_names, + all_ans_primary_names, + all_current_ans_lookups_v2, + all_ans_lookups_v2, + all_current_ans_primary_names_v2, + all_ans_primary_names_v2, + ) +} diff --git a/rust/processor/src/processors/coin_processor.rs b/rust/processor/src/processors/coin_processor.rs new file mode 100644 index 000000000..f351ce40d --- /dev/null +++ b/rust/processor/src/processors/coin_processor.rs @@ -0,0 +1,306 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use super::{DefaultProcessingResult, ProcessorName, ProcessorTrait}; +use crate::{ + db::common::models::{ + coin_models::{ + coin_activities::CoinActivity, + coin_balances::{CoinBalance, CurrentCoinBalance}, + coin_infos::CoinInfo, + }, + fungible_asset_models::v2_fungible_asset_activities::CurrentCoinBalancePK, + }, + gap_detectors::ProcessingResult, + schema, + utils::database::{execute_in_chunks, get_config_table_chunk_size, ArcDbPool}, +}; +use ahash::AHashMap; +use anyhow::{bail, Context}; +use aptos_protos::transaction::v1::Transaction; +use async_trait::async_trait; +use diesel::{ + pg::{upsert::excluded, Pg}, + query_builder::QueryFragment, + ExpressionMethods, +}; +use std::fmt::Debug; +use tracing::error; + +pub struct CoinProcessor { + connection_pool: ArcDbPool, + per_table_chunk_sizes: AHashMap, +} + +impl CoinProcessor { + pub fn new(connection_pool: ArcDbPool, per_table_chunk_sizes: AHashMap) -> Self { + Self { + connection_pool, + per_table_chunk_sizes, + } + } +} + +impl Debug for CoinProcessor { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let state = &self.connection_pool.state(); + write!( + f, + "CoinTransactionProcessor {{ connections: {:?} idle_connections: {:?} }}", + state.connections, state.idle_connections + ) + } +} + +async fn insert_to_db( + conn: ArcDbPool, + name: &'static str, + start_version: u64, + end_version: u64, + coin_activities: &[CoinActivity], + coin_infos: &[CoinInfo], + coin_balances: &[CoinBalance], + current_coin_balances: &[CurrentCoinBalance], + per_table_chunk_sizes: &AHashMap, +) -> Result<(), diesel::result::Error> { + tracing::trace!( + name = name, + start_version = start_version, + end_version = end_version, + "Inserting to db", + ); + + let ca = execute_in_chunks( + conn.clone(), + insert_coin_activities_query, + coin_activities, + get_config_table_chunk_size::("coin_activities", per_table_chunk_sizes), + ); + let ci = execute_in_chunks( + conn.clone(), + insert_coin_infos_query, + coin_infos, + get_config_table_chunk_size::("coin_infos", per_table_chunk_sizes), + ); + let cb = execute_in_chunks( + conn.clone(), + insert_coin_balances_query, + coin_balances, + get_config_table_chunk_size::("coin_balances", per_table_chunk_sizes), + ); + let ccb = 
execute_in_chunks( + conn.clone(), + insert_current_coin_balances_query, + current_coin_balances, + get_config_table_chunk_size::( + "current_coin_balances", + per_table_chunk_sizes, + ), + ); + + let (ca_res, ci_res, cb_res, ccb_res) = tokio::join!(ca, ci, cb, ccb); + for res in [ca_res, ci_res, cb_res, ccb_res] { + res?; + } + Ok(()) +} + +fn insert_coin_activities_query( + items_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::coin_activities::dsl::*; + + ( + diesel::insert_into(schema::coin_activities::table) + .values(items_to_insert) + .on_conflict(( + transaction_version, + event_account_address, + event_creation_number, + event_sequence_number, + )) + .do_update() + .set(( + entry_function_id_str.eq(excluded(entry_function_id_str)), + inserted_at.eq(excluded(inserted_at)), + )), + None, + ) +} + +fn insert_coin_infos_query( + items_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::coin_infos::dsl::*; + + ( + diesel::insert_into(schema::coin_infos::table) + .values(items_to_insert) + .on_conflict(coin_type_hash) + .do_update() + .set(( + transaction_version_created.eq(excluded(transaction_version_created)), + creator_address.eq(excluded(creator_address)), + name.eq(excluded(name)), + symbol.eq(excluded(symbol)), + decimals.eq(excluded(decimals)), + transaction_created_timestamp.eq(excluded(transaction_created_timestamp)), + supply_aggregator_table_handle.eq(excluded(supply_aggregator_table_handle)), + supply_aggregator_table_key.eq(excluded(supply_aggregator_table_key)), + inserted_at.eq(excluded(inserted_at)), + )), + Some(" WHERE coin_infos.transaction_version_created >= EXCLUDED.transaction_version_created "), + ) +} + +fn insert_coin_balances_query( + items_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::coin_balances::dsl::*; + + ( + diesel::insert_into(schema::coin_balances::table) + .values(items_to_insert) + .on_conflict((transaction_version, owner_address, coin_type_hash)) + .do_nothing(), + None, + ) +} + +fn insert_current_coin_balances_query( + items_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::current_coin_balances::dsl::*; + + ( + diesel::insert_into(schema::current_coin_balances::table) + .values(items_to_insert) + .on_conflict((owner_address, coin_type_hash)) + .do_update() + .set(( + amount.eq(excluded(amount)), + last_transaction_version.eq(excluded(last_transaction_version)), + last_transaction_timestamp.eq(excluded(last_transaction_timestamp)), + inserted_at.eq(excluded(inserted_at)), + )), + Some(" WHERE current_coin_balances.last_transaction_version <= excluded.last_transaction_version "), + ) +} + +#[async_trait] +impl ProcessorTrait for CoinProcessor { + fn name(&self) -> &'static str { + ProcessorName::CoinProcessor.into() + } + + async fn process_transactions( + &self, + transactions: Vec, + start_version: u64, + end_version: u64, + _: Option, + ) -> anyhow::Result { + let processing_start = std::time::Instant::now(); + let last_transaction_timestamp = transactions.last().unwrap().timestamp.clone(); + + let ( + all_coin_activities, + all_coin_infos, + all_coin_balances, + all_current_coin_balances, + ) = tokio::task::spawn_blocking(move || { + let mut all_coin_activities = vec![]; + let mut all_coin_balances = vec![]; + let mut 
all_coin_infos: AHashMap = AHashMap::new(); + let mut all_current_coin_balances: AHashMap = + AHashMap::new(); + + for txn in &transactions { + let (mut coin_activities, mut coin_balances, coin_infos, current_coin_balances) = + CoinActivity::from_transaction(txn); + all_coin_activities.append(&mut coin_activities); + all_coin_balances.append(&mut coin_balances); + // For coin infos, we only want to keep the first version, so insert only if key is not present already + for (key, value) in coin_infos { + all_coin_infos.entry(key).or_insert(value); + } + all_current_coin_balances.extend(current_coin_balances); + } + let mut all_coin_infos = all_coin_infos.into_values().collect::>(); + let mut all_current_coin_balances = all_current_coin_balances + .into_values() + .collect::>(); + + // Sort by PK + all_coin_infos.sort_by(|a, b| a.coin_type.cmp(&b.coin_type)); + all_current_coin_balances.sort_by(|a, b| { + (&a.owner_address, &a.coin_type).cmp(&(&b.owner_address, &b.coin_type)) + }); + + ( + all_coin_activities, + all_coin_infos, + all_coin_balances, + all_current_coin_balances, + ) + }) + .await + .context("spawn_blocking for CoinProcessor thread failed")?; + + let processing_duration_in_secs = processing_start.elapsed().as_secs_f64(); + let db_insertion_start = std::time::Instant::now(); + + let tx_result = insert_to_db( + self.get_pool(), + self.name(), + start_version, + end_version, + &all_coin_activities, + &all_coin_infos, + &all_coin_balances, + &all_current_coin_balances, + &self.per_table_chunk_sizes, + ) + .await; + + let db_insertion_duration_in_secs = db_insertion_start.elapsed().as_secs_f64(); + + match tx_result { + Ok(_) => Ok(ProcessingResult::DefaultProcessingResult( + DefaultProcessingResult { + start_version, + end_version, + processing_duration_in_secs, + db_insertion_duration_in_secs, + last_transaction_timestamp, + }, + )), + Err(err) => { + error!( + start_version = start_version, + end_version = end_version, + processor_name = self.name(), + "[Parser] Error inserting transactions to db: {:?}", + err + ); + bail!(format!("Error inserting transactions to db. Processor {}. Start {}. End {}. 
Error {:?}", self.name(), start_version, end_version, err)) + }, + } + } + + fn connection_pool(&self) -> &ArcDbPool { + &self.connection_pool + } +} diff --git a/rust/processor/src/processors/default_processor.rs b/rust/processor/src/processors/default_processor.rs new file mode 100644 index 000000000..cc2c76664 --- /dev/null +++ b/rust/processor/src/processors/default_processor.rs @@ -0,0 +1,465 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use super::{DefaultProcessingResult, ProcessorName, ProcessorTrait}; +use crate::{ + db::common::models::default_models::{ + block_metadata_transactions::{BlockMetadataTransaction, BlockMetadataTransactionModel}, + move_modules::MoveModule, + move_resources::MoveResource, + move_tables::{CurrentTableItem, TableItem, TableMetadata}, + transactions::TransactionModel, + write_set_changes::{WriteSetChangeDetail, WriteSetChangeModel}, + }, + gap_detectors::ProcessingResult, + schema, + utils::database::{execute_in_chunks, get_config_table_chunk_size, ArcDbPool}, + worker::TableFlags, +}; +use ahash::AHashMap; +use anyhow::bail; +use aptos_protos::transaction::v1::Transaction; +use async_trait::async_trait; +use diesel::{ + pg::{upsert::excluded, Pg}, + query_builder::QueryFragment, + ExpressionMethods, +}; +use std::fmt::Debug; +use tokio::join; +use tracing::error; + +pub struct DefaultProcessor { + connection_pool: ArcDbPool, + per_table_chunk_sizes: AHashMap, + deprecated_tables: TableFlags, +} + +impl DefaultProcessor { + pub fn new( + connection_pool: ArcDbPool, + per_table_chunk_sizes: AHashMap, + deprecated_tables: TableFlags, + ) -> Self { + Self { + connection_pool, + per_table_chunk_sizes, + deprecated_tables, + } + } +} + +impl Debug for DefaultProcessor { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let state = &self.connection_pool.state(); + write!( + f, + "DefaultTransactionProcessor {{ connections: {:?} idle_connections: {:?} }}", + state.connections, state.idle_connections + ) + } +} + +async fn insert_to_db( + conn: ArcDbPool, + name: &'static str, + start_version: u64, + end_version: u64, + txns: &[TransactionModel], + block_metadata_transactions: &[BlockMetadataTransactionModel], + wscs: &[WriteSetChangeModel], + (move_modules, move_resources, table_items, current_table_items, table_metadata): ( + &[MoveModule], + &[MoveResource], + &[TableItem], + &[CurrentTableItem], + &[TableMetadata], + ), + per_table_chunk_sizes: &AHashMap, +) -> Result<(), diesel::result::Error> { + tracing::trace!( + name = name, + start_version = start_version, + end_version = end_version, + "Inserting to db", + ); + + let txns_res = execute_in_chunks( + conn.clone(), + insert_transactions_query, + txns, + get_config_table_chunk_size::("transactions", per_table_chunk_sizes), + ); + + let bmt_res = execute_in_chunks( + conn.clone(), + insert_block_metadata_transactions_query, + block_metadata_transactions, + get_config_table_chunk_size::( + "block_metadata_transactions", + per_table_chunk_sizes, + ), + ); + + let wst_res = execute_in_chunks( + conn.clone(), + insert_write_set_changes_query, + wscs, + get_config_table_chunk_size::( + "write_set_changes", + per_table_chunk_sizes, + ), + ); + + let mm_res = execute_in_chunks( + conn.clone(), + insert_move_modules_query, + move_modules, + get_config_table_chunk_size::("move_modules", per_table_chunk_sizes), + ); + + let mr_res = execute_in_chunks( + conn.clone(), + insert_move_resources_query, + move_resources, + 
get_config_table_chunk_size::("move_resources", per_table_chunk_sizes), + ); + + let ti_res = execute_in_chunks( + conn.clone(), + insert_table_items_query, + table_items, + get_config_table_chunk_size::("table_items", per_table_chunk_sizes), + ); + + let cti_res = execute_in_chunks( + conn.clone(), + insert_current_table_items_query, + current_table_items, + get_config_table_chunk_size::( + "current_table_items", + per_table_chunk_sizes, + ), + ); + + let tm_res = execute_in_chunks( + conn.clone(), + insert_table_metadata_query, + table_metadata, + get_config_table_chunk_size::("table_metadatas", per_table_chunk_sizes), + ); + + let (txns_res, wst_res, bmt_res, mm_res, mr_res, ti_res, cti_res, tm_res) = + join!(txns_res, wst_res, bmt_res, mm_res, mr_res, ti_res, cti_res, tm_res); + + for res in [ + txns_res, wst_res, bmt_res, mm_res, mr_res, ti_res, cti_res, tm_res, + ] { + res?; + } + + Ok(()) +} + +fn insert_transactions_query( + items_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::transactions::dsl::*; + + ( + diesel::insert_into(schema::transactions::table) + .values(items_to_insert) + .on_conflict(version) + .do_update() + .set(( + inserted_at.eq(excluded(inserted_at)), + payload_type.eq(excluded(payload_type)), + )), + None, + ) +} + +fn insert_block_metadata_transactions_query( + items_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::block_metadata_transactions::dsl::*; + + ( + diesel::insert_into(schema::block_metadata_transactions::table) + .values(items_to_insert) + .on_conflict(version) + .do_nothing(), + None, + ) +} + +fn insert_write_set_changes_query( + items_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::write_set_changes::dsl::*; + + ( + diesel::insert_into(schema::write_set_changes::table) + .values(items_to_insert) + .on_conflict((transaction_version, index)) + .do_nothing(), + None, + ) +} + +fn insert_move_modules_query( + items_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::move_modules::dsl::*; + + ( + diesel::insert_into(schema::move_modules::table) + .values(items_to_insert) + .on_conflict((transaction_version, write_set_change_index)) + .do_nothing(), + None, + ) +} + +fn insert_move_resources_query( + items_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::move_resources::dsl::*; + + ( + diesel::insert_into(schema::move_resources::table) + .values(items_to_insert) + .on_conflict((transaction_version, write_set_change_index)) + .do_nothing(), + None, + ) +} + +fn insert_table_items_query( + items_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::table_items::dsl::*; + + ( + diesel::insert_into(schema::table_items::table) + .values(items_to_insert) + .on_conflict((transaction_version, write_set_change_index)) + .do_nothing(), + None, + ) +} + +fn insert_current_table_items_query( + items_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::current_table_items::dsl::*; + + ( + diesel::insert_into(schema::current_table_items::table) + .values(items_to_insert) + .on_conflict((table_handle, key_hash)) + .do_update() + 
.set(( + key.eq(excluded(key)), + decoded_key.eq(excluded(decoded_key)), + decoded_value.eq(excluded(decoded_value)), + is_deleted.eq(excluded(is_deleted)), + last_transaction_version.eq(excluded(last_transaction_version)), + inserted_at.eq(excluded(inserted_at)), + )), + Some(" WHERE current_table_items.last_transaction_version <= excluded.last_transaction_version "), + ) +} + +fn insert_table_metadata_query( + items_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::table_metadatas::dsl::*; + + ( + diesel::insert_into(schema::table_metadatas::table) + .values(items_to_insert) + .on_conflict(handle) + .do_nothing(), + None, + ) +} + +#[async_trait] +impl ProcessorTrait for DefaultProcessor { + fn name(&self) -> &'static str { + ProcessorName::DefaultProcessor.into() + } + + async fn process_transactions( + &self, + transactions: Vec, + start_version: u64, + end_version: u64, + _: Option, + ) -> anyhow::Result { + let processing_start = std::time::Instant::now(); + let last_transaction_timestamp = transactions.last().unwrap().timestamp.clone(); + let flags = self.deprecated_tables; + let ( + txns, + block_metadata_transactions, + write_set_changes, + (move_modules, move_resources, table_items, current_table_items, table_metadata), + ) = tokio::task::spawn_blocking(move || process_transactions(transactions, flags)) + .await + .expect("Failed to spawn_blocking for TransactionModel::from_transactions"); + let processing_duration_in_secs = processing_start.elapsed().as_secs_f64(); + let db_insertion_start = std::time::Instant::now(); + + let tx_result = insert_to_db( + self.get_pool(), + self.name(), + start_version, + end_version, + &txns, + &block_metadata_transactions, + &write_set_changes, + ( + &move_modules, + &move_resources, + &table_items, + ¤t_table_items, + &table_metadata, + ), + &self.per_table_chunk_sizes, + ) + .await; + + let db_insertion_duration_in_secs = db_insertion_start.elapsed().as_secs_f64(); + match tx_result { + Ok(_) => Ok(ProcessingResult::DefaultProcessingResult( + DefaultProcessingResult { + start_version, + end_version, + processing_duration_in_secs, + db_insertion_duration_in_secs, + last_transaction_timestamp, + }, + )), + Err(e) => { + error!( + start_version = start_version, + end_version = end_version, + processor_name = self.name(), + error = ?e, + "[Parser] Error inserting transactions to db", + ); + bail!(e) + }, + } + } + + fn connection_pool(&self) -> &ArcDbPool { + &self.connection_pool + } +} + +fn process_transactions( + transactions: Vec, + flags: TableFlags, +) -> ( + Vec, + Vec, + Vec, + ( + Vec, + Vec, + Vec, + Vec, + Vec, + ), +) { + let (mut txns, block_metadata_txns, mut write_set_changes, wsc_details) = + TransactionModel::from_transactions(&transactions); + let mut block_metadata_transactions = vec![]; + for block_metadata_txn in block_metadata_txns { + block_metadata_transactions.push(block_metadata_txn.clone()); + } + let mut move_modules = vec![]; + let mut move_resources = vec![]; + let mut table_items = vec![]; + let mut current_table_items = AHashMap::new(); + let mut table_metadata = AHashMap::new(); + for detail in wsc_details { + match detail { + WriteSetChangeDetail::Module(module) => move_modules.push(module.clone()), + WriteSetChangeDetail::Resource(resource) => move_resources.push(resource.clone()), + WriteSetChangeDetail::Table(item, current_item, metadata) => { + table_items.push(item.clone()); + current_table_items.insert( + ( + 
current_item.table_handle.clone(), + current_item.key_hash.clone(), + ), + current_item.clone(), + ); + if let Some(meta) = metadata { + table_metadata.insert(meta.handle.clone(), meta.clone()); + } + }, + } + } + + // Getting list of values and sorting by pk in order to avoid postgres deadlock since we're doing multi threaded db writes + let mut current_table_items = current_table_items + .into_values() + .collect::>(); + let mut table_metadata = table_metadata.into_values().collect::>(); + // Sort by PK + current_table_items + .sort_by(|a, b| (&a.table_handle, &a.key_hash).cmp(&(&b.table_handle, &b.key_hash))); + table_metadata.sort_by(|a, b| a.handle.cmp(&b.handle)); + + if flags.contains(TableFlags::MOVE_RESOURCES) { + move_resources.clear(); + } + if flags.contains(TableFlags::TRANSACTIONS) { + txns.clear(); + } + if flags.contains(TableFlags::WRITE_SET_CHANGES) { + write_set_changes.clear(); + } + + ( + txns, + block_metadata_transactions, + write_set_changes, + ( + move_modules, + move_resources, + table_items, + current_table_items, + table_metadata, + ), + ) +} diff --git a/rust/processor/src/processors/events_processor.rs b/rust/processor/src/processors/events_processor.rs new file mode 100644 index 000000000..409914275 --- /dev/null +++ b/rust/processor/src/processors/events_processor.rs @@ -0,0 +1,180 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use super::{DefaultProcessingResult, ProcessorName, ProcessorTrait}; +use crate::{ + db::common::models::events_models::events::EventModel, + gap_detectors::ProcessingResult, + schema, + utils::{ + counters::PROCESSOR_UNKNOWN_TYPE_COUNT, + database::{execute_in_chunks, get_config_table_chunk_size, ArcDbPool}, + }, +}; +use ahash::AHashMap; +use anyhow::bail; +use aptos_protos::transaction::v1::{transaction::TxnData, Transaction}; +use async_trait::async_trait; +use diesel::{ + pg::{upsert::excluded, Pg}, + query_builder::QueryFragment, + ExpressionMethods, +}; +use std::fmt::Debug; +use tracing::error; + +pub struct EventsProcessor { + connection_pool: ArcDbPool, + per_table_chunk_sizes: AHashMap, +} + +impl EventsProcessor { + pub fn new(connection_pool: ArcDbPool, per_table_chunk_sizes: AHashMap) -> Self { + Self { + connection_pool, + per_table_chunk_sizes, + } + } +} + +impl Debug for EventsProcessor { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let state = &self.connection_pool.state(); + write!( + f, + "EventsProcessor {{ connections: {:?} idle_connections: {:?} }}", + state.connections, state.idle_connections + ) + } +} + +async fn insert_to_db( + conn: ArcDbPool, + name: &'static str, + start_version: u64, + end_version: u64, + events: &[EventModel], + per_table_chunk_sizes: &AHashMap, +) -> Result<(), diesel::result::Error> { + tracing::trace!( + name = name, + start_version = start_version, + end_version = end_version, + "Inserting to db", + ); + execute_in_chunks( + conn, + insert_events_query, + events, + get_config_table_chunk_size::("events", per_table_chunk_sizes), + ) + .await?; + Ok(()) +} + +fn insert_events_query( + items_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::events::dsl::*; + ( + diesel::insert_into(schema::events::table) + .values(items_to_insert) + .on_conflict((transaction_version, event_index)) + .do_update() + .set(( + inserted_at.eq(excluded(inserted_at)), + indexed_type.eq(excluded(indexed_type)), + )), + None, + ) +} + +#[async_trait] +impl ProcessorTrait 
for EventsProcessor { + fn name(&self) -> &'static str { + ProcessorName::EventsProcessor.into() + } + + async fn process_transactions( + &self, + transactions: Vec, + start_version: u64, + end_version: u64, + _: Option, + ) -> anyhow::Result { + let processing_start = std::time::Instant::now(); + let last_transaction_timestamp = transactions.last().unwrap().timestamp.clone(); + + let mut events = vec![]; + for txn in &transactions { + let txn_version = txn.version as i64; + let block_height = txn.block_height as i64; + let txn_data = match txn.txn_data.as_ref() { + Some(data) => data, + None => { + tracing::warn!( + transaction_version = txn_version, + "Transaction data doesn't exist" + ); + PROCESSOR_UNKNOWN_TYPE_COUNT + .with_label_values(&["EventsProcessor"]) + .inc(); + continue; + }, + }; + let default = vec![]; + let raw_events = match txn_data { + TxnData::BlockMetadata(tx_inner) => &tx_inner.events, + TxnData::Genesis(tx_inner) => &tx_inner.events, + TxnData::User(tx_inner) => &tx_inner.events, + _ => &default, + }; + + let txn_events = EventModel::from_events(raw_events, txn_version, block_height); + events.extend(txn_events); + } + + let processing_duration_in_secs = processing_start.elapsed().as_secs_f64(); + let db_insertion_start = std::time::Instant::now(); + + let tx_result = insert_to_db( + self.get_pool(), + self.name(), + start_version, + end_version, + &events, + &self.per_table_chunk_sizes, + ) + .await; + + let db_insertion_duration_in_secs = db_insertion_start.elapsed().as_secs_f64(); + match tx_result { + Ok(_) => Ok(ProcessingResult::DefaultProcessingResult( + DefaultProcessingResult { + start_version, + end_version, + processing_duration_in_secs, + db_insertion_duration_in_secs, + last_transaction_timestamp, + }, + )), + Err(e) => { + error!( + start_version = start_version, + end_version = end_version, + processor_name = self.name(), + error = ?e, + "[Parser] Error inserting transactions to db", + ); + bail!(e) + }, + } + } + + fn connection_pool(&self) -> &ArcDbPool { + &self.connection_pool + } +} diff --git a/rust/processor/src/processors/fungible_asset_processor.rs b/rust/processor/src/processors/fungible_asset_processor.rs new file mode 100644 index 000000000..12c36600a --- /dev/null +++ b/rust/processor/src/processors/fungible_asset_processor.rs @@ -0,0 +1,745 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use super::{DefaultProcessingResult, ProcessorName, ProcessorTrait}; +use crate::{ + db::common::models::{ + coin_models::coin_supply::CoinSupply, + fungible_asset_models::{ + v2_fungible_asset_activities::{EventToCoinType, FungibleAssetActivity}, + v2_fungible_asset_balances::{ + CurrentFungibleAssetBalance, CurrentFungibleAssetMapping, + CurrentUnifiedFungibleAssetBalance, FungibleAssetBalance, + }, + v2_fungible_asset_utils::{ + ConcurrentFungibleAssetBalance, ConcurrentFungibleAssetSupply, FeeStatement, + FungibleAssetMetadata, FungibleAssetStore, FungibleAssetSupply, + }, + v2_fungible_metadata::{FungibleAssetMetadataMapping, FungibleAssetMetadataModel}, + }, + object_models::v2_object_utils::{ + ObjectAggregatedData, ObjectAggregatedDataMapping, ObjectWithMetadata, Untransferable, + }, + }, + gap_detectors::ProcessingResult, + schema, + utils::{ + counters::PROCESSOR_UNKNOWN_TYPE_COUNT, + database::{execute_in_chunks, get_config_table_chunk_size, ArcDbPool}, + util::{get_entry_function_from_user_request, standardize_address}, + }, +}; +use ahash::AHashMap; +use anyhow::bail; +use 
aptos_protos::transaction::v1::{transaction::TxnData, write_set_change::Change, Transaction}; +use async_trait::async_trait; +use chrono::NaiveDateTime; +use diesel::{ + pg::{upsert::excluded, Pg}, + query_builder::QueryFragment, + ExpressionMethods, +}; +use std::fmt::Debug; +use tracing::error; + +pub struct FungibleAssetProcessor { + connection_pool: ArcDbPool, + per_table_chunk_sizes: AHashMap, +} + +impl FungibleAssetProcessor { + pub fn new(connection_pool: ArcDbPool, per_table_chunk_sizes: AHashMap) -> Self { + Self { + connection_pool, + per_table_chunk_sizes, + } + } +} + +impl Debug for FungibleAssetProcessor { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let state = &self.connection_pool.state(); + write!( + f, + "FungibleAssetTransactionProcessor {{ connections: {:?} idle_connections: {:?} }}", + state.connections, state.idle_connections + ) + } +} + +async fn insert_to_db( + conn: ArcDbPool, + name: &'static str, + start_version: u64, + end_version: u64, + fungible_asset_activities: &[FungibleAssetActivity], + fungible_asset_metadata: &[FungibleAssetMetadataModel], + fungible_asset_balances: &[FungibleAssetBalance], + current_fungible_asset_balances: &[CurrentFungibleAssetBalance], + current_unified_fungible_asset_balances: ( + &[CurrentUnifiedFungibleAssetBalance], + &[CurrentUnifiedFungibleAssetBalance], + ), + coin_supply: &[CoinSupply], + per_table_chunk_sizes: &AHashMap, +) -> Result<(), diesel::result::Error> { + tracing::trace!( + name = name, + start_version = start_version, + end_version = end_version, + "Inserting to db", + ); + + let faa = execute_in_chunks( + conn.clone(), + insert_fungible_asset_activities_query, + fungible_asset_activities, + get_config_table_chunk_size::( + "fungible_asset_activities", + per_table_chunk_sizes, + ), + ); + let fam = execute_in_chunks( + conn.clone(), + insert_fungible_asset_metadata_query, + fungible_asset_metadata, + get_config_table_chunk_size::( + "fungible_asset_metadata", + per_table_chunk_sizes, + ), + ); + let fab = execute_in_chunks( + conn.clone(), + insert_fungible_asset_balances_query, + fungible_asset_balances, + get_config_table_chunk_size::( + "fungible_asset_balances", + per_table_chunk_sizes, + ), + ); + let cfab = execute_in_chunks( + conn.clone(), + insert_current_fungible_asset_balances_query, + current_fungible_asset_balances, + get_config_table_chunk_size::( + "current_fungible_asset_balances", + per_table_chunk_sizes, + ), + ); + let cufab_v1 = execute_in_chunks( + conn.clone(), + insert_current_unified_fungible_asset_balances_v1_query, + current_unified_fungible_asset_balances.0, + get_config_table_chunk_size::( + "current_unified_fungible_asset_balances", + per_table_chunk_sizes, + ), + ); + let cufab_v2 = execute_in_chunks( + conn.clone(), + insert_current_unified_fungible_asset_balances_v2_query, + current_unified_fungible_asset_balances.1, + get_config_table_chunk_size::( + "current_unified_fungible_asset_balances", + per_table_chunk_sizes, + ), + ); + let cs = execute_in_chunks( + conn, + insert_coin_supply_query, + coin_supply, + get_config_table_chunk_size::("coin_supply", per_table_chunk_sizes), + ); + let (faa_res, fam_res, fab_res, cfab_res, cufab1_res, cufab2_res, cs_res) = + tokio::join!(faa, fam, fab, cfab, cufab_v1, cufab_v2, cs); + for res in [ + faa_res, fam_res, fab_res, cfab_res, cufab1_res, cufab2_res, cs_res, + ] { + res?; + } + + Ok(()) +} + +fn insert_fungible_asset_activities_query( + items_to_insert: Vec, +) -> ( + impl QueryFragment + 
diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::fungible_asset_activities::dsl::*; + + ( + diesel::insert_into(schema::fungible_asset_activities::table) + .values(items_to_insert) + .on_conflict((transaction_version, event_index)) + .do_nothing(), + None, + ) +} + +fn insert_fungible_asset_metadata_query( + items_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::fungible_asset_metadata::dsl::*; + + ( + diesel::insert_into(schema::fungible_asset_metadata::table) + .values(items_to_insert) + .on_conflict(asset_type) + .do_update() + .set( + ( + creator_address.eq(excluded(creator_address)), + name.eq(excluded(name)), + symbol.eq(excluded(symbol)), + decimals.eq(excluded(decimals)), + icon_uri.eq(excluded(icon_uri)), + project_uri.eq(excluded(project_uri)), + last_transaction_version.eq(excluded(last_transaction_version)), + last_transaction_timestamp.eq(excluded(last_transaction_timestamp)), + supply_aggregator_table_handle_v1.eq(excluded(supply_aggregator_table_handle_v1)), + supply_aggregator_table_key_v1.eq(excluded(supply_aggregator_table_key_v1)), + token_standard.eq(excluded(token_standard)), + inserted_at.eq(excluded(inserted_at)), + is_token_v2.eq(excluded(is_token_v2)), + supply_v2.eq(excluded(supply_v2)), + maximum_v2.eq(excluded(maximum_v2)), + ) + ), + Some(" WHERE fungible_asset_metadata.last_transaction_version <= excluded.last_transaction_version "), + ) +} + +fn insert_fungible_asset_balances_query( + items_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::fungible_asset_balances::dsl::*; + + ( + diesel::insert_into(schema::fungible_asset_balances::table) + .values(items_to_insert) + .on_conflict((transaction_version, write_set_change_index)) + .do_nothing(), + None, + ) +} + +fn insert_current_fungible_asset_balances_query( + items_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::current_fungible_asset_balances::dsl::*; + + ( + diesel::insert_into(schema::current_fungible_asset_balances::table) + .values(items_to_insert) + .on_conflict(storage_id) + .do_update() + .set( + ( + owner_address.eq(excluded(owner_address)), + asset_type.eq(excluded(asset_type)), + is_primary.eq(excluded(is_primary)), + is_frozen.eq(excluded(is_frozen)), + amount.eq(excluded(amount)), + last_transaction_timestamp.eq(excluded(last_transaction_timestamp)), + last_transaction_version.eq(excluded(last_transaction_version)), + token_standard.eq(excluded(token_standard)), + inserted_at.eq(excluded(inserted_at)), + ) + ), + Some(" WHERE current_fungible_asset_balances.last_transaction_version <= excluded.last_transaction_version "), + ) +} + +fn insert_current_unified_fungible_asset_balances_v1_query( + items_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::current_unified_fungible_asset_balances_to_be_renamed::dsl::*; + + ( + diesel::insert_into(schema::current_unified_fungible_asset_balances_to_be_renamed::table) + .values(items_to_insert) + .on_conflict(storage_id) + .do_update() + .set( + ( + owner_address.eq(excluded(owner_address)), + asset_type_v1.eq(excluded(asset_type_v1)), + is_frozen.eq(excluded(is_frozen)), + amount_v1.eq(excluded(amount_v1)), + last_transaction_timestamp_v1.eq(excluded(last_transaction_timestamp_v1)), + 
last_transaction_version_v1.eq(excluded(last_transaction_version_v1)), + inserted_at.eq(excluded(inserted_at)), + ) + ), + Some(" WHERE current_unified_fungible_asset_balances_to_be_renamed.last_transaction_version_v1 IS NULL \ + OR current_unified_fungible_asset_balances_to_be_renamed.last_transaction_version_v1 <= excluded.last_transaction_version_v1"), + ) +} + +fn insert_current_unified_fungible_asset_balances_v2_query( + items_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::current_unified_fungible_asset_balances_to_be_renamed::dsl::*; + ( + diesel::insert_into(schema::current_unified_fungible_asset_balances_to_be_renamed::table) + .values(items_to_insert) + .on_conflict(storage_id) + .do_update() + .set( + ( + owner_address.eq(excluded(owner_address)), + asset_type_v2.eq(excluded(asset_type_v2)), + is_primary.eq(excluded(is_primary)), + is_frozen.eq(excluded(is_frozen)), + amount_v2.eq(excluded(amount_v2)), + last_transaction_timestamp_v2.eq(excluded(last_transaction_timestamp_v2)), + last_transaction_version_v2.eq(excluded(last_transaction_version_v2)), + inserted_at.eq(excluded(inserted_at)), + ) + ), + Some(" WHERE current_unified_fungible_asset_balances_to_be_renamed.last_transaction_version_v2 IS NULL \ + OR current_unified_fungible_asset_balances_to_be_renamed.last_transaction_version_v2 <= excluded.last_transaction_version_v2 "), + ) +} + +fn insert_coin_supply_query( + items_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::coin_supply::dsl::*; + + ( + diesel::insert_into(schema::coin_supply::table) + .values(items_to_insert) + .on_conflict((transaction_version, coin_type_hash)) + .do_nothing(), + None, + ) +} + +#[async_trait] +impl ProcessorTrait for FungibleAssetProcessor { + fn name(&self) -> &'static str { + ProcessorName::FungibleAssetProcessor.into() + } + + async fn process_transactions( + &self, + transactions: Vec, + start_version: u64, + end_version: u64, + _: Option, + ) -> anyhow::Result { + let processing_start = std::time::Instant::now(); + let last_transaction_timestamp = transactions.last().unwrap().timestamp.clone(); + + let ( + fungible_asset_activities, + fungible_asset_metadata, + fungible_asset_balances, + current_fungible_asset_balances, + current_unified_fungible_asset_balances, + coin_supply, + ) = parse_v2_coin(&transactions).await; + + let processing_duration_in_secs = processing_start.elapsed().as_secs_f64(); + let db_insertion_start = std::time::Instant::now(); + + let (coin_balance, fa_balance): (Vec<_>, Vec<_>) = current_unified_fungible_asset_balances + .into_iter() + .partition(|x| x.is_primary.is_none()); + let tx_result = insert_to_db( + self.get_pool(), + self.name(), + start_version, + end_version, + &fungible_asset_activities, + &fungible_asset_metadata, + &fungible_asset_balances, + ¤t_fungible_asset_balances, + (&coin_balance, &fa_balance), + &coin_supply, + &self.per_table_chunk_sizes, + ) + .await; + let db_insertion_duration_in_secs = db_insertion_start.elapsed().as_secs_f64(); + match tx_result { + Ok(_) => Ok(ProcessingResult::DefaultProcessingResult( + DefaultProcessingResult { + start_version, + end_version, + processing_duration_in_secs, + db_insertion_duration_in_secs, + last_transaction_timestamp, + }, + )), + Err(err) => { + error!( + start_version = start_version, + end_version = end_version, + processor_name = self.name(), + "[Parser] Error inserting transactions to 
db: {:?}", + err + ); + bail!(format!("Error inserting transactions to db. Processor {}. Start {}. End {}. Error {:?}", self.name(), start_version, end_version, err)) + }, + } + } + + fn connection_pool(&self) -> &ArcDbPool { + &self.connection_pool + } +} + +/// V2 coin is called fungible assets and this flow includes all data from V1 in coin_processor +async fn parse_v2_coin( + transactions: &[Transaction], +) -> ( + Vec, + Vec, + Vec, + Vec, + Vec, + Vec, +) { + let mut fungible_asset_activities = vec![]; + let mut fungible_asset_balances = vec![]; + let mut all_coin_supply = vec![]; + let mut current_fungible_asset_balances: CurrentFungibleAssetMapping = AHashMap::new(); + let mut fungible_asset_metadata: FungibleAssetMetadataMapping = AHashMap::new(); + + // Get Metadata for fungible assets by object + let mut fungible_asset_object_helper: ObjectAggregatedDataMapping = AHashMap::new(); + + for txn in transactions { + let txn_version = txn.version as i64; + let block_height = txn.block_height as i64; + let txn_data = match txn.txn_data.as_ref() { + Some(data) => data, + None => { + tracing::warn!( + transaction_version = txn_version, + "Transaction data doesn't exist" + ); + PROCESSOR_UNKNOWN_TYPE_COUNT + .with_label_values(&["FungibleAssetProcessor"]) + .inc(); + continue; + }, + }; + let transaction_info = txn.info.as_ref().expect("Transaction info doesn't exist!"); + let txn_timestamp = txn + .timestamp + .as_ref() + .expect("Transaction timestamp doesn't exist!") + .seconds; + #[allow(deprecated)] + let txn_timestamp = + NaiveDateTime::from_timestamp_opt(txn_timestamp, 0).expect("Txn Timestamp is invalid!"); + let txn_epoch = txn.epoch as i64; + + let default = vec![]; + let (events, user_request, entry_function_id_str) = match txn_data { + TxnData::BlockMetadata(tx_inner) => (&tx_inner.events, None, None), + TxnData::Genesis(tx_inner) => (&tx_inner.events, None, None), + TxnData::User(tx_inner) => { + let user_request = tx_inner + .request + .as_ref() + .expect("Sends is not present in user txn"); + let entry_function_id_str = get_entry_function_from_user_request(user_request); + (&tx_inner.events, Some(user_request), entry_function_id_str) + }, + _ => (&default, None, None), + }; + + // This is because v1 events (deposit/withdraw) don't have coin type so the only way is to match + // the event to the resource using the event guid + let mut event_to_v1_coin_type: EventToCoinType = AHashMap::new(); + + // First loop to get all objects + // Need to do a first pass to get all the objects + for wsc in transaction_info.changes.iter() { + if let Change::WriteResource(wr) = wsc.change.as_ref().unwrap() { + if let Some(object) = + ObjectWithMetadata::from_write_resource(wr, txn_version).unwrap() + { + fungible_asset_object_helper.insert( + standardize_address(&wr.address.to_string()), + ObjectAggregatedData { + object, + ..ObjectAggregatedData::default() + }, + ); + } + } + } + // Loop to get the metadata relevant to parse v1 and v2. 
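// Illustrative sketch only, not part of this change: the first pass above just
// records which write-set addresses hold an object core, so the passes that
// follow can hang fungible-asset metadata, store, supply, and balance resources
// off those same addresses. A minimal self-contained version of that two-pass
// aggregation, with hypothetical stand-in types instead of the crate's
// ObjectAggregatedData, looks roughly like this:

use std::collections::HashMap;

#[derive(Default)]
struct AggregatedSketch {
    is_object: bool,             // set in pass 1 when an ObjectCore is seen
    resource_types: Vec<String>, // attached in pass 2, only for known objects
}

fn two_pass_aggregate(changes: &[(String, String)]) -> HashMap<String, AggregatedSketch> {
    let mut helper: HashMap<String, AggregatedSketch> = HashMap::new();
    // Pass 1: remember every address that holds an object core.
    for (address, type_str) in changes {
        if type_str == "0x1::object::ObjectCore" {
            helper.entry(address.clone()).or_default().is_object = true;
        }
    }
    // Pass 2: attach remaining resources to addresses discovered in pass 1;
    // resources at non-object addresses are ignored.
    for (address, type_str) in changes {
        if let Some(entry) = helper.get_mut(address) {
            entry.resource_types.push(type_str.clone());
        }
    }
    helper
}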
+ // As an optimization, we also handle v1 balances in the process + for (index, wsc) in transaction_info.changes.iter().enumerate() { + if let Change::WriteResource(write_resource) = wsc.change.as_ref().unwrap() { + if let Some((balance, current_balance, event_to_coin)) = + FungibleAssetBalance::get_v1_from_write_resource( + write_resource, + index as i64, + txn_version, + txn_timestamp, + ) + .unwrap() + { + fungible_asset_balances.push(balance); + current_fungible_asset_balances + .insert(current_balance.storage_id.clone(), current_balance.clone()); + event_to_v1_coin_type.extend(event_to_coin); + } + // Fill the v2 object metadata + let address = standardize_address(&write_resource.address.to_string()); + if let Some(aggregated_data) = fungible_asset_object_helper.get_mut(&address) { + if let Some(fungible_asset_metadata) = + FungibleAssetMetadata::from_write_resource(write_resource, txn_version) + .unwrap() + { + aggregated_data.fungible_asset_metadata = Some(fungible_asset_metadata); + } + if let Some(fungible_asset_store) = + FungibleAssetStore::from_write_resource(write_resource, txn_version) + .unwrap() + { + aggregated_data.fungible_asset_store = Some(fungible_asset_store); + } + if let Some(fungible_asset_supply) = + FungibleAssetSupply::from_write_resource(write_resource, txn_version) + .unwrap() + { + aggregated_data.fungible_asset_supply = Some(fungible_asset_supply); + } + if let Some(concurrent_fungible_asset_supply) = + ConcurrentFungibleAssetSupply::from_write_resource( + write_resource, + txn_version, + ) + .unwrap() + { + aggregated_data.concurrent_fungible_asset_supply = + Some(concurrent_fungible_asset_supply); + } + if let Some(concurrent_fungible_asset_balance) = + ConcurrentFungibleAssetBalance::from_write_resource( + write_resource, + txn_version, + ) + .unwrap() + { + aggregated_data.concurrent_fungible_asset_balance = + Some(concurrent_fungible_asset_balance); + } + if let Some(untransferable) = + Untransferable::from_write_resource(write_resource, txn_version).unwrap() + { + aggregated_data.untransferable = Some(untransferable); + } + } + } else if let Change::DeleteResource(delete_resource) = wsc.change.as_ref().unwrap() { + if let Some((balance, current_balance, event_to_coin)) = + FungibleAssetBalance::get_v1_from_delete_resource( + delete_resource, + index as i64, + txn_version, + txn_timestamp, + ) + .unwrap() + { + fungible_asset_balances.push(balance); + current_fungible_asset_balances + .insert(current_balance.storage_id.clone(), current_balance.clone()); + event_to_v1_coin_type.extend(event_to_coin); + } + } + } + + // The artificial gas event, only need for v1 + if let Some(req) = user_request { + let fee_statement = events.iter().find_map(|event| { + let event_type = event.type_str.as_str(); + FeeStatement::from_event(event_type, &event.data, txn_version) + }); + let gas_event = FungibleAssetActivity::get_gas_event( + transaction_info, + req, + &entry_function_id_str, + txn_version, + txn_timestamp, + block_height, + fee_statement, + ); + fungible_asset_activities.push(gas_event); + } + + // Loop to handle events and collect additional metadata from events for v2 + for (index, event) in events.iter().enumerate() { + if let Some(v1_activity) = FungibleAssetActivity::get_v1_from_event( + event, + txn_version, + block_height, + txn_timestamp, + &entry_function_id_str, + &event_to_v1_coin_type, + index as i64, + ) + .unwrap_or_else(|e| { + tracing::error!( + transaction_version = txn_version, + index = index, + error = ?e, + "[Parser] error parsing 
fungible asset activity v1"); + panic!("[Parser] error parsing fungible asset activity v1"); + }) { + fungible_asset_activities.push(v1_activity); + } + if let Some(v2_activity) = FungibleAssetActivity::get_v2_from_event( + event, + txn_version, + block_height, + txn_timestamp, + index as i64, + &entry_function_id_str, + &fungible_asset_object_helper, + ) + .await + .unwrap_or_else(|e| { + tracing::error!( + transaction_version = txn_version, + index = index, + error = ?e, + "[Parser] error parsing fungible asset activity v2"); + panic!("[Parser] error parsing fungible asset activity v2"); + }) { + fungible_asset_activities.push(v2_activity); + } + } + + // Loop to handle all the other changes + for (index, wsc) in transaction_info.changes.iter().enumerate() { + match wsc.change.as_ref().unwrap() { + Change::WriteResource(write_resource) => { + if let Some(fa_metadata) = + FungibleAssetMetadataModel::get_v1_from_write_resource( + write_resource, + txn_version, + txn_timestamp, + ) + .unwrap_or_else(|e| { + tracing::error!( + transaction_version = txn_version, + index = index, + error = ?e, + "[Parser] error parsing fungible metadata v1"); + panic!("[Parser] error parsing fungible metadata v1"); + }) + { + fungible_asset_metadata.insert(fa_metadata.asset_type.clone(), fa_metadata); + } + if let Some(fa_metadata) = + FungibleAssetMetadataModel::get_v2_from_write_resource( + write_resource, + txn_version, + txn_timestamp, + &fungible_asset_object_helper, + ) + .unwrap_or_else(|e| { + tracing::error!( + transaction_version = txn_version, + index = index, + error = ?e, + "[Parser] error parsing fungible metadata v2"); + panic!("[Parser] error parsing fungible metadata v2"); + }) + { + fungible_asset_metadata.insert(fa_metadata.asset_type.clone(), fa_metadata); + } + if let Some((balance, curr_balance)) = + FungibleAssetBalance::get_v2_from_write_resource( + write_resource, + index as i64, + txn_version, + txn_timestamp, + &fungible_asset_object_helper, + ) + .await + .unwrap_or_else(|e| { + tracing::error!( + transaction_version = txn_version, + index = index, + error = ?e, + "[Parser] error parsing fungible balance v2"); + panic!("[Parser] error parsing fungible balance v2"); + }) + { + fungible_asset_balances.push(balance); + current_fungible_asset_balances + .insert(curr_balance.storage_id.clone(), curr_balance); + } + }, + Change::WriteTableItem(table_item) => { + if let Some(coin_supply) = CoinSupply::from_write_table_item( + table_item, + txn_version, + txn_timestamp, + txn_epoch, + ) + .unwrap() + { + all_coin_supply.push(coin_supply); + } + }, + _ => {}, + } + } + } + + // Boilerplate after this + // Getting list of values and sorting by pk in order to avoid postgres deadlock since we're doing multi threaded db writes + let mut fungible_asset_metadata = fungible_asset_metadata + .into_values() + .collect::>(); + let mut current_fungible_asset_balances = current_fungible_asset_balances + .into_values() + .collect::>(); + + // Sort by PK + fungible_asset_metadata.sort_by(|a, b| a.asset_type.cmp(&b.asset_type)); + current_fungible_asset_balances.sort_by(|a, b| a.storage_id.cmp(&b.storage_id)); + + // Process the unified balance + let current_unified_fungible_asset_balances = current_fungible_asset_balances + .iter() + .map(CurrentUnifiedFungibleAssetBalance::from) + .collect::>(); + ( + fungible_asset_activities, + fungible_asset_metadata, + fungible_asset_balances, + current_fungible_asset_balances, + current_unified_fungible_asset_balances, + all_coin_supply, + ) +} diff --git 
a/rust/processor/src/processors/mod.rs b/rust/processor/src/processors/mod.rs new file mode 100644 index 000000000..08c034550 --- /dev/null +++ b/rust/processor/src/processors/mod.rs @@ -0,0 +1,255 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +// Note: For enum_dispatch to work nicely, it is easiest to have the trait and the enum +// in the same file (ProcessorTrait and Processor). + +pub mod account_transactions_processor; +pub mod ans_processor; +pub mod coin_processor; +pub mod default_processor; +pub mod events_processor; +pub mod fungible_asset_processor; +pub mod monitoring_processor; +pub mod nft_metadata_processor; +pub mod objects_processor; +pub mod parquet_default_processor; +pub mod stake_processor; +pub mod token_processor; +pub mod token_v2_processor; +pub mod transaction_metadata_processor; +pub mod user_transaction_processor; + +use self::{ + account_transactions_processor::AccountTransactionsProcessor, + ans_processor::{AnsProcessor, AnsProcessorConfig}, + coin_processor::CoinProcessor, + default_processor::DefaultProcessor, + events_processor::EventsProcessor, + fungible_asset_processor::FungibleAssetProcessor, + monitoring_processor::MonitoringProcessor, + nft_metadata_processor::{NftMetadataProcessor, NftMetadataProcessorConfig}, + objects_processor::{ObjectsProcessor, ObjectsProcessorConfig}, + parquet_default_processor::DefaultParquetProcessorConfig, + stake_processor::{StakeProcessor, StakeProcessorConfig}, + token_processor::{TokenProcessor, TokenProcessorConfig}, + token_v2_processor::{TokenV2Processor, TokenV2ProcessorConfig}, + transaction_metadata_processor::TransactionMetadataProcessor, + user_transaction_processor::UserTransactionProcessor, +}; +use crate::{ + db::common::models::processor_status::ProcessorStatus, + gap_detectors::ProcessingResult, + processors::parquet_default_processor::DefaultParquetProcessor, + schema::processor_status, + utils::{ + counters::{GOT_CONNECTION_COUNT, UNABLE_TO_GET_CONNECTION_COUNT}, + database::{execute_with_better_error, ArcDbPool, DbPoolConnection}, + util::parse_timestamp, + }, +}; +use aptos_protos::transaction::v1::Transaction as ProtoTransaction; +use async_trait::async_trait; +use diesel::{pg::upsert::excluded, ExpressionMethods}; +use enum_dispatch::enum_dispatch; +use serde::{Deserialize, Serialize}; +use std::fmt::Debug; + +#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)] +pub struct DefaultProcessingResult { + pub start_version: u64, + pub end_version: u64, + pub last_transaction_timestamp: Option, + pub processing_duration_in_secs: f64, + pub db_insertion_duration_in_secs: f64, +} + +/// Base trait for all processors +#[async_trait] +#[enum_dispatch] +pub trait ProcessorTrait: Send + Sync + Debug { + fn name(&self) -> &'static str; + + /// Process all transactions including writing to the database + async fn process_transactions( + &self, + transactions: Vec, + start_version: u64, + end_version: u64, + db_chain_id: Option, + ) -> anyhow::Result; + + /// Gets a reference to the connection pool + /// This is used by the `get_conn()` helper below + fn connection_pool(&self) -> &ArcDbPool; + + //* Below are helper methods that don't need to be implemented *// + + /// Gets an instance of the connection pool + fn get_pool(&self) -> ArcDbPool { + let pool = self.connection_pool(); + pool.clone() + } + + /// Gets the connection. + /// If it was unable to do so (default timeout: 30s), it will keep retrying until it can. 
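// Aside, not part of this change: the retry loop below is unbounded, so a
// processor stalls rather than fails while the pool is exhausted or the database
// is briefly unreachable. A typical call site (query helper name hypothetical):
//
//     let mut conn = self.get_conn().await;  // waits until a connection is free
//     run_some_diesel_query(&mut conn).await?;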
+ async fn get_conn(&self) -> DbPoolConnection { + let pool = self.connection_pool(); + loop { + match pool.get().await { + Ok(conn) => { + GOT_CONNECTION_COUNT.inc(); + return conn; + }, + Err(err) => { + UNABLE_TO_GET_CONNECTION_COUNT.inc(); + tracing::error!( + // todo bb8 doesn't let you read the connection timeout. + //"Could not get DB connection from pool, will retry in {:?}. Err: {:?}", + //pool.connection_timeout(), + "Could not get DB connection from pool, will retry. Err: {:?}", + err + ); + }, + }; + } + } + + /// Store last processed version from database. We can assume that all previously processed + /// versions are successful because any gap would cause the processor to panic + async fn update_last_processed_version( + &self, + version: u64, + last_transaction_timestamp: Option, + ) -> anyhow::Result<()> { + let timestamp = last_transaction_timestamp.map(|t| parse_timestamp(&t, version as i64)); + let status = ProcessorStatus { + processor: self.name().to_string(), + last_success_version: version as i64, + last_transaction_timestamp: timestamp, + }; + execute_with_better_error( + self.get_pool(), + diesel::insert_into(processor_status::table) + .values(&status) + .on_conflict(processor_status::processor) + .do_update() + .set(( + processor_status::last_success_version + .eq(excluded(processor_status::last_success_version)), + processor_status::last_updated.eq(excluded(processor_status::last_updated)), + processor_status::last_transaction_timestamp + .eq(excluded(processor_status::last_transaction_timestamp)), + )), + Some(" WHERE processor_status.last_success_version <= EXCLUDED.last_success_version "), + ) + .await?; + Ok(()) + } +} + +/// This enum captures the configs for all the different processors that are defined. +/// The configs for each processor should only contain configuration specific to that +/// processor. For configuration that is common to all processors, put it in +/// IndexerGrpcProcessorConfig. +#[derive(Clone, Debug, Deserialize, Serialize, strum::IntoStaticStr, strum::EnumDiscriminants)] +#[serde(tag = "type", rename_all = "snake_case")] +// What is all this strum stuff? Let me explain. +// +// Previously we had consts called NAME in each module and a function called `name` on +// the ProcessorTrait. As such it was possible for this name to not match the snake case +// representation of the struct name. By using strum we can have a single source for +// processor names derived from the enum variants themselves. +// +// That's what this strum_discriminants stuff is, it uses macro magic to generate the +// ProcessorName enum based on ProcessorConfig. The rest of the derives configure this +// generation logic, e.g. to make sure we use snake_case. 
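// Illustrative sketch only, not part of this change: a standalone version of the
// same strum pattern, with hypothetical WidgetConfig/WidgetKind names, assuming a
// strum dependency with the "derive" feature enabled.

use strum::{EnumDiscriminants, IntoStaticStr, VariantNames};

#[derive(EnumDiscriminants, IntoStaticStr)]
#[strum(serialize_all = "snake_case")]
#[strum_discriminants(
    name(WidgetKind),
    derive(strum::EnumVariantNames),
    strum(serialize_all = "snake_case")
)]
#[allow(dead_code)]
enum WidgetConfig {
    BigWidget { size: u64 },
    SmallWidget,
}

fn main() {
    // IntoStaticStr + serialize_all is the same trick ProcessorConfig::name()
    // uses to turn a variant into its snake_case name.
    let name: &'static str = (&WidgetConfig::SmallWidget).into();
    assert_eq!(name, "small_widget");
    // The generated WidgetKind enum mirrors the variants without their payloads,
    // so its VARIANTS list can be compared against another enum's, which is what
    // test_processor_names_complete does further down in this module.
    assert_eq!(WidgetKind::VARIANTS.len(), 2);
}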
+#[strum(serialize_all = "snake_case")] +#[strum_discriminants( + derive( + Deserialize, + Serialize, + strum::EnumVariantNames, + strum::IntoStaticStr, + strum::Display, + clap::ValueEnum + ), + name(ProcessorName), + clap(rename_all = "snake_case"), + serde(rename_all = "snake_case"), + strum(serialize_all = "snake_case") +)] +pub enum ProcessorConfig { + AccountTransactionsProcessor, + AnsProcessor(AnsProcessorConfig), + CoinProcessor, + DefaultProcessor, + EventsProcessor, + FungibleAssetProcessor, + MonitoringProcessor, + NftMetadataProcessor(NftMetadataProcessorConfig), + ObjectsProcessor(ObjectsProcessorConfig), + StakeProcessor(StakeProcessorConfig), + TokenProcessor(TokenProcessorConfig), + TokenV2Processor(TokenV2ProcessorConfig), + TransactionMetadataProcessor, + UserTransactionProcessor, + DefaultParquetProcessor(DefaultParquetProcessorConfig), +} + +impl ProcessorConfig { + /// Get the name of the processor config as a static str. This is a convenience + /// method to access the derived functionality implemented by strum::IntoStaticStr. + pub fn name(&self) -> &'static str { + self.into() + } +} + +/// This enum contains all the processors defined in this crate. We use enum_dispatch +/// as it is more efficient than using dynamic dispatch (Box) and +/// it enables nice safety checks like in we do in `test_processor_names_complete`. +#[enum_dispatch(ProcessorTrait)] +#[derive(Debug)] +// To ensure that the variants of ProcessorConfig and Processor line up, in the testing +// build path we derive EnumDiscriminants on this enum as well and make sure the two +// sets of variants match up in `test_processor_names_complete`. +#[cfg_attr( + test, + derive(strum::EnumDiscriminants), + strum_discriminants( + derive(strum::EnumVariantNames), + name(ProcessorDiscriminants), + strum(serialize_all = "snake_case") + ) +)] +pub enum Processor { + AccountTransactionsProcessor, + AnsProcessor, + CoinProcessor, + DefaultProcessor, + EventsProcessor, + FungibleAssetProcessor, + MonitoringProcessor, + NftMetadataProcessor, + ObjectsProcessor, + StakeProcessor, + TokenProcessor, + TokenV2Processor, + TransactionMetadataProcessor, + UserTransactionProcessor, + DefaultParquetProcessor, +} + +#[cfg(test)] +mod test { + use super::*; + use strum::VariantNames; + + /// This test exists to make sure that when a new processor is added, it is added + /// to both Processor and ProcessorConfig. To make sure this passes, make sure the + /// variants are in the same order (lexicographical) and the names match. 
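// Hypothetical example, not part of this change: adding a new
// FooProcessor(FooProcessorConfig) variant to ProcessorConfig without a matching
// FooProcessor variant in Processor leaves "foo_processor" in
// ProcessorName::VARIANTS but not in ProcessorDiscriminants::VARIANTS, so the
// mismatch is caught by the assertion below at test time.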
+ #[test] + fn test_processor_names_complete() { + assert_eq!(ProcessorName::VARIANTS, ProcessorDiscriminants::VARIANTS); + } +} diff --git a/rust/processor/src/processors/monitoring_processor.rs b/rust/processor/src/processors/monitoring_processor.rs new file mode 100644 index 000000000..c7e750f82 --- /dev/null +++ b/rust/processor/src/processors/monitoring_processor.rs @@ -0,0 +1,58 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use super::{DefaultProcessingResult, ProcessorName, ProcessorTrait}; +use crate::{gap_detectors::ProcessingResult, utils::database::ArcDbPool}; +use aptos_protos::transaction::v1::Transaction; +use async_trait::async_trait; +use std::fmt::Debug; + +pub struct MonitoringProcessor { + connection_pool: ArcDbPool, +} + +impl MonitoringProcessor { + pub fn new(connection_pool: ArcDbPool) -> Self { + Self { connection_pool } + } +} + +impl Debug for MonitoringProcessor { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let state = &self.connection_pool.state(); + write!( + f, + "MonitoringProcessor {{ connections: {:?} idle_connections: {:?} }}", + state.connections, state.idle_connections + ) + } +} + +#[async_trait] +impl ProcessorTrait for MonitoringProcessor { + fn name(&self) -> &'static str { + ProcessorName::MonitoringProcessor.into() + } + + async fn process_transactions( + &self, + transactions: Vec, + start_version: u64, + end_version: u64, + _: Option, + ) -> anyhow::Result { + Ok(ProcessingResult::DefaultProcessingResult( + DefaultProcessingResult { + start_version, + end_version, + processing_duration_in_secs: 0.0, + db_insertion_duration_in_secs: 0.0, + last_transaction_timestamp: transactions.last().unwrap().timestamp.clone(), + }, + )) + } + + fn connection_pool(&self) -> &ArcDbPool { + &self.connection_pool + } +} diff --git a/rust/processor/src/processors/nft_metadata_processor.rs b/rust/processor/src/processors/nft_metadata_processor.rs new file mode 100644 index 000000000..4fcb9a922 --- /dev/null +++ b/rust/processor/src/processors/nft_metadata_processor.rs @@ -0,0 +1,350 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use super::{DefaultProcessingResult, ProcessorName, ProcessorTrait}; +use crate::{ + db::common::models::{ + object_models::v2_object_utils::{ + ObjectAggregatedData, ObjectAggregatedDataMapping, ObjectWithMetadata, + }, + token_models::tokens::{TableHandleToOwner, TableMetadataForToken}, + token_v2_models::{ + v2_collections::{CollectionV2, CurrentCollectionV2, CurrentCollectionV2PK}, + v2_token_datas::{CurrentTokenDataV2, CurrentTokenDataV2PK, TokenDataV2}, + }, + }, + gap_detectors::ProcessingResult, + utils::{ + database::{ArcDbPool, DbPoolConnection}, + util::{parse_timestamp, remove_null_bytes, standardize_address}, + }, + IndexerGrpcProcessorConfig, +}; +use ahash::AHashMap; +use aptos_protos::transaction::v1::{write_set_change::Change, Transaction}; +use async_trait::async_trait; +use futures_util::future::try_join_all; +use google_cloud_googleapis::pubsub::v1::PubsubMessage; +use google_cloud_pubsub::client::{Client, ClientConfig}; +use serde::{Deserialize, Serialize}; +use std::{ + fmt::Debug, + time::{SystemTime, UNIX_EPOCH}, +}; +use tracing::{error, info}; + +pub const CHUNK_SIZE: usize = 1000; + +#[derive(Clone, Debug, Deserialize, Serialize)] +#[serde(deny_unknown_fields)] +pub struct NftMetadataProcessorConfig { + pub pubsub_topic_name: String, + pub google_application_credentials: Option, + #[serde(default = 
"IndexerGrpcProcessorConfig::default_query_retries")] + pub query_retries: u32, + #[serde(default = "IndexerGrpcProcessorConfig::default_query_retry_delay_ms")] + pub query_retry_delay_ms: u64, +} + +pub struct NftMetadataProcessor { + connection_pool: ArcDbPool, + chain_id: u8, + config: NftMetadataProcessorConfig, +} + +impl NftMetadataProcessor { + pub fn new(connection_pool: ArcDbPool, config: NftMetadataProcessorConfig) -> Self { + tracing::info!("init NftMetadataProcessor"); + + // Crate reads from authentication from file specified in + // GOOGLE_APPLICATION_CREDENTIALS env var. + if let Some(credentials) = config.google_application_credentials.clone() { + std::env::set_var("GOOGLE_APPLICATION_CREDENTIALS", credentials); + } + + Self { + connection_pool, + chain_id: 0, + config, + } + } + + pub fn set_chain_id(&mut self, chain_id: u8) { + self.chain_id = chain_id; + } +} + +impl Debug for NftMetadataProcessor { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let state = &self.connection_pool.state(); + write!( + f, + "NftMetadataProcessor {{ connections: {:?} idle_connections: {:?} }}", + state.connections, state.idle_connections + ) + } +} + +#[async_trait] +impl ProcessorTrait for NftMetadataProcessor { + fn name(&self) -> &'static str { + ProcessorName::NftMetadataProcessor.into() + } + + async fn process_transactions( + &self, + transactions: Vec, + start_version: u64, + end_version: u64, + db_chain_id: Option, + ) -> anyhow::Result { + let processing_start = std::time::Instant::now(); + let last_transaction_timestamp = transactions.last().unwrap().timestamp.clone(); + + let mut conn = self.get_conn().await; + let query_retries = self.config.query_retries; + let query_retry_delay_ms = self.config.query_retry_delay_ms; + + let db_chain_id = db_chain_id.unwrap_or_else(|| { + error!("[NFT Metadata Crawler] db_chain_id must not be null"); + panic!(); + }); + + // First get all token related table metadata from the batch of transactions. This is in case + // an earlier transaction has metadata (in resources) that's missing from a later transaction. 
+ let table_handle_to_owner = + TableMetadataForToken::get_table_handle_to_owner_from_transactions(&transactions); + + // Initialize pubsub client + let config = ClientConfig::default().with_auth().await?; + let client = Client::new(config).await?; + let topic = client.topic(&self.config.pubsub_topic_name.clone()); + let publisher = topic.new_publisher(None); + let ordering_key = get_current_timestamp(); + + // Publish CurrentTokenDataV2 and CurrentCollectionV2 from transactions + let (token_datas, collections) = parse_v2_token( + &transactions, + &table_handle_to_owner, + &mut conn, + query_retries, + query_retry_delay_ms, + ) + .await; + let mut pubsub_messages: Vec = + Vec::with_capacity(token_datas.len() + collections.len()); + + // Publish all parsed token and collection data to Pubsub + for token_data in token_datas { + pubsub_messages.push(PubsubMessage { + data: clean_token_pubsub_message(token_data, db_chain_id).into(), + ordering_key: ordering_key.clone(), + ..Default::default() + }) + } + + for collection in collections { + pubsub_messages.push(PubsubMessage { + data: clean_collection_pubsub_message(collection, db_chain_id).into(), + ordering_key: ordering_key.clone(), + ..Default::default() + }) + } + + let processing_duration_in_secs = processing_start.elapsed().as_secs_f64(); + let db_insertion_start = std::time::Instant::now(); + + info!( + start_version = start_version, + end_version = end_version, + "[NFT Metadata Crawler] Publishing to queue" + ); + + let chunks: Vec> = pubsub_messages + .chunks(CHUNK_SIZE) + .map(|chunk| chunk.to_vec()) + .collect(); + + for chunk in chunks { + try_join_all( + publisher + .publish_bulk(chunk) + .await + .into_iter() + .map(|awaiter| awaiter.get()), + ) + .await?; + } + + let db_insertion_duration_in_secs = db_insertion_start.elapsed().as_secs_f64(); + + Ok(ProcessingResult::DefaultProcessingResult( + DefaultProcessingResult { + start_version, + end_version, + processing_duration_in_secs, + db_insertion_duration_in_secs, + last_transaction_timestamp, + }, + )) + } + + fn connection_pool(&self) -> &ArcDbPool { + &self.connection_pool + } +} + +fn clean_token_pubsub_message(ctd: CurrentTokenDataV2, db_chain_id: u64) -> String { + remove_null_bytes(&format!( + "{},{},{},{},{},false", + ctd.token_data_id, + ctd.token_uri, + ctd.last_transaction_version, + ctd.last_transaction_timestamp, + db_chain_id, + )) +} + +fn clean_collection_pubsub_message(cc: CurrentCollectionV2, db_chain_id: u64) -> String { + remove_null_bytes(&format!( + "{},{},{},{},{},false", + cc.collection_id, + cc.uri, + cc.last_transaction_version, + cc.last_transaction_timestamp, + db_chain_id, + )) +} + +/// Copied from token_processor; +async fn parse_v2_token( + transactions: &[Transaction], + table_handle_to_owner: &TableHandleToOwner, + conn: &mut DbPoolConnection<'_>, + query_retries: u32, + query_retry_delay_ms: u64, +) -> (Vec, Vec) { + let mut current_token_datas_v2: AHashMap = + AHashMap::new(); + let mut current_collections_v2: AHashMap = + AHashMap::new(); + + for txn in transactions { + let txn_version = txn.version as i64; + let txn_timestamp = parse_timestamp(txn.timestamp.as_ref().unwrap(), txn_version); + let transaction_info = txn.info.as_ref().expect("Transaction info doesn't exist!"); + + let mut token_v2_metadata_helper: ObjectAggregatedDataMapping = AHashMap::new(); + for wsc in transaction_info.changes.iter() { + if let Change::WriteResource(wr) = wsc.change.as_ref().unwrap() { + if let Some(object) = + ObjectWithMetadata::from_write_resource(wr, 
txn_version).unwrap() + { + token_v2_metadata_helper.insert( + standardize_address(&wr.address.to_string()), + ObjectAggregatedData { + aptos_collection: None, + fixed_supply: None, + object, + concurrent_supply: None, + unlimited_supply: None, + property_map: None, + transfer_events: vec![], + untransferable: None, + token: None, + fungible_asset_metadata: None, + fungible_asset_supply: None, + concurrent_fungible_asset_supply: None, + concurrent_fungible_asset_balance: None, + fungible_asset_store: None, + token_identifier: None, + }, + ); + } + } + } + + for (index, wsc) in transaction_info.changes.iter().enumerate() { + let wsc_index = index as i64; + match wsc.change.as_ref().unwrap() { + Change::WriteTableItem(table_item) => { + if let Some((_, current_token_data)) = + TokenDataV2::get_v1_from_write_table_item( + table_item, + txn_version, + wsc_index, + txn_timestamp, + ) + .unwrap() + { + current_token_datas_v2 + .insert(current_token_data.token_data_id.clone(), current_token_data); + } + if let Some((_, current_collection)) = + CollectionV2::get_v1_from_write_table_item( + table_item, + txn_version, + wsc_index, + txn_timestamp, + table_handle_to_owner, + conn, + query_retries, + query_retry_delay_ms, + ) + .await + .unwrap() + { + current_collections_v2 + .insert(current_collection.collection_id.clone(), current_collection); + } + }, + Change::WriteResource(resource) => { + if let Some((_, current_token_data)) = TokenDataV2::get_v2_from_write_resource( + resource, + txn_version, + wsc_index, + txn_timestamp, + &token_v2_metadata_helper, + ) + .unwrap() + { + current_token_datas_v2 + .insert(current_token_data.token_data_id.clone(), current_token_data); + } + if let Some((_, current_collection)) = CollectionV2::get_v2_from_write_resource( + resource, + txn_version, + wsc_index, + txn_timestamp, + &token_v2_metadata_helper, + ) + .unwrap() + { + current_collections_v2 + .insert(current_collection.collection_id.clone(), current_collection); + } + }, + + _ => {}, + } + } + } + + let current_token_datas_v2 = current_token_datas_v2 + .into_values() + .collect::>(); + let current_collections_v2 = current_collections_v2 + .into_values() + .collect::>(); + + (current_token_datas_v2, current_collections_v2) +} + +/// Get current system timestamp in milliseconds for ordering key +fn get_current_timestamp() -> String { + SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap_or_default() + .as_millis() + .to_string() +} diff --git a/rust/processor/src/processors/objects_processor.rs b/rust/processor/src/processors/objects_processor.rs new file mode 100644 index 000000000..9228692ab --- /dev/null +++ b/rust/processor/src/processors/objects_processor.rs @@ -0,0 +1,308 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use super::{DefaultProcessingResult, ProcessorName, ProcessorTrait}; +use crate::{ + db::common::models::object_models::{ + v2_object_utils::{ObjectAggregatedData, ObjectAggregatedDataMapping, ObjectWithMetadata}, + v2_objects::{CurrentObject, Object}, + }, + gap_detectors::ProcessingResult, + schema, + utils::{ + database::{execute_in_chunks, get_config_table_chunk_size, ArcDbPool}, + util::standardize_address, + }, + IndexerGrpcProcessorConfig, +}; +use ahash::AHashMap; +use anyhow::bail; +use aptos_protos::transaction::v1::{write_set_change::Change, Transaction}; +use async_trait::async_trait; +use diesel::{ + pg::{upsert::excluded, Pg}, + query_builder::QueryFragment, + ExpressionMethods, +}; +use serde::{Deserialize, Serialize}; +use 
std::fmt::Debug; +use tracing::error; + +#[derive(Clone, Debug, Deserialize, Serialize)] +#[serde(deny_unknown_fields)] +pub struct ObjectsProcessorConfig { + #[serde(default = "IndexerGrpcProcessorConfig::default_query_retries")] + pub query_retries: u32, + #[serde(default = "IndexerGrpcProcessorConfig::default_query_retry_delay_ms")] + pub query_retry_delay_ms: u64, +} +pub struct ObjectsProcessor { + connection_pool: ArcDbPool, + config: ObjectsProcessorConfig, + per_table_chunk_sizes: AHashMap, +} + +impl ObjectsProcessor { + pub fn new( + connection_pool: ArcDbPool, + config: ObjectsProcessorConfig, + per_table_chunk_sizes: AHashMap, + ) -> Self { + Self { + connection_pool, + config, + per_table_chunk_sizes, + } + } +} + +impl Debug for ObjectsProcessor { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let state = &self.connection_pool.state(); + write!( + f, + "ObjectsProcessor {{ connections: {:?} idle_connections: {:?} }}", + state.connections, state.idle_connections + ) + } +} + +async fn insert_to_db( + conn: ArcDbPool, + name: &'static str, + start_version: u64, + end_version: u64, + (objects, current_objects): (&[Object], &[CurrentObject]), + per_table_chunk_sizes: &AHashMap, +) -> Result<(), diesel::result::Error> { + tracing::trace!( + name = name, + start_version = start_version, + end_version = end_version, + "Inserting to db", + ); + + let io = execute_in_chunks( + conn.clone(), + insert_objects_query, + objects, + get_config_table_chunk_size::("objects", per_table_chunk_sizes), + ); + let co = execute_in_chunks( + conn, + insert_current_objects_query, + current_objects, + get_config_table_chunk_size::("current_objects", per_table_chunk_sizes), + ); + let (io_res, co_res) = tokio::join!(io, co); + for res in [io_res, co_res] { + res?; + } + + Ok(()) +} + +fn insert_objects_query( + items_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::objects::dsl::*; + ( + diesel::insert_into(schema::objects::table) + .values(items_to_insert) + .on_conflict((transaction_version, write_set_change_index)) + .do_update() + .set((inserted_at.eq(excluded(inserted_at)),)), + None, + ) +} + +fn insert_current_objects_query( + items_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::current_objects::dsl::*; + ( + diesel::insert_into(schema::current_objects::table) + .values(items_to_insert) + .on_conflict(object_address) + .do_update() + .set(( + owner_address.eq(excluded(owner_address)), + state_key_hash.eq(excluded(state_key_hash)), + allow_ungated_transfer.eq(excluded(allow_ungated_transfer)), + last_guid_creation_num.eq(excluded(last_guid_creation_num)), + last_transaction_version.eq(excluded(last_transaction_version)), + is_deleted.eq(excluded(is_deleted)), + inserted_at.eq(excluded(inserted_at)), + untransferrable.eq(excluded(untransferrable)), + )), + Some( + " WHERE current_objects.last_transaction_version <= excluded.last_transaction_version ", + ), + ) +} + +#[async_trait] +impl ProcessorTrait for ObjectsProcessor { + fn name(&self) -> &'static str { + ProcessorName::ObjectsProcessor.into() + } + + async fn process_transactions( + &self, + transactions: Vec, + start_version: u64, + end_version: u64, + _: Option, + ) -> anyhow::Result { + let processing_start = std::time::Instant::now(); + let last_transaction_timestamp = transactions.last().unwrap().timestamp.clone(); + + let mut conn = 
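// Aside (illustrative only, not part of this diff): the insert_*_query helpers above all return
// the same shape, reduced here to a hypothetical table. The first tuple element is the diesel
// upsert statement; the second is an optional raw filter that execute_in_chunks appends so a
// stale row never overwrites a newer one. Table, column, and struct names below are made up.
diesel::table! {
    demo_current_rows (row_id) {
        row_id -> Text,
        payload -> Text,
        last_transaction_version -> Int8,
    }
}

#[derive(diesel::Insertable)]
#[diesel(table_name = demo_current_rows)]
struct DemoCurrentRow {
    row_id: String,
    payload: String,
    last_transaction_version: i64,
}

fn upsert_demo_current_rows_query(
    items_to_insert: Vec<DemoCurrentRow>,
) -> (
    impl diesel::query_builder::QueryFragment<diesel::pg::Pg>
        + diesel::query_builder::QueryId
        + Send,
    Option<&'static str>,
) {
    use self::demo_current_rows::dsl::{last_transaction_version, payload, row_id};
    use diesel::{pg::upsert::excluded, ExpressionMethods};
    (
        diesel::insert_into(demo_current_rows::table)
            .values(items_to_insert)
            .on_conflict(row_id)
            .do_update()
            .set((
                payload.eq(excluded(payload)),
                last_transaction_version.eq(excluded(last_transaction_version)),
            )),
        // Appended verbatim to the generated SQL by the caller so only newer rows win.
        Some(
            " WHERE demo_current_rows.last_transaction_version <= excluded.last_transaction_version ",
        ),
    )
}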
self.get_conn().await; + let query_retries = self.config.query_retries; + let query_retry_delay_ms = self.config.query_retry_delay_ms; + + // Moving object handling here because we need a single object + // map through transactions for lookups + let mut all_objects = vec![]; + let mut all_current_objects = AHashMap::new(); + let mut object_metadata_helper: ObjectAggregatedDataMapping = AHashMap::new(); + + for txn in &transactions { + let txn_version = txn.version as i64; + let changes = &txn + .info + .as_ref() + .unwrap_or_else(|| { + panic!( + "Transaction info doesn't exist! Transaction {}", + txn_version + ) + }) + .changes; + + // First pass to get all the object cores + for wsc in changes.iter() { + if let Change::WriteResource(wr) = wsc.change.as_ref().unwrap() { + let address = standardize_address(&wr.address.to_string()); + if let Some(object_with_metadata) = + ObjectWithMetadata::from_write_resource(wr, txn_version).unwrap() + { + // Object core is the first struct that we need to get + object_metadata_helper.insert(address.clone(), ObjectAggregatedData { + object: object_with_metadata, + token: None, + fungible_asset_store: None, + // The following structs are unused in this processor + fungible_asset_metadata: None, + aptos_collection: None, + fixed_supply: None, + unlimited_supply: None, + concurrent_supply: None, + property_map: None, + transfer_events: vec![], + untransferable: None, + fungible_asset_supply: None, + concurrent_fungible_asset_supply: None, + concurrent_fungible_asset_balance: None, + token_identifier: None, + }); + } + } + } + + // Second pass to construct the object data + for (index, wsc) in changes.iter().enumerate() { + let index: i64 = index as i64; + match wsc.change.as_ref().unwrap() { + Change::WriteResource(inner) => { + if let Some((object, current_object)) = &Object::from_write_resource( + inner, + txn_version, + index, + &object_metadata_helper, + ) + .unwrap() + { + all_objects.push(object.clone()); + all_current_objects + .insert(object.object_address.clone(), current_object.clone()); + } + }, + Change::DeleteResource(inner) => { + // Passing all_current_objects into the function so that we can get the owner of the deleted + // resource if it was handled in the same batch + if let Some((object, current_object)) = Object::from_delete_resource( + inner, + txn_version, + index, + &all_current_objects, + &mut conn, + query_retries, + query_retry_delay_ms, + ) + .await + .unwrap() + { + all_objects.push(object.clone()); + all_current_objects + .insert(object.object_address.clone(), current_object.clone()); + } + }, + _ => {}, + }; + } + } + + // Sort by PK + let mut all_current_objects = all_current_objects + .into_values() + .collect::>(); + all_current_objects.sort_by(|a, b| a.object_address.cmp(&b.object_address)); + + let processing_duration_in_secs = processing_start.elapsed().as_secs_f64(); + let db_insertion_start = std::time::Instant::now(); + + let tx_result = insert_to_db( + self.get_pool(), + self.name(), + start_version, + end_version, + (&all_objects, &all_current_objects), + &self.per_table_chunk_sizes, + ) + .await; + let db_insertion_duration_in_secs = db_insertion_start.elapsed().as_secs_f64(); + + match tx_result { + Ok(_) => Ok(ProcessingResult::DefaultProcessingResult( + DefaultProcessingResult { + start_version, + end_version, + processing_duration_in_secs, + db_insertion_duration_in_secs, + last_transaction_timestamp, + }, + )), + Err(e) => { + error!( + start_version = start_version, + end_version = end_version, + 
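// Aside (illustrative only, not part of this diff): the "Sort by PK" step above is the same
// pattern every processor in this change uses before writing: keep only the newest row per
// primary key within the batch, then emit rows in a deterministic order so parallel batch
// writers cannot deadlock in postgres. `Row` and its fields are illustrative.
use ahash::AHashMap;

#[derive(Clone)]
struct Row {
    pk: String,
    last_transaction_version: i64,
}

fn dedupe_and_sort(rows: Vec<Row>) -> Vec<Row> {
    let mut latest: AHashMap<String, Row> = AHashMap::new();
    for row in rows {
        // Later rows in the batch overwrite earlier ones for the same key.
        latest.insert(row.pk.clone(), row);
    }
    let mut out: Vec<Row> = latest.into_values().collect();
    out.sort_by(|a, b| a.pk.cmp(&b.pk));
    out
}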
processor_name = self.name(), + error = ?e, + "[Parser] Error inserting transactions to db", + ); + bail!(e) + }, + } + } + + fn connection_pool(&self) -> &ArcDbPool { + &self.connection_pool + } +} diff --git a/rust/processor/src/processors/parquet_default_processor.rs b/rust/processor/src/processors/parquet_default_processor.rs new file mode 100644 index 000000000..132a24ad3 --- /dev/null +++ b/rust/processor/src/processors/parquet_default_processor.rs @@ -0,0 +1,269 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use super::{ProcessorName, ProcessorTrait}; +use crate::{ + db::common::models::default_models::{ + parquet_move_resources::MoveResource, + parquet_move_tables::{CurrentTableItem, TableItem, TableMetadata}, + parquet_transactions::{Transaction as ParquetTransaction, TransactionModel}, + parquet_write_set_changes::{WriteSetChangeDetail, WriteSetChangeModel}, + }, + gap_detectors::ProcessingResult, + parquet_handler::create_parquet_handler_loop, + parquet_processors::{generic_parquet_processor::ParquetDataGeneric, ParquetProcessingResult}, + utils::database::ArcDbPool, +}; +use ahash::AHashMap; +use anyhow::anyhow; +use aptos_protos::transaction::v1::Transaction; +use async_trait::async_trait; +use kanal::AsyncSender; +use serde::{Deserialize, Serialize}; +use std::fmt::{Debug, Formatter, Result}; + +pub const RESOURCE_TYPES: [&str; 3] = ["transaction", "move_resource", "write_set_changes"]; + +#[derive(Clone, Debug, Deserialize, Serialize)] +#[serde(deny_unknown_fields)] +pub struct DefaultParquetProcessorConfig { + pub google_application_credentials: Option, + pub bucket_name: String, + pub parquet_handler_response_channel_size: usize, + pub max_buffer_size: usize, +} + +pub struct DefaultParquetProcessor { + connection_pool: ArcDbPool, + transaction_sender: AsyncSender>, + move_resource_sender: AsyncSender>, + wsc_sender: AsyncSender>, + ti_sender: AsyncSender>, +} + +// TODO: Since each table item has different size allocated, the pace of being backfilled to PQ varies a lot. +// Maybe we can have also have a way to configure different starting version for each table later. 
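// Aside (illustrative only, not part of this diff): each sender created in `new()` below comes
// from `create_parquet_handler_loop`, i.e. one bounded async channel per table plus a spawned
// task that drains it. A reduced sketch of that shape; the buffer/flush details here are
// assumptions, not the crate's implementation.
fn spawn_handler_loop<T: Send + 'static>(
    channel_size: usize,
    max_buffer_size: usize,
) -> kanal::AsyncSender<Vec<T>> {
    let (sender, receiver) = kanal::bounded_async::<Vec<T>>(channel_size);
    tokio::spawn(async move {
        let mut buffer: Vec<T> = Vec::new();
        while let Ok(batch) = receiver.recv().await {
            buffer.extend(batch);
            if buffer.len() >= max_buffer_size {
                // A real handler would write `buffer` out as a parquet file and report the
                // covered version range to the gap detector before resetting the buffer.
                buffer.clear();
            }
        }
    });
    sender
}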
+impl DefaultParquetProcessor { + pub fn new( + connection_pool: ArcDbPool, + config: DefaultParquetProcessorConfig, + new_gap_detector_sender: AsyncSender, + ) -> Self { + if let Some(credentials) = config.google_application_credentials.clone() { + std::env::set_var("GOOGLE_APPLICATION_CREDENTIALS", credentials); + } + + let transaction_sender = create_parquet_handler_loop::( + new_gap_detector_sender.clone(), + ProcessorName::DefaultParquetProcessor.into(), + config.bucket_name.clone(), + config.parquet_handler_response_channel_size, + config.max_buffer_size, + ); + + let move_resource_sender = create_parquet_handler_loop::( + new_gap_detector_sender.clone(), + ProcessorName::DefaultParquetProcessor.into(), + config.bucket_name.clone(), + config.parquet_handler_response_channel_size, + config.max_buffer_size, + ); + + let wsc_sender = create_parquet_handler_loop::( + new_gap_detector_sender.clone(), + ProcessorName::DefaultParquetProcessor.into(), + config.bucket_name.clone(), + config.parquet_handler_response_channel_size, + config.max_buffer_size, + ); + + let ti_sender = create_parquet_handler_loop::( + new_gap_detector_sender.clone(), + ProcessorName::DefaultParquetProcessor.into(), + config.bucket_name.clone(), + config.parquet_handler_response_channel_size, + config.max_buffer_size, + ); + + Self { + connection_pool, + transaction_sender, + move_resource_sender, + wsc_sender, + ti_sender, + } + } +} + +impl Debug for DefaultParquetProcessor { + fn fmt(&self, f: &mut Formatter<'_>) -> Result { + write!( + f, + "ParquetProcessor {{ capacity of t channel: {:?}, capacity of mr channel: {:?}, capacity of wsc channel: {:?}, capacity of ti channel: {:?} }}", + &self.transaction_sender.capacity(), + &self.move_resource_sender.capacity(), + &self.wsc_sender.capacity(), + &self.ti_sender.capacity(), + ) + } +} + +#[async_trait] +impl ProcessorTrait for DefaultParquetProcessor { + fn name(&self) -> &'static str { + ProcessorName::DefaultParquetProcessor.into() + } + + async fn process_transactions( + &self, + transactions: Vec, + start_version: u64, + end_version: u64, + _: Option, + ) -> anyhow::Result { + let last_transaction_timestamp = transactions.last().unwrap().timestamp.clone(); + + let ((mr, wsc, t, ti), transaction_version_to_struct_count) = + tokio::task::spawn_blocking(move || process_transactions(transactions)) + .await + .expect("Failed to spawn_blocking for TransactionModel::from_transactions"); + + let mr_parquet_data = ParquetDataGeneric { + data: mr, + last_transaction_timestamp: last_transaction_timestamp.clone(), + transaction_version_to_struct_count: transaction_version_to_struct_count.clone(), + first_txn_version: start_version, + last_txn_version: end_version, + }; + + self.move_resource_sender + .send(mr_parquet_data) + .await + .map_err(|e| anyhow!("Failed to send to parquet manager: {}", e))?; + + let wsc_parquet_data = ParquetDataGeneric { + data: wsc, + last_transaction_timestamp: last_transaction_timestamp.clone(), + transaction_version_to_struct_count: transaction_version_to_struct_count.clone(), + first_txn_version: start_version, + last_txn_version: end_version, + }; + self.wsc_sender + .send(wsc_parquet_data) + .await + .map_err(|e| anyhow!("Failed to send to parquet manager: {}", e))?; + + let t_parquet_data = ParquetDataGeneric { + data: t, + last_transaction_timestamp: last_transaction_timestamp.clone(), + transaction_version_to_struct_count: transaction_version_to_struct_count.clone(), + first_txn_version: start_version, + last_txn_version: end_version, + 
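// Aside (illustrative only, not part of this diff): the protobuf-to-parquet-model conversion
// above is synchronous, CPU-bound work, so it runs under `tokio::task::spawn_blocking` and only
// the finished structs are sent over the channels. Reduced sketch with a stand-in parse step:
async fn parse_off_the_async_runtime(raw: Vec<u64>) -> anyhow::Result<Vec<u64>> {
    let parsed = tokio::task::spawn_blocking(move || {
        // Pure CPU work; nothing in here may .await.
        raw.into_iter().map(|v| v.wrapping_mul(2)).collect::<Vec<_>>()
    })
    .await?; // a JoinError (for example, a panic in the closure) surfaces here
    Ok(parsed)
}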
}; + self.transaction_sender + .send(t_parquet_data) + .await + .map_err(|e| anyhow!("Failed to send to parquet manager: {}", e))?; + + let ti_parquet_data = ParquetDataGeneric { + data: ti, + last_transaction_timestamp: last_transaction_timestamp.clone(), + transaction_version_to_struct_count: transaction_version_to_struct_count.clone(), + first_txn_version: start_version, + last_txn_version: end_version, + }; + + self.ti_sender + .send(ti_parquet_data) + .await + .map_err(|e| anyhow!("Failed to send to parquet manager: {}", e))?; + + Ok(ProcessingResult::ParquetProcessingResult( + ParquetProcessingResult { + start_version: start_version as i64, + end_version: end_version as i64, + last_transaction_timestamp: last_transaction_timestamp.clone(), + txn_version_to_struct_count: AHashMap::new(), + }, + )) + } + + fn connection_pool(&self) -> &ArcDbPool { + &self.connection_pool + } +} + +pub fn process_transactions( + transactions: Vec, +) -> ( + ( + Vec, + Vec, + Vec, + Vec, + ), + AHashMap, +) { + let mut transaction_version_to_struct_count: AHashMap = AHashMap::new(); + let (txns, _block_metadata_txns, write_set_changes, wsc_details) = + TransactionModel::from_transactions( + &transactions, + &mut transaction_version_to_struct_count, + ); + + let mut move_modules = vec![]; + let mut move_resources = vec![]; + let mut table_items = vec![]; + let mut current_table_items = AHashMap::new(); + let mut table_metadata: AHashMap = AHashMap::new(); + + for detail in wsc_details { + match detail { + WriteSetChangeDetail::Module(module) => { + move_modules.push(module.clone()); + // transaction_version_to_struct_count.entry(module.transaction_version).and_modify(|e| *e += 1); // TODO: uncomment in Tranche2 + }, + WriteSetChangeDetail::Resource(resource) => { + transaction_version_to_struct_count + .entry(resource.txn_version) + .and_modify(|e| *e += 1); + move_resources.push(resource); + }, + WriteSetChangeDetail::Table(item, current_item, metadata) => { + transaction_version_to_struct_count + .entry(item.txn_version) + .and_modify(|e| *e += 1); + table_items.push(item); + + current_table_items.insert( + ( + current_item.table_handle.clone(), + current_item.key_hash.clone(), + ), + current_item, + ); + // transaction_version_to_struct_count.entry(current_item.last_transaction_version).and_modify(|e| *e += 1); // TODO: uncomment in Tranche2 + + if let Some(meta) = metadata { + table_metadata.insert(meta.handle.clone(), meta); + // transaction_version_to_struct_count.entry(current_item.last_transaction_version).and_modify(|e| *e += 1); // TODO: uncomment in Tranche2 + } + }, + } + } + + // Getting list of values and sorting by pk in order to avoid postgres deadlock since we're doing multi threaded db writes + let mut current_table_items = current_table_items + .into_values() + .collect::>(); + let mut table_metadata = table_metadata.into_values().collect::>(); + // Sort by PK + current_table_items + .sort_by(|a, b| (&a.table_handle, &a.key_hash).cmp(&(&b.table_handle, &b.key_hash))); + table_metadata.sort_by(|a, b| a.handle.cmp(&b.handle)); + + ( + (move_resources, write_set_changes, txns, table_items), + transaction_version_to_struct_count, + ) +} diff --git a/rust/processor/src/processors/stake_processor.rs b/rust/processor/src/processors/stake_processor.rs new file mode 100644 index 000000000..d623704d1 --- /dev/null +++ b/rust/processor/src/processors/stake_processor.rs @@ -0,0 +1,607 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use super::{DefaultProcessingResult, 
ProcessorName, ProcessorTrait}; +use crate::{ + db::common::models::stake_models::{ + current_delegated_voter::CurrentDelegatedVoter, + delegator_activities::DelegatedStakingActivity, + delegator_balances::{ + CurrentDelegatorBalance, CurrentDelegatorBalanceMap, DelegatorBalance, + }, + delegator_pools::{ + CurrentDelegatorPoolBalance, DelegatorPool, DelegatorPoolBalance, DelegatorPoolMap, + }, + proposal_votes::ProposalVote, + stake_utils::DelegationVoteGovernanceRecordsResource, + staking_pool_voter::{CurrentStakingPoolVoter, StakingPoolVoterMap}, + }, + gap_detectors::ProcessingResult, + schema, + utils::{ + database::{execute_in_chunks, get_config_table_chunk_size, ArcDbPool}, + util::{parse_timestamp, standardize_address}, + }, + IndexerGrpcProcessorConfig, +}; +use ahash::AHashMap; +use anyhow::bail; +use aptos_protos::transaction::v1::{write_set_change::Change, Transaction}; +use async_trait::async_trait; +use diesel::{ + pg::{upsert::excluded, Pg}, + query_builder::QueryFragment, + ExpressionMethods, +}; +use serde::{Deserialize, Serialize}; +use std::fmt::Debug; +use tracing::error; + +#[derive(Clone, Debug, Deserialize, Serialize)] +#[serde(deny_unknown_fields)] +pub struct StakeProcessorConfig { + #[serde(default = "IndexerGrpcProcessorConfig::default_query_retries")] + pub query_retries: u32, + #[serde(default = "IndexerGrpcProcessorConfig::default_query_retry_delay_ms")] + pub query_retry_delay_ms: u64, +} + +pub struct StakeProcessor { + connection_pool: ArcDbPool, + config: StakeProcessorConfig, + per_table_chunk_sizes: AHashMap, +} + +impl StakeProcessor { + pub fn new( + connection_pool: ArcDbPool, + config: StakeProcessorConfig, + per_table_chunk_sizes: AHashMap, + ) -> Self { + Self { + connection_pool, + config, + per_table_chunk_sizes, + } + } +} + +impl Debug for StakeProcessor { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let state = &self.connection_pool.state(); + write!( + f, + "StakeTransactionProcessor {{ connections: {:?} idle_connections: {:?} }}", + state.connections, state.idle_connections + ) + } +} + +async fn insert_to_db( + conn: ArcDbPool, + name: &'static str, + start_version: u64, + end_version: u64, + current_stake_pool_voters: &[CurrentStakingPoolVoter], + proposal_votes: &[ProposalVote], + delegator_actvities: &[DelegatedStakingActivity], + delegator_balances: &[DelegatorBalance], + current_delegator_balances: &[CurrentDelegatorBalance], + delegator_pools: &[DelegatorPool], + delegator_pool_balances: &[DelegatorPoolBalance], + current_delegator_pool_balances: &[CurrentDelegatorPoolBalance], + current_delegated_voter: &[CurrentDelegatedVoter], + per_table_chunk_sizes: &AHashMap, +) -> Result<(), diesel::result::Error> { + tracing::trace!( + name = name, + start_version = start_version, + end_version = end_version, + "Inserting to db", + ); + + let cspv = execute_in_chunks( + conn.clone(), + insert_current_stake_pool_voter_query, + current_stake_pool_voters, + get_config_table_chunk_size::( + "current_staking_pool_voter", + per_table_chunk_sizes, + ), + ); + let pv = execute_in_chunks( + conn.clone(), + insert_proposal_votes_query, + proposal_votes, + get_config_table_chunk_size::("proposal_votes", per_table_chunk_sizes), + ); + let da = execute_in_chunks( + conn.clone(), + insert_delegator_activities_query, + delegator_actvities, + get_config_table_chunk_size::( + "delegated_staking_activities", + per_table_chunk_sizes, + ), + ); + let db = execute_in_chunks( + conn.clone(), + insert_delegator_balances_query, + 
delegator_balances, + get_config_table_chunk_size::( + "delegator_balances", + per_table_chunk_sizes, + ), + ); + let cdb = execute_in_chunks( + conn.clone(), + insert_current_delegator_balances_query, + current_delegator_balances, + get_config_table_chunk_size::( + "current_delegator_balances", + per_table_chunk_sizes, + ), + ); + let dp = execute_in_chunks( + conn.clone(), + insert_delegator_pools_query, + delegator_pools, + get_config_table_chunk_size::( + "delegated_staking_pools", + per_table_chunk_sizes, + ), + ); + let dpb = execute_in_chunks( + conn.clone(), + insert_delegator_pool_balances_query, + delegator_pool_balances, + get_config_table_chunk_size::( + "delegated_staking_pool_balances", + per_table_chunk_sizes, + ), + ); + let cdpb = execute_in_chunks( + conn.clone(), + insert_current_delegator_pool_balances_query, + current_delegator_pool_balances, + get_config_table_chunk_size::( + "current_delegated_staking_pool_balances", + per_table_chunk_sizes, + ), + ); + let cdv = execute_in_chunks( + conn, + insert_current_delegated_voter_query, + current_delegated_voter, + get_config_table_chunk_size::( + "current_delegated_voter", + per_table_chunk_sizes, + ), + ); + + let (cspv_res, pv_res, da_res, db_res, cdb_res, dp_res, dpb_res, cdpb_res, cdv_res) = + futures::join!(cspv, pv, da, db, cdb, dp, dpb, cdpb, cdv); + for res in [ + cspv_res, pv_res, da_res, db_res, cdb_res, dp_res, dpb_res, cdpb_res, cdv_res, + ] { + res?; + } + + Ok(()) +} + +fn insert_current_stake_pool_voter_query( + items_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::current_staking_pool_voter::dsl::*; + + (diesel::insert_into(schema::current_staking_pool_voter::table) + .values(items_to_insert) + .on_conflict(staking_pool_address) + .do_update() + .set(( + staking_pool_address.eq(excluded(staking_pool_address)), + voter_address.eq(excluded(voter_address)), + last_transaction_version.eq(excluded(last_transaction_version)), + inserted_at.eq(excluded(inserted_at)), + operator_address.eq(excluded(operator_address)), + )), + Some( + " WHERE current_staking_pool_voter.last_transaction_version <= EXCLUDED.last_transaction_version ", + ), + ) +} + +fn insert_proposal_votes_query( + items_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::proposal_votes::dsl::*; + + ( + diesel::insert_into(schema::proposal_votes::table) + .values(items_to_insert) + .on_conflict((transaction_version, proposal_id, voter_address)) + .do_nothing(), + None, + ) +} + +fn insert_delegator_activities_query( + items_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::delegated_staking_activities::dsl::*; + + ( + diesel::insert_into(schema::delegated_staking_activities::table) + .values(items_to_insert) + .on_conflict((transaction_version, event_index)) + .do_nothing(), + None, + ) +} + +fn insert_delegator_balances_query( + items_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::delegator_balances::dsl::*; + + ( + diesel::insert_into(schema::delegator_balances::table) + .values(items_to_insert) + .on_conflict((transaction_version, write_set_change_index)) + .do_nothing(), + None, + ) +} + +fn insert_current_delegator_balances_query( + items_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + 
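// Aside (illustrative only, not part of this diff): insert_to_db above starts one chunked insert
// per table, drives them concurrently with futures::join!, and then surfaces the first error.
// The same shape, reduced to two generic futures:
async fn run_both(
    first: impl std::future::Future<Output = Result<(), diesel::result::Error>>,
    second: impl std::future::Future<Output = Result<(), diesel::result::Error>>,
) -> Result<(), diesel::result::Error> {
    // Both inserts make progress at the same time on this task.
    let (first_res, second_res) = futures::join!(first, second);
    for res in [first_res, second_res] {
        res?;
    }
    Ok(())
}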
Option<&'static str>, +) { + use schema::current_delegator_balances::dsl::*; + + (diesel::insert_into(schema::current_delegator_balances::table) + .values(items_to_insert) + .on_conflict((delegator_address, pool_address, pool_type, table_handle)) + .do_update() + .set(( + last_transaction_version.eq(excluded(last_transaction_version)), + inserted_at.eq(excluded(inserted_at)), + shares.eq(excluded(shares)), + parent_table_handle.eq(excluded(parent_table_handle)), + )), + Some( + " WHERE current_delegator_balances.last_transaction_version <= EXCLUDED.last_transaction_version ", + ), + ) +} + +fn insert_delegator_pools_query( + items_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::delegated_staking_pools::dsl::*; + + (diesel::insert_into(schema::delegated_staking_pools::table) + .values(items_to_insert) + .on_conflict(staking_pool_address) + .do_update() + .set(( + first_transaction_version.eq(excluded(first_transaction_version)), + inserted_at.eq(excluded(inserted_at)), + )), + Some( + " WHERE delegated_staking_pools.first_transaction_version >= EXCLUDED.first_transaction_version ", + ), + ) +} + +fn insert_delegator_pool_balances_query( + items_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::delegated_staking_pool_balances::dsl::*; + + ( + diesel::insert_into(schema::delegated_staking_pool_balances::table) + .values(items_to_insert) + .on_conflict((transaction_version, staking_pool_address)) + .do_nothing(), + None, + ) +} + +fn insert_current_delegator_pool_balances_query( + items_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::current_delegated_staking_pool_balances::dsl::*; + + (diesel::insert_into(schema::current_delegated_staking_pool_balances::table) + .values(items_to_insert) + .on_conflict(staking_pool_address) + .do_update() + .set(( + total_coins.eq(excluded(total_coins)), + total_shares.eq(excluded(total_shares)), + last_transaction_version.eq(excluded(last_transaction_version)), + inserted_at.eq(excluded(inserted_at)), + operator_commission_percentage.eq(excluded(operator_commission_percentage)), + inactive_table_handle.eq(excluded(inactive_table_handle)), + active_table_handle.eq(excluded(active_table_handle)), + )), + Some( + " WHERE current_delegated_staking_pool_balances.last_transaction_version <= EXCLUDED.last_transaction_version ", + ), + ) +} + +fn insert_current_delegated_voter_query( + item_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::current_delegated_voter::dsl::*; + + (diesel::insert_into(schema::current_delegated_voter::table) + .values(item_to_insert) + .on_conflict((delegation_pool_address, delegator_address)) + .do_update() + .set(( + voter.eq(excluded(voter)), + pending_voter.eq(excluded(pending_voter)), + last_transaction_timestamp.eq(excluded(last_transaction_timestamp)), + last_transaction_version.eq(excluded(last_transaction_version)), + table_handle.eq(excluded(table_handle)), + inserted_at.eq(excluded(inserted_at)), + )), + Some( + " WHERE current_delegated_voter.last_transaction_version <= EXCLUDED.last_transaction_version ", + ), + ) +} + +#[async_trait] +impl ProcessorTrait for StakeProcessor { + fn name(&self) -> &'static str { + ProcessorName::StakeProcessor.into() + } + + async fn process_transactions( + &self, + transactions: 
Vec, + start_version: u64, + end_version: u64, + _: Option, + ) -> anyhow::Result { + let processing_start = std::time::Instant::now(); + let last_transaction_timestamp = transactions.last().unwrap().timestamp.clone(); + + let mut conn = self.get_conn().await; + let query_retries = self.config.query_retries; + let query_retry_delay_ms = self.config.query_retry_delay_ms; + + let mut all_current_stake_pool_voters: StakingPoolVoterMap = AHashMap::new(); + let mut all_proposal_votes = vec![]; + let mut all_delegator_activities = vec![]; + let mut all_delegator_balances = vec![]; + let mut all_current_delegator_balances: CurrentDelegatorBalanceMap = AHashMap::new(); + let mut all_delegator_pools: DelegatorPoolMap = AHashMap::new(); + let mut all_delegator_pool_balances = vec![]; + let mut all_current_delegator_pool_balances = AHashMap::new(); + + let mut active_pool_to_staking_pool = AHashMap::new(); + // structs needed to get delegated voters + let mut all_current_delegated_voter = AHashMap::new(); + let mut all_vote_delegation_handle_to_pool_address = AHashMap::new(); + + for txn in &transactions { + // Add votes data + let current_stake_pool_voter = CurrentStakingPoolVoter::from_transaction(txn).unwrap(); + all_current_stake_pool_voters.extend(current_stake_pool_voter); + let mut proposal_votes = ProposalVote::from_transaction(txn).unwrap(); + all_proposal_votes.append(&mut proposal_votes); + + // Add delegator activities + let mut delegator_activities = DelegatedStakingActivity::from_transaction(txn).unwrap(); + all_delegator_activities.append(&mut delegator_activities); + + // Add delegator pools + let (delegator_pools, mut delegator_pool_balances, current_delegator_pool_balances) = + DelegatorPool::from_transaction(txn).unwrap(); + all_delegator_pools.extend(delegator_pools); + all_delegator_pool_balances.append(&mut delegator_pool_balances); + all_current_delegator_pool_balances.extend(current_delegator_pool_balances); + + // Moving the transaction code here is the new paradigm to avoid redoing a lot of the duplicate work + // Currently only delegator voting follows this paradigm + // TODO: refactor all the other staking code to follow this paradigm + let txn_version = txn.version as i64; + let txn_timestamp = parse_timestamp(txn.timestamp.as_ref().unwrap(), txn_version); + let transaction_info = txn.info.as_ref().expect("Transaction info doesn't exist!"); + // adding some metadata for subsequent parsing + for wsc in &transaction_info.changes { + if let Change::WriteResource(write_resource) = wsc.change.as_ref().unwrap() { + if let Some(DelegationVoteGovernanceRecordsResource::GovernanceRecords(inner)) = + DelegationVoteGovernanceRecordsResource::from_write_resource( + write_resource, + txn_version, + )? 
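// Aside (illustrative only, not part of this diff): the query_retries and query_retry_delay_ms
// values read from the config above are threaded into the model helpers that have to look rows
// up from postgres. A typical retry loop under those two knobs looks roughly like this;
// `lookup` is a stand-in, not a helper from this crate.
async fn with_retries<T, Fut>(
    mut lookup: impl FnMut() -> Fut,
    query_retries: u32,
    query_retry_delay_ms: u64,
) -> anyhow::Result<T>
where
    Fut: std::future::Future<Output = anyhow::Result<T>>,
{
    let mut attempts = 0u32;
    loop {
        match lookup().await {
            Ok(value) => return Ok(value),
            Err(e) if attempts + 1 >= query_retries => return Err(e),
            Err(_) => {
                attempts += 1;
                tokio::time::sleep(std::time::Duration::from_millis(query_retry_delay_ms)).await;
            },
        }
    }
}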
+ { + let delegation_pool_address = + standardize_address(&write_resource.address.to_string()); + let vote_delegation_handle = + inner.vote_delegation.buckets.inner.get_handle(); + + all_vote_delegation_handle_to_pool_address + .insert(vote_delegation_handle, delegation_pool_address.clone()); + } + if let Some(map) = + CurrentDelegatorBalance::get_active_pool_to_staking_pool_mapping( + write_resource, + txn_version, + ) + .unwrap() + { + active_pool_to_staking_pool.extend(map); + } + } + } + + // Add delegator balances + let (mut delegator_balances, current_delegator_balances) = + CurrentDelegatorBalance::from_transaction( + txn, + &active_pool_to_staking_pool, + &mut conn, + query_retries, + query_retry_delay_ms, + ) + .await + .unwrap(); + all_delegator_balances.append(&mut delegator_balances); + all_current_delegator_balances.extend(current_delegator_balances); + + // this write table item indexing is to get delegator address, table handle, and voter & pending voter + for wsc in &transaction_info.changes { + if let Change::WriteTableItem(write_table_item) = wsc.change.as_ref().unwrap() { + let voter_map = CurrentDelegatedVoter::from_write_table_item( + write_table_item, + txn_version, + txn_timestamp, + &all_vote_delegation_handle_to_pool_address, + &mut conn, + query_retries, + query_retry_delay_ms, + ) + .await + .unwrap(); + + all_current_delegated_voter.extend(voter_map); + } + } + + // we need one last loop to prefill delegators that got in before the delegated voting contract was deployed + for wsc in &transaction_info.changes { + if let Change::WriteTableItem(write_table_item) = wsc.change.as_ref().unwrap() { + if let Some(voter) = + CurrentDelegatedVoter::get_delegators_pre_contract_deployment( + write_table_item, + txn_version, + txn_timestamp, + &active_pool_to_staking_pool, + &all_current_delegated_voter, + &mut conn, + query_retries, + query_retry_delay_ms, + ) + .await + .unwrap() + { + all_current_delegated_voter.insert(voter.pk(), voter); + } + } + } + } + + // Getting list of values and sorting by pk in order to avoid postgres deadlock since we're doing multi threaded db writes + let mut all_current_stake_pool_voters = all_current_stake_pool_voters + .into_values() + .collect::>(); + let mut all_current_delegator_balances = all_current_delegator_balances + .into_values() + .collect::>(); + let mut all_delegator_pools = all_delegator_pools + .into_values() + .collect::>(); + let mut all_current_delegator_pool_balances = all_current_delegator_pool_balances + .into_values() + .collect::>(); + let mut all_current_delegated_voter = all_current_delegated_voter + .into_values() + .collect::>(); + + // Sort by PK + all_current_stake_pool_voters + .sort_by(|a, b| a.staking_pool_address.cmp(&b.staking_pool_address)); + all_current_delegator_balances.sort_by(|a, b| { + (&a.delegator_address, &a.pool_address, &a.pool_type).cmp(&( + &b.delegator_address, + &b.pool_address, + &b.pool_type, + )) + }); + all_delegator_pools.sort_by(|a, b| a.staking_pool_address.cmp(&b.staking_pool_address)); + all_current_delegator_pool_balances + .sort_by(|a, b| a.staking_pool_address.cmp(&b.staking_pool_address)); + all_current_delegated_voter.sort(); + + let processing_duration_in_secs = processing_start.elapsed().as_secs_f64(); + let db_insertion_start = std::time::Instant::now(); + + let tx_result = insert_to_db( + self.get_pool(), + self.name(), + start_version, + end_version, + &all_current_stake_pool_voters, + &all_proposal_votes, + &all_delegator_activities, + &all_delegator_balances, + 
&all_current_delegator_balances, + &all_delegator_pools, + &all_delegator_pool_balances, + &all_current_delegator_pool_balances, + &all_current_delegated_voter, + &self.per_table_chunk_sizes, + ) + .await; + let db_insertion_duration_in_secs = db_insertion_start.elapsed().as_secs_f64(); + match tx_result { + Ok(_) => Ok(ProcessingResult::DefaultProcessingResult( + DefaultProcessingResult { + start_version, + end_version, + processing_duration_in_secs, + db_insertion_duration_in_secs, + last_transaction_timestamp, + }, + )), + Err(e) => { + error!( + start_version = start_version, + end_version = end_version, + processor_name = self.name(), + error = ?e, + "[Parser] Error inserting transactions to db", + ); + bail!(e) + }, + } + } + + fn connection_pool(&self) -> &ArcDbPool { + &self.connection_pool + } +} diff --git a/rust/processor/src/processors/token_processor.rs b/rust/processor/src/processors/token_processor.rs new file mode 100644 index 000000000..cd5411b28 --- /dev/null +++ b/rust/processor/src/processors/token_processor.rs @@ -0,0 +1,610 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use super::{DefaultProcessingResult, ProcessorName, ProcessorTrait}; +use crate::{ + db::common::models::token_models::{ + collection_datas::{CollectionData, CurrentCollectionData}, + nft_points::NftPoints, + token_activities::TokenActivity, + token_claims::CurrentTokenPendingClaim, + token_datas::{CurrentTokenData, TokenData}, + token_ownerships::{CurrentTokenOwnership, TokenOwnership}, + tokens::{ + CurrentTokenOwnershipPK, CurrentTokenPendingClaimPK, TableMetadataForToken, Token, + TokenDataIdHash, + }, + }, + gap_detectors::ProcessingResult, + schema, + utils::database::{execute_in_chunks, get_config_table_chunk_size, ArcDbPool}, + IndexerGrpcProcessorConfig, +}; +use ahash::AHashMap; +use anyhow::bail; +use aptos_protos::transaction::v1::Transaction; +use async_trait::async_trait; +use diesel::{ + pg::{upsert::excluded, Pg}, + query_builder::QueryFragment, + ExpressionMethods, +}; +use serde::{Deserialize, Serialize}; +use std::fmt::Debug; +use tracing::error; + +#[derive(Clone, Debug, Deserialize, Serialize)] +#[serde(deny_unknown_fields)] +pub struct TokenProcessorConfig { + pub nft_points_contract: Option, + #[serde(default = "IndexerGrpcProcessorConfig::default_query_retries")] + pub query_retries: u32, + #[serde(default = "IndexerGrpcProcessorConfig::default_query_retry_delay_ms")] + pub query_retry_delay_ms: u64, +} + +pub struct TokenProcessor { + connection_pool: ArcDbPool, + config: TokenProcessorConfig, + per_table_chunk_sizes: AHashMap, +} + +impl TokenProcessor { + pub fn new( + connection_pool: ArcDbPool, + config: TokenProcessorConfig, + per_table_chunk_sizes: AHashMap, + ) -> Self { + Self { + connection_pool, + config, + per_table_chunk_sizes, + } + } +} + +impl Debug for TokenProcessor { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let state = &self.connection_pool.state(); + write!( + f, + "TokenTransactionProcessor {{ connections: {:?} idle_connections: {:?} }}", + state.connections, state.idle_connections + ) + } +} + +async fn insert_to_db( + conn: ArcDbPool, + name: &'static str, + start_version: u64, + end_version: u64, + (tokens, token_ownerships, token_datas, collection_datas): ( + &[Token], + &[TokenOwnership], + &[TokenData], + &[CollectionData], + ), + (current_token_ownerships, current_token_datas, current_collection_datas): ( + &[CurrentTokenOwnership], + &[CurrentTokenData], + &[CurrentCollectionData], + ), 
+ token_activities: &[TokenActivity], + current_token_claims: &[CurrentTokenPendingClaim], + nft_points: &[NftPoints], + per_table_chunk_sizes: &AHashMap, +) -> Result<(), diesel::result::Error> { + tracing::trace!( + name = name, + start_version = start_version, + end_version = end_version, + "Inserting to db", + ); + + let t = execute_in_chunks( + conn.clone(), + insert_tokens_query, + tokens, + get_config_table_chunk_size::("tokens", per_table_chunk_sizes), + ); + let to = execute_in_chunks( + conn.clone(), + insert_token_ownerships_query, + token_ownerships, + get_config_table_chunk_size::("token_ownerships", per_table_chunk_sizes), + ); + let td = execute_in_chunks( + conn.clone(), + insert_token_datas_query, + token_datas, + get_config_table_chunk_size::("token_datas", per_table_chunk_sizes), + ); + let cd = execute_in_chunks( + conn.clone(), + insert_collection_datas_query, + collection_datas, + get_config_table_chunk_size::("collection_datas", per_table_chunk_sizes), + ); + let cto = execute_in_chunks( + conn.clone(), + insert_current_token_ownerships_query, + current_token_ownerships, + get_config_table_chunk_size::( + "current_token_ownerships", + per_table_chunk_sizes, + ), + ); + let ctd = execute_in_chunks( + conn.clone(), + insert_current_token_datas_query, + current_token_datas, + get_config_table_chunk_size::( + "current_token_datas", + per_table_chunk_sizes, + ), + ); + let ccd = execute_in_chunks( + conn.clone(), + insert_current_collection_datas_query, + current_collection_datas, + get_config_table_chunk_size::( + "current_collection_datas", + per_table_chunk_sizes, + ), + ); + + let ta = execute_in_chunks( + conn.clone(), + insert_token_activities_query, + token_activities, + get_config_table_chunk_size::("token_activities", per_table_chunk_sizes), + ); + + let ctc = execute_in_chunks( + conn.clone(), + insert_current_token_claims_query, + current_token_claims, + get_config_table_chunk_size::( + "current_token_pending_claims", + per_table_chunk_sizes, + ), + ); + let np = execute_in_chunks( + conn, + insert_nft_points_query, + nft_points, + get_config_table_chunk_size::("nft_points", per_table_chunk_sizes), + ); + + let (t_res, to_res, td_res, cd_res, cto_res, ctd_res, ccd_res, ta_res, ctc_res, np) = + tokio::join!(t, to, td, cd, cto, ctd, ccd, ta, ctc, np); + + for res in [ + t_res, to_res, td_res, cd_res, cto_res, ctd_res, ccd_res, ta_res, ctc_res, np, + ] { + res?; + } + Ok(()) +} + +fn insert_tokens_query( + tokens_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::tokens::dsl::*; + ( + diesel::insert_into(schema::tokens::table) + .values(tokens_to_insert) + .on_conflict((token_data_id_hash, property_version, transaction_version)) + .do_nothing(), + None, + ) +} + +fn insert_token_ownerships_query( + token_ownerships_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::token_ownerships::dsl::*; + + ( + diesel::insert_into(schema::token_ownerships::table) + .values(token_ownerships_to_insert) + .on_conflict(( + token_data_id_hash, + property_version, + transaction_version, + table_handle, + )) + .do_nothing(), + None, + ) +} + +fn insert_token_datas_query( + token_datas_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::token_datas::dsl::*; + ( + diesel::insert_into(schema::token_datas::table) + .values(token_datas_to_insert) + 
.on_conflict((token_data_id_hash, transaction_version)) + .do_nothing(), + None, + ) +} + +fn insert_collection_datas_query( + collection_datas_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::collection_datas::dsl::*; + + ( + diesel::insert_into(schema::collection_datas::table) + .values(collection_datas_to_insert) + .on_conflict((collection_data_id_hash, transaction_version)) + .do_nothing(), + None, + ) +} + +fn insert_current_token_ownerships_query( + items_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::current_token_ownerships::dsl::*; + + (diesel::insert_into(schema::current_token_ownerships::table) + .values(items_to_insert) + .on_conflict((token_data_id_hash, property_version, owner_address)) + .do_update() + .set(( + creator_address.eq(excluded(creator_address)), + collection_name.eq(excluded(collection_name)), + name.eq(excluded(name)), + amount.eq(excluded(amount)), + token_properties.eq(excluded(token_properties)), + last_transaction_version.eq(excluded(last_transaction_version)), + collection_data_id_hash.eq(excluded(collection_data_id_hash)), + table_type.eq(excluded(table_type)), + inserted_at.eq(excluded(inserted_at)), + )), + Some(" WHERE current_token_ownerships.last_transaction_version <= excluded.last_transaction_version "), + ) +} + +fn insert_current_token_datas_query( + items_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::current_token_datas::dsl::*; + (diesel::insert_into(schema::current_token_datas::table) + .values(items_to_insert) + .on_conflict(token_data_id_hash) + .do_update() + .set(( + creator_address.eq(excluded(creator_address)), + collection_name.eq(excluded(collection_name)), + name.eq(excluded(name)), + maximum.eq(excluded(maximum)), + supply.eq(excluded(supply)), + largest_property_version.eq(excluded(largest_property_version)), + metadata_uri.eq(excluded(metadata_uri)), + payee_address.eq(excluded(payee_address)), + royalty_points_numerator.eq(excluded(royalty_points_numerator)), + royalty_points_denominator.eq(excluded(royalty_points_denominator)), + maximum_mutable.eq(excluded(maximum_mutable)), + uri_mutable.eq(excluded(uri_mutable)), + description_mutable.eq(excluded(description_mutable)), + properties_mutable.eq(excluded(properties_mutable)), + royalty_mutable.eq(excluded(royalty_mutable)), + default_properties.eq(excluded(default_properties)), + last_transaction_version.eq(excluded(last_transaction_version)), + collection_data_id_hash.eq(excluded(collection_data_id_hash)), + description.eq(excluded(description)), + inserted_at.eq(excluded(inserted_at)), + )), + Some(" WHERE current_token_datas.last_transaction_version <= excluded.last_transaction_version "), + ) +} + +fn insert_current_collection_datas_query( + items_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::current_collection_datas::dsl::*; + + (diesel::insert_into(schema::current_collection_datas::table) + .values(items_to_insert) + .on_conflict(collection_data_id_hash) + .do_update() + .set(( + creator_address.eq(excluded(creator_address)), + collection_name.eq(excluded(collection_name)), + description.eq(excluded(description)), + metadata_uri.eq(excluded(metadata_uri)), + supply.eq(excluded(supply)), + maximum.eq(excluded(maximum)), + 
maximum_mutable.eq(excluded(maximum_mutable)), + uri_mutable.eq(excluded(uri_mutable)), + description_mutable.eq(excluded(description_mutable)), + last_transaction_version.eq(excluded(last_transaction_version)), + table_handle.eq(excluded(table_handle)), + inserted_at.eq(excluded(inserted_at)), + )), + Some(" WHERE current_collection_datas.last_transaction_version <= excluded.last_transaction_version "), + ) +} + +fn insert_token_activities_query( + items_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::token_activities::dsl::*; + + ( + diesel::insert_into(schema::token_activities::table) + .values(items_to_insert) + .on_conflict(( + transaction_version, + event_account_address, + event_creation_number, + event_sequence_number, + )) + .do_nothing(), + None, + ) +} + +fn insert_current_token_claims_query( + items_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::current_token_pending_claims::dsl::*; + + ( + diesel::insert_into(schema::current_token_pending_claims::table) + .values(items_to_insert) + .on_conflict(( + token_data_id_hash, property_version, from_address, to_address + )) + .do_update() + .set(( + collection_data_id_hash.eq(excluded(collection_data_id_hash)), + creator_address.eq(excluded(creator_address)), + collection_name.eq(excluded(collection_name)), + name.eq(excluded(name)), + amount.eq(excluded(amount)), + table_handle.eq(excluded(table_handle)), + last_transaction_version.eq(excluded(last_transaction_version)), + inserted_at.eq(excluded(inserted_at)), + token_data_id.eq(excluded(token_data_id)), + collection_id.eq(excluded(collection_id)), + )), + Some(" WHERE current_token_pending_claims.last_transaction_version <= excluded.last_transaction_version "), + ) +} + +fn insert_nft_points_query( + items_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::nft_points::dsl::*; + + ( + diesel::insert_into(schema::nft_points::table) + .values(items_to_insert) + .on_conflict(transaction_version) + .do_nothing(), + None, + ) +} + +#[async_trait] +impl ProcessorTrait for TokenProcessor { + fn name(&self) -> &'static str { + ProcessorName::TokenProcessor.into() + } + + async fn process_transactions( + &self, + transactions: Vec, + start_version: u64, + end_version: u64, + _: Option, + ) -> anyhow::Result { + let processing_start = std::time::Instant::now(); + let last_transaction_timestamp = transactions.last().unwrap().timestamp.clone(); + + let mut conn = self.get_conn().await; + let query_retries = self.config.query_retries; + let query_retry_delay_ms = self.config.query_retry_delay_ms; + + // First get all token related table metadata from the batch of transactions. This is in case + // an earlier transaction has metadata (in resources) that's missing from a later transaction. 
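// Aside (illustrative only, not part of this diff): the comment above describes a batch-wide
// pre-pass, i.e. scan every transaction once to build a handle-to-owner lookup so a later
// transaction in the batch can resolve metadata written by an earlier one. Reduced sketch with
// illustrative input types; the first-writer-wins choice is an assumption of the sketch.
fn build_handle_to_owner(
    batch: &[(i64, Vec<(String, String)>)],
) -> ahash::AHashMap<String, String> {
    let mut handle_to_owner = ahash::AHashMap::new();
    for (_txn_version, resource_writes) in batch {
        for (table_handle, owner_address) in resource_writes {
            // Keep the first owner seen for a handle; later writes only fill in gaps.
            handle_to_owner
                .entry(table_handle.clone())
                .or_insert_with(|| owner_address.clone());
        }
    }
    handle_to_owner
}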
+ let table_handle_to_owner = + TableMetadataForToken::get_table_handle_to_owner_from_transactions(&transactions); + + // Token V1 only, this section will be deprecated soon + let mut all_tokens = vec![]; + let mut all_token_ownerships = vec![]; + let mut all_token_datas = vec![]; + let mut all_collection_datas = vec![]; + let mut all_token_activities = vec![]; + + // Hashmap key will be the PK of the table, we do not want to send duplicates writes to the db within a batch + let mut all_current_token_ownerships: AHashMap< + CurrentTokenOwnershipPK, + CurrentTokenOwnership, + > = AHashMap::new(); + let mut all_current_token_datas: AHashMap = + AHashMap::new(); + let mut all_current_collection_datas: AHashMap = + AHashMap::new(); + let mut all_current_token_claims: AHashMap< + CurrentTokenPendingClaimPK, + CurrentTokenPendingClaim, + > = AHashMap::new(); + + // This is likely temporary + let mut all_nft_points = vec![]; + + for txn in &transactions { + let ( + mut tokens, + mut token_ownerships, + mut token_datas, + mut collection_datas, + current_token_ownerships, + current_token_datas, + current_collection_datas, + current_token_claims, + ) = Token::from_transaction( + txn, + &table_handle_to_owner, + &mut conn, + query_retries, + query_retry_delay_ms, + ) + .await; + all_tokens.append(&mut tokens); + all_token_ownerships.append(&mut token_ownerships); + all_token_datas.append(&mut token_datas); + all_collection_datas.append(&mut collection_datas); + // Given versions will always be increasing here (within a single batch), we can just override current values + all_current_token_ownerships.extend(current_token_ownerships); + all_current_token_datas.extend(current_token_datas); + all_current_collection_datas.extend(current_collection_datas); + + // Track token activities + let mut activities = TokenActivity::from_transaction(txn); + all_token_activities.append(&mut activities); + + // claims + all_current_token_claims.extend(current_token_claims); + + // NFT points + let nft_points_txn = + NftPoints::from_transaction(txn, self.config.nft_points_contract.clone()); + if let Some(nft_points) = nft_points_txn { + all_nft_points.push(nft_points); + } + } + + // Getting list of values and sorting by pk in order to avoid postgres deadlock since we're doing multi threaded db writes + let mut all_current_token_ownerships = all_current_token_ownerships + .into_values() + .collect::>(); + let mut all_current_token_datas = all_current_token_datas + .into_values() + .collect::>(); + let mut all_current_collection_datas = all_current_collection_datas + .into_values() + .collect::>(); + let mut all_current_token_claims = all_current_token_claims + .into_values() + .collect::>(); + + // Sort by PK + all_current_token_ownerships.sort_by(|a, b| { + (&a.token_data_id_hash, &a.property_version, &a.owner_address).cmp(&( + &b.token_data_id_hash, + &b.property_version, + &b.owner_address, + )) + }); + all_current_token_datas.sort_by(|a, b| a.token_data_id_hash.cmp(&b.token_data_id_hash)); + all_current_collection_datas + .sort_by(|a, b| a.collection_data_id_hash.cmp(&b.collection_data_id_hash)); + all_current_token_claims.sort_by(|a, b| { + ( + &a.token_data_id_hash, + &a.property_version, + &a.from_address, + &a.to_address, + ) + .cmp(&( + &b.token_data_id_hash, + &b.property_version, + &b.from_address, + &a.to_address, + )) + }); + + let processing_duration_in_secs = processing_start.elapsed().as_secs_f64(); + let db_insertion_start = std::time::Instant::now(); + + let tx_result = insert_to_db( + 
self.get_pool(), + self.name(), + start_version, + end_version, + ( + &all_tokens, + &all_token_ownerships, + &all_token_datas, + &all_collection_datas, + ), + ( + &all_current_token_ownerships, + &all_current_token_datas, + &all_current_collection_datas, + ), + &all_token_activities, + &all_current_token_claims, + &all_nft_points, + &self.per_table_chunk_sizes, + ) + .await; + + let db_insertion_duration_in_secs = db_insertion_start.elapsed().as_secs_f64(); + match tx_result { + Ok(_) => Ok(ProcessingResult::DefaultProcessingResult( + DefaultProcessingResult { + start_version, + end_version, + processing_duration_in_secs, + db_insertion_duration_in_secs, + last_transaction_timestamp, + }, + )), + Err(e) => { + error!( + start_version = start_version, + end_version = end_version, + processor_name = self.name(), + error = ?e, + "[Parser] Error inserting transactions to db", + ); + bail!(e) + }, + } + } + + fn connection_pool(&self) -> &ArcDbPool { + &self.connection_pool + } +} diff --git a/rust/processor/src/processors/token_v2_processor.rs b/rust/processor/src/processors/token_v2_processor.rs new file mode 100644 index 000000000..32530f9bb --- /dev/null +++ b/rust/processor/src/processors/token_v2_processor.rs @@ -0,0 +1,1215 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use super::{DefaultProcessingResult, ProcessorName, ProcessorTrait}; +use crate::{ + db::common::models::{ + fungible_asset_models::v2_fungible_asset_utils::FungibleAssetMetadata, + object_models::v2_object_utils::{ + ObjectAggregatedData, ObjectAggregatedDataMapping, ObjectWithMetadata, Untransferable, + }, + token_models::tokens::{TableHandleToOwner, TableMetadataForToken}, + token_v2_models::{ + v1_token_royalty::CurrentTokenRoyaltyV1, + v2_collections::{CollectionV2, CurrentCollectionV2, CurrentCollectionV2PK}, + v2_token_activities::TokenActivityV2, + v2_token_datas::{CurrentTokenDataV2, CurrentTokenDataV2PK, TokenDataV2}, + v2_token_metadata::{CurrentTokenV2Metadata, CurrentTokenV2MetadataPK}, + v2_token_ownerships::{ + CurrentTokenOwnershipV2, CurrentTokenOwnershipV2PK, NFTOwnershipV2, + TokenOwnershipV2, + }, + v2_token_utils::{ + AptosCollection, Burn, BurnEvent, ConcurrentSupply, FixedSupply, MintEvent, + PropertyMapModel, TokenIdentifiers, TokenV2, TokenV2Burned, TokenV2Minted, + TransferEvent, UnlimitedSupply, + }, + }, + }, + gap_detectors::ProcessingResult, + schema, + utils::{ + counters::PROCESSOR_UNKNOWN_TYPE_COUNT, + database::{execute_in_chunks, get_config_table_chunk_size, ArcDbPool, DbPoolConnection}, + util::{get_entry_function_from_user_request, parse_timestamp, standardize_address}, + }, + IndexerGrpcProcessorConfig, +}; +use ahash::{AHashMap, AHashSet}; +use anyhow::bail; +use aptos_protos::transaction::v1::{transaction::TxnData, write_set_change::Change, Transaction}; +use async_trait::async_trait; +use diesel::{ + pg::{upsert::excluded, Pg}, + query_builder::QueryFragment, + ExpressionMethods, +}; +use serde::{Deserialize, Serialize}; +use std::fmt::Debug; +use tracing::error; + +#[derive(Clone, Debug, Deserialize, Serialize)] +#[serde(deny_unknown_fields)] +pub struct TokenV2ProcessorConfig { + #[serde(default = "IndexerGrpcProcessorConfig::default_query_retries")] + pub query_retries: u32, + #[serde(default = "IndexerGrpcProcessorConfig::default_query_retry_delay_ms")] + pub query_retry_delay_ms: u64, +} + +pub struct TokenV2Processor { + connection_pool: ArcDbPool, + config: TokenV2ProcessorConfig, + per_table_chunk_sizes: AHashMap, +} + +impl TokenV2Processor 
{ + pub fn new( + connection_pool: ArcDbPool, + config: TokenV2ProcessorConfig, + per_table_chunk_sizes: AHashMap, + ) -> Self { + Self { + connection_pool, + config, + per_table_chunk_sizes, + } + } +} + +impl Debug for TokenV2Processor { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let state = &self.connection_pool.state(); + write!( + f, + "TokenV2TransactionProcessor {{ connections: {:?} idle_connections: {:?} }}", + state.connections, state.idle_connections + ) + } +} + +async fn insert_to_db( + conn: ArcDbPool, + name: &'static str, + start_version: u64, + end_version: u64, + collections_v2: &[CollectionV2], + token_datas_v2: &[TokenDataV2], + token_ownerships_v2: &[TokenOwnershipV2], + current_collections_v2: &[CurrentCollectionV2], + (current_token_datas_v2, current_deleted_token_datas_v2): ( + &[CurrentTokenDataV2], + &[CurrentTokenDataV2], + ), + (current_token_ownerships_v2, current_deleted_token_ownerships_v2): ( + &[CurrentTokenOwnershipV2], + &[CurrentTokenOwnershipV2], + ), + token_activities_v2: &[TokenActivityV2], + current_token_v2_metadata: &[CurrentTokenV2Metadata], + current_token_royalties_v1: &[CurrentTokenRoyaltyV1], + per_table_chunk_sizes: &AHashMap, +) -> Result<(), diesel::result::Error> { + tracing::trace!( + name = name, + start_version = start_version, + end_version = end_version, + "Inserting to db", + ); + + let coll_v2 = execute_in_chunks( + conn.clone(), + insert_collections_v2_query, + collections_v2, + get_config_table_chunk_size::("collections_v2", per_table_chunk_sizes), + ); + let td_v2 = execute_in_chunks( + conn.clone(), + insert_token_datas_v2_query, + token_datas_v2, + get_config_table_chunk_size::("token_datas_v2", per_table_chunk_sizes), + ); + let to_v2 = execute_in_chunks( + conn.clone(), + insert_token_ownerships_v2_query, + token_ownerships_v2, + get_config_table_chunk_size::( + "token_ownerships_v2", + per_table_chunk_sizes, + ), + ); + let cc_v2 = execute_in_chunks( + conn.clone(), + insert_current_collections_v2_query, + current_collections_v2, + get_config_table_chunk_size::( + "current_collections_v2", + per_table_chunk_sizes, + ), + ); + let ctd_v2 = execute_in_chunks( + conn.clone(), + insert_current_token_datas_v2_query, + current_token_datas_v2, + get_config_table_chunk_size::( + "current_token_datas_v2", + per_table_chunk_sizes, + ), + ); + let cdtd_v2 = execute_in_chunks( + conn.clone(), + insert_current_deleted_token_datas_v2_query, + current_deleted_token_datas_v2, + get_config_table_chunk_size::( + "current_token_datas_v2", + per_table_chunk_sizes, + ), + ); + let cto_v2 = execute_in_chunks( + conn.clone(), + insert_current_token_ownerships_v2_query, + current_token_ownerships_v2, + get_config_table_chunk_size::( + "current_token_ownerships_v2", + per_table_chunk_sizes, + ), + ); + let cdto_v2 = execute_in_chunks( + conn.clone(), + insert_current_deleted_token_ownerships_v2_query, + current_deleted_token_ownerships_v2, + get_config_table_chunk_size::( + "current_token_ownerships_v2", + per_table_chunk_sizes, + ), + ); + let ta_v2 = execute_in_chunks( + conn.clone(), + insert_token_activities_v2_query, + token_activities_v2, + get_config_table_chunk_size::( + "token_activities_v2", + per_table_chunk_sizes, + ), + ); + let ct_v2 = execute_in_chunks( + conn.clone(), + insert_current_token_v2_metadatas_query, + current_token_v2_metadata, + get_config_table_chunk_size::( + "current_token_v2_metadata", + per_table_chunk_sizes, + ), + ); + let ctr_v1 = execute_in_chunks( + conn, + 
insert_current_token_royalties_v1_query, + current_token_royalties_v1, + get_config_table_chunk_size::( + "current_token_royalty_v1", + per_table_chunk_sizes, + ), + ); + + let ( + coll_v2_res, + td_v2_res, + to_v2_res, + cc_v2_res, + ctd_v2_res, + cdtd_v2_res, + cto_v2_res, + cdto_v2_res, + ta_v2_res, + ct_v2_res, + ctr_v1_res, + ) = tokio::join!( + coll_v2, td_v2, to_v2, cc_v2, ctd_v2, cdtd_v2, cto_v2, cdto_v2, ta_v2, ct_v2, ctr_v1 + ); + + for res in [ + coll_v2_res, + td_v2_res, + to_v2_res, + cc_v2_res, + ctd_v2_res, + cdtd_v2_res, + cto_v2_res, + cdto_v2_res, + ta_v2_res, + ct_v2_res, + ctr_v1_res, + ] { + res?; + } + + Ok(()) +} + +fn insert_collections_v2_query( + items_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::collections_v2::dsl::*; + ( + diesel::insert_into(schema::collections_v2::table) + .values(items_to_insert) + .on_conflict((transaction_version, write_set_change_index)) + .do_nothing(), + None, + ) +} + +fn insert_token_datas_v2_query( + items_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::token_datas_v2::dsl::*; + + ( + diesel::insert_into(schema::token_datas_v2::table) + .values(items_to_insert) + .on_conflict((transaction_version, write_set_change_index)) + .do_update() + .set(( + maximum.eq(excluded(maximum)), + supply.eq(excluded(supply)), + is_fungible_v2.eq(excluded(is_fungible_v2)), + inserted_at.eq(excluded(inserted_at)), + decimals.eq(excluded(decimals)), + )), + None, + ) +} + +fn insert_token_ownerships_v2_query( + items_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::token_ownerships_v2::dsl::*; + + ( + diesel::insert_into(schema::token_ownerships_v2::table) + .values(items_to_insert) + .on_conflict((transaction_version, write_set_change_index)) + .do_update() + .set(( + is_fungible_v2.eq(excluded(is_fungible_v2)), + inserted_at.eq(excluded(inserted_at)), + )), + None, + ) +} + +fn insert_current_collections_v2_query( + items_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::current_collections_v2::dsl::*; + + ( + diesel::insert_into(schema::current_collections_v2::table) + .values(items_to_insert) + .on_conflict(collection_id) + .do_update() + .set(( + creator_address.eq(excluded(creator_address)), + collection_name.eq(excluded(collection_name)), + description.eq(excluded(description)), + uri.eq(excluded(uri)), + current_supply.eq(excluded(current_supply)), + max_supply.eq(excluded(max_supply)), + total_minted_v2.eq(excluded(total_minted_v2)), + mutable_description.eq(excluded(mutable_description)), + mutable_uri.eq(excluded(mutable_uri)), + table_handle_v1.eq(excluded(table_handle_v1)), + token_standard.eq(excluded(token_standard)), + last_transaction_version.eq(excluded(last_transaction_version)), + last_transaction_timestamp.eq(excluded(last_transaction_timestamp)), + inserted_at.eq(excluded(inserted_at)), + )), + Some(" WHERE current_collections_v2.last_transaction_version <= excluded.last_transaction_version "), + ) +} + +fn insert_current_token_datas_v2_query( + items_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::current_token_datas_v2::dsl::*; + + ( + diesel::insert_into(schema::current_token_datas_v2::table) + .values(items_to_insert) + 
.on_conflict(token_data_id) + .do_update() + .set(( + collection_id.eq(excluded(collection_id)), + token_name.eq(excluded(token_name)), + maximum.eq(excluded(maximum)), + supply.eq(excluded(supply)), + largest_property_version_v1.eq(excluded(largest_property_version_v1)), + token_uri.eq(excluded(token_uri)), + description.eq(excluded(description)), + token_properties.eq(excluded(token_properties)), + token_standard.eq(excluded(token_standard)), + is_fungible_v2.eq(excluded(is_fungible_v2)), + last_transaction_version.eq(excluded(last_transaction_version)), + last_transaction_timestamp.eq(excluded(last_transaction_timestamp)), + inserted_at.eq(excluded(inserted_at)), + decimals.eq(excluded(decimals)), + // Intentionally not including is_deleted because it should always be true in this part + // and doesn't need to override + )), + Some(" WHERE current_token_datas_v2.last_transaction_version <= excluded.last_transaction_version "), + ) +} + +fn insert_current_deleted_token_datas_v2_query( + items_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::current_token_datas_v2::dsl::*; + + ( + diesel::insert_into(schema::current_token_datas_v2::table) + .values(items_to_insert) + .on_conflict(token_data_id) + .do_update() + .set(( + last_transaction_version.eq(excluded(last_transaction_version)), + last_transaction_timestamp.eq(excluded(last_transaction_timestamp)), + inserted_at.eq(excluded(inserted_at)), + is_deleted_v2.eq(excluded(is_deleted_v2)), + )), + Some(" WHERE current_token_datas_v2.last_transaction_version <= excluded.last_transaction_version "), + ) +} + +fn insert_current_token_ownerships_v2_query( + items_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::current_token_ownerships_v2::dsl::*; + + ( + diesel::insert_into(schema::current_token_ownerships_v2::table) + .values(items_to_insert) + .on_conflict((token_data_id, property_version_v1, owner_address, storage_id)) + .do_update() + .set(( + amount.eq(excluded(amount)), + table_type_v1.eq(excluded(table_type_v1)), + token_properties_mutated_v1.eq(excluded(token_properties_mutated_v1)), + is_soulbound_v2.eq(excluded(is_soulbound_v2)), + token_standard.eq(excluded(token_standard)), + is_fungible_v2.eq(excluded(is_fungible_v2)), + last_transaction_version.eq(excluded(last_transaction_version)), + last_transaction_timestamp.eq(excluded(last_transaction_timestamp)), + inserted_at.eq(excluded(inserted_at)), + non_transferrable_by_owner.eq(excluded(non_transferrable_by_owner)), + )), + Some(" WHERE current_token_ownerships_v2.last_transaction_version <= excluded.last_transaction_version "), + ) +} + +fn insert_current_deleted_token_ownerships_v2_query( + items_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::current_token_ownerships_v2::dsl::*; + + ( + diesel::insert_into(schema::current_token_ownerships_v2::table) + .values(items_to_insert) + .on_conflict((token_data_id, property_version_v1, owner_address, storage_id)) + .do_update() + .set(( + amount.eq(excluded(amount)), + last_transaction_version.eq(excluded(last_transaction_version)), + last_transaction_timestamp.eq(excluded(last_transaction_timestamp)), + is_fungible_v2.eq(excluded(is_fungible_v2)), + inserted_at.eq(excluded(inserted_at)), + )), + Some(" WHERE current_token_ownerships_v2.last_transaction_version <= excluded.last_transaction_version "), 
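+        // Descriptive note: the trailing Option<&'static str> above is appended verbatim by
+        // UpsertFilterLatestTransactionQuery (utils/database.rs), so the generated upsert is, roughly:
+        //   INSERT INTO current_token_ownerships_v2 (...) VALUES (...)
+        //   ON CONFLICT (token_data_id, property_version_v1, owner_address, storage_id)
+        //   DO UPDATE SET ...
+        //   WHERE current_token_ownerships_v2.last_transaction_version
+        //       <= excluded.last_transaction_version;
+        // which prevents an older, out-of-order batch from overwriting newer "current" state.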
+ ) +} + +fn insert_token_activities_v2_query( + items_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::token_activities_v2::dsl::*; + + ( + diesel::insert_into(schema::token_activities_v2::table) + .values(items_to_insert) + .on_conflict((transaction_version, event_index)) + .do_update() + .set(( + is_fungible_v2.eq(excluded(is_fungible_v2)), + inserted_at.eq(excluded(inserted_at)), + )), + None, + ) +} + +fn insert_current_token_v2_metadatas_query( + items_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::current_token_v2_metadata::dsl::*; + + ( + diesel::insert_into(schema::current_token_v2_metadata::table) + .values(items_to_insert) + .on_conflict((object_address, resource_type)) + .do_update() + .set(( + data.eq(excluded(data)), + state_key_hash.eq(excluded(state_key_hash)), + last_transaction_version.eq(excluded(last_transaction_version)), + inserted_at.eq(excluded(inserted_at)), + )), + Some(" WHERE current_token_v2_metadata.last_transaction_version <= excluded.last_transaction_version "), + ) +} + +fn insert_current_token_royalties_v1_query( + items_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::current_token_royalty_v1::dsl::*; + + ( + diesel::insert_into(schema::current_token_royalty_v1::table) + .values(items_to_insert) + .on_conflict(token_data_id) + .do_update() + .set(( + payee_address.eq(excluded(payee_address)), + royalty_points_numerator.eq(excluded(royalty_points_numerator)), + royalty_points_denominator.eq(excluded(royalty_points_denominator)), + last_transaction_version.eq(excluded(last_transaction_version)), + last_transaction_timestamp.eq(excluded(last_transaction_timestamp)), + )), + Some(" WHERE current_token_royalty_v1.last_transaction_version <= excluded.last_transaction_version "), + ) +} + +#[async_trait] +impl ProcessorTrait for TokenV2Processor { + fn name(&self) -> &'static str { + ProcessorName::TokenV2Processor.into() + } + + async fn process_transactions( + &self, + transactions: Vec, + start_version: u64, + end_version: u64, + _: Option, + ) -> anyhow::Result { + let processing_start = std::time::Instant::now(); + let last_transaction_timestamp = transactions.last().unwrap().timestamp.clone(); + + let mut conn = self.get_conn().await; + + // First get all token related table metadata from the batch of transactions. This is in case + // an earlier transaction has metadata (in resources) that's missing from a later transaction. 
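+        // Rough shape of the lookup built below (illustrative; only TableMetadataForToken is
+        // guaranteed by this file):
+        //   table handle, e.g. "0x123..."  ->  { owner_address, table_type }
+        // Later write-set changes only carry the table handle, so this map is what lets the
+        // processor attribute token v1 table items back to an owner account.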
+ let table_handle_to_owner = + TableMetadataForToken::get_table_handle_to_owner_from_transactions(&transactions); + + let query_retries = self.config.query_retries; + let query_retry_delay_ms = self.config.query_retry_delay_ms; + // Token V2 processing which includes token v1 + let ( + collections_v2, + token_datas_v2, + token_ownerships_v2, + current_collections_v2, + current_token_datas_v2, + current_deleted_token_datas_v2, + current_token_ownerships_v2, + current_deleted_token_ownerships_v2, + token_activities_v2, + current_token_v2_metadata, + current_token_royalties_v1, + ) = parse_v2_token( + &transactions, + &table_handle_to_owner, + &mut conn, + query_retries, + query_retry_delay_ms, + ) + .await; + + let processing_duration_in_secs = processing_start.elapsed().as_secs_f64(); + let db_insertion_start = std::time::Instant::now(); + + let tx_result = insert_to_db( + self.get_pool(), + self.name(), + start_version, + end_version, + &collections_v2, + &token_datas_v2, + &token_ownerships_v2, + ¤t_collections_v2, + (¤t_token_datas_v2, ¤t_deleted_token_datas_v2), + ( + ¤t_token_ownerships_v2, + ¤t_deleted_token_ownerships_v2, + ), + &token_activities_v2, + ¤t_token_v2_metadata, + ¤t_token_royalties_v1, + &self.per_table_chunk_sizes, + ) + .await; + + let db_insertion_duration_in_secs = db_insertion_start.elapsed().as_secs_f64(); + match tx_result { + Ok(_) => Ok(ProcessingResult::DefaultProcessingResult( + DefaultProcessingResult { + start_version, + end_version, + processing_duration_in_secs, + db_insertion_duration_in_secs, + last_transaction_timestamp, + }, + )), + Err(e) => { + error!( + start_version = start_version, + end_version = end_version, + processor_name = self.name(), + error = ?e, + "[Parser] Error inserting transactions to db", + ); + bail!(e) + }, + } + } + + fn connection_pool(&self) -> &ArcDbPool { + &self.connection_pool + } +} + +async fn parse_v2_token( + transactions: &[Transaction], + table_handle_to_owner: &TableHandleToOwner, + conn: &mut DbPoolConnection<'_>, + query_retries: u32, + query_retry_delay_ms: u64, +) -> ( + Vec, + Vec, + Vec, + Vec, + Vec, + Vec, + Vec, + Vec, // deleted token ownerships + Vec, + Vec, + Vec, +) { + // Token V2 and V1 combined + let mut collections_v2 = vec![]; + let mut token_datas_v2 = vec![]; + let mut token_ownerships_v2 = vec![]; + let mut token_activities_v2 = vec![]; + + let mut current_collections_v2: AHashMap = + AHashMap::new(); + let mut current_token_datas_v2: AHashMap = + AHashMap::new(); + let mut current_deleted_token_datas_v2: AHashMap = + AHashMap::new(); + let mut current_token_ownerships_v2: AHashMap< + CurrentTokenOwnershipV2PK, + CurrentTokenOwnershipV2, + > = AHashMap::new(); + let mut current_deleted_token_ownerships_v2 = AHashMap::new(); + // Tracks prior ownership in case a token gets burned + let mut prior_nft_ownership: AHashMap = AHashMap::new(); + // Get Metadata for token v2 by object + // We want to persist this through the entire batch so that even if a token is burned, + // we can still get the object core metadata for it + let mut token_v2_metadata_helper: ObjectAggregatedDataMapping = AHashMap::new(); + // Basically token properties + let mut current_token_v2_metadata: AHashMap = + AHashMap::new(); + let mut current_token_royalties_v1: AHashMap = + AHashMap::new(); + + // Code above is inefficient (multiple passthroughs) so I'm approaching TokenV2 with a cleaner code structure + for txn in transactions { + let txn_version = txn.version; + let txn_data = match txn.txn_data.as_ref() { + Some(data) => 
data, + None => { + PROCESSOR_UNKNOWN_TYPE_COUNT + .with_label_values(&["TokenV2Processor"]) + .inc(); + tracing::warn!( + transaction_version = txn_version, + "Transaction data doesn't exist" + ); + continue; + }, + }; + let txn_version = txn.version as i64; + let txn_timestamp = parse_timestamp(txn.timestamp.as_ref().unwrap(), txn_version); + let transaction_info = txn.info.as_ref().expect("Transaction info doesn't exist!"); + + if let TxnData::User(user_txn) = txn_data { + let user_request = user_txn + .request + .as_ref() + .expect("Sends is not present in user txn"); + let entry_function_id_str = get_entry_function_from_user_request(user_request); + + // Get burn events for token v2 by object + let mut tokens_burned: TokenV2Burned = AHashMap::new(); + + // Get mint events for token v2 by object + let mut tokens_minted: TokenV2Minted = AHashSet::new(); + + // Need to do a first pass to get all the objects + for wsc in transaction_info.changes.iter() { + if let Change::WriteResource(wr) = wsc.change.as_ref().unwrap() { + if let Some(object) = + ObjectWithMetadata::from_write_resource(wr, txn_version).unwrap() + { + token_v2_metadata_helper.insert( + standardize_address(&wr.address.to_string()), + ObjectAggregatedData { + object, + ..ObjectAggregatedData::default() + }, + ); + } + } + } + + // Need to do a second pass to get all the structs related to the object + for wsc in transaction_info.changes.iter() { + if let Change::WriteResource(wr) = wsc.change.as_ref().unwrap() { + let address = standardize_address(&wr.address.to_string()); + if let Some(aggregated_data) = token_v2_metadata_helper.get_mut(&address) { + if let Some(fixed_supply) = + FixedSupply::from_write_resource(wr, txn_version).unwrap() + { + aggregated_data.fixed_supply = Some(fixed_supply); + } + if let Some(unlimited_supply) = + UnlimitedSupply::from_write_resource(wr, txn_version).unwrap() + { + aggregated_data.unlimited_supply = Some(unlimited_supply); + } + if let Some(aptos_collection) = + AptosCollection::from_write_resource(wr, txn_version).unwrap() + { + aggregated_data.aptos_collection = Some(aptos_collection); + } + if let Some(property_map) = + PropertyMapModel::from_write_resource(wr, txn_version).unwrap() + { + aggregated_data.property_map = Some(property_map); + } + if let Some(concurrent_supply) = + ConcurrentSupply::from_write_resource(wr, txn_version).unwrap() + { + aggregated_data.concurrent_supply = Some(concurrent_supply); + } + if let Some(token) = TokenV2::from_write_resource(wr, txn_version).unwrap() + { + aggregated_data.token = Some(token); + } + if let Some(fungible_asset_metadata) = + FungibleAssetMetadata::from_write_resource(wr, txn_version).unwrap() + { + aggregated_data.fungible_asset_metadata = Some(fungible_asset_metadata); + } + if let Some(token_identifier) = + TokenIdentifiers::from_write_resource(wr, txn_version).unwrap() + { + aggregated_data.token_identifier = Some(token_identifier); + } + if let Some(untransferable) = + Untransferable::from_write_resource(wr, txn_version).unwrap() + { + aggregated_data.untransferable = Some(untransferable); + } + } + } + } + + // Pass through events to get the burn events and token activities v2 + // This needs to be here because we need the metadata above for token activities + // and burn / transfer events need to come before the next section + for (index, event) in user_txn.events.iter().enumerate() { + if let Some(burn_event) = Burn::from_event(event, txn_version).unwrap() { + tokens_burned.insert(burn_event.get_token_address(), burn_event); + 
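+                    // Descriptive note: both the newer module event `Burn` (handled here) and the
+                    // legacy `BurnEvent` (handled just below, wrapped into a `Burn` with a
+                    // placeholder field) end up in the same `tokens_burned` map, so the write-set
+                    // handling later in this function is agnostic to which event flavor fired.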
} + if let Some(old_burn_event) = BurnEvent::from_event(event, txn_version).unwrap() { + let burn_event = Burn::new( + standardize_address(event.key.as_ref().unwrap().account_address.as_str()), + old_burn_event.get_token_address(), + "".to_string(), + ); + tokens_burned.insert(burn_event.get_token_address(), burn_event); + } + if let Some(mint_event) = MintEvent::from_event(event, txn_version).unwrap() { + tokens_minted.insert(mint_event.get_token_address()); + } + if let Some(transfer_events) = + TransferEvent::from_event(event, txn_version).unwrap() + { + if let Some(aggregated_data) = + token_v2_metadata_helper.get_mut(&transfer_events.get_object_address()) + { + // we don't want index to be 0 otherwise we might have collision with write set change index + // note that these will be multiplied by -1 so that it doesn't conflict with wsc index + let index = if index == 0 { + user_txn.events.len() + } else { + index + }; + aggregated_data + .transfer_events + .push((index as i64, transfer_events)); + } + } + // handling all the token v1 events + if let Some(event) = TokenActivityV2::get_v1_from_parsed_event( + event, + txn_version, + txn_timestamp, + index as i64, + &entry_function_id_str, + ) + .unwrap() + { + token_activities_v2.push(event); + } + // handling all the token v2 events + if let Some(event) = TokenActivityV2::get_nft_v2_from_parsed_event( + event, + txn_version, + txn_timestamp, + index as i64, + &entry_function_id_str, + &token_v2_metadata_helper, + ) + .await + .unwrap() + { + token_activities_v2.push(event); + } + } + + for (index, wsc) in transaction_info.changes.iter().enumerate() { + let wsc_index = index as i64; + match wsc.change.as_ref().unwrap() { + Change::WriteTableItem(table_item) => { + if let Some((collection, current_collection)) = + CollectionV2::get_v1_from_write_table_item( + table_item, + txn_version, + wsc_index, + txn_timestamp, + table_handle_to_owner, + conn, + query_retries, + query_retry_delay_ms, + ) + .await + .unwrap() + { + collections_v2.push(collection); + current_collections_v2.insert( + current_collection.collection_id.clone(), + current_collection, + ); + } + if let Some((token_data, current_token_data)) = + TokenDataV2::get_v1_from_write_table_item( + table_item, + txn_version, + wsc_index, + txn_timestamp, + ) + .unwrap() + { + token_datas_v2.push(token_data); + current_token_datas_v2.insert( + current_token_data.token_data_id.clone(), + current_token_data, + ); + } + if let Some(current_token_royalty) = + CurrentTokenRoyaltyV1::get_v1_from_write_table_item( + table_item, + txn_version, + txn_timestamp, + ) + .unwrap() + { + current_token_royalties_v1.insert( + current_token_royalty.token_data_id.clone(), + current_token_royalty, + ); + } + if let Some((token_ownership, current_token_ownership)) = + TokenOwnershipV2::get_v1_from_write_table_item( + table_item, + txn_version, + wsc_index, + txn_timestamp, + table_handle_to_owner, + ) + .unwrap() + { + token_ownerships_v2.push(token_ownership); + if let Some(cto) = current_token_ownership { + prior_nft_ownership.insert( + cto.token_data_id.clone(), + NFTOwnershipV2 { + token_data_id: cto.token_data_id.clone(), + owner_address: cto.owner_address.clone(), + is_soulbound: cto.is_soulbound_v2, + }, + ); + current_token_ownerships_v2.insert( + ( + cto.token_data_id.clone(), + cto.property_version_v1.clone(), + cto.owner_address.clone(), + cto.storage_id.clone(), + ), + cto, + ); + } + } + }, + Change::DeleteTableItem(table_item) => { + if let Some((token_ownership, current_token_ownership)) = + 
TokenOwnershipV2::get_v1_from_delete_table_item( + table_item, + txn_version, + wsc_index, + txn_timestamp, + table_handle_to_owner, + ) + .unwrap() + { + token_ownerships_v2.push(token_ownership); + if let Some(cto) = current_token_ownership { + prior_nft_ownership.insert( + cto.token_data_id.clone(), + NFTOwnershipV2 { + token_data_id: cto.token_data_id.clone(), + owner_address: cto.owner_address.clone(), + is_soulbound: cto.is_soulbound_v2, + }, + ); + current_deleted_token_ownerships_v2.insert( + ( + cto.token_data_id.clone(), + cto.property_version_v1.clone(), + cto.owner_address.clone(), + cto.storage_id.clone(), + ), + cto, + ); + } + } + }, + Change::WriteResource(resource) => { + if let Some((collection, current_collection)) = + CollectionV2::get_v2_from_write_resource( + resource, + txn_version, + wsc_index, + txn_timestamp, + &token_v2_metadata_helper, + ) + .unwrap() + { + collections_v2.push(collection); + current_collections_v2.insert( + current_collection.collection_id.clone(), + current_collection, + ); + } + if let Some((token_data, current_token_data)) = + TokenDataV2::get_v2_from_write_resource( + resource, + txn_version, + wsc_index, + txn_timestamp, + &token_v2_metadata_helper, + ) + .unwrap() + { + // Add NFT ownership + let (mut ownerships, current_ownerships) = + TokenOwnershipV2::get_nft_v2_from_token_data( + &token_data, + &token_v2_metadata_helper, + ) + .unwrap(); + if let Some(current_nft_ownership) = ownerships.first() { + // Note that the first element in ownerships is the current ownership. We need to cache + // it in prior_nft_ownership so that moving forward if we see a burn we'll know + // where it came from. + prior_nft_ownership.insert( + current_nft_ownership.token_data_id.clone(), + NFTOwnershipV2 { + token_data_id: current_nft_ownership.token_data_id.clone(), + owner_address: current_nft_ownership + .owner_address + .as_ref() + .unwrap() + .clone(), + is_soulbound: current_nft_ownership.is_soulbound_v2, + }, + ); + } + token_ownerships_v2.append(&mut ownerships); + current_token_ownerships_v2.extend(current_ownerships); + token_datas_v2.push(token_data); + current_token_datas_v2.insert( + current_token_data.token_data_id.clone(), + current_token_data, + ); + } + + // Add burned NFT handling for token datas (can probably be merged with below) + if let Some(deleted_token_data) = + TokenDataV2::get_burned_nft_v2_from_write_resource( + resource, + txn_version, + txn_timestamp, + &tokens_burned, + ) + .await + .unwrap() + { + current_deleted_token_datas_v2.insert( + deleted_token_data.token_data_id.clone(), + deleted_token_data, + ); + } + // Add burned NFT handling + if let Some((nft_ownership, current_nft_ownership)) = + TokenOwnershipV2::get_burned_nft_v2_from_write_resource( + resource, + txn_version, + wsc_index, + txn_timestamp, + &prior_nft_ownership, + &tokens_burned, + &token_v2_metadata_helper, + conn, + query_retries, + query_retry_delay_ms, + ) + .await + .unwrap() + { + token_ownerships_v2.push(nft_ownership); + prior_nft_ownership.insert( + current_nft_ownership.token_data_id.clone(), + NFTOwnershipV2 { + token_data_id: current_nft_ownership.token_data_id.clone(), + owner_address: current_nft_ownership.owner_address.clone(), + is_soulbound: current_nft_ownership.is_soulbound_v2, + }, + ); + current_deleted_token_ownerships_v2.insert( + ( + current_nft_ownership.token_data_id.clone(), + current_nft_ownership.property_version_v1.clone(), + current_nft_ownership.owner_address.clone(), + current_nft_ownership.storage_id.clone(), + ), + 
current_nft_ownership, + ); + } + + // Track token properties + if let Some(token_metadata) = CurrentTokenV2Metadata::from_write_resource( + resource, + txn_version, + &token_v2_metadata_helper, + ) + .unwrap() + { + current_token_v2_metadata.insert( + ( + token_metadata.object_address.clone(), + token_metadata.resource_type.clone(), + ), + token_metadata, + ); + } + }, + Change::DeleteResource(resource) => { + // Add burned NFT handling for token datas (can probably be merged with below) + if let Some(deleted_token_data) = + TokenDataV2::get_burned_nft_v2_from_delete_resource( + resource, + txn_version, + txn_timestamp, + &tokens_burned, + ) + .await + .unwrap() + { + current_deleted_token_datas_v2.insert( + deleted_token_data.token_data_id.clone(), + deleted_token_data, + ); + } + if let Some((nft_ownership, current_nft_ownership)) = + TokenOwnershipV2::get_burned_nft_v2_from_delete_resource( + resource, + txn_version, + wsc_index, + txn_timestamp, + &prior_nft_ownership, + &tokens_burned, + conn, + query_retries, + query_retry_delay_ms, + ) + .await + .unwrap() + { + token_ownerships_v2.push(nft_ownership); + prior_nft_ownership.insert( + current_nft_ownership.token_data_id.clone(), + NFTOwnershipV2 { + token_data_id: current_nft_ownership.token_data_id.clone(), + owner_address: current_nft_ownership.owner_address.clone(), + is_soulbound: current_nft_ownership.is_soulbound_v2, + }, + ); + current_deleted_token_ownerships_v2.insert( + ( + current_nft_ownership.token_data_id.clone(), + current_nft_ownership.property_version_v1.clone(), + current_nft_ownership.owner_address.clone(), + current_nft_ownership.storage_id.clone(), + ), + current_nft_ownership, + ); + } + }, + _ => {}, + } + } + } + } + + // Getting list of values and sorting by pk in order to avoid postgres deadlock since we're doing multi threaded db writes + let mut current_collections_v2 = current_collections_v2 + .into_values() + .collect::>(); + let mut current_token_datas_v2 = current_token_datas_v2 + .into_values() + .collect::>(); + let mut current_deleted_token_datas_v2 = current_deleted_token_datas_v2 + .into_values() + .collect::>(); + let mut current_token_ownerships_v2 = current_token_ownerships_v2 + .into_values() + .collect::>(); + let mut current_token_v2_metadata = current_token_v2_metadata + .into_values() + .collect::>(); + let mut current_deleted_token_ownerships_v2 = current_deleted_token_ownerships_v2 + .into_values() + .collect::>(); + let mut current_token_royalties_v1 = current_token_royalties_v1 + .into_values() + .collect::>(); + + // Sort by PK + current_collections_v2.sort_by(|a, b| a.collection_id.cmp(&b.collection_id)); + current_deleted_token_datas_v2.sort_by(|a, b| a.token_data_id.cmp(&b.token_data_id)); + current_token_datas_v2.sort_by(|a, b| a.token_data_id.cmp(&b.token_data_id)); + current_token_ownerships_v2.sort_by(|a, b| { + ( + &a.token_data_id, + &a.property_version_v1, + &a.owner_address, + &a.storage_id, + ) + .cmp(&( + &b.token_data_id, + &b.property_version_v1, + &b.owner_address, + &b.storage_id, + )) + }); + current_token_v2_metadata.sort_by(|a, b| { + (&a.object_address, &a.resource_type).cmp(&(&b.object_address, &b.resource_type)) + }); + current_deleted_token_ownerships_v2.sort_by(|a, b| { + ( + &a.token_data_id, + &a.property_version_v1, + &a.owner_address, + &a.storage_id, + ) + .cmp(&( + &b.token_data_id, + &b.property_version_v1, + &b.owner_address, + &b.storage_id, + )) + }); + current_token_royalties_v1.sort(); + + ( + collections_v2, + token_datas_v2, + 
token_ownerships_v2, + current_collections_v2, + current_token_datas_v2, + current_deleted_token_datas_v2, + current_token_ownerships_v2, + current_deleted_token_ownerships_v2, + token_activities_v2, + current_token_v2_metadata, + current_token_royalties_v1, + ) +} diff --git a/rust/processor/src/processors/transaction_metadata_processor.rs b/rust/processor/src/processors/transaction_metadata_processor.rs new file mode 100644 index 000000000..615dacd09 --- /dev/null +++ b/rust/processor/src/processors/transaction_metadata_processor.rs @@ -0,0 +1,227 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use super::{DefaultProcessingResult, ProcessorName, ProcessorTrait}; +use crate::{ + db::common::models::transaction_metadata_model::{ + event_size_info::EventSize, transaction_size_info::TransactionSize, + write_set_size_info::WriteSetSize, + }, + gap_detectors::ProcessingResult, + schema, + utils::database::{execute_in_chunks, get_config_table_chunk_size, ArcDbPool}, +}; +use ahash::AHashMap; +use anyhow::bail; +use aptos_protos::transaction::v1::Transaction; +use async_trait::async_trait; +use diesel::{pg::Pg, query_builder::QueryFragment}; +use std::fmt::Debug; +use tracing::{error, warn}; + +pub struct TransactionMetadataProcessor { + connection_pool: ArcDbPool, + per_table_chunk_sizes: AHashMap, +} + +impl TransactionMetadataProcessor { + pub fn new(connection_pool: ArcDbPool, per_table_chunk_sizes: AHashMap) -> Self { + Self { + connection_pool, + per_table_chunk_sizes, + } + } +} + +impl Debug for TransactionMetadataProcessor { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let state = &self.connection_pool.state(); + write!( + f, + "TransactionMetadataProcessor {{ connections: {:?} idle_connections: {:?} }}", + state.connections, state.idle_connections + ) + } +} + +async fn insert_to_db( + conn: ArcDbPool, + name: &'static str, + start_version: u64, + end_version: u64, + transaction_sizes: &[TransactionSize], + event_sizes: &[EventSize], + write_set_sizes: &[WriteSetSize], + per_table_chunk_sizes: &AHashMap, +) -> Result<(), diesel::result::Error> { + tracing::trace!( + name = name, + start_version = start_version, + end_version = end_version, + "Inserting to db", + ); + + execute_in_chunks( + conn.clone(), + insert_transaction_sizes_query, + transaction_sizes, + get_config_table_chunk_size::( + "transaction_size_info", + per_table_chunk_sizes, + ), + ) + .await?; + execute_in_chunks( + conn.clone(), + insert_event_sizes_query, + event_sizes, + get_config_table_chunk_size::("event_size_info", per_table_chunk_sizes), + ) + .await?; + execute_in_chunks( + conn, + insert_write_set_sizes_query, + write_set_sizes, + get_config_table_chunk_size::("write_set_size_info", per_table_chunk_sizes), + ) + .await?; + + Ok(()) +} + +fn insert_transaction_sizes_query( + items_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::transaction_size_info::dsl::*; + ( + diesel::insert_into(schema::transaction_size_info::table) + .values(items_to_insert) + .on_conflict(transaction_version) + .do_nothing(), + None, + ) +} + +fn insert_event_sizes_query( + items_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::event_size_info::dsl::*; + ( + diesel::insert_into(schema::event_size_info::table) + .values(items_to_insert) + .on_conflict((transaction_version, index)) + .do_nothing(), + None, + ) +} + +fn 
insert_write_set_sizes_query( + items_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::write_set_size_info::dsl::*; + ( + diesel::insert_into(schema::write_set_size_info::table) + .values(items_to_insert) + .on_conflict((transaction_version, index)) + .do_nothing(), + None, + ) +} + +#[async_trait] +impl ProcessorTrait for TransactionMetadataProcessor { + fn name(&self) -> &'static str { + ProcessorName::TransactionMetadataProcessor.into() + } + + async fn process_transactions( + &self, + transactions: Vec, + start_version: u64, + end_version: u64, + _: Option, + ) -> anyhow::Result { + let processing_start = std::time::Instant::now(); + let mut transaction_sizes = vec![]; + let mut event_sizes = vec![]; + let mut write_set_sizes = vec![]; + for txn in &transactions { + let txn_version = txn.version as i64; + let size_info = match txn.size_info.as_ref() { + Some(size_info) => size_info, + None => { + warn!(version = txn.version, "Transaction size info not found"); + continue; + }, + }; + transaction_sizes.push(TransactionSize::from_transaction_info( + size_info, + txn_version, + )); + for (index, event_size_info) in size_info.event_size_info.iter().enumerate() { + event_sizes.push(EventSize::from_event_size_info( + event_size_info, + txn_version, + index as i64, + )); + } + for (index, write_set_size_info) in size_info.write_op_size_info.iter().enumerate() { + write_set_sizes.push(WriteSetSize::from_transaction_info( + write_set_size_info, + txn_version, + index as i64, + )); + } + } + + let processing_duration_in_secs = processing_start.elapsed().as_secs_f64(); + let db_insertion_start = std::time::Instant::now(); + + let tx_result = insert_to_db( + self.get_pool(), + self.name(), + start_version, + end_version, + &transaction_sizes, + &event_sizes, + &write_set_sizes, + &self.per_table_chunk_sizes, + ) + .await; + let db_insertion_duration_in_secs = db_insertion_start.elapsed().as_secs_f64(); + match tx_result { + Ok(_) => Ok(ProcessingResult::DefaultProcessingResult( + DefaultProcessingResult { + start_version, + end_version, + processing_duration_in_secs, + db_insertion_duration_in_secs, + last_transaction_timestamp: transactions.last().unwrap().timestamp.clone(), + }, + )), + Err(e) => { + error!( + start_version = start_version, + end_version = end_version, + processor_name = self.name(), + error = ?e, + "[Parser] Error inserting transactions to db", + ); + bail!(e) + }, + } + } + + fn connection_pool(&self) -> &ArcDbPool { + &self.connection_pool + } +} diff --git a/rust/processor/src/processors/user_transaction_processor.rs b/rust/processor/src/processors/user_transaction_processor.rs new file mode 100644 index 000000000..08416488e --- /dev/null +++ b/rust/processor/src/processors/user_transaction_processor.rs @@ -0,0 +1,220 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use super::{DefaultProcessingResult, ProcessorName, ProcessorTrait}; +use crate::{ + db::common::models::user_transactions_models::{ + signatures::Signature, user_transactions::UserTransactionModel, + }, + gap_detectors::ProcessingResult, + schema, + utils::{ + counters::PROCESSOR_UNKNOWN_TYPE_COUNT, + database::{execute_in_chunks, get_config_table_chunk_size, ArcDbPool}, + }, +}; +use ahash::AHashMap; +use anyhow::bail; +use aptos_protos::transaction::v1::{transaction::TxnData, Transaction}; +use async_trait::async_trait; +use diesel::{ + pg::{upsert::excluded, Pg}, + query_builder::QueryFragment, + 
ExpressionMethods, +}; +use std::fmt::Debug; +use tracing::error; + +pub struct UserTransactionProcessor { + connection_pool: ArcDbPool, + per_table_chunk_sizes: AHashMap, +} + +impl UserTransactionProcessor { + pub fn new(connection_pool: ArcDbPool, per_table_chunk_sizes: AHashMap) -> Self { + Self { + connection_pool, + per_table_chunk_sizes, + } + } +} + +impl Debug for UserTransactionProcessor { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let state = &self.connection_pool.state(); + write!( + f, + "UserTransactionProcessor {{ connections: {:?} idle_connections: {:?} }}", + state.connections, state.idle_connections + ) + } +} + +async fn insert_to_db( + conn: ArcDbPool, + name: &'static str, + start_version: u64, + end_version: u64, + user_transactions: &[UserTransactionModel], + signatures: &[Signature], + per_table_chunk_sizes: &AHashMap, +) -> Result<(), diesel::result::Error> { + tracing::trace!( + name = name, + start_version = start_version, + end_version = end_version, + "Inserting to db", + ); + + let ut = execute_in_chunks( + conn.clone(), + insert_user_transactions_query, + user_transactions, + get_config_table_chunk_size::( + "user_transactions", + per_table_chunk_sizes, + ), + ); + let is = execute_in_chunks( + conn, + insert_signatures_query, + signatures, + get_config_table_chunk_size::("signatures", per_table_chunk_sizes), + ); + + let (ut_res, is_res) = futures::join!(ut, is); + for res in [ut_res, is_res] { + res?; + } + Ok(()) +} + +fn insert_user_transactions_query( + items_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::user_transactions::dsl::*; + ( + diesel::insert_into(schema::user_transactions::table) + .values(items_to_insert) + .on_conflict(version) + .do_update() + .set(( + expiration_timestamp_secs.eq(excluded(expiration_timestamp_secs)), + inserted_at.eq(excluded(inserted_at)), + )), + None, + ) +} + +fn insert_signatures_query( + items_to_insert: Vec, +) -> ( + impl QueryFragment + diesel::query_builder::QueryId + Send, + Option<&'static str>, +) { + use schema::signatures::dsl::*; + ( + diesel::insert_into(schema::signatures::table) + .values(items_to_insert) + .on_conflict(( + transaction_version, + multi_agent_index, + multi_sig_index, + is_sender_primary, + )) + .do_nothing(), + None, + ) +} + +#[async_trait] +impl ProcessorTrait for UserTransactionProcessor { + fn name(&self) -> &'static str { + ProcessorName::UserTransactionProcessor.into() + } + + async fn process_transactions( + &self, + transactions: Vec, + start_version: u64, + end_version: u64, + _: Option, + ) -> anyhow::Result { + let processing_start = std::time::Instant::now(); + let last_transaction_timestamp = transactions.last().unwrap().timestamp.clone(); + + let mut signatures = vec![]; + let mut user_transactions = vec![]; + for txn in &transactions { + let txn_version = txn.version as i64; + let block_height = txn.block_height as i64; + let txn_data = match txn.txn_data.as_ref() { + Some(txn_data) => txn_data, + None => { + PROCESSOR_UNKNOWN_TYPE_COUNT + .with_label_values(&["UserTransactionProcessor"]) + .inc(); + tracing::warn!( + transaction_version = txn_version, + "Transaction data doesn't exist" + ); + continue; + }, + }; + if let TxnData::User(inner) = txn_data { + let (user_transaction, sigs) = UserTransactionModel::from_transaction( + inner, + txn.timestamp.as_ref().unwrap(), + block_height, + txn.epoch as i64, + txn_version, + ); + signatures.extend(sigs); + 
user_transactions.push(user_transaction); + } + } + + let processing_duration_in_secs = processing_start.elapsed().as_secs_f64(); + let db_insertion_start = std::time::Instant::now(); + + let tx_result = insert_to_db( + self.get_pool(), + self.name(), + start_version, + end_version, + &user_transactions, + &signatures, + &self.per_table_chunk_sizes, + ) + .await; + let db_insertion_duration_in_secs = db_insertion_start.elapsed().as_secs_f64(); + match tx_result { + Ok(_) => Ok(ProcessingResult::DefaultProcessingResult( + DefaultProcessingResult { + start_version, + end_version, + processing_duration_in_secs, + db_insertion_duration_in_secs, + last_transaction_timestamp, + }, + )), + Err(e) => { + error!( + start_version = start_version, + end_version = end_version, + processor_name = self.name(), + error = ?e, + "[Parser] Error inserting transactions to db", + ); + bail!(e) + }, + } + } + + fn connection_pool(&self) -> &ArcDbPool { + &self.connection_pool + } +} diff --git a/rust/processor/src/transaction_filter.rs b/rust/processor/src/transaction_filter.rs new file mode 100644 index 000000000..39ae5204a --- /dev/null +++ b/rust/processor/src/transaction_filter.rs @@ -0,0 +1,80 @@ +use aptos_protos::transaction::v1::{ + transaction::{TransactionType, TxnData}, + transaction_payload::Payload, + Transaction, +}; +use serde::{Deserialize, Serialize}; + +/// Allows filtering transactions based on various criteria +/// The criteria are combined with `AND` +/// If a criteria is not set, it is ignored +/// Criteria will be loaded from the config file +#[derive(Clone, Debug, Default, Deserialize, Serialize)] +#[serde(deny_unknown_fields)] +#[serde(default)] +pub struct TransactionFilter { + // Only allow transactions from these contract addresses + focus_contract_addresses: Option>, + // Skip transactions from these sender addresses + skip_sender_addresses: Option>, + // Skip all transactions that aren't user transactions + focus_user_transactions: bool, +} + +impl TransactionFilter { + pub fn new( + focus_contract_addresses: Option>, + skip_sender_addresses: Option>, + focus_user_transactions: bool, + ) -> Self { + // TODO: normalize addresses + Self { + focus_contract_addresses, + skip_sender_addresses, + focus_user_transactions, + } + } + + /// Returns true if the transaction should be included + pub fn include(&self, transaction: &Transaction) -> bool { + // If we're only focusing on user transactions, skip if it's not a user transaction + + let is_user_txn = transaction.r#type == TransactionType::User as i32; + if self.focus_user_transactions && !is_user_txn { + return false; + } + + // If it's not a user transaction, we can skip the rest of the checks + if !is_user_txn { + return true; + } + + if let Some(TxnData::User(user_transaction)) = transaction.txn_data.as_ref() { + if let Some(utr) = user_transaction.request.as_ref() { + // Skip if sender is in the skip list + if let Some(skip_sender_addresses) = &self.skip_sender_addresses { + if skip_sender_addresses.contains(&utr.sender) { + return false; + } + } + + if let Some(focus_contract_addresses) = &self.focus_contract_addresses { + // Skip if focus contract addresses are set and the transaction isn't in the list + if let Some(payload) = utr.payload.as_ref() { + if let Some(Payload::EntryFunctionPayload(efp)) = payload.payload.as_ref() { + if let Some(function) = efp.function.as_ref() { + if let Some(module) = function.module.as_ref() { + if !focus_contract_addresses.contains(&module.address) { + return false; + } + } + } + } + } + } + } + 
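+        // Hypothetical config sketch (field names come from this struct; the top-level key and
+        // YAML layout are assumptions, not taken from this repo's docs):
+        //   transaction_filter:
+        //     focus_user_transactions: true
+        //     focus_contract_addresses: ["0x1"]
+        //     skip_sender_addresses: ["0xdead"]
+        // With that filter only user transactions pass, their entry function must live at 0x1,
+        // and anything sent by 0xdead is dropped before processing.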
} + + true + } +} diff --git a/rust/processor/src/utils/counters.rs b/rust/processor/src/utils/counters.rs new file mode 100644 index 000000000..f4a6a57e1 --- /dev/null +++ b/rust/processor/src/utils/counters.rs @@ -0,0 +1,284 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use once_cell::sync::Lazy; +use prometheus::{ + register_gauge_vec, register_int_counter, register_int_counter_vec, register_int_gauge_vec, + GaugeVec, IntCounter, IntCounterVec, IntGaugeVec, +}; + +pub enum ProcessorStep { + ReceivedTxnsFromGrpc, + // Received transactions from GRPC. Sending transactions to channel. + ProcessedBatch, + // Processor finished processing one batch of transaction + ProcessedMultipleBatches, // Processor finished processing multiple batches of transactions +} + +impl ProcessorStep { + pub fn get_step(&self) -> &'static str { + match self { + ProcessorStep::ReceivedTxnsFromGrpc => "1", + ProcessorStep::ProcessedBatch => "2", + ProcessorStep::ProcessedMultipleBatches => "3", + } + } + + pub fn get_label(&self) -> &'static str { + match self { + ProcessorStep::ReceivedTxnsFromGrpc => { + "[Parser] Received transactions from GRPC. Sending transactions to channel." + }, + ProcessorStep::ProcessedBatch => { + "[Parser] Processor finished processing one batch of transaction" + }, + ProcessorStep::ProcessedMultipleBatches => { + "[Parser] Processor finished processing multiple batches of transactions" + }, + } + } +} + +/// Data latency when processor receives transactions. +pub static PROCESSOR_DATA_RECEIVED_LATENCY_IN_SECS: Lazy = Lazy::new(|| { + register_gauge_vec!( + "indexer_processor_data_receive_latency_in_secs", + "Data latency when processor receives transactions", + &["request_token", "processor_name"] + ) + .unwrap() +}); + +/// Data latency when processor finishes processing transactions. 
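+/// (Usage sketch, not from the original source: a call site would look roughly like
+/// `PROCESSOR_DATA_PROCESSED_LATENCY_IN_SECS.with_label_values(&[request_token, processor_name]).set(latency_secs);`.)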
+pub static PROCESSOR_DATA_PROCESSED_LATENCY_IN_SECS: Lazy<GaugeVec> = Lazy::new(|| {
+    register_gauge_vec!(
+        "indexer_processor_data_processed_latency_in_secs",
+        "Data latency when processor finishes processing transactions",
+        &["request_token", "processor_name"]
+    )
+    .unwrap()
+});
+
+/// Number of times a given processor has been invoked
+pub static PROCESSOR_INVOCATIONS_COUNT: Lazy<IntCounterVec> = Lazy::new(|| {
+    register_int_counter_vec!(
+        "indexer_processor_invocation_count",
+        "Number of times a given processor has been invoked",
+        &["processor_name"]
+    )
+    .unwrap()
+});
+
+/// Number of times any given processor has raised an error
+pub static PROCESSOR_ERRORS_COUNT: Lazy<IntCounterVec> = Lazy::new(|| {
+    register_int_counter_vec!(
+        "indexer_processor_errors",
+        "Number of times any given processor has raised an error",
+        &["processor_name"]
+    )
+    .unwrap()
+});
+
+/// Number of times any given processor has completed successfully
+pub static PROCESSOR_SUCCESSES_COUNT: Lazy<IntCounterVec> = Lazy::new(|| {
+    register_int_counter_vec!(
+        "indexer_processor_success_count",
+        "Number of times a given processor has completed successfully",
+        &["processor_name"]
+    )
+    .unwrap()
+});
+
+/// Number of times the connection pool has timed out when trying to get a connection
+pub static UNABLE_TO_GET_CONNECTION_COUNT: Lazy<IntCounter> = Lazy::new(|| {
+    register_int_counter!(
+        "indexer_connection_pool_err",
+        "Number of times the connection pool has timed out when trying to get a connection"
+    )
+    .unwrap()
+});
+
+/// Number of times the connection pool got a connection
+pub static GOT_CONNECTION_COUNT: Lazy<IntCounter> = Lazy::new(|| {
+    register_int_counter!(
+        "indexer_connection_pool_ok",
+        "Number of times the connection pool got a connection"
+    )
+    .unwrap()
+});
+
+#[allow(dead_code)]
+/// Number of times the indexer has been unable to fetch a transaction. Ideally zero.
+pub static UNABLE_TO_FETCH_TRANSACTION: Lazy<IntCounter> = Lazy::new(|| {
+    register_int_counter!(
+        "indexer_unable_to_fetch_transaction_count",
+        "Number of times the indexer has been unable to fetch a transaction"
+    )
+    .unwrap()
+});
+
+#[allow(dead_code)]
+/// Number of times the indexer has been able to fetch a transaction
+pub static FETCHED_TRANSACTION: Lazy<IntCounter> = Lazy::new(|| {
+    register_int_counter!(
+        "indexer_fetched_transaction_count",
+        "Number of times the indexer has been able to fetch a transaction"
+    )
+    .unwrap()
+});
+
+/// Max version processed
+pub static LATEST_PROCESSED_VERSION: Lazy<IntGaugeVec> = Lazy::new(|| {
+    register_int_gauge_vec!(
+        "indexer_processor_latest_version",
+        "Latest version a processor has fully consumed",
+        &["processor_name", "step", "message", "task_index"]
+    )
+    .unwrap()
+});
+
+/// Count of bytes processed.
+pub static PROCESSED_BYTES_COUNT: Lazy<IntCounterVec> = Lazy::new(|| {
+    register_int_counter_vec!(
+        "indexer_processor_processed_bytes_count",
+        "Count of bytes processed",
+        &["processor_name", "step", "message", "task_index"]
+    )
+    .unwrap()
+});
+
+/// The amount of time that a task spent waiting for a protobuf bundle of transactions
+pub static PB_CHANNEL_FETCH_WAIT_TIME_SECS: Lazy<GaugeVec> = Lazy::new(|| {
+    register_gauge_vec!(
+        "indexer_processor_pb_channel_fetch_wait_time_secs",
+        "The amount of time that a task spent waiting for a protobuf bundle of transactions",
+        &["processor_name", "task_index"]
+    )
+    .unwrap()
+});
+
+/// Count of transactions processed.
+pub static NUM_TRANSACTIONS_PROCESSED_COUNT: Lazy = Lazy::new(|| { + register_int_counter_vec!( + "indexer_processor_num_transactions_processed_count", + "Number of transactions processed", + &["processor_name", "step", "message", "task_index"] + ) + .unwrap() +}); + +/// Count of transactions filtered out +pub static NUM_TRANSACTIONS_FILTERED_OUT_COUNT: Lazy = Lazy::new(|| { + register_int_counter_vec!( + "indexer_processor_num_transactions_filtered_out_count", + "Number of transactions filtered out", + &["processor_name"] + ) + .unwrap() +}); + +/// Size of the channel containing transactions fetched from GRPC, waiting to be processed +pub static FETCHER_THREAD_CHANNEL_SIZE: Lazy = Lazy::new(|| { + register_int_gauge_vec!( + "indexer_processor_fetcher_thread_channel_size", + "Size of the fetcher thread channel", + &["processor_name"] + ) + .unwrap() +}); + +/// Overall processing time for a single batch of transactions (per task) +pub static SINGLE_BATCH_PROCESSING_TIME_IN_SECS: Lazy = Lazy::new(|| { + register_gauge_vec!( + "indexer_processor_single_batch_processing_time_in_secs", + "Time taken to process a single batch of transactions", + &["processor_name", "task_index"] + ) + .unwrap() +}); + +/// Parsing time for a single batch of transactions +pub static SINGLE_BATCH_PARSING_TIME_IN_SECS: Lazy = Lazy::new(|| { + register_gauge_vec!( + "indexer_processor_single_batch_parsing_time_in_secs", + "Time taken to parse a single batch of transactions", + &["processor_name", "task_index"] + ) + .unwrap() +}); + +/// DB insertion time for a single batch of transactions +pub static SINGLE_BATCH_DB_INSERTION_TIME_IN_SECS: Lazy = Lazy::new(|| { + register_gauge_vec!( + "indexer_processor_single_batch_db_insertion_time_in_secs", + "Time taken to insert to DB for a single batch of transactions", + &["processor_name", "task_index"] + ) + .unwrap() +}); + +/// Transaction timestamp in unixtime +pub static TRANSACTION_UNIX_TIMESTAMP: Lazy = Lazy::new(|| { + register_gauge_vec!( + "indexer_processor_transaction_unix_timestamp", + "Transaction timestamp in unixtime", + &["processor_name", "step", "message", "task_index"] + ) + .unwrap() +}); + +/// Data gap warnings +pub static PROCESSOR_DATA_GAP_COUNT: Lazy = Lazy::new(|| { + register_int_gauge_vec!("indexer_processor_data_gap_count", "Data gap count", &[ + "processor_name" + ]) + .unwrap() +}); + +/// Data gap warnings for parquet +pub static PARQUET_PROCESSOR_DATA_GAP_COUNT: Lazy = Lazy::new(|| { + register_int_gauge_vec!( + "indexer_parquet_processor_data_gap_count", + "Data gap count", + &["processor_name"] + ) + .unwrap() +}); + +/// GRPC latency. +pub static GRPC_LATENCY_BY_PROCESSOR_IN_SECS: Lazy = Lazy::new(|| { + register_gauge_vec!( + "indexer_processor_grpc_latency_in_secs", + "GRPC latency observed by processor", + &["processor_name", "task_index"] + ) + .unwrap() +}); + +/// Processor unknown type count. 
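+/// (Incremented at call sites such as
+/// `PROCESSOR_UNKNOWN_TYPE_COUNT.with_label_values(&["TokenV2Processor"]).inc();`,
+/// as seen in the token_v2 processor above.)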
+pub static PROCESSOR_UNKNOWN_TYPE_COUNT: Lazy<IntCounterVec> = Lazy::new(|| {
+    register_int_counter_vec!(
+        "indexer_processor_unknown_type_count",
+        "Processor unknown type count, e.g., compatibility issues",
+        &["model_name"]
+    )
+    .unwrap()
+});
+
+/// Parquet struct size
+pub static PARQUET_STRUCT_SIZE: Lazy<IntGaugeVec> = Lazy::new(|| {
+    register_int_gauge_vec!("indexer_parquet_struct_size", "Parquet struct size", &[
+        "parquet_type"
+    ])
+    .unwrap()
+});
+
+/// Parquet handler buffer size
+pub static PARQUET_HANDLER_BUFFER_SIZE: Lazy<IntGaugeVec> = Lazy::new(|| {
+    register_int_gauge_vec!(
+        "indexer_parquet_handler_buffer_size",
+        "Parquet handler buffer size",
+        &["parquet_type"] // TODO: add something like task_index
+    )
+    .unwrap()
+});
diff --git a/rust/processor/src/utils/database.rs b/rust/processor/src/utils/database.rs
new file mode 100644
index 000000000..411ce46c9
--- /dev/null
+++ b/rust/processor/src/utils/database.rs
@@ -0,0 +1,285 @@
+// Copyright © Aptos Foundation
+// SPDX-License-Identifier: Apache-2.0
+
+//! Database-related functions
+#![allow(clippy::extra_unused_lifetimes)]
+
+use crate::utils::util::remove_null_bytes;
+use ahash::AHashMap;
+use diesel::{
+    query_builder::{AstPass, Query, QueryFragment},
+    ConnectionResult, QueryResult,
+};
+use diesel_async::{
+    pooled_connection::{
+        bb8::{Pool, PooledConnection},
+        AsyncDieselConnectionManager, ManagerConfig, PoolError,
+    },
+    AsyncPgConnection, RunQueryDsl,
+};
+use diesel_migrations::{embed_migrations, EmbeddedMigrations, MigrationHarness};
+use futures_util::{future::BoxFuture, FutureExt};
+use std::sync::Arc;
+
+pub type Backend = diesel::pg::Pg;
+
+pub type MyDbConnection = AsyncPgConnection;
+pub type DbPool = Pool<MyDbConnection>;
+pub type ArcDbPool = Arc<DbPool>;
+pub type DbPoolConnection<'a> = PooledConnection<'a, MyDbConnection>;
+
+pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("src/db/postgres/migrations");
+
+pub const DEFAULT_MAX_POOL_SIZE: u32 = 150;
+
+#[derive(QueryId)]
+/// Using this will append a where clause at the end of the string upsert function, e.g.
+/// INSERT INTO ... ON CONFLICT DO UPDATE SET ... WHERE "transaction_version" = excluded."transaction_version"
+/// This is needed when we want to maintain a table with only the latest state
+pub struct UpsertFilterLatestTransactionQuery<T> {
+    query: T,
+    where_clause: Option<&'static str>,
+}
+
+// The max is actually u16::MAX, but we see an overflow error when the size is too big, so reduce it a bit.
+pub const MAX_DIESEL_PARAM_SIZE: usize = (u16::MAX / 2) as usize;
+
+/// This function will clean the data for postgres. Currently it has support for removing
+/// null bytes from strings, but in the future we will add more functionality.
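+/// (Sketch of the intended call pattern, with an illustrative model type:
+/// `let cleaned: Vec<TokenDataV2> = clean_data_for_db(items, true);`
+/// where the boolean mirrors `should_remove_null_bytes` below.)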
+pub fn clean_data_for_db serde::Deserialize<'de>>( + items: Vec, + should_remove_null_bytes: bool, +) -> Vec { + if should_remove_null_bytes { + items.iter().map(remove_null_bytes).collect() + } else { + items + } +} + +fn establish_connection(database_url: &str) -> BoxFuture> { + use native_tls::{Certificate, TlsConnector}; + use postgres_native_tls::MakeTlsConnector; + + (async move { + let (url, cert_path) = parse_and_clean_db_url(database_url); + let cert = std::fs::read(cert_path.unwrap()).expect("Could not read certificate"); + + let cert = Certificate::from_pem(&cert).expect("Could not parse certificate"); + let connector = TlsConnector::builder() + .danger_accept_invalid_certs(true) + .add_root_certificate(cert) + .build() + .expect("Could not build TLS connector"); + let connector = MakeTlsConnector::new(connector); + + let (client, connection) = tokio_postgres::connect(&url, connector) + .await + .expect("Could not connect to database"); + tokio::spawn(async move { + if let Err(e) = connection.await { + eprintln!("connection error: {}", e); + } + }); + AsyncPgConnection::try_from(client).await + }) + .boxed() +} + +fn parse_and_clean_db_url(url: &str) -> (String, Option) { + let mut db_url = url::Url::parse(url).expect("Could not parse database url"); + let mut cert_path = None; + + let mut query = "".to_string(); + db_url.query_pairs().for_each(|(k, v)| { + if k == "sslrootcert" { + cert_path = Some(v.parse().unwrap()); + } else { + query.push_str(&format!("{}={}&", k, v)); + } + }); + db_url.set_query(Some(&query)); + + (db_url.to_string(), cert_path) +} + +pub async fn new_db_pool( + database_url: &str, + max_pool_size: Option, +) -> Result { + let (_url, cert_path) = parse_and_clean_db_url(database_url); + + let config = if cert_path.is_some() { + let mut config = ManagerConfig::::default(); + config.custom_setup = Box::new(|conn| Box::pin(establish_connection(conn))); + AsyncDieselConnectionManager::::new_with_config(database_url, config) + } else { + AsyncDieselConnectionManager::::new(database_url) + }; + let pool = Pool::builder() + .max_size(max_pool_size.unwrap_or(DEFAULT_MAX_POOL_SIZE)) + .build(config) + .await?; + Ok(Arc::new(pool)) +} + +pub async fn execute_in_chunks( + conn: ArcDbPool, + build_query: fn(Vec) -> (U, Option<&'static str>), + items_to_insert: &[T], + chunk_size: usize, +) -> Result<(), diesel::result::Error> +where + U: QueryFragment + diesel::query_builder::QueryId + Send + 'static, + T: serde::Serialize + for<'de> serde::Deserialize<'de> + Clone + Send + 'static, +{ + let tasks = items_to_insert + .chunks(chunk_size) + .map(|chunk| { + let conn = conn.clone(); + let items = chunk.to_vec(); + tokio::spawn(async move { + let (query, additional_where_clause) = build_query(items.clone()); + execute_or_retry_cleaned(conn, build_query, items, query, additional_where_clause) + .await + }) + }) + .collect::>(); + + let results = futures_util::future::try_join_all(tasks) + .await + .expect("Task panicked executing in chunks"); + for res in results { + res? 
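+        // Worked example for the chunking above (illustrative numbers): with
+        // MAX_DIESEL_PARAM_SIZE = u16::MAX / 2 = 32767 and a model struct of 25 columns,
+        // get_config_table_chunk_size (defined below) falls back to 32767 / 25 = 1310 rows per
+        // chunk, keeping each spawned insert under diesel's bind-parameter limit.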
+ } + + Ok(()) +} + +pub async fn execute_with_better_error( + pool: ArcDbPool, + query: U, + mut additional_where_clause: Option<&'static str>, +) -> QueryResult +where + U: QueryFragment + diesel::query_builder::QueryId + Send, +{ + let original_query = diesel::debug_query::(&query).to_string(); + // This is needed because if we don't insert any row, then diesel makes a call like this + // SELECT 1 FROM TABLE WHERE 1=0 + if original_query.to_lowercase().contains("where") { + additional_where_clause = None; + } + let final_query = UpsertFilterLatestTransactionQuery { + query, + where_clause: additional_where_clause, + }; + let debug_string = diesel::debug_query::(&final_query).to_string(); + tracing::debug!("Executing query: {:?}", debug_string); + let conn = &mut pool.get().await.map_err(|e| { + tracing::warn!("Error getting connection from pool: {:?}", e); + diesel::result::Error::DatabaseError( + diesel::result::DatabaseErrorKind::UnableToSendCommand, + Box::new(e.to_string()), + ) + })?; + let res = final_query.execute(conn).await; + if let Err(ref e) = res { + tracing::warn!("Error running query: {:?}\n{:?}", e, debug_string); + } + res +} + +/// Returns the entry for the config hashmap, or the default field count for the insert +/// Given diesel has a limit of how many parameters can be inserted in a single operation (u16::MAX), +/// we default to chunk an array of items based on how many columns are in the table. +pub fn get_config_table_chunk_size( + table_name: &str, + per_table_chunk_sizes: &AHashMap, +) -> usize { + per_table_chunk_sizes + .get(table_name) + .copied() + .unwrap_or_else(|| MAX_DIESEL_PARAM_SIZE / T::field_count()) +} + +pub async fn execute_with_better_error_conn( + conn: &mut MyDbConnection, + query: U, + mut additional_where_clause: Option<&'static str>, +) -> QueryResult +where + U: QueryFragment + diesel::query_builder::QueryId + Send, +{ + let original_query = diesel::debug_query::(&query).to_string(); + // This is needed because if we don't insert any row, then diesel makes a call like this + // SELECT 1 FROM TABLE WHERE 1=0 + if original_query.to_lowercase().contains("where") { + additional_where_clause = None; + } + let final_query = UpsertFilterLatestTransactionQuery { + query, + where_clause: additional_where_clause, + }; + let debug_string = diesel::debug_query::(&final_query).to_string(); + tracing::debug!("Executing query: {:?}", debug_string); + let res = final_query.execute(conn).await; + if let Err(ref e) = res { + tracing::warn!("Error running query: {:?}\n{:?}", e, debug_string); + } + res +} + +async fn execute_or_retry_cleaned( + conn: ArcDbPool, + build_query: fn(Vec) -> (U, Option<&'static str>), + items: Vec, + query: U, + additional_where_clause: Option<&'static str>, +) -> Result<(), diesel::result::Error> +where + U: QueryFragment + diesel::query_builder::QueryId + Send, + T: serde::Serialize + for<'de> serde::Deserialize<'de> + Clone, +{ + match execute_with_better_error(conn.clone(), query, additional_where_clause).await { + Ok(_) => {}, + Err(_) => { + let cleaned_items = clean_data_for_db(items, true); + let (cleaned_query, additional_where_clause) = build_query(cleaned_items); + match execute_with_better_error(conn.clone(), cleaned_query, additional_where_clause) + .await + { + Ok(_) => {}, + Err(e) => { + return Err(e); + }, + } + }, + } + Ok(()) +} + +pub fn run_pending_migrations(conn: &mut impl MigrationHarness) { + conn.run_pending_migrations(MIGRATIONS) + .expect("[Parser] Migrations failed!"); +} + +/// Section below is 
required to modify the query. +impl Query for UpsertFilterLatestTransactionQuery { + type SqlType = T::SqlType; +} + +//impl RunQueryDsl for UpsertFilterLatestTransactionQuery {} + +impl QueryFragment for UpsertFilterLatestTransactionQuery +where + T: QueryFragment, +{ + fn walk_ast<'b>(&'b self, mut out: AstPass<'_, 'b, Backend>) -> QueryResult<()> { + self.query.walk_ast(out.reborrow())?; + if let Some(w) = self.where_clause { + out.push_sql(w); + } + Ok(()) + } +} diff --git a/rust/processor/src/utils/mod.rs b/rust/processor/src/utils/mod.rs new file mode 100644 index 000000000..4f13167fe --- /dev/null +++ b/rust/processor/src/utils/mod.rs @@ -0,0 +1,6 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +pub mod counters; +pub mod database; +pub mod util; diff --git a/rust/processor/src/utils/util.rs b/rust/processor/src/utils/util.rs new file mode 100644 index 000000000..40cede660 --- /dev/null +++ b/rust/processor/src/utils/util.rs @@ -0,0 +1,657 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + db::common::models::property_map::{PropertyMap, TokenObjectPropertyMap}, + utils::counters::PROCESSOR_UNKNOWN_TYPE_COUNT, +}; +use aptos_protos::{ + transaction::v1::{ + multisig_transaction_payload::Payload as MultisigPayloadType, + transaction_payload::Payload as PayloadType, write_set::WriteSet as WriteSetType, + EntryFunctionId, EntryFunctionPayload, MoveScriptBytecode, MoveType, ScriptPayload, + TransactionPayload, UserTransactionRequest, WriteSet, + }, + util::timestamp::Timestamp, +}; +use bigdecimal::{BigDecimal, Signed, ToPrimitive, Zero}; +use lazy_static::lazy_static; +use serde::{Deserialize, Deserializer, Serialize}; +use serde_json::Value; +use sha2::Digest; +use std::str::FromStr; +use tiny_keccak::{Hasher, Sha3}; + +// 9999-12-31 23:59:59, this is the max supported by Google BigQuery +pub const MAX_TIMESTAMP_SECS: i64 = 253_402_300_799; +// Max length of entry function id string to ensure that db doesn't explode +pub const MAX_ENTRY_FUNCTION_LENGTH: usize = 1000; + +pub const APTOS_COIN_TYPE_STR: &str = "0x1::aptos_coin::AptosCoin"; + +lazy_static! 
{ + pub static ref APT_METADATA_ADDRESS_RAW: [u8; 32] = { + let mut addr = [0u8; 32]; + addr[31] = 10u8; + addr + }; + pub static ref APT_METADATA_ADDRESS_HEX: String = + format!("0x{}", hex::encode(*APT_METADATA_ADDRESS_RAW)); +} +// Supporting structs to get clean payload without escaped strings +#[derive(Debug, Deserialize, Serialize)] +pub struct EntryFunctionPayloadClean { + pub function: Option, + pub type_arguments: Vec, + pub arguments: Vec, +} + +#[derive(Debug, Deserialize, Serialize)] +pub struct ScriptPayloadClean { + pub code: Option, + pub type_arguments: Vec, + pub arguments: Vec, +} + +#[derive(Debug, Deserialize, Serialize)] +pub struct ScriptWriteSetClean { + pub execute_as: String, + pub script: ScriptPayloadClean, +} + +#[derive(Debug, Deserialize, Serialize)] +pub struct MultisigPayloadClean { + pub multisig_address: String, + pub transaction_payload: Option, +} + +/// Standardizes all addresses and table handles to be length 66 (0x-64 length hash) +pub fn standardize_address(handle: &str) -> String { + if let Some(handle) = handle.strip_prefix("0x") { + format!("0x{:0>64}", handle) + } else { + format!("0x{:0>64}", handle) + } +} + +pub fn hash_str(val: &str) -> String { + hex::encode(sha2::Sha256::digest(val.as_bytes())) +} + +pub fn sha3_256(buffer: &[u8]) -> [u8; 32] { + let mut output = [0; 32]; + let mut sha3 = Sha3::v256(); + sha3.update(buffer); + sha3.finalize(&mut output); + output +} + +pub fn truncate_str(val: &str, max_chars: usize) -> String { + let mut trunc = val.to_string(); + trunc.truncate(max_chars); + trunc +} + +pub fn u64_to_bigdecimal(val: u64) -> BigDecimal { + BigDecimal::from(val) +} + +pub fn bigdecimal_to_u64(val: &BigDecimal) -> u64 { + val.to_u64().expect("Unable to convert big decimal to u64") +} + +pub fn ensure_not_negative(val: BigDecimal) -> BigDecimal { + if val.is_negative() { + return BigDecimal::zero(); + } + val +} + +pub fn get_entry_function_from_user_request( + user_request: &UserTransactionRequest, +) -> Option { + let entry_function_id_str: Option = + match &user_request.payload.as_ref().unwrap().payload { + Some(PayloadType::EntryFunctionPayload(payload)) => { + Some(payload.entry_function_id_str.clone()) + }, + Some(PayloadType::MultisigPayload(payload)) => { + if let Some(payload) = payload.transaction_payload.as_ref() { + match payload.payload.as_ref().unwrap() { + MultisigPayloadType::EntryFunctionPayload(payload) => { + Some(payload.entry_function_id_str.clone()) + }, + } + } else { + None + } + }, + _ => return None, + }; + + entry_function_id_str.map(|s| truncate_str(&s, MAX_ENTRY_FUNCTION_LENGTH)) +} + +pub fn get_payload_type(payload: &TransactionPayload) -> String { + payload.r#type().as_str_name().to_string() +} + +/// Part of the json comes escaped from the protobuf so we need to unescape in a safe way +/// This function converts the string into json recursively and lets the diesel ORM handles +/// the escaping. 
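To make the unescaping behaviour concrete, here is a small self-contained sketch of the idea behind the "clean" payload structs, using plain serde_json rather than the processor's own types:

    use serde_json::{json, Value};

    fn unescape_example() {
        // Entry-function arguments arrive from the protobuf as JSON that has
        // already been serialized to strings, e.g. "\"0xa550c18\"" or "[\"1\",\"2\"]".
        let raw_args = vec![r#""0xa550c18""#.to_string(), r#"["1","2"]"#.to_string()];

        // Re-parsing them yields structured values, so they land in the DB as
        // jsonb instead of doubly-escaped strings.
        let parsed: Vec<Value> = raw_args
            .iter()
            .map(|arg| serde_json::from_str(arg).expect("argument should be valid JSON"))
            .collect();

        assert_eq!(parsed[0], json!("0xa550c18"));
        assert_eq!(parsed[1], json!(["1", "2"]));
    }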
+pub fn get_clean_payload(payload: &TransactionPayload, version: i64) -> Option { + if payload.payload.as_ref().is_none() { + PROCESSOR_UNKNOWN_TYPE_COUNT + .with_label_values(&["TransactionPayload"]) + .inc(); + tracing::warn!( + transaction_version = version, + "Transaction payload doesn't exist", + ); + return None; + } + match payload.payload.as_ref().unwrap() { + PayloadType::EntryFunctionPayload(inner) => { + let clean = get_clean_entry_function_payload(inner, version); + Some(serde_json::to_value(clean).unwrap_or_else(|_| { + tracing::error!(version = version, "Unable to serialize payload into value"); + panic!() + })) + }, + PayloadType::ScriptPayload(inner) => { + let clean = get_clean_script_payload(inner, version); + Some(serde_json::to_value(clean).unwrap_or_else(|_| { + tracing::error!(version = version, "Unable to serialize payload into value"); + panic!() + })) + }, + PayloadType::WriteSetPayload(inner) => { + if let Some(writeset) = inner.write_set.as_ref() { + get_clean_writeset(writeset, version) + } else { + None + } + }, + PayloadType::MultisigPayload(inner) => { + let clean = if let Some(payload) = inner.transaction_payload.as_ref() { + let payload_clean = match payload.payload.as_ref().unwrap() { + MultisigPayloadType::EntryFunctionPayload(payload) => { + let clean = get_clean_entry_function_payload(payload, version); + Some(serde_json::to_value(clean).unwrap_or_else(|_| { + tracing::error!( + version = version, + "Unable to serialize payload into value" + ); + panic!() + })) + }, + }; + MultisigPayloadClean { + multisig_address: inner.multisig_address.clone(), + transaction_payload: payload_clean, + } + } else { + MultisigPayloadClean { + multisig_address: inner.multisig_address.clone(), + transaction_payload: None, + } + }; + Some(serde_json::to_value(clean).unwrap_or_else(|_| { + tracing::error!(version = version, "Unable to serialize payload into value"); + panic!() + })) + }, + } +} + +/// Part of the json comes escaped from the protobuf so we need to unescape in a safe way +/// Note that DirectWriteSet is just events + writeset which is already represented separately +pub fn get_clean_writeset(writeset: &WriteSet, version: i64) -> Option { + match writeset.write_set.as_ref().unwrap() { + WriteSetType::ScriptWriteSet(inner) => { + let payload = inner.script.as_ref().unwrap(); + Some( + serde_json::to_value(get_clean_script_payload(payload, version)).unwrap_or_else( + |_| { + tracing::error!( + version = version, + "Unable to serialize payload into value" + ); + panic!() + }, + ), + ) + }, + WriteSetType::DirectWriteSet(_) => None, + } +} + +/// Part of the json comes escaped from the protobuf so we need to unescape in a safe way +fn get_clean_entry_function_payload( + payload: &EntryFunctionPayload, + version: i64, +) -> EntryFunctionPayloadClean { + EntryFunctionPayloadClean { + function: payload.function.clone(), + type_arguments: payload.type_arguments.clone(), + arguments: payload + .arguments + .iter() + .map(|arg| { + serde_json::from_str(arg).unwrap_or_else(|_| { + tracing::error!(version = version, "Unable to serialize payload into value"); + panic!() + }) + }) + .collect(), + } +} + +/// Part of the json comes escaped from the protobuf so we need to unescape in a safe way +fn get_clean_script_payload(payload: &ScriptPayload, version: i64) -> ScriptPayloadClean { + ScriptPayloadClean { + code: payload.code.clone(), + type_arguments: payload.type_arguments.clone(), + arguments: payload + .arguments + .iter() + .map(|arg| { + 
serde_json::from_str(arg).unwrap_or_else(|_| { + tracing::error!(version = version, "Unable to serialize payload into value"); + panic!() + }) + }) + .collect(), + } +} + +pub fn parse_timestamp(ts: &Timestamp, version: i64) -> chrono::NaiveDateTime { + let final_ts = if ts.seconds >= MAX_TIMESTAMP_SECS { + Timestamp { + seconds: MAX_TIMESTAMP_SECS, + nanos: 0, + } + } else { + ts.clone() + }; + #[allow(deprecated)] + chrono::NaiveDateTime::from_timestamp_opt(final_ts.seconds, final_ts.nanos as u32) + .unwrap_or_else(|| panic!("Could not parse timestamp {:?} for version {}", ts, version)) +} + +pub fn parse_timestamp_secs(ts: u64, version: i64) -> chrono::NaiveDateTime { + #[allow(deprecated)] + chrono::NaiveDateTime::from_timestamp_opt( + std::cmp::min(ts, MAX_TIMESTAMP_SECS as u64) as i64, + 0, + ) + .unwrap_or_else(|| panic!("Could not parse timestamp {:?} for version {}", ts, version)) +} + +pub fn remove_null_bytes serde::Deserialize<'de>>(input: &T) -> T { + let mut txn_json = serde_json::to_value(input).unwrap(); + recurse_remove_null_bytes_from_json(&mut txn_json); + serde_json::from_value::(txn_json).unwrap() +} + +fn recurse_remove_null_bytes_from_json(sub_json: &mut Value) { + match sub_json { + Value::Array(array) => { + for item in array { + recurse_remove_null_bytes_from_json(item); + } + }, + Value::Object(object) => { + for (_key, value) in object { + recurse_remove_null_bytes_from_json(value); + } + }, + Value::String(str) => { + if !str.is_empty() { + let replacement = string_null_byte_replacement(str); + *str = replacement; + } + }, + _ => {}, + } +} + +fn string_null_byte_replacement(value: &str) -> String { + value.replace('\u{0000}', "").replace("\\u0000", "") +} + +/// convert the bcs encoded inner value of property_map to its original value in string format +pub fn deserialize_property_map_from_bcs_hexstring<'de, D>( + deserializer: D, +) -> core::result::Result +where + D: Deserializer<'de>, +{ + let s = serde_json::Value::deserialize(deserializer)?; + // iterate the json string to convert key-value pair + // assume the format of {“map”: {“data”: [{“key”: “Yuri”, “value”: {“type”: “String”, “value”: “0x42656e”}}, {“key”: “Tarded”, “value”: {“type”: “String”, “value”: “0x446f766572"}}]}} + // if successfully parsing we return the decoded property_map string otherwise return the original string + Ok(convert_bcs_propertymap(s.clone()).unwrap_or(s)) +} + +/// convert the bcs encoded inner value of property_map to its original value in string format +pub fn deserialize_token_object_property_map_from_bcs_hexstring<'de, D>( + deserializer: D, +) -> core::result::Result +where + D: Deserializer<'de>, +{ + let s = serde_json::Value::deserialize(deserializer)?; + // iterate the json string to convert key-value pair + Ok(convert_bcs_token_object_propertymap(s.clone()).unwrap_or(s)) +} + +pub fn deserialize_string_from_hexstring<'de, D>( + deserializer: D, +) -> core::result::Result +where + D: Deserializer<'de>, +{ + let s = ::deserialize(deserializer)?; + Ok(String::from_utf8(hex_to_raw_bytes(&s).unwrap()).unwrap_or(s)) +} + +/// Convert the bcs serialized vector to its original string format +pub fn convert_bcs_hex(typ: String, value: String) -> Option { + let decoded = hex::decode(value.strip_prefix("0x").unwrap_or(&*value)).ok()?; + + match typ.as_str() { + "0x1::string::String" => bcs::from_bytes::(decoded.as_slice()), + "u8" => bcs::from_bytes::(decoded.as_slice()).map(|e| e.to_string()), + "u64" => bcs::from_bytes::(decoded.as_slice()).map(|e| e.to_string()), + "u128" 
=> bcs::from_bytes::(decoded.as_slice()).map(|e| e.to_string()), + "bool" => bcs::from_bytes::(decoded.as_slice()).map(|e| e.to_string()), + "address" => bcs::from_bytes::(decoded.as_slice()).map(|e| format!("0x{}", e)), + _ => Ok(value), + } + .ok() +} + +/// Convert the bcs serialized vector to its original string format for token v2 property map. +pub fn convert_bcs_hex_new(typ: u8, value: String) -> Option { + let decoded = hex::decode(value.strip_prefix("0x").unwrap_or(&*value)).ok()?; + + match typ { + 0 /* bool */ => bcs::from_bytes::(decoded.as_slice()).map(|e| e.to_string()), + 1 /* u8 */ => bcs::from_bytes::(decoded.as_slice()).map(|e| e.to_string()), + 2 /* u16 */ => bcs::from_bytes::(decoded.as_slice()).map(|e| e.to_string()), + 3 /* u32 */ => bcs::from_bytes::(decoded.as_slice()).map(|e| e.to_string()), + 4 /* u64 */ => bcs::from_bytes::(decoded.as_slice()).map(|e| e.to_string()), + 5 /* u128 */ => bcs::from_bytes::(decoded.as_slice()).map(|e| e.to_string()), + 6 /* u256 */ => bcs::from_bytes::(decoded.as_slice()).map(|e| e.to_string()), + 7 /* address */ => bcs::from_bytes::(decoded.as_slice()).map(|e| format!("0x{}", e)), + 8 /* byte_vector */ => bcs::from_bytes::>(decoded.as_slice()).map(|e| format!("0x{}", hex::encode(e))), + 9 /* string */ => bcs::from_bytes::(decoded.as_slice()), + _ => Ok(value), + } + .ok() +} + +/// Convert the json serialized PropertyMap's inner BCS fields to their original value in string format +pub fn convert_bcs_propertymap(s: Value) -> Option { + match PropertyMap::from_bcs_encode_str(s) { + Some(e) => match serde_json::to_value(&e) { + Ok(val) => Some(val), + Err(_) => None, + }, + None => None, + } +} + +pub fn convert_bcs_token_object_propertymap(s: Value) -> Option { + match TokenObjectPropertyMap::from_bcs_encode_str(s) { + Some(e) => match serde_json::to_value(&e) { + Ok(val) => Some(val), + Err(_) => None, + }, + None => None, + } +} + +/// Convert from hex string to raw byte string +pub fn hex_to_raw_bytes(val: &str) -> anyhow::Result> { + Ok(hex::decode(val.strip_prefix("0x").unwrap_or(val))?) +} + +/// Deserialize from string to type T +pub fn deserialize_from_string<'de, D, T>(deserializer: D) -> Result +where + D: Deserializer<'de>, + T: FromStr, + ::Err: std::fmt::Display, +{ + use serde::de::Error; + + let s = ::deserialize(deserializer)?; + s.parse::().map_err(D::Error::custom) +} + +/// Convert the protobuf Timestamp to epcoh time in seconds. +pub fn time_diff_since_pb_timestamp_in_secs(timestamp: &Timestamp) -> f64 { + let current_timestamp = std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .expect("SystemTime before UNIX EPOCH!") + .as_secs_f64(); + let transaction_time = timestamp.seconds as f64 + timestamp.nanos as f64 * 1e-9; + current_timestamp - transaction_time +} + +/// Convert the protobuf timestamp to ISO format +pub fn timestamp_to_iso(timestamp: &Timestamp) -> String { + let dt = parse_timestamp(timestamp, 0); + dt.format("%Y-%m-%dT%H:%M:%S%.9fZ").to_string() +} + +/// Convert the protobuf timestamp to unixtime +pub fn timestamp_to_unixtime(timestamp: &Timestamp) -> f64 { + timestamp.seconds as f64 + timestamp.nanos as f64 * 1e-9 +} + +/// Get name from unwrapped move type +/// E.g. 
0x1::domain::Name will return Name +pub fn get_name_from_unnested_move_type(move_type: &str) -> &str { + let t: Vec<&str> = move_type.split("::").collect(); + t.last().unwrap() +} + +/* COMMON STRUCTS */ +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct Aggregator { + #[serde(deserialize_with = "deserialize_from_string")] + pub value: BigDecimal, + #[serde(deserialize_with = "deserialize_from_string")] + pub max_value: BigDecimal, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct AggregatorSnapshot { + #[serde(deserialize_with = "deserialize_from_string")] + pub value: BigDecimal, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct DerivedStringSnapshot { + pub value: String, +} + +#[cfg(test)] +mod tests { + use super::*; + use chrono::Datelike; + use serde::Serialize; + + #[derive(Serialize, Deserialize, Debug)] + struct TypeInfoMock { + #[serde(deserialize_with = "deserialize_string_from_hexstring")] + pub module_name: String, + #[serde(deserialize_with = "deserialize_string_from_hexstring")] + pub struct_name: String, + } + + #[derive(Serialize, Deserialize, Debug)] + struct TokenDataMock { + #[serde(deserialize_with = "deserialize_property_map_from_bcs_hexstring")] + pub default_properties: serde_json::Value, + } + + #[derive(Serialize, Deserialize, Debug)] + struct TokenObjectDataMock { + #[serde(deserialize_with = "deserialize_token_object_property_map_from_bcs_hexstring")] + pub default_properties: serde_json::Value, + } + + #[test] + fn test_parse_timestamp() { + let ts = parse_timestamp( + &Timestamp { + seconds: 1649560602, + nanos: 0, + }, + 1, + ); + assert_eq!(ts.timestamp(), 1649560602); + assert_eq!(ts.year(), 2022); + + let ts2 = parse_timestamp_secs(600000000000000, 2); + assert_eq!(ts2.year(), 9999); + + let ts3 = parse_timestamp_secs(1659386386, 2); + assert_eq!(ts3.timestamp(), 1659386386); + } + + #[test] + fn test_deserialize_string_from_bcs() { + let test_struct = TypeInfoMock { + module_name: String::from("0x6170746f735f636f696e"), + struct_name: String::from("0x4170746f73436f696e"), + }; + let val = serde_json::to_string(&test_struct).unwrap(); + let d: TypeInfoMock = serde_json::from_str(val.as_str()).unwrap(); + assert_eq!(d.module_name.as_str(), "aptos_coin"); + assert_eq!(d.struct_name.as_str(), "AptosCoin"); + } + + #[test] + fn test_deserialize_property_map() { + let test_property_json = r#" + { + "map":{ + "data":[ + { + "key":"type", + "value":{ + "type":"0x1::string::String", + "value":"0x06646f6d61696e" + } + }, + { + "key":"creation_time_sec", + "value":{ + "type":"u64", + "value":"0x140f4f6300000000" + } + }, + { + "key":"expiration_time_sec", + "value":{ + "type":"u64", + "value":"0x9442306500000000" + } + } + ] + } + }"#; + let test_property_json: serde_json::Value = + serde_json::from_str(test_property_json).unwrap(); + let test_struct = TokenDataMock { + default_properties: test_property_json, + }; + let val = serde_json::to_string(&test_struct).unwrap(); + let d: TokenDataMock = serde_json::from_str(val.as_str()).unwrap(); + assert_eq!(d.default_properties["type"], "domain"); + assert_eq!(d.default_properties["creation_time_sec"], "1666125588"); + assert_eq!(d.default_properties["expiration_time_sec"], "1697661588"); + } + + #[test] + fn test_empty_property_map() { + let test_property_json = r#"{"map": {"data": []}}"#; + let test_property_json: serde_json::Value = + serde_json::from_str(test_property_json).unwrap(); + let test_struct = TokenDataMock { + default_properties: test_property_json, + }; + let val 
= serde_json::to_string(&test_struct).unwrap(); + let d: TokenDataMock = serde_json::from_str(val.as_str()).unwrap(); + assert_eq!(d.default_properties, Value::Object(serde_json::Map::new())); + } + + #[test] + fn test_deserialize_token_object_property_map() { + let test_property_json = r#" + { + "data": [{ + "key": "Rank", + "value": { + "type": 9, + "value": "0x0642726f6e7a65" + } + }, + { + "key": "address_property", + "value": { + "type": 7, + "value": "0x2b4d540735a4e128fda896f988415910a45cab41c9ddd802b32dd16e8f9ca3cd" + } + }, + { + "key": "bytes_property", + "value": { + "type": 8, + "value": "0x0401020304" + } + }, + { + "key": "u64_property", + "value": { + "type": 4, + "value": "0x0000000000000001" + } + } + ] + } + "#; + let test_property_json: serde_json::Value = + serde_json::from_str(test_property_json).unwrap(); + let test_struct = TokenObjectDataMock { + default_properties: test_property_json, + }; + let val = serde_json::to_string(&test_struct).unwrap(); + let d: TokenObjectDataMock = serde_json::from_str(val.as_str()).unwrap(); + assert_eq!(d.default_properties["Rank"], "Bronze"); + assert_eq!( + d.default_properties["address_property"], + "0x2b4d540735a4e128fda896f988415910a45cab41c9ddd802b32dd16e8f9ca3cd" + ); + assert_eq!(d.default_properties["bytes_property"], "0x01020304"); + assert_eq!(d.default_properties["u64_property"], "72057594037927936"); + } + + #[test] + fn test_empty_token_object_property_map() { + let test_property_json = r#"{"data": []}"#; + let test_property_json: serde_json::Value = + serde_json::from_str(test_property_json).unwrap(); + let test_struct = TokenObjectDataMock { + default_properties: test_property_json, + }; + let val = serde_json::to_string(&test_struct).unwrap(); + let d: TokenObjectDataMock = serde_json::from_str(val.as_str()).unwrap(); + assert_eq!(d.default_properties, Value::Object(serde_json::Map::new())); + } +} diff --git a/rust/processor/src/worker.rs b/rust/processor/src/worker.rs new file mode 100644 index 000000000..ac91e69f4 --- /dev/null +++ b/rust/processor/src/worker.rs @@ -0,0 +1,894 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + config::IndexerGrpcHttp2Config, + db::common::models::{ledger_info::LedgerInfo, processor_status::ProcessorStatusQuery}, + gap_detectors::{create_gap_detector_status_tracker_loop, ProcessingResult}, + grpc_stream::TransactionsPBResponse, + processors::{ + account_transactions_processor::AccountTransactionsProcessor, ans_processor::AnsProcessor, + coin_processor::CoinProcessor, default_processor::DefaultProcessor, + events_processor::EventsProcessor, fungible_asset_processor::FungibleAssetProcessor, + monitoring_processor::MonitoringProcessor, nft_metadata_processor::NftMetadataProcessor, + objects_processor::ObjectsProcessor, parquet_default_processor::DefaultParquetProcessor, + stake_processor::StakeProcessor, token_processor::TokenProcessor, + token_v2_processor::TokenV2Processor, + transaction_metadata_processor::TransactionMetadataProcessor, + user_transaction_processor::UserTransactionProcessor, DefaultProcessingResult, Processor, + ProcessorConfig, ProcessorTrait, + }, + schema::ledger_infos, + transaction_filter::TransactionFilter, + utils::{ + counters::{ + ProcessorStep, GRPC_LATENCY_BY_PROCESSOR_IN_SECS, LATEST_PROCESSED_VERSION, + NUM_TRANSACTIONS_PROCESSED_COUNT, PB_CHANNEL_FETCH_WAIT_TIME_SECS, + PROCESSED_BYTES_COUNT, PROCESSOR_DATA_PROCESSED_LATENCY_IN_SECS, + PROCESSOR_DATA_RECEIVED_LATENCY_IN_SECS, PROCESSOR_ERRORS_COUNT, + 
PROCESSOR_INVOCATIONS_COUNT, PROCESSOR_SUCCESSES_COUNT, + SINGLE_BATCH_DB_INSERTION_TIME_IN_SECS, SINGLE_BATCH_PARSING_TIME_IN_SECS, + SINGLE_BATCH_PROCESSING_TIME_IN_SECS, TRANSACTION_UNIX_TIMESTAMP, + }, + database::{ + execute_with_better_error_conn, new_db_pool, run_pending_migrations, ArcDbPool, + }, + util::{time_diff_since_pb_timestamp_in_secs, timestamp_to_iso, timestamp_to_unixtime}, + }, +}; +use ahash::AHashMap; +use anyhow::{Context, Result}; +use aptos_moving_average::MovingAverage; +use bitflags::bitflags; +use kanal::AsyncSender; +use std::collections::HashSet; +use tokio::task::JoinHandle; +use tracing::{debug, error, info}; +use url::Url; + +// this is how large the fetch queue should be. Each bucket should have a max of 80MB or so, so a batch +// of 50 means that we could potentially have at least 4.8GB of data in memory at any given time and that we should provision +// machines accordingly. + +// TODO: Make this configurable +pub const BUFFER_SIZE: usize = 300; +pub const PROCESSOR_SERVICE_TYPE: &str = "processor"; + +bitflags! { + #[derive(Debug, Clone, Copy)] + pub struct TableFlags: u64 { + const TRANSACTIONS = 1; + const WRITE_SET_CHANGES = 2; + const MOVE_RESOURCES = 4; + } +} + +pub struct Worker { + pub db_pool: ArcDbPool, + pub processor_config: ProcessorConfig, + pub postgres_connection_string: String, + pub indexer_grpc_data_service_address: Url, + pub grpc_http2_config: IndexerGrpcHttp2Config, + pub auth_token: String, + pub starting_version: Option, + pub ending_version: Option, + pub number_concurrent_processing_tasks: usize, + pub gap_detection_batch_size: u64, + pub parquet_gap_detection_batch_size: u64, + pub grpc_chain_id: Option, + pub pb_channel_txn_chunk_size: usize, + pub per_table_chunk_sizes: AHashMap, + pub enable_verbose_logging: Option, + pub transaction_filter: TransactionFilter, + pub grpc_response_item_timeout_in_secs: u64, + pub deprecated_tables: TableFlags, + pub is_parquet_processor: Option, +} + +impl Worker { + #[allow(clippy::too_many_arguments)] + pub async fn new( + processor_config: ProcessorConfig, + postgres_connection_string: String, + indexer_grpc_data_service_address: Url, + grpc_http2_config: IndexerGrpcHttp2Config, + auth_token: String, + starting_version: Option, + ending_version: Option, + number_concurrent_processing_tasks: Option, + db_pool_size: Option, + gap_detection_batch_size: u64, + parquet_gap_detection_batch_size: u64, + // The number of transactions per protobuf batch + pb_channel_txn_chunk_size: usize, + per_table_chunk_sizes: AHashMap, + enable_verbose_logging: Option, + transaction_filter: TransactionFilter, + grpc_response_item_timeout_in_secs: u64, + deprecated_tables: HashSet, + is_parquet_processor: Option, + ) -> Result { + let processor_name = processor_config.name(); + info!(processor_name = processor_name, "[Parser] Kicking off"); + + info!( + processor_name = processor_name, + service_type = PROCESSOR_SERVICE_TYPE, + "[Parser] Creating connection pool" + ); + let conn_pool = new_db_pool(&postgres_connection_string, db_pool_size) + .await + .context("Failed to create connection pool")?; + info!( + processor_name = processor_name, + service_type = PROCESSOR_SERVICE_TYPE, + "[Parser] Finish creating the connection pool" + ); + let number_concurrent_processing_tasks = number_concurrent_processing_tasks.unwrap_or(10); + + let mut deprecated_tables_flags = TableFlags::empty(); + for table in deprecated_tables.iter() { + if let Some(flags) = TableFlags::from_name(table) { + deprecated_tables_flags |= flags; 
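The name-to-flag conversion above relies on the bitflags crate's `from_name` lookup; the standalone sketch below mirrors that pattern (the `Tables` type and its members are illustrative only):

    use bitflags::bitflags;

    bitflags! {
        #[derive(Debug, Clone, Copy)]
        struct Tables: u64 {
            const TRANSACTIONS = 1;
            const MOVE_RESOURCES = 4;
        }
    }

    fn flags_example() {
        // Table names from config are folded into a single bitset...
        let deprecated = ["TRANSACTIONS", "MOVE_RESOURCES"];
        let mut flags = Tables::empty();
        for name in deprecated {
            if let Some(f) = Tables::from_name(name) {
                flags |= f;
            }
        }
        // ...which processors can later test cheaply before writing a table.
        assert!(flags.contains(Tables::TRANSACTIONS));
    }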
+ } + } + + Ok(Self { + db_pool: conn_pool, + processor_config, + postgres_connection_string, + indexer_grpc_data_service_address, + grpc_http2_config, + starting_version, + ending_version, + auth_token, + number_concurrent_processing_tasks, + gap_detection_batch_size, + parquet_gap_detection_batch_size, + grpc_chain_id: None, + pb_channel_txn_chunk_size, + per_table_chunk_sizes, + enable_verbose_logging, + transaction_filter, + grpc_response_item_timeout_in_secs, + deprecated_tables: deprecated_tables_flags, + is_parquet_processor, + }) + } + + /// This is the main logic of the processor. We will do a few large parts: + /// 1. Connect to GRPC and handling all the stuff before starting the stream such as diesel migration + /// 2. Start a thread specifically to fetch data from GRPC. We will keep a buffer of X batches of transactions + /// 3. Start a loop to consume from the buffer. We will have Y threads to process the transactions in parallel. (Y should be less than X for obvious reasons) + /// * Note that the batches will be sequential so we won't have problems with gaps + /// 4. We will keep track of the last processed version and monitoring things like TPS + pub async fn run(&mut self) { + let processor_name = self.processor_config.name(); + info!( + processor_name = processor_name, + service_type = PROCESSOR_SERVICE_TYPE, + "[Parser] Running migrations" + ); + let migration_time = std::time::Instant::now(); + self.run_migrations().await; + info!( + processor_name = processor_name, + service_type = PROCESSOR_SERVICE_TYPE, + duration_in_secs = migration_time.elapsed().as_secs_f64(), + "[Parser] Finished migrations" + ); + + let starting_version_from_db = self + .get_start_version() + .await + .expect("[Parser] Database error when getting starting version") + .unwrap_or_else(|| { + info!( + processor_name = processor_name, + service_type = PROCESSOR_SERVICE_TYPE, + "[Parser] No starting version from db so starting from version 0" + ); + 0 + }); + + let starting_version = self.starting_version.unwrap_or(starting_version_from_db); + + info!( + processor_name = processor_name, + service_type = PROCESSOR_SERVICE_TYPE, + stream_address = self.indexer_grpc_data_service_address.to_string(), + final_start_version = starting_version, + start_version_from_config = self.starting_version, + start_version_from_db = starting_version_from_db, + "[Parser] Building processor", + ); + + let concurrent_tasks = self.number_concurrent_processing_tasks; + + // get the chain id + let chain_id = crate::grpc_stream::get_chain_id( + self.indexer_grpc_data_service_address.clone(), + self.grpc_http2_config.grpc_http2_ping_interval_in_secs(), + self.grpc_http2_config.grpc_http2_ping_timeout_in_secs(), + self.grpc_http2_config.grpc_connection_timeout_secs(), + self.auth_token.clone(), + processor_name.to_string(), + ) + .await; + self.check_or_update_chain_id(chain_id as i64) + .await + .unwrap(); + + self.grpc_chain_id = Some(chain_id); + + let ending_version = self.ending_version; + let indexer_grpc_data_service_address = self.indexer_grpc_data_service_address.clone(); + let indexer_grpc_http2_ping_interval = + self.grpc_http2_config.grpc_http2_ping_interval_in_secs(); + let indexer_grpc_http2_ping_timeout = + self.grpc_http2_config.grpc_http2_ping_timeout_in_secs(); + let indexer_grpc_reconnection_timeout_secs = + self.grpc_http2_config.grpc_connection_timeout_secs(); + let pb_channel_txn_chunk_size = self.pb_channel_txn_chunk_size; + + // Create a transaction fetcher thread that will continuously fetch 
transactions from the GRPC stream + // and write into a channel + // TODO: change channel size based on number_concurrent_processing_tasks + let (tx, receiver) = kanal::bounded_async::(BUFFER_SIZE); + let request_ending_version = self.ending_version; + let auth_token = self.auth_token.clone(); + let transaction_filter = self.transaction_filter.clone(); + let grpc_response_item_timeout = + std::time::Duration::from_secs(self.grpc_response_item_timeout_in_secs); + let fetcher_task = tokio::spawn(async move { + info!( + processor_name = processor_name, + service_type = PROCESSOR_SERVICE_TYPE, + end_version = ending_version, + start_version = starting_version, + "[Parser] Starting fetcher thread" + ); + + crate::grpc_stream::create_fetcher_loop( + tx.clone(), + indexer_grpc_data_service_address.clone(), + indexer_grpc_http2_ping_interval, + indexer_grpc_http2_ping_timeout, + indexer_grpc_reconnection_timeout_secs, + grpc_response_item_timeout, + starting_version, + request_ending_version, + auth_token.clone(), + processor_name.to_string(), + transaction_filter, + pb_channel_txn_chunk_size, + ) + .await + }); + + // Create a gap detector task that will panic if there is a gap in the processing + let (gap_detector_sender, gap_detector_receiver) = + kanal::bounded_async::(BUFFER_SIZE); + let ( + processor, + gap_detection_batch_size, + default_gap_detector_sender, + parquet_gap_detector_sender, + ) = if self.is_parquet_processor.unwrap_or(false) { + let processor = build_processor( + &self.processor_config, + self.per_table_chunk_sizes.clone(), + self.deprecated_tables, + self.db_pool.clone(), + Some(gap_detector_sender.clone()), + ); + let gap_detection_batch_size: u64 = self.parquet_gap_detection_batch_size; + + ( + processor, + gap_detection_batch_size, + None, + Some(gap_detector_sender), + ) + } else { + let processor = build_processor( + &self.processor_config, + self.per_table_chunk_sizes.clone(), + self.deprecated_tables, + self.db_pool.clone(), + None, + ); + let gap_detection_batch_size = self.gap_detection_batch_size; + + ( + processor, + gap_detection_batch_size, + Some(gap_detector_sender), + None, + ) + }; + + tokio::spawn(async move { + create_gap_detector_status_tracker_loop( + gap_detector_receiver, + processor, + starting_version, + gap_detection_batch_size, + ) + .await; + }); + + // This is the consumer side of the channel. These are the major states: + // 1. We're backfilling so we should expect many concurrent threads to process transactions + // 2. We're caught up so we should expect a single thread to process transactions + // 3. We have received either an empty batch or a batch with a gap. We should panic. + // 4. We have not received anything in X seconds, we should panic. + // 5. If it's the wrong chain, panic. 
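The fetcher/consumer split described in the comments above boils down to one producer task and several consumers sharing a bounded MPMC channel; a simplified, self-contained version of that shape (plain u64 batches stand in for TransactionsPBResponse, and a running Tokio runtime is assumed):

    async fn pipeline_sketch() {
        let (tx, rx) = kanal::bounded_async::<u64>(300);

        // Fetcher: pushes batches until the stream ends, then drops the sender,
        // which closes the channel.
        let fetcher = tokio::spawn(async move {
            for batch in 0..10u64 {
                tx.send(batch).await.expect("receiver dropped");
            }
        });

        // Consumers: drain the channel until it is closed and empty.
        let mut consumers = Vec::new();
        for _ in 0..3 {
            let rx = rx.clone();
            consumers.push(tokio::spawn(async move {
                while let Ok(batch) = rx.recv().await {
                    // process_transactions(batch) would run here.
                    let _ = batch;
                }
            }));
        }

        fetcher.await.unwrap();
        for c in consumers {
            c.await.unwrap();
        }
    }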
+ + info!( + processor_name = processor_name, + service_type = PROCESSOR_SERVICE_TYPE, + stream_address = self.indexer_grpc_data_service_address.as_str(), + concurrent_tasks, + "[Parser] Spawning concurrent parallel processor tasks", + ); + + let mut processor_tasks = vec![fetcher_task]; + for task_index in 0..concurrent_tasks { + let join_handle: JoinHandle<()> = self + .launch_processor_task( + task_index, + receiver.clone(), + default_gap_detector_sender.clone(), + parquet_gap_detector_sender.clone(), + ) + .await; + processor_tasks.push(join_handle); + } + + info!( + processor_name = processor_name, + service_type = PROCESSOR_SERVICE_TYPE, + stream_address = self.indexer_grpc_data_service_address.as_str(), + concurrent_tasks, + "[Parser] Processor tasks spawned", + ); + + // Await the processor tasks: this is forever + futures::future::try_join_all(processor_tasks) + .await + .expect("[Processor] Processor tasks have died"); + } + + async fn launch_processor_task( + &self, + task_index: usize, + receiver: kanal::AsyncReceiver, + default_gap_detector_sender: Option>, + parquet_gap_detector_sender: Option>, + ) -> JoinHandle<()> { + let processor_name = self.processor_config.name(); + let stream_address = self.indexer_grpc_data_service_address.to_string(); + let receiver_clone = receiver.clone(); + let auth_token = self.auth_token.clone(); + + // Build the processor based on the config. + let processor = build_processor( + &self.processor_config, + self.per_table_chunk_sizes.clone(), + self.deprecated_tables, + self.db_pool.clone(), + parquet_gap_detector_sender.clone(), + ); + + let concurrent_tasks = self.number_concurrent_processing_tasks; + + let chain_id = self + .grpc_chain_id + .expect("GRPC chain ID has not been fetched yet!"); + + tokio::spawn(async move { + let task_index_str = task_index.to_string(); + let step = ProcessorStep::ProcessedBatch.get_step(); + let label = ProcessorStep::ProcessedBatch.get_label(); + let mut ma = MovingAverage::new(3000); + + loop { + let txn_channel_fetch_latency = std::time::Instant::now(); + match fetch_transactions( + processor_name, + &stream_address, + receiver_clone.clone(), + task_index, + ) + .await + { + // Fetched transactions from channel + Ok(transactions_pb) => { + let size_in_bytes = transactions_pb.size_in_bytes as f64; + let first_txn_version = transactions_pb + .transactions + .first() + .map(|t| t.version) + .unwrap_or_default(); + let batch_first_txn_version = transactions_pb.start_version; + let last_txn_version = transactions_pb + .transactions + .last() + .map(|t| t.version) + .unwrap_or_default(); + let batch_last_txn_version = transactions_pb.end_version; + let start_txn_timestamp = transactions_pb.start_txn_timestamp.clone(); + let end_txn_timestamp = transactions_pb.end_txn_timestamp.clone(); + + let start_txn_timestamp_unix = start_txn_timestamp + .as_ref() + .map(timestamp_to_unixtime) + .unwrap_or_default(); + let start_txn_timestamp_iso = start_txn_timestamp + .as_ref() + .map(timestamp_to_iso) + .unwrap_or_default(); + let end_txn_timestamp_iso = end_txn_timestamp + .as_ref() + .map(timestamp_to_iso) + .unwrap_or_default(); + + let txn_channel_fetch_latency_sec = + txn_channel_fetch_latency.elapsed().as_secs_f64(); + + debug!( + processor_name = processor_name, + service_type = PROCESSOR_SERVICE_TYPE, + start_version = batch_first_txn_version, + end_version = batch_last_txn_version, + num_of_transactions = + (batch_last_txn_version - batch_first_txn_version) as i64 + 1, + size_in_bytes, + task_index, + 
duration_in_secs = txn_channel_fetch_latency_sec, + tps = (batch_last_txn_version as f64 - batch_first_txn_version as f64) + / txn_channel_fetch_latency_sec, + bytes_per_sec = size_in_bytes / txn_channel_fetch_latency_sec, + "[Parser][T#{}] Successfully fetched transactions from channel.", + task_index + ); + + // Ensure chain_id has not changed + if transactions_pb.chain_id != chain_id { + error!( + processor_name = processor_name, + stream_address = stream_address.as_str(), + chain_id = transactions_pb.chain_id, + existing_id = chain_id, + task_index, + "[Parser][T#{}] Stream somehow changed chain id!", + task_index + ); + panic!( + "[Parser][T#{}] Stream somehow changed chain id!", + task_index + ); + } + + let processing_time = std::time::Instant::now(); + + let res = do_processor( + transactions_pb, + &processor, + chain_id, + processor_name, + &auth_token, + false, // enable_verbose_logging + ) + .await; + + let processing_result = match res { + Ok(versions) => { + PROCESSOR_SUCCESSES_COUNT + .with_label_values(&[processor_name]) + .inc(); + versions + }, + Err(e) => { + error!( + processor_name = processor_name, + stream_address = stream_address.as_str(), + error = ?e, + task_index, + "[Parser][T#{}] Error processing transactions", task_index + ); + PROCESSOR_ERRORS_COUNT + .with_label_values(&[processor_name]) + .inc(); + panic!( + "[Parser][T#{}] Error processing '{:}' transactions: {:?}", + task_index, processor_name, e + ); + }, + }; + + match processing_result { + ProcessingResult::DefaultProcessingResult(processing_result) => { + let processing_time = processing_time.elapsed().as_secs_f64(); + + // We've processed things: do some data and metrics + ma.tick_now((last_txn_version - first_txn_version) + 1); + let tps = ma.avg().ceil() as u64; + + let num_processed = (last_txn_version - first_txn_version) + 1; + + debug!( + processor_name = processor_name, + service_type = PROCESSOR_SERVICE_TYPE, + first_txn_version, + batch_first_txn_version, + last_txn_version, + batch_last_txn_version, + start_txn_timestamp_iso, + end_txn_timestamp_iso, + num_of_transactions = num_processed, + concurrent_tasks, + task_index, + size_in_bytes, + processing_duration_in_secs = + processing_result.processing_duration_in_secs, + db_insertion_duration_in_secs = + processing_result.db_insertion_duration_in_secs, + duration_in_secs = processing_time, + tps = tps, + bytes_per_sec = size_in_bytes / processing_time, + step = &step, + "{}", + label, + ); + + // TODO: For these three, do an atomic thing, or ideally move to an async metrics collector! 
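For context on the tps figure logged above: it comes from the aptos-moving-average helper, ticked once per processed batch. A minimal sketch of that calculation (whether the 3000 passed to `new` is interpreted as a time window or a sample count is left to the crate, so treat this purely as an illustration):

    use aptos_moving_average::MovingAverage;

    fn tps_sketch() {
        let mut ma = MovingAverage::new(3000);
        let (first_version, last_version) = (100u64, 149u64);

        // One tick per batch, weighted by how many versions it covered.
        ma.tick_now(last_version - first_version + 1);
        let tps = ma.avg().ceil() as u64;
        let _ = tps;
    }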
+ GRPC_LATENCY_BY_PROCESSOR_IN_SECS + .with_label_values(&[processor_name, &task_index_str]) + .set(time_diff_since_pb_timestamp_in_secs( + end_txn_timestamp.as_ref().unwrap(), + )); + LATEST_PROCESSED_VERSION + .with_label_values(&[ + processor_name, + step, + label, + &task_index_str, + ]) + .set(last_txn_version as i64); + TRANSACTION_UNIX_TIMESTAMP + .with_label_values(&[ + processor_name, + step, + label, + &task_index_str, + ]) + .set(start_txn_timestamp_unix); + + // Single batch metrics + PROCESSED_BYTES_COUNT + .with_label_values(&[ + processor_name, + step, + label, + &task_index_str, + ]) + .inc_by(size_in_bytes as u64); + NUM_TRANSACTIONS_PROCESSED_COUNT + .with_label_values(&[ + processor_name, + step, + label, + &task_index_str, + ]) + .inc_by(num_processed); + + SINGLE_BATCH_PROCESSING_TIME_IN_SECS + .with_label_values(&[processor_name, &task_index_str]) + .set(processing_time); + SINGLE_BATCH_PARSING_TIME_IN_SECS + .with_label_values(&[processor_name, &task_index_str]) + .set(processing_result.processing_duration_in_secs); + SINGLE_BATCH_DB_INSERTION_TIME_IN_SECS + .with_label_values(&[processor_name, &task_index_str]) + .set(processing_result.db_insertion_duration_in_secs); + + default_gap_detector_sender + .as_ref() + .unwrap() + .send(ProcessingResult::DefaultProcessingResult( + processing_result, + )) + .await + .expect("[Parser] Failed to send versions to gap detector"); + }, + ProcessingResult::ParquetProcessingResult(_) => { + debug!("parquet processing result doesn't need to be handled here"); + }, + } + }, + // Could not fetch transactions from channel. This happens when there are + // no more transactions to fetch and the channel is closed. + Err(e) => { + error!( + processor_name = processor_name, + stream_address = stream_address.as_str(), + error = ?e, + task_index, + "[Parser][T#{}] Consumer thread exiting fetching loop", task_index + ); + break; + }, + } + } + }) + } + + // For the normal processor build we just use standard Diesel with the postgres + // feature enabled (which uses libpq under the hood, hence why we named the feature + // this way). + #[cfg(feature = "libpq")] + async fn run_migrations(&self) { + use crate::diesel::Connection; + use diesel::pg::PgConnection; + + info!("Running migrations: {:?}", self.postgres_connection_string); + let mut conn = + PgConnection::establish(&self.postgres_connection_string).expect("migrations failed!"); + run_pending_migrations(&mut conn); + } + + // If the libpq feature isn't enabled, we use diesel async instead. This is used by + // the CLI for the local testnet, where we cannot tolerate the libpq dependency. + #[cfg(not(feature = "libpq"))] + async fn run_migrations(&self) { + use diesel_async::async_connection_wrapper::AsyncConnectionWrapper; + + info!("Running migrations: {:?}", self.postgres_connection_string); + let conn = self + .db_pool + // We need to use this since AsyncConnectionWrapper doesn't know how to + // work with a pooled connection. + .dedicated_connection() + .await + .expect("[Parser] Failed to get connection"); + // We use spawn_blocking since run_pending_migrations is a blocking function. + tokio::task::spawn_blocking(move || { + // This lets us use the connection like a normal diesel connection. 
See more: + // https://docs.rs/diesel-async/latest/diesel_async/async_connection_wrapper/type.AsyncConnectionWrapper.html + let mut conn: AsyncConnectionWrapper = + AsyncConnectionWrapper::from(conn); + run_pending_migrations(&mut conn); + }) + .await + .expect("[Parser] Failed to run migrations"); + } + + /// Gets the start version for the processor. If not found, start from 0. + pub async fn get_start_version(&self) -> Result> { + let mut conn = self.db_pool.get().await?; + + match ProcessorStatusQuery::get_by_processor(self.processor_config.name(), &mut conn) + .await? + { + Some(status) => Ok(Some(status.last_success_version as u64 + 1)), + None => Ok(None), + } + } + + /// Verify the chain id from GRPC against the database. + pub async fn check_or_update_chain_id(&self, grpc_chain_id: i64) -> Result { + let processor_name = self.processor_config.name(); + info!( + processor_name = processor_name, + "[Parser] Checking if chain id is correct" + ); + let mut conn = self.db_pool.get().await?; + + let maybe_existing_chain_id = LedgerInfo::get(&mut conn).await?.map(|li| li.chain_id); + + match maybe_existing_chain_id { + Some(chain_id) => { + anyhow::ensure!(chain_id == grpc_chain_id, "[Parser] Wrong chain detected! Trying to index chain {} now but existing data is for chain {}", grpc_chain_id, chain_id); + info!( + processor_name = processor_name, + chain_id = chain_id, + "[Parser] Chain id matches! Continue to index...", + ); + Ok(chain_id as u64) + }, + None => { + info!( + processor_name = processor_name, + chain_id = grpc_chain_id, + "[Parser] Adding chain id to db, continue to index..." + ); + execute_with_better_error_conn( + &mut conn, + diesel::insert_into(ledger_infos::table) + .values(LedgerInfo { + chain_id: grpc_chain_id, + }) + .on_conflict_do_nothing(), + None, + ) + .await + .context("[Parser] Error updating chain_id!") + .map(|_| grpc_chain_id as u64) + }, + } + } +} + +async fn fetch_transactions( + processor_name: &str, + stream_address: &str, + receiver: kanal::AsyncReceiver, + task_index: usize, +) -> Result { + let pb_channel_fetch_time = std::time::Instant::now(); + let txn_pb_res = receiver.recv().await; + // Track how much time this task spent waiting for a pb bundle + PB_CHANNEL_FETCH_WAIT_TIME_SECS + .with_label_values(&[processor_name, &task_index.to_string()]) + .set(pb_channel_fetch_time.elapsed().as_secs_f64()); + + match txn_pb_res { + Ok(txn_pb) => Ok(txn_pb), + Err(_e) => { + error!( + processor_name = processor_name, + service_type = PROCESSOR_SERVICE_TYPE, + stream_address = stream_address, + "[Parser][T#{}] Consumer thread receiver channel closed.", + task_index + ); + Err(anyhow::anyhow!( + "[Parser][T#{}] Consumer thread receiver channel closed.", + task_index + )) + }, + } +} + +pub async fn do_processor( + transactions_pb: TransactionsPBResponse, + processor: &Processor, + db_chain_id: u64, + processor_name: &str, + auth_token: &str, + enable_verbose_logging: bool, +) -> Result { + // We use the value passed from the `transactions_pb` as it may have been filtered + let start_version = transactions_pb.start_version; + let end_version = transactions_pb.end_version; + + // Fake this as it's possible we have filtered out all of the txns in this batch + if transactions_pb.transactions.is_empty() { + return Ok(ProcessingResult::DefaultProcessingResult( + DefaultProcessingResult { + start_version, + end_version, + processing_duration_in_secs: 0.0, + db_insertion_duration_in_secs: 0.0, + last_transaction_timestamp: transactions_pb.end_txn_timestamp, + }, + 
)); + } + + let txn_time = transactions_pb.start_txn_timestamp; + + if let Some(ref t) = txn_time { + PROCESSOR_DATA_RECEIVED_LATENCY_IN_SECS + .with_label_values(&[auth_token, processor_name]) + .set(time_diff_since_pb_timestamp_in_secs(t)); + } + PROCESSOR_INVOCATIONS_COUNT + .with_label_values(&[processor_name]) + .inc(); + + if enable_verbose_logging { + info!( + processor_name = processor_name, + service_type = PROCESSOR_SERVICE_TYPE, + start_version, + end_version, + size_in_bytes = transactions_pb.size_in_bytes, + "[Parser] Started processing one batch of transactions" + ); + } + + let processed_result = processor + .process_transactions( + transactions_pb.transactions, + start_version, + end_version, + Some(db_chain_id), + ) + .await; + + if let Some(ref t) = txn_time { + PROCESSOR_DATA_PROCESSED_LATENCY_IN_SECS + .with_label_values(&[auth_token, processor_name]) + .set(time_diff_since_pb_timestamp_in_secs(t)); + } + + processed_result +} + +/// Given a config and a db pool, build a concrete instance of a processor. +// As time goes on there might be other things that we need to provide to certain +// processors. As that happens we can revist whether this function (which tends to +// couple processors together based on their args) makes sense. +// TODO: This is not particularly easily extensible; better to refactor to use a trait, and then share one extensible config model (allowing for only one arity) +pub fn build_processor( + config: &ProcessorConfig, + per_table_chunk_sizes: AHashMap, + deprecated_tables: TableFlags, + db_pool: ArcDbPool, + parquet_gap_detector_sender: Option>, // Parquet only +) -> Processor { + match config { + ProcessorConfig::AccountTransactionsProcessor => Processor::from( + AccountTransactionsProcessor::new(db_pool, per_table_chunk_sizes), + ), + ProcessorConfig::AnsProcessor(config) => Processor::from(AnsProcessor::new( + db_pool, + config.clone(), + per_table_chunk_sizes, + )), + ProcessorConfig::CoinProcessor => { + Processor::from(CoinProcessor::new(db_pool, per_table_chunk_sizes)) + }, + ProcessorConfig::DefaultProcessor => Processor::from(DefaultProcessor::new( + db_pool, + per_table_chunk_sizes, + deprecated_tables, + )), + ProcessorConfig::EventsProcessor => { + Processor::from(EventsProcessor::new(db_pool, per_table_chunk_sizes)) + }, + ProcessorConfig::FungibleAssetProcessor => { + Processor::from(FungibleAssetProcessor::new(db_pool, per_table_chunk_sizes)) + }, + ProcessorConfig::MonitoringProcessor => Processor::from(MonitoringProcessor::new(db_pool)), + ProcessorConfig::NftMetadataProcessor(config) => { + Processor::from(NftMetadataProcessor::new(db_pool, config.clone())) + }, + ProcessorConfig::ObjectsProcessor(config) => Processor::from(ObjectsProcessor::new( + db_pool, + config.clone(), + per_table_chunk_sizes, + )), + ProcessorConfig::StakeProcessor(config) => Processor::from(StakeProcessor::new( + db_pool, + config.clone(), + per_table_chunk_sizes, + )), + ProcessorConfig::TokenProcessor(config) => Processor::from(TokenProcessor::new( + db_pool, + config.clone(), + per_table_chunk_sizes, + )), + ProcessorConfig::TokenV2Processor(config) => Processor::from(TokenV2Processor::new( + db_pool, + config.clone(), + per_table_chunk_sizes, + )), + ProcessorConfig::TransactionMetadataProcessor => Processor::from( + TransactionMetadataProcessor::new(db_pool, per_table_chunk_sizes), + ), + ProcessorConfig::UserTransactionProcessor => Processor::from( + UserTransactionProcessor::new(db_pool, per_table_chunk_sizes), + ), + 
ProcessorConfig::DefaultParquetProcessor(config) => { + Processor::from(DefaultParquetProcessor::new( + db_pool, + config.clone(), + parquet_gap_detector_sender + .expect("Parquet processor requires a gap detector sender"), + )) + }, + } +} diff --git a/rust/rustfmt.toml b/rust/rustfmt.toml new file mode 100644 index 000000000..f1c193bbc --- /dev/null +++ b/rust/rustfmt.toml @@ -0,0 +1,11 @@ +combine_control_expr = false +edition = "2021" +imports_granularity = "Crate" +format_macro_matchers = true +group_imports = "One" +hex_literal_case = "Upper" +match_block_trailing_comma = true +newline_style = "Unix" +overflow_delimited_expr = true +reorder_impl_items = true +use_field_init_shorthand = true diff --git a/rust/scripts/check_banned_deps.sh b/rust/scripts/check_banned_deps.sh new file mode 100755 index 000000000..4a69baf43 --- /dev/null +++ b/rust/scripts/check_banned_deps.sh @@ -0,0 +1,52 @@ +#!/bin/sh + +# This script checks if the crate depends on external deps that it shouldn't. We run +# this in CI to make sure we don't accidentally reintroduce deps that would make the +# crate unusable for the CLI. +# +# While it would be more reliable to actually build the crate and check what libraries +# it links to, e.g. with otool, it is much cheaper to use cargo tree. As far as I can +# tell the entire Rust ecosystem makes use of these `x-sys` libraries to depend on +# external dynamically linked libraries. +# +# We can almost use cargo deny but it doesn't support checking specific build paths. We +# don't care if openssl-sys for example is used at build time (which it is, indirectly +# by shadow-rs), only at run time. See more here: +# https://github.com/EmbarkStudios/cargo-deny/issues/563 +# +# It assumes cargo and friends are available. +# +# Run this from the rust/ directory. + +cd "$(dirname "$0")" +cd .. + +declare -a deps=("pq-sys" "openssl-sys") + +for dep in "${deps[@]}"; do + echo "Checking for banned dependency $dep..." + + # Check for deps. As you can see, we only check for MacOS right now. + # We specify --no-default-features because we only care about these banned deps + # for the local testnet use case, in which case it opts out of the default + # features. + out=`cargo tree --no-default-features -e features,no-build,no-dev --target aarch64-apple-darwin -p processor -i "$dep"` + + # If the exit status was non-zero, great, the dep couldn't be found. + if [ $? -ne 0 ]; then + continue + fi + + # If the exit status was zero we have to check the output to see if the dep is in + # use. If it is in the output, it is in use. + if [[ $out != *"$dep"* ]]; then + continue + fi + + echo "Banned dependency $dep found!" + exit 1 +done + +echo +echo "None of the banned dependencies are in use, great!" +exit 0 diff --git a/rust/scripts/rust_lint.sh b/rust/scripts/rust_lint.sh new file mode 100755 index 000000000..5767e338e --- /dev/null +++ b/rust/scripts/rust_lint.sh @@ -0,0 +1,37 @@ +#!/bin/sh + +# This assumes you have already installed cargo-sort: +# cargo install cargo-sort +# +# The best way to do this however is to run scripts/dev_setup.sh +# +# If you want to run this from anywhere in aptos-core, try adding this wrapper +# script to your path: +# https://gist.github.com/banool/e6a2b85e2fff067d3a215cbfaf808032 + +# Make sure we're in the root of the repo. +if [ ! -f "scripts/rust_lint.sh" ] +then + echo "Please run this from the aptos-indexer-processors/rust/ directory." + exit 1 +fi + +# Run in check mode if requested. 
+CHECK_ARG="" +if [ "$1" = "--check" ]; then + CHECK_ARG="--check" +fi + +set -e +set -x + +cargo +nightly xclippy + +# We require the nightly build of cargo fmt +# to provide stricter rust formatting. +cargo +nightly fmt $CHECK_ARG + +# Once cargo-sort correctly handles workspace dependencies, +# we can move to cleaner workspace dependency notation. +# See: https://github.com/DevinR528/cargo-sort/issues/47 +cargo sort --grouped --workspace $CHECK_ARG diff --git a/rust/server-framework/Cargo.toml b/rust/server-framework/Cargo.toml new file mode 100644 index 000000000..8412ea436 --- /dev/null +++ b/rust/server-framework/Cargo.toml @@ -0,0 +1,30 @@ +[package] +name = "server-framework" +version = "1.0.0" + +# Workspace inherited keys +authors = { workspace = true } +edition = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +publish = { workspace = true } +repository = { workspace = true } +rust-version = { workspace = true } + +[dependencies] +anyhow = { workspace = true } +async-trait = { workspace = true } +backtrace = { workspace = true } +clap = { workspace = true } +prometheus = { workspace = true } +serde = { workspace = true } +serde_yaml = { workspace = true } +tempfile = { workspace = true } +tokio = { workspace = true } +toml = { workspace = true } +tracing = { workspace = true } +tracing-subscriber = { workspace = true } +warp = { workspace = true } + +[target.'cfg(target_os = "linux")'.dependencies] +aptos-system-utils = { workspace = true } diff --git a/rust/server-framework/src/lib.rs b/rust/server-framework/src/lib.rs new file mode 100644 index 000000000..a81361fc9 --- /dev/null +++ b/rust/server-framework/src/lib.rs @@ -0,0 +1,258 @@ +// Copyright © Aptos Foundation + +use anyhow::{Context, Result}; +#[cfg(target_os = "linux")] +use aptos_system_utils::profiling::start_cpu_profiling; +use backtrace::Backtrace; +use clap::Parser; +use prometheus::{Encoder, TextEncoder}; +use serde::{de::DeserializeOwned, Deserialize, Serialize}; +#[cfg(target_os = "linux")] +use std::convert::Infallible; +use std::{fs::File, io::Read, panic::PanicInfo, path::PathBuf, process}; +use tokio::runtime::Handle; +use tracing::error; +use tracing_subscriber::EnvFilter; +use warp::{http::Response, Filter}; + +/// ServerArgs bootstraps a server with all common pieces. And then triggers the run method for +/// the specific service. +#[derive(Parser)] +pub struct ServerArgs { + #[clap(short, long, value_parser)] + pub config_path: PathBuf, +} + +impl ServerArgs { + pub async fn run(&self, handle: Handle) -> Result<()> + where + C: RunnableConfig, + { + // Set up the server. + setup_logging(); + setup_panic_handler(); + let config = load::>(&self.config_path)?; + run_server_with_config(config, handle).await + } +} + +/// Run a server and the necessary probes. For spawning these tasks, the user must +/// provide a handle to a runtime they already have. +pub async fn run_server_with_config(config: GenericConfig, handle: Handle) -> Result<()> +where + C: RunnableConfig, +{ + let health_port = config.health_check_port; + // Start liveness and readiness probes. + let task_handler = handle.spawn(async move { + register_probes_and_metrics_handler(health_port).await; + anyhow::Ok(()) + }); + let main_task_handler = handle.spawn(async move { config.run().await }); + tokio::select! 
+
+#[derive(Deserialize, Debug, Serialize)]
+pub struct GenericConfig<T> {
+    // Shared configuration among all services.
+    pub health_check_port: u16,
+
+    // Specific configuration for each service.
+    pub server_config: T,
+}
+
+#[async_trait::async_trait]
+impl<T> RunnableConfig for GenericConfig<T>
+where
+    T: RunnableConfig,
+{
+    async fn run(&self) -> Result<()> {
+        self.server_config.run().await
+    }
+
+    fn get_server_name(&self) -> String {
+        self.server_config.get_server_name()
+    }
+}
+
+/// RunnableConfig is a trait that all services must implement for their configuration.
+#[async_trait::async_trait]
+pub trait RunnableConfig: DeserializeOwned + Send + Sync + 'static {
+    async fn run(&self) -> Result<()>;
+    fn get_server_name(&self) -> String;
+}
+
+/// Parse a yaml file into a struct.
+pub fn load<T: for<'de> Deserialize<'de>>(path: &PathBuf) -> Result<T> {
+    let mut file =
+        File::open(path).with_context(|| format!("failed to open the file at path: {:?}", path))?;
+    let mut contents = String::new();
+    file.read_to_string(&mut contents)
+        .with_context(|| format!("failed to read the file at path: {:?}", path))?;
+    serde_yaml::from_str::<T>(&contents).context("Unable to parse yaml file")
+}
+
+#[derive(Debug, Serialize)]
+pub struct CrashInfo {
+    details: String,
+    backtrace: String,
+}
+
+/// Invoke to ensure process exits on a thread panic.
+///
+/// Tokio's default behavior is to catch panics and ignore them. Invoking this function will
+/// ensure that all subsequent thread panics (even Tokio threads) will report the
+/// details/backtrace and then exit.
+pub fn setup_panic_handler() {
+    std::panic::set_hook(Box::new(move |pi: &PanicInfo<'_>| {
+        handle_panic(pi);
+    }));
+}
+
+// Formats and logs panic information
+fn handle_panic(panic_info: &PanicInfo<'_>) {
+    // The Display formatter for a PanicInfo contains the message, payload and location.
+    let details = format!("{}", panic_info);
+    let backtrace = format!("{:#?}", Backtrace::new());
+    let info = CrashInfo { details, backtrace };
+    let crash_info = toml::to_string_pretty(&info).unwrap();
+    error!("{}", crash_info);
+    // TODO / HACK ALARM: Write crash info synchronously via eprintln! to ensure it is written before the process exits, which error! doesn't guarantee.
+    // This is a workaround until https://github.com/aptos-labs/aptos-core/issues/2038 is resolved.
+    eprintln!("{}", crash_info);
+    // Kill the process
+    process::exit(12);
+}
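
The hook above matters because a panic inside a spawned Tokio task is caught by the runtime and only surfaces if the JoinHandle is awaited; with the hook installed, the process logs CrashInfo and exits with code 12 instead of limping along. A tiny hypothetical snippet to see the difference (standalone, not part of this change; assumes tokio's macros and time features, which the workspace tokio enables):

use std::time::Duration;

#[tokio::main]
async fn main() {
    server_framework::setup_panic_handler();
    tokio::spawn(async {
        panic!("boom");
    });
    // Without the hook, the panic above is swallowed and the line below prints;
    // with it, the hook fires first and the process exits with code 12.
    tokio::time::sleep(Duration::from_secs(1)).await;
    println!("only reachable without the panic hook");
}
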
+
+/// Set up logging for the server.
+pub fn setup_logging() {
+    let env_filter = EnvFilter::try_from_default_env()
+        .or_else(|_| EnvFilter::try_new("info"))
+        .unwrap();
+    tracing_subscriber::fmt()
+        .json()
+        .with_file(true)
+        .with_line_number(true)
+        .with_thread_ids(true)
+        .with_target(false)
+        .with_thread_names(true)
+        .with_env_filter(env_filter)
+        .init();
+}
+
+/// Register readiness and liveness probes and set up metrics endpoint.
+async fn register_probes_and_metrics_handler(port: u16) {
+    let readiness = warp::path("readiness")
+        .map(move || warp::reply::with_status("ready", warp::http::StatusCode::OK));
+    let metrics_endpoint = warp::path("metrics").map(|| {
+        // Metrics encoding.
+        let metrics = prometheus::gather();
+        let mut encode_buffer = vec![];
+        let encoder = TextEncoder::new();
+        // If metrics encoding fails, we want to panic and crash the process.
+        encoder
+            .encode(&metrics, &mut encode_buffer)
+            .context("Failed to encode metrics")
+            .unwrap();
+
+        Response::builder()
+            .header("Content-Type", "text/plain")
+            .body(encode_buffer)
+    });
+
+    if cfg!(target_os = "linux") {
+        #[cfg(target_os = "linux")]
+        let profilez = warp::path("profilez").and_then(|| async move {
+            // TODO(grao): Consider making the parameters configurable.
+            Ok::<_, Infallible>(match start_cpu_profiling(10, 99, false).await {
+                Ok(body) => {
+                    let response = Response::builder()
+                        .header("Content-Length", body.len())
+                        .header("Content-Disposition", "inline")
+                        .header("Content-Type", "image/svg+xml")
+                        .body(body);
+
+                    match response {
+                        Ok(res) => warp::reply::with_status(res, warp::http::StatusCode::OK),
+                        Err(e) => warp::reply::with_status(
+                            Response::new(format!("Profiling failed: {e:?}.").as_bytes().to_vec()),
+                            warp::http::StatusCode::INTERNAL_SERVER_ERROR,
+                        ),
+                    }
+                },
+                Err(e) => warp::reply::with_status(
+                    Response::new(format!("Profiling failed: {e:?}.").as_bytes().to_vec()),
+                    warp::http::StatusCode::INTERNAL_SERVER_ERROR,
+                ),
+            })
+        });
+        #[cfg(target_os = "linux")]
+        warp::serve(readiness.or(metrics_endpoint).or(profilez))
+            .run(([0, 0, 0, 0], port))
+            .await;
+    } else {
+        warp::serve(readiness.or(metrics_endpoint))
+            .run(([0, 0, 0, 0], port))
+            .await;
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use std::io::Write;
+    use tempfile::tempdir;
+
+    #[derive(Clone, Debug, Deserialize, Serialize)]
+    #[serde(deny_unknown_fields)]
+    pub struct TestConfig {
+        test: u32,
+        test_name: String,
+    }
+
+    #[async_trait::async_trait]
+    impl RunnableConfig for TestConfig {
+        async fn run(&self) -> Result<()> {
+            assert_eq!(self.test, 123);
+            assert_eq!(self.test_name, "test");
+            Ok(())
+        }
+
+        fn get_server_name(&self) -> String {
+            self.test_name.clone()
+        }
+    }
+
+    #[test]
+    fn test_random_config_creation() {
+        let dir = tempdir().expect("tempdir failure");
+
+        let file_path = dir.path().join("testing_yaml.yaml");
+        let mut file = File::create(&file_path).expect("create failure");
+        let raw_yaml_content = r#"
+            health_check_port: 12345
+            server_config:
+                test: 123
+                test_name: "test"
+        "#;
+        writeln!(file, "{}", raw_yaml_content).expect("write failure");
+
+        let config = load::<GenericConfig<TestConfig>>(&file_path).unwrap();
+        assert_eq!(config.health_check_port, 12345);
+        assert_eq!(config.server_config.test, 123);
+        assert_eq!(config.server_config.test_name, "test");
+    }
+
+    #[test]
+    fn verify_tool() {
+        use clap::CommandFactory;
+        ServerArgs::command().debug_assert()
+    }
+}
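
Because the /metrics route above simply encodes whatever prometheus::gather() returns from the default registry, a service only has to register its metrics through the usual prometheus macros for them to show up on the health port. A minimal sketch; the metric name is invented, and once_cell is assumed to be available in the workspace:

use once_cell::sync::Lazy;
use prometheus::{register_int_counter, IntCounter};

// Registered against the default registry, so it is served at /metrics automatically.
static TRANSACTIONS_PROCESSED: Lazy<IntCounter> = Lazy::new(|| {
    register_int_counter!(
        "indexer_transactions_processed_count",
        "Number of transactions this processor has handled"
    )
    .expect("metric can only be registered once")
});

fn record_batch(num_txns: u64) {
    // Incrementing is all a service needs to do; scraping happens through the
    // health_check_port configured in GenericConfig.
    TRANSACTIONS_PROCESSED.inc_by(num_txns);
}
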