From af291b69d259dd2badaf59faa1a9e3532269c17c Mon Sep 17 00:00:00 2001 From: jiacai2050 Date: Tue, 14 Feb 2023 10:37:05 +0800 Subject: [PATCH 01/11] bump datafusion --- Cargo.lock | 359 ++++++++++++++++++++++++++++++++++++----------------- Cargo.toml | 12 +- 2 files changed, 250 insertions(+), 121 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6d7eb5b747..415f07e6ce 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -86,7 +86,7 @@ version = "1.0.0-alpha02" dependencies = [ "arc-swap 1.5.1", "arena", - "arrow 31.0.0", + "arrow 32.0.0", "async-trait", "base64 0.13.0", "bytes 1.2.1", @@ -102,7 +102,7 @@ dependencies = [ "lru", "message_queue", "object_store 1.0.0-alpha02", - "parquet", + "parquet 31.0.0", "parquet_ext", "prometheus 0.12.0", "prost", @@ -228,37 +228,37 @@ dependencies = [ [[package]] name = "arrow" -version = "31.0.0" +version = "32.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b556d39f9d19e363833a0fe65d591cd0e2ecc0977589a78179b592bea8dc945" +checksum = "87d948f553cf556656eb89265700258e1032d26fec9b7920cd20319336e06afd" dependencies = [ "ahash 0.8.0", "arrow-arith", - "arrow-array", - "arrow-buffer 31.0.0", - "arrow-cast", + "arrow-array 32.0.0", + "arrow-buffer 32.0.0", + "arrow-cast 32.0.0", "arrow-csv", - "arrow-data", - "arrow-ipc", + "arrow-data 32.0.0", + "arrow-ipc 32.0.0", "arrow-json", "arrow-ord", "arrow-row", - "arrow-schema", - "arrow-select", + "arrow-schema 32.0.0", + "arrow-select 32.0.0", "arrow-string", "comfy-table", ] [[package]] name = "arrow-arith" -version = "31.0.0" +version = "32.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85c61b9235694b48f60d89e0e8d6cb478f39c65dd14b0fe1c3f04379b7d50068" +checksum = "cf30d4ebc3df9dfd8bd26883aa30687d4ddcfd7b2443e62bd7c8fedf153b8e45" dependencies = [ - "arrow-array", - "arrow-buffer 31.0.0", - "arrow-data", - "arrow-schema", + "arrow-array 32.0.0", + "arrow-buffer 32.0.0", + "arrow-data 32.0.0", + "arrow-schema 32.0.0", "chrono", "half 2.1.0", "num", @@ -272,8 +272,24 @@ checksum = "a1e6e839764618a911cc460a58ebee5ad3d42bc12d9a5e96a29b7cc296303aa1" dependencies = [ "ahash 0.8.0", "arrow-buffer 31.0.0", - "arrow-data", - "arrow-schema", + "arrow-data 31.0.0", + "arrow-schema 31.0.0", + "chrono", + "half 2.1.0", + "hashbrown 0.13.2", + "num", +] + +[[package]] +name = "arrow-array" +version = "32.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fe66ec388d882a61fff3eb613b5266af133aa08a3318e5e493daf0f5c1696cb" +dependencies = [ + "ahash 0.8.0", + "arrow-buffer 32.0.0", + "arrow-data 32.0.0", + "arrow-schema 32.0.0", "chrono", "half 2.1.0", "hashbrown 0.13.2", @@ -300,17 +316,43 @@ dependencies = [ "num", ] +[[package]] +name = "arrow-buffer" +version = "32.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ef967dadbccd4586ec8d7aab27d7033ecb5dfae8a605c839613039eac227bda" +dependencies = [ + "half 2.1.0", + "num", +] + [[package]] name = "arrow-cast" version = "31.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "83dcdb1436cac574f1c1b30fda91c53c467534337bef4064bbd4ea2d6fbc6e04" dependencies = [ - "arrow-array", + "arrow-array 31.0.0", "arrow-buffer 31.0.0", - "arrow-data", - "arrow-schema", - "arrow-select", + "arrow-data 31.0.0", + "arrow-schema 31.0.0", + "arrow-select 31.0.0", + "chrono", + "lexical-core 0.8.5", + "num", +] + +[[package]] +name = "arrow-cast" +version = "32.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "491a7979ea9e76dc218f532896e2d245fde5235e2e6420ce80d27cf6395dda84" +dependencies = [ + "arrow-array 32.0.0", + "arrow-buffer 32.0.0", + "arrow-data 32.0.0", + "arrow-schema 32.0.0", + "arrow-select 32.0.0", "chrono", "lexical-core 0.8.5", "num", @@ -318,15 +360,15 @@ dependencies = [ [[package]] name = "arrow-csv" -version = "31.0.0" +version = "32.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a01677ae9458f5af9e35e1aa6ba97502f539e621db0c6672566403f97edd0448" +checksum = "4b1d4fc91078dbe843c2c50d90f8119c96e8dfac2f78d30f7a8cb9397399c61d" dependencies = [ - "arrow-array", - "arrow-buffer 31.0.0", - "arrow-cast", - "arrow-data", - "arrow-schema", + "arrow-array 32.0.0", + "arrow-buffer 32.0.0", + "arrow-cast 32.0.0", + "arrow-data 32.0.0", + "arrow-schema 32.0.0", "chrono", "csv", "csv-core", @@ -342,7 +384,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "14e3e69c9fd98357eeeab4aa0f626ecf7ecf663e68e8fc04eac87c424a414477" dependencies = [ "arrow-buffer 31.0.0", - "arrow-schema", + "arrow-schema 31.0.0", + "half 2.1.0", + "num", +] + +[[package]] +name = "arrow-data" +version = "32.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee0c0e3c5d3b80be8f267f4b2af714c08cad630569be01a8379cfe27b4866495" +dependencies = [ + "arrow-buffer 32.0.0", + "arrow-schema 32.0.0", "half 2.1.0", "num", ] @@ -363,57 +417,72 @@ version = "31.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64cac2706acbd796965b6eaf0da30204fe44aacf70273f8cb3c9b7d7f3d4c190" dependencies = [ - "arrow-array", + "arrow-array 31.0.0", "arrow-buffer 31.0.0", - "arrow-cast", - "arrow-data", - "arrow-schema", + "arrow-cast 31.0.0", + "arrow-data 31.0.0", + "arrow-schema 31.0.0", "flatbuffers 22.9.29", ] +[[package]] +name = "arrow-ipc" +version = "32.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a3ca7eb8d23c83fe40805cbafec70a6a31df72de47355545ff34c850f715403" +dependencies = [ + "arrow-array 32.0.0", + "arrow-buffer 32.0.0", + "arrow-cast 32.0.0", + "arrow-data 32.0.0", + "arrow-schema 32.0.0", + "flatbuffers 23.1.21", +] + [[package]] name = "arrow-json" -version = "31.0.0" +version = "32.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7790e8b7df2d8ef5ac802377ac256cf2fb80cbf7d44b82d6464e20ace6232a5a" +checksum = "bf65aff76d2e340d827d5cab14759e7dd90891a288347e2202e4ee28453d9bed" dependencies = [ - "arrow-array", - "arrow-buffer 31.0.0", - "arrow-cast", - "arrow-data", - "arrow-schema", + "arrow-array 32.0.0", + "arrow-buffer 32.0.0", + "arrow-cast 32.0.0", + "arrow-data 32.0.0", + "arrow-schema 32.0.0", "chrono", "half 2.1.0", "indexmap", + "lexical-core 0.8.5", "num", "serde_json", ] [[package]] name = "arrow-ord" -version = "31.0.0" +version = "32.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7ee6e1b761dfffaaf7b5bbe68c113a576a3a802146c5c0b9fcec781e30d80a3" +checksum = "074a5a55c37ae4750af4811c8861c0378d8ab2ff6c262622ad24efae6e0b73b3" dependencies = [ - "arrow-array", - "arrow-buffer 31.0.0", - "arrow-data", - "arrow-schema", - "arrow-select", + "arrow-array 32.0.0", + "arrow-buffer 32.0.0", + "arrow-data 32.0.0", + "arrow-schema 32.0.0", + "arrow-select 32.0.0", "num", ] [[package]] name = "arrow-row" -version = "31.0.0" +version = "32.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e65bfedf782fc92721e796fdd26ae7343c98ba9a9243d62def9e4e1c4c1cf0b" 
+checksum = "e064ac4e64960ebfbe35f218f5e7d9dc9803b59c2e56f611da28ce6d008f839e" dependencies = [ "ahash 0.8.0", - "arrow-array", - "arrow-buffer 31.0.0", - "arrow-data", - "arrow-schema", + "arrow-array 32.0.0", + "arrow-buffer 32.0.0", + "arrow-data 32.0.0", + "arrow-schema 32.0.0", "half 2.1.0", "hashbrown 0.13.2", ] @@ -424,30 +493,49 @@ version = "31.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "73ca49d010b27e2d73f70c1d1f90c1b378550ed0f4ad379c4dea0c997d97d723" +[[package]] +name = "arrow-schema" +version = "32.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ead3f373b9173af52f2fdefcb5a7dd89f453fbc40056f574a8aeb23382a4ef81" + [[package]] name = "arrow-select" version = "31.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "976cbaeb1a85c09eea81f3f9c149c758630ff422ed0238624c5c3f4704b6a53c" dependencies = [ - "arrow-array", + "arrow-array 31.0.0", "arrow-buffer 31.0.0", - "arrow-data", - "arrow-schema", + "arrow-data 31.0.0", + "arrow-schema 31.0.0", + "num", +] + +[[package]] +name = "arrow-select" +version = "32.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "646b4f15b5a77c970059e748aeb1539705c68cd397ecf0f0264c4ef3737d35f3" +dependencies = [ + "arrow-array 32.0.0", + "arrow-buffer 32.0.0", + "arrow-data 32.0.0", + "arrow-schema 32.0.0", "num", ] [[package]] name = "arrow-string" -version = "31.0.0" +version = "32.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d4882762f8f48a9218946c016553d38b04b4fe8202038dad4141b3b887b7da8" +checksum = "c8b8bf150caaeca03f39f1a91069701387d93f7cfd256d27f423ac8496d99a51" dependencies = [ - "arrow-array", - "arrow-buffer 31.0.0", - "arrow-data", - "arrow-schema", - "arrow-select", + "arrow-array 32.0.0", + "arrow-buffer 32.0.0", + "arrow-data 32.0.0", + "arrow-schema 32.0.0", + "arrow-select 32.0.0", "regex", "regex-syntax", ] @@ -476,7 +564,7 @@ dependencies = [ name = "arrow_ext" version = "1.0.0-alpha02" dependencies = [ - "arrow 31.0.0", + "arrow 32.0.0", "snafu 0.6.10", "zstd 0.12.1+zstd.1.5.2", ] @@ -664,7 +752,7 @@ version = "1.0.0-alpha02" dependencies = [ "analytic_engine", "arena", - "arrow 31.0.0", + "arrow 32.0.0", "arrow2", "arrow_ext", "base64 0.13.0", @@ -676,7 +764,7 @@ dependencies = [ "futures 0.3.25", "log", "object_store 1.0.0-alpha02", - "parquet", + "parquet 31.0.0", "parquet_ext", "pprof", "rand 0.7.3", @@ -1309,7 +1397,7 @@ dependencies = [ name = "common_types" version = "1.0.0-alpha02" dependencies = [ - "arrow 31.0.0", + "arrow 32.0.0", "arrow_ext", "byteorder", "bytes_ext", @@ -1330,7 +1418,7 @@ dependencies = [ name = "common_util" version = "1.0.0-alpha02" dependencies = [ - "arrow 31.0.0", + "arrow 32.0.0", "avro-rs", "backtrace", "ceresdbproto 0.1.0 (git+https://github.com/CeresDB/ceresdbproto.git?rev=81a6d9ead104b2910f5c4484135054d51095090b)", @@ -1784,12 +1872,12 @@ dependencies = [ [[package]] name = "datafusion" -version = "17.0.0" +version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6d90cae91414aaeda37ae8022a23ef1124ca8efc08ac7d7770274249f7cf148" +checksum = "cd805bdf93d3137b37fd9966042df0c84ddfca0df5a8d32eaacb16cf6ab0d93d" dependencies = [ "ahash 0.8.0", - "arrow 31.0.0", + "arrow 32.0.0", "async-compression", "async-trait", "bytes 1.2.1", @@ -1813,7 +1901,7 @@ dependencies = [ "num_cpus", "object_store 0.5.3", "parking_lot 0.12.1", - "parquet", + "parquet 32.0.0", "paste 1.0.8", "percent-encoding 2.2.0", 
"pin-project-lite", @@ -1831,26 +1919,26 @@ dependencies = [ [[package]] name = "datafusion-common" -version = "17.0.0" +version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b21c4b8e8b7815e86d79d25da16854fee6d4d1b386572e802a248b7d43188e86" +checksum = "08c58d6714427f52f9815d19debab7adab5bac5b4d2a99d51c250e606acb6cf5" dependencies = [ - "arrow 31.0.0", + "arrow 32.0.0", "chrono", "num_cpus", "object_store 0.5.3", - "parquet", + "parquet 32.0.0", "sqlparser", ] [[package]] name = "datafusion-expr" -version = "17.0.0" +version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db8c07b051fbaf01657a3eb910a76b042ecfed0350a40412f70cf6b949bd5328" +checksum = "2a32ee054230dd9a57d0bed587406869c4a7814d90154616aff2cb9991c1756f" dependencies = [ "ahash 0.8.0", - "arrow 31.0.0", + "arrow 32.0.0", "datafusion-common", "log", "sqlparser", @@ -1858,11 +1946,11 @@ dependencies = [ [[package]] name = "datafusion-optimizer" -version = "17.0.0" +version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2ce4d34a808cd2e4c4864cdc759dd1bd22dcac2b8af38aa570e30fd54577c4d" +checksum = "6de4d144924de29a835feeff8313a81fdc2c7190111301508e09ea59a80edbbc" dependencies = [ - "arrow 31.0.0", + "arrow 32.0.0", "async-trait", "chrono", "datafusion-common", @@ -1875,14 +1963,14 @@ dependencies = [ [[package]] name = "datafusion-physical-expr" -version = "17.0.0" +version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a38afa11a09505c24bd7e595039d7914ec39329ba490209413ef2d37895c8220" +checksum = "943e42356f0f6f5ac37ceacd412de9c4d7d8eba1e81b6f724f88699540c7f070" dependencies = [ "ahash 0.8.0", - "arrow 31.0.0", - "arrow-buffer 31.0.0", - "arrow-schema", + "arrow 32.0.0", + "arrow-buffer 32.0.0", + "arrow-schema 32.0.0", "blake2", "blake3", "chrono", @@ -1906,11 +1994,11 @@ dependencies = [ [[package]] name = "datafusion-proto" -version = "17.0.0" +version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c5e58a6e887a6965f35c4feb5787ae7185803cc5e2fda15d02825194a0a6c1a" +checksum = "649aec221737d8fb88956a2e6181297456f83b8560d32d775ad1cd22d67fd598" dependencies = [ - "arrow 31.0.0", + "arrow 32.0.0", "chrono", "datafusion", "datafusion-common", @@ -1924,11 +2012,11 @@ dependencies = [ [[package]] name = "datafusion-row" -version = "17.0.0" +version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9172411b25ff4aa97f8e99884898595a581636d93cc96c12f96dbe3bf51cd7e5" +checksum = "6a506f5924f8af54e0806a995da0897f8c2b548d492793e045a3896d88d6714a" dependencies = [ - "arrow 31.0.0", + "arrow 32.0.0", "datafusion-common", "paste 1.0.8", "rand 0.8.5", @@ -1936,11 +2024,11 @@ dependencies = [ [[package]] name = "datafusion-sql" -version = "17.0.0" +version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fbe5e61563ced2f6992a60afea568ff3de69e32940bbf07db06fc5c9d8cd866" +checksum = "4a3d12047a5847f9667f4e2aa8fa2e7d5a6e1094b8e3546d58de492152a50dc7" dependencies = [ - "arrow-schema", + "arrow-schema 32.0.0", "datafusion-common", "datafusion-expr", "log", @@ -2004,7 +2092,7 @@ dependencies = [ name = "df_operator" version = "1.0.0-alpha02" dependencies = [ - "arrow 31.0.0", + "arrow 32.0.0", "base64 0.13.0", "bincode", "chrono", @@ -2258,6 +2346,16 @@ dependencies = [ "thiserror", ] +[[package]] +name = "flatbuffers" +version = "23.1.21" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "77f5399c2c9c50ae9418e522842ad362f61ee48b346ac106807bd355a8a7c619" +dependencies = [ + "bitflags", + "rustc_version 0.4.0", +] + [[package]] name = "flate2" version = "1.0.24" @@ -3026,7 +3124,7 @@ name = "interpreters" version = "1.0.0-alpha02" dependencies = [ "analytic_engine", - "arrow 31.0.0", + "arrow 32.0.0", "async-trait", "catalog", "catalog_impls", @@ -4311,13 +4409,44 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6b4ee1ffc0778395c9783a5c74f2cad2fb1a128ade95a965212d31b7b13e3d45" dependencies = [ "ahash 0.8.0", - "arrow-array", + "arrow-array 31.0.0", "arrow-buffer 31.0.0", - "arrow-cast", - "arrow-data", - "arrow-ipc", - "arrow-schema", - "arrow-select", + "arrow-cast 31.0.0", + "arrow-data 31.0.0", + "arrow-ipc 31.0.0", + "arrow-schema 31.0.0", + "arrow-select 31.0.0", + "base64 0.21.0", + "brotli", + "bytes 1.2.1", + "chrono", + "flate2", + "hashbrown 0.13.2", + "lz4", + "num", + "num-bigint 0.4.3", + "paste 1.0.8", + "seq-macro", + "snap", + "thrift 0.17.0", + "twox-hash", + "zstd 0.12.1+zstd.1.5.2", +] + +[[package]] +name = "parquet" +version = "32.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23b3d4917209e17e1da5fb07d276da237a42465f0def2b8d5fa5ce0e85855b4c" +dependencies = [ + "ahash 0.8.0", + "arrow-array 32.0.0", + "arrow-buffer 32.0.0", + "arrow-cast 32.0.0", + "arrow-data 32.0.0", + "arrow-ipc 32.0.0", + "arrow-schema 32.0.0", + "arrow-select 32.0.0", "base64 0.21.0", "brotli", "bytes 1.2.1", @@ -4374,7 +4503,7 @@ dependencies = [ name = "parquet_ext" version = "1.0.0-alpha02" dependencies = [ - "arrow 31.0.0", + "arrow 32.0.0", "arrow_ext", "async-trait", "bytes 1.2.1", @@ -4383,7 +4512,7 @@ dependencies = [ "datafusion-expr", "log", "lru", - "parquet", + "parquet 31.0.0", "parquet-format", "thrift 0.13.0", ] @@ -4861,7 +4990,7 @@ dependencies = [ name = "query_engine" version = "1.0.0-alpha02" dependencies = [ - "arrow 31.0.0", + "arrow 32.0.0", "async-trait", "common_types", "common_util", @@ -5678,7 +5807,7 @@ name = "server" version = "1.0.0-alpha02" dependencies = [ "analytic_engine", - "arrow 31.0.0", + "arrow 32.0.0", "arrow_ext", "async-trait", "bytes 1.2.1", @@ -6009,7 +6138,7 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" name = "sql" version = "1.0.0-alpha02" dependencies = [ - "arrow 31.0.0", + "arrow 32.0.0", "async-trait", "catalog", "ceresdbproto 0.1.0 (git+https://github.com/CeresDB/ceresdbproto.git?rev=81a6d9ead104b2910f5c4484135054d51095090b)", @@ -6271,7 +6400,7 @@ dependencies = [ name = "system_catalog" version = "1.0.0-alpha02" dependencies = [ - "arrow 31.0.0", + "arrow 32.0.0", "async-trait", "catalog", "ceresdbproto 0.1.0 (git+https://github.com/CeresDB/ceresdbproto.git?rev=81a6d9ead104b2910f5c4484135054d51095090b)", @@ -6289,7 +6418,7 @@ dependencies = [ name = "table_engine" version = "1.0.0-alpha02" dependencies = [ - "arrow 31.0.0", + "arrow 32.0.0", "async-trait", "ceresdbproto 0.1.0 (git+https://github.com/CeresDB/ceresdbproto.git?rev=81a6d9ead104b2910f5c4484135054d51095090b)", "common_types", @@ -6302,7 +6431,7 @@ dependencies = [ "futures 0.3.25", "itertools", "log", - "parquet", + "parquet 31.0.0", "parquet_ext", "prost", "serde", @@ -6822,7 +6951,7 @@ dependencies = [ "env_logger", "futures 0.3.25", "object_store 1.0.0-alpha02", - "parquet", + "parquet 31.0.0", "parquet_ext", "table_engine", "tokio 1.25.0", diff --git a/Cargo.toml b/Cargo.toml index 
c1109f8a68..b87ff5b6f4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -50,8 +50,8 @@ name = "ceresdb-server" path = "src/bin/ceresdb-server.rs" [workspace.dependencies] -arrow = { version = "31.0.0", features = ["prettyprint"] } -arrow_ipc = { version = "31.0.0" } +arrow = { version = "32.0.0", features = ["prettyprint"] } +arrow_ipc = { version = "32.0.0" } arrow_ext = { path = "components/arrow_ext" } analytic_engine = { path = "analytic_engine" } arena = { path = "components/arena" } @@ -68,10 +68,10 @@ cluster = { path = "cluster" } criterion = "0.3" common_types = { path = "common_types" } common_util = { path = "common_util" } -datafusion = "17.0.0" -datafusion-expr = "17.0.0" -datafusion-optimizer = "17.0.0" -datafusion-proto = "17.0.0" +datafusion = "18.0.0" +datafusion-expr = "18.0.0" +datafusion-optimizer = "18.0.0" +datafusion-proto = "18.0.0" df_operator = { path = "df_operator" } env_logger = "0.6" futures = "0.3" From 33a0170eddf59ba8da9d229290814be0135d0cb2 Mon Sep 17 00:00:00 2001 From: jiacai2050 Date: Tue, 14 Feb 2023 11:20:37 +0800 Subject: [PATCH 02/11] fix breaking changes --- Cargo.lock | 242 +++++++---------------------- Cargo.toml | 2 +- common_types/src/datum.rs | 1 + components/object_store/src/lib.rs | 2 - rust-toolchain | 2 +- table_engine/src/stream.rs | 19 ++- 6 files changed, 69 insertions(+), 199 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 415f07e6ce..5a6a28d774 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -102,7 +102,7 @@ dependencies = [ "lru", "message_queue", "object_store 1.0.0-alpha02", - "parquet 31.0.0", + "parquet", "parquet_ext", "prometheus 0.12.0", "prost", @@ -234,17 +234,17 @@ checksum = "87d948f553cf556656eb89265700258e1032d26fec9b7920cd20319336e06afd" dependencies = [ "ahash 0.8.0", "arrow-arith", - "arrow-array 32.0.0", + "arrow-array", "arrow-buffer 32.0.0", - "arrow-cast 32.0.0", + "arrow-cast", "arrow-csv", - "arrow-data 32.0.0", - "arrow-ipc 32.0.0", + "arrow-data", + "arrow-ipc", "arrow-json", "arrow-ord", "arrow-row", - "arrow-schema 32.0.0", - "arrow-select 32.0.0", + "arrow-schema", + "arrow-select", "arrow-string", "comfy-table", ] @@ -255,31 +255,15 @@ version = "32.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf30d4ebc3df9dfd8bd26883aa30687d4ddcfd7b2443e62bd7c8fedf153b8e45" dependencies = [ - "arrow-array 32.0.0", + "arrow-array", "arrow-buffer 32.0.0", - "arrow-data 32.0.0", - "arrow-schema 32.0.0", + "arrow-data", + "arrow-schema", "chrono", "half 2.1.0", "num", ] -[[package]] -name = "arrow-array" -version = "31.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1e6e839764618a911cc460a58ebee5ad3d42bc12d9a5e96a29b7cc296303aa1" -dependencies = [ - "ahash 0.8.0", - "arrow-buffer 31.0.0", - "arrow-data 31.0.0", - "arrow-schema 31.0.0", - "chrono", - "half 2.1.0", - "hashbrown 0.13.2", - "num", -] - [[package]] name = "arrow-array" version = "32.0.0" @@ -288,8 +272,8 @@ checksum = "9fe66ec388d882a61fff3eb613b5266af133aa08a3318e5e493daf0f5c1696cb" dependencies = [ "ahash 0.8.0", "arrow-buffer 32.0.0", - "arrow-data 32.0.0", - "arrow-schema 32.0.0", + "arrow-data", + "arrow-schema", "chrono", "half 2.1.0", "hashbrown 0.13.2", @@ -306,16 +290,6 @@ dependencies = [ "num", ] -[[package]] -name = "arrow-buffer" -version = "31.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03a21d232b1bc1190a3fdd2f9c1e39b7cd41235e95a0d44dd4f522bc5f495748" -dependencies = [ - "half 2.1.0", - "num", -] - [[package]] name = "arrow-buffer" version 
= "32.0.0" @@ -326,33 +300,17 @@ dependencies = [ "num", ] -[[package]] -name = "arrow-cast" -version = "31.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83dcdb1436cac574f1c1b30fda91c53c467534337bef4064bbd4ea2d6fbc6e04" -dependencies = [ - "arrow-array 31.0.0", - "arrow-buffer 31.0.0", - "arrow-data 31.0.0", - "arrow-schema 31.0.0", - "arrow-select 31.0.0", - "chrono", - "lexical-core 0.8.5", - "num", -] - [[package]] name = "arrow-cast" version = "32.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "491a7979ea9e76dc218f532896e2d245fde5235e2e6420ce80d27cf6395dda84" dependencies = [ - "arrow-array 32.0.0", + "arrow-array", "arrow-buffer 32.0.0", - "arrow-data 32.0.0", - "arrow-schema 32.0.0", - "arrow-select 32.0.0", + "arrow-data", + "arrow-schema", + "arrow-select", "chrono", "lexical-core 0.8.5", "num", @@ -364,11 +322,11 @@ version = "32.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b1d4fc91078dbe843c2c50d90f8119c96e8dfac2f78d30f7a8cb9397399c61d" dependencies = [ - "arrow-array 32.0.0", + "arrow-array", "arrow-buffer 32.0.0", - "arrow-cast 32.0.0", - "arrow-data 32.0.0", - "arrow-schema 32.0.0", + "arrow-cast", + "arrow-data", + "arrow-schema", "chrono", "csv", "csv-core", @@ -377,18 +335,6 @@ dependencies = [ "regex", ] -[[package]] -name = "arrow-data" -version = "31.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14e3e69c9fd98357eeeab4aa0f626ecf7ecf663e68e8fc04eac87c424a414477" -dependencies = [ - "arrow-buffer 31.0.0", - "arrow-schema 31.0.0", - "half 2.1.0", - "num", -] - [[package]] name = "arrow-data" version = "32.0.0" @@ -396,7 +342,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ee0c0e3c5d3b80be8f267f4b2af714c08cad630569be01a8379cfe27b4866495" dependencies = [ "arrow-buffer 32.0.0", - "arrow-schema 32.0.0", + "arrow-schema", "half 2.1.0", "num", ] @@ -411,31 +357,17 @@ dependencies = [ "serde", ] -[[package]] -name = "arrow-ipc" -version = "31.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64cac2706acbd796965b6eaf0da30204fe44aacf70273f8cb3c9b7d7f3d4c190" -dependencies = [ - "arrow-array 31.0.0", - "arrow-buffer 31.0.0", - "arrow-cast 31.0.0", - "arrow-data 31.0.0", - "arrow-schema 31.0.0", - "flatbuffers 22.9.29", -] - [[package]] name = "arrow-ipc" version = "32.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0a3ca7eb8d23c83fe40805cbafec70a6a31df72de47355545ff34c850f715403" dependencies = [ - "arrow-array 32.0.0", + "arrow-array", "arrow-buffer 32.0.0", - "arrow-cast 32.0.0", - "arrow-data 32.0.0", - "arrow-schema 32.0.0", + "arrow-cast", + "arrow-data", + "arrow-schema", "flatbuffers 23.1.21", ] @@ -445,11 +377,11 @@ version = "32.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bf65aff76d2e340d827d5cab14759e7dd90891a288347e2202e4ee28453d9bed" dependencies = [ - "arrow-array 32.0.0", + "arrow-array", "arrow-buffer 32.0.0", - "arrow-cast 32.0.0", - "arrow-data 32.0.0", - "arrow-schema 32.0.0", + "arrow-cast", + "arrow-data", + "arrow-schema", "chrono", "half 2.1.0", "indexmap", @@ -464,11 +396,11 @@ version = "32.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "074a5a55c37ae4750af4811c8861c0378d8ab2ff6c262622ad24efae6e0b73b3" dependencies = [ - "arrow-array 32.0.0", + "arrow-array", "arrow-buffer 32.0.0", - "arrow-data 32.0.0", - "arrow-schema 32.0.0", - "arrow-select 
32.0.0", + "arrow-data", + "arrow-schema", + "arrow-select", "num", ] @@ -479,49 +411,30 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e064ac4e64960ebfbe35f218f5e7d9dc9803b59c2e56f611da28ce6d008f839e" dependencies = [ "ahash 0.8.0", - "arrow-array 32.0.0", + "arrow-array", "arrow-buffer 32.0.0", - "arrow-data 32.0.0", - "arrow-schema 32.0.0", + "arrow-data", + "arrow-schema", "half 2.1.0", "hashbrown 0.13.2", ] -[[package]] -name = "arrow-schema" -version = "31.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73ca49d010b27e2d73f70c1d1f90c1b378550ed0f4ad379c4dea0c997d97d723" - [[package]] name = "arrow-schema" version = "32.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ead3f373b9173af52f2fdefcb5a7dd89f453fbc40056f574a8aeb23382a4ef81" -[[package]] -name = "arrow-select" -version = "31.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "976cbaeb1a85c09eea81f3f9c149c758630ff422ed0238624c5c3f4704b6a53c" -dependencies = [ - "arrow-array 31.0.0", - "arrow-buffer 31.0.0", - "arrow-data 31.0.0", - "arrow-schema 31.0.0", - "num", -] - [[package]] name = "arrow-select" version = "32.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "646b4f15b5a77c970059e748aeb1539705c68cd397ecf0f0264c4ef3737d35f3" dependencies = [ - "arrow-array 32.0.0", + "arrow-array", "arrow-buffer 32.0.0", - "arrow-data 32.0.0", - "arrow-schema 32.0.0", + "arrow-data", + "arrow-schema", "num", ] @@ -531,11 +444,11 @@ version = "32.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c8b8bf150caaeca03f39f1a91069701387d93f7cfd256d27f423ac8496d99a51" dependencies = [ - "arrow-array 32.0.0", + "arrow-array", "arrow-buffer 32.0.0", - "arrow-data 32.0.0", - "arrow-schema 32.0.0", - "arrow-select 32.0.0", + "arrow-data", + "arrow-schema", + "arrow-select", "regex", "regex-syntax", ] @@ -764,7 +677,7 @@ dependencies = [ "futures 0.3.25", "log", "object_store 1.0.0-alpha02", - "parquet 31.0.0", + "parquet", "parquet_ext", "pprof", "rand 0.7.3", @@ -1901,7 +1814,7 @@ dependencies = [ "num_cpus", "object_store 0.5.3", "parking_lot 0.12.1", - "parquet 32.0.0", + "parquet", "paste 1.0.8", "percent-encoding 2.2.0", "pin-project-lite", @@ -1927,7 +1840,7 @@ dependencies = [ "chrono", "num_cpus", "object_store 0.5.3", - "parquet 32.0.0", + "parquet", "sqlparser", ] @@ -1970,7 +1883,7 @@ dependencies = [ "ahash 0.8.0", "arrow 32.0.0", "arrow-buffer 32.0.0", - "arrow-schema 32.0.0", + "arrow-schema", "blake2", "blake3", "chrono", @@ -2028,7 +1941,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4a3d12047a5847f9667f4e2aa8fa2e7d5a6e1094b8e3546d58de492152a50dc7" dependencies = [ - "arrow-schema 32.0.0", + "arrow-schema", "datafusion-common", "datafusion-expr", "log", @@ -2336,16 +2249,6 @@ dependencies = [ "thiserror", ] -[[package]] -name = "flatbuffers" -version = "22.9.29" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ce016b9901aef3579617931fbb2df8fc9a9f7cb95a16eb8acc8148209bb9e70" -dependencies = [ - "bitflags", - "thiserror", -] - [[package]] name = "flatbuffers" version = "23.1.21" @@ -4402,37 +4305,6 @@ dependencies = [ "windows-sys 0.36.1", ] -[[package]] -name = "parquet" -version = "31.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b4ee1ffc0778395c9783a5c74f2cad2fb1a128ade95a965212d31b7b13e3d45" -dependencies = [ - "ahash 
0.8.0", - "arrow-array 31.0.0", - "arrow-buffer 31.0.0", - "arrow-cast 31.0.0", - "arrow-data 31.0.0", - "arrow-ipc 31.0.0", - "arrow-schema 31.0.0", - "arrow-select 31.0.0", - "base64 0.21.0", - "brotli", - "bytes 1.2.1", - "chrono", - "flate2", - "hashbrown 0.13.2", - "lz4", - "num", - "num-bigint 0.4.3", - "paste 1.0.8", - "seq-macro", - "snap", - "thrift 0.17.0", - "twox-hash", - "zstd 0.12.1+zstd.1.5.2", -] - [[package]] name = "parquet" version = "32.0.0" @@ -4440,13 +4312,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "23b3d4917209e17e1da5fb07d276da237a42465f0def2b8d5fa5ce0e85855b4c" dependencies = [ "ahash 0.8.0", - "arrow-array 32.0.0", + "arrow-array", "arrow-buffer 32.0.0", - "arrow-cast 32.0.0", - "arrow-data 32.0.0", - "arrow-ipc 32.0.0", - "arrow-schema 32.0.0", - "arrow-select 32.0.0", + "arrow-cast", + "arrow-data", + "arrow-ipc", + "arrow-schema", + "arrow-select", "base64 0.21.0", "brotli", "bytes 1.2.1", @@ -4512,7 +4384,7 @@ dependencies = [ "datafusion-expr", "log", "lru", - "parquet 31.0.0", + "parquet", "parquet-format", "thrift 0.13.0", ] @@ -6431,7 +6303,7 @@ dependencies = [ "futures 0.3.25", "itertools", "log", - "parquet 31.0.0", + "parquet", "parquet_ext", "prost", "serde", @@ -6951,7 +6823,7 @@ dependencies = [ "env_logger", "futures 0.3.25", "object_store 1.0.0-alpha02", - "parquet 31.0.0", + "parquet", "parquet_ext", "table_engine", "tokio 1.25.0", diff --git a/Cargo.toml b/Cargo.toml index b87ff5b6f4..2524ec5f1b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -86,7 +86,7 @@ interpreters = { path = "interpreters" } meta_client = { path = "meta_client" } object_store = { path = "components/object_store" } parquet_ext = { path = "components/parquet_ext" } -parquet = { version = "31.0.0" } +parquet = { version = "32.0.0" } paste = "1.0" profile = { path = "components/profile" } prometheus = "0.12" diff --git a/common_types/src/datum.rs b/common_types/src/datum.rs index 8732605f96..f1b8903a37 100644 --- a/common_types/src/datum.rs +++ b/common_types/src/datum.rs @@ -823,6 +823,7 @@ pub mod arrow_convert { | DataType::Dictionary(_, _) | DataType::Decimal128(_, _) | DataType::Decimal256(_, _) + | DataType::RunEndEncoded(_, _) | DataType::Map(_, _) => None, } } diff --git a/components/object_store/src/lib.rs b/components/object_store/src/lib.rs index d603ad3dda..8a84024a68 100644 --- a/components/object_store/src/lib.rs +++ b/components/object_store/src/lib.rs @@ -2,8 +2,6 @@ //! Re-export of [object_store] crate. 
-#![feature(map_first_last)] - use std::sync::Arc; pub use upstream::{ diff --git a/rust-toolchain b/rust-toolchain index 51a91a0711..3f36906f0f 100644 --- a/rust-toolchain +++ b/rust-toolchain @@ -1 +1 @@ -nightly-2022-08-08 +nightly-2023-02-02 diff --git a/table_engine/src/stream.rs b/table_engine/src/stream.rs index 763cc9e4d5..6f85dd57fc 100644 --- a/table_engine/src/stream.rs +++ b/table_engine/src/stream.rs @@ -8,19 +8,18 @@ use std::{ task::{Context, Poll}, }; -use arrow::{ - datatypes::SchemaRef, - error::{ArrowError, Result as ArrowResult}, - record_batch::RecordBatch as ArrowRecordBatch, -}; +use arrow::{datatypes::SchemaRef, record_batch::RecordBatch as ArrowRecordBatch}; use common_types::{record_batch::RecordBatch, schema::RecordSchema}; use common_util::{ define_result, error::{BoxError, GenericError}, }; -use datafusion::physical_plan::{ - RecordBatchStream as DfRecordBatchStream, - SendableRecordBatchStream as DfSendableRecordBatchStream, +use datafusion::{ + error::{DataFusionError, Result as DataFusionResult}, + physical_plan::{ + RecordBatchStream as DfRecordBatchStream, + SendableRecordBatchStream as DfSendableRecordBatchStream, + }, }; use futures::stream::Stream; use snafu::{Backtrace, ResultExt, Snafu}; @@ -60,7 +59,7 @@ impl PartitionedStreams { pub struct ToDfStream(pub SendableRecordBatchStream); impl Stream for ToDfStream { - type Item = ArrowResult; + type Item = DataFusionResult; fn poll_next(mut self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll> { match self.0.as_mut().poll_next(ctx) { @@ -68,7 +67,7 @@ impl Stream for ToDfStream { Poll::Ready(Some(Ok(record_batch.into_arrow_record_batch()))) } Poll::Ready(Some(Err(e))) => { - Poll::Ready(Some(Err(ArrowError::ExternalError(Box::new(e))))) + Poll::Ready(Some(Err(DataFusionError::External(Box::new(e))))) } Poll::Ready(None) => Poll::Ready(None), Poll::Pending => Poll::Pending, From 3c1e9d7008514c696ef59bbba3be648464c6e49a Mon Sep 17 00:00:00 2001 From: jiacai2050 Date: Tue, 14 Feb 2023 11:37:42 +0800 Subject: [PATCH 03/11] fix cow errors --- remote_engine_client/src/lib.rs | 2 ++ sql/src/container.rs | 30 +++++++++++++++--------------- sql/src/provider.rs | 27 ++++++++++++++------------- 3 files changed, 31 insertions(+), 28 deletions(-) diff --git a/remote_engine_client/src/lib.rs b/remote_engine_client/src/lib.rs index e89156f2c6..bbcd2dc207 100644 --- a/remote_engine_client/src/lib.rs +++ b/remote_engine_client/src/lib.rs @@ -2,6 +2,8 @@ //! Remote table engine implementation +#![feature(let_chains)] + mod cached_router; mod channel; mod client; diff --git a/sql/src/container.rs b/sql/src/container.rs index 94dadb7119..d04d6880fa 100644 --- a/sql/src/container.rs +++ b/sql/src/container.rs @@ -2,7 +2,7 @@ //! 
Table container -use std::collections::HashMap; +use std::{borrow::Cow, collections::HashMap}; pub use datafusion::catalog::{ResolvedTableReference, TableReference}; use table_engine::table::TableRef; @@ -46,12 +46,12 @@ impl TableContainer { pub fn get(&self, name: TableReference) -> Option { match name { - TableReference::Bare { table } => self.get_default(table), + TableReference::Bare { table } => self.get_default(table.as_ref()), TableReference::Partial { schema, table } => { if schema == self.default_schema { - self.get_default(table) + self.get_default(table.as_ref()) } else { - self.get_other(&self.default_catalog, schema, table) + self.get_other(&self.default_catalog, schema.as_ref(), table.as_ref()) } } TableReference::Full { @@ -60,9 +60,9 @@ impl TableContainer { table, } => { if catalog == self.default_catalog && schema == self.default_schema { - self.get_default(table) + self.get_default(table.as_ref()) } else { - self.get_other(catalog, schema, table) + self.get_other(catalog.as_ref(), schema.as_ref(), table.as_ref()) } } } @@ -82,10 +82,10 @@ impl TableContainer { pub fn insert(&mut self, name: TableReference, table_ref: TableRef) { match name { - TableReference::Bare { table } => self.insert_default(table, table_ref), + TableReference::Bare { table } => self.insert_default(table.as_ref(), table_ref), TableReference::Partial { schema, table } => { if schema == self.default_schema { - self.insert_default(table, table_ref) + self.insert_default(table.as_ref(), table_ref) } else { self.insert_other( self.default_catalog.clone(), @@ -101,7 +101,7 @@ impl TableContainer { table, } => { if catalog == self.default_catalog && schema == self.default_schema { - self.insert_default(table, table_ref) + self.insert_default(table.as_ref(), table_ref) } else { self.insert_other( catalog.to_string(), @@ -145,9 +145,9 @@ impl TableContainer { // default_catalog/default_schema can be empty string, but that's // ok since we have table under them let table_ref = ResolvedTableReference { - catalog: &self.default_catalog, - schema: &self.default_schema, - table, + catalog: Cow::from(&self.default_catalog), + schema: Cow::from(&self.default_schema), + table: Cow::from(table), }; f(table_ref, adapter)?; } @@ -157,9 +157,9 @@ impl TableContainer { for (schema, tables) in schemas { for (table, adapter) in tables { let table_ref = ResolvedTableReference { - catalog, - schema, - table, + catalog: Cow::from(catalog), + schema: Cow::from(schema), + table: Cow::from(table), }; f(table_ref, adapter)?; } diff --git a/sql/src/provider.rs b/sql/src/provider.rs index 28cd2ad6d7..40e8456f04 100644 --- a/sql/src/provider.rs +++ b/sql/src/provider.rs @@ -2,7 +2,7 @@ //! Adapter to providers in datafusion -use std::{any::Any, cell::RefCell, collections::HashMap, sync::Arc}; +use std::{any::Any, borrow::Cow, cell::RefCell, collections::HashMap, sync::Arc}; use async_trait::async_trait; use catalog::manager::ManagerRef; @@ -101,7 +101,7 @@ impl<'a> MetaProvider for CatalogMetaProvider<'a> { let catalog = match self .manager - .catalog_by_name(resolved.catalog) + .catalog_by_name(resolved.catalog.as_ref()) .context(FindCatalog { name: resolved.catalog, })? { @@ -110,21 +110,21 @@ impl<'a> MetaProvider for CatalogMetaProvider<'a> { }; let schema = match catalog - .schema_by_name(resolved.schema) + .schema_by_name(resolved.schema.as_ref()) .context(FindSchema { - name: resolved.schema, + name: resolved.schema.to_string(), })? 
{ Some(s) => s, None => { return SchemaNotFound { - name: resolved.schema, + name: resolved.schema.to_string(), } .fail(); } }; schema - .table_by_name(resolved.table) + .table_by_name(resolved.table.as_ref()) .map_err(Box::new) .context(FindTable { name: resolved.table, @@ -227,12 +227,13 @@ impl<'a, P: MetaProvider> ContextProvider for ContextProviderAdapter<'a, P> { name: TableReference, ) -> std::result::Result, DataFusionError> { // Find in local cache - if let Some(table_ref) = self.table_cache.borrow().get(name) { + if let Some(table_ref) = self.table_cache.borrow().get(name.clone()) { return Ok(self.table_source(table_ref)); } // Find in meta provider - match self.meta_provider.table(name) { + // TODO: possible to remove this clone? + match self.meta_provider.table(name.clone()) { Ok(Some(table)) => { self.table_cache.borrow_mut().insert(name, table.clone()); Ok(self.table_source(table)) @@ -317,9 +318,9 @@ impl SchemaProvider for SchemaProviderAdapter { async fn table(&self, name: &str) -> Option> { let name_ref = TableReference::Full { - catalog: &self.catalog, - schema: &self.schema, - table: name, + catalog: Cow::from(&self.catalog), + schema: Cow::from(&self.schema), + table: Cow::from(name), }; self.tables @@ -342,14 +343,14 @@ impl CatalogProviderAdapter { let mut catalog_adapters = HashMap::with_capacity(tables.num_catalogs()); let _ = tables.visit::<_, ()>(|name, _| { // Get or create catalog - let catalog = match catalog_adapters.get_mut(name.catalog) { + let catalog = match catalog_adapters.get_mut(name.catalog.as_ref()) { Some(v) => v, None => catalog_adapters .entry(name.catalog.to_string()) .or_insert_with(CatalogProviderAdapter::default), }; // Get or create schema - if catalog.schemas.get(name.schema).is_none() { + if catalog.schemas.get(name.schema.as_ref()).is_none() { catalog.schemas.insert( name.schema.to_string(), Arc::new(SchemaProviderAdapter { From facdbecc38cbed83fb4f591a2b7f890d094b6a1a Mon Sep 17 00:00:00 2001 From: jiacai2050 Date: Tue, 14 Feb 2023 11:48:10 +0800 Subject: [PATCH 04/11] fix error convert --- interpreters/src/validator.rs | 2 +- .../src/df_execution_extension/prom_align.rs | 14 +++++++++----- .../src/logical_optimizer/type_conversion.rs | 1 + 3 files changed, 11 insertions(+), 6 deletions(-) diff --git a/interpreters/src/validator.rs b/interpreters/src/validator.rs index 9d035ce813..6fa1fa5733 100644 --- a/interpreters/src/validator.rs +++ b/interpreters/src/validator.rs @@ -47,7 +47,7 @@ impl Validator { match plan { Plan::Query(plan) => { let res = plan.tables.visit::<_, ()>(|name, _| { - if partition::is_sub_partition_table(name.table) { + if partition::is_sub_partition_table(name.table.as_ref()) { Err(()) } else { Ok(()) diff --git a/query_engine/src/df_execution_extension/prom_align.rs b/query_engine/src/df_execution_extension/prom_align.rs index 84ba36a497..09fda22b84 100644 --- a/query_engine/src/df_execution_extension/prom_align.rs +++ b/query_engine/src/df_execution_extension/prom_align.rs @@ -482,7 +482,7 @@ impl PromAlignReader { } impl Stream for PromAlignReader { - type Item = std::result::Result; + type Item = datafusion::error::Result; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { if self.done { @@ -497,9 +497,12 @@ impl Stream for PromAlignReader { } let tsid_samples = self .accumulate_record_batch(batch) - .map_err(|e| ArrowError::SchemaError(e.to_string()))?; // convert all Error enum to SchemaError + .map_err(|e| DataFusionError::External(Box::new(e) as Box<_>))?; // convert all Error enum to 
SchemaError if !tsid_samples.is_empty() { - Poll::Ready(Some(self.samples_to_record_batch(schema, tsid_samples))) + Poll::Ready(Some( + self.samples_to_record_batch(schema, tsid_samples) + .map_err(|e| DataFusionError::ArrowError(e)), + )) } else { Poll::Ready(Some(Ok(RecordBatch::new_empty(schema)))) } @@ -509,10 +512,11 @@ impl Stream for PromAlignReader { if let Some(schema) = mem::take(&mut self.record_schema) { let tsid_samples = self .accumulate_record_batch(RecordBatch::new_empty(schema.clone())) - .map_err(|e| ArrowError::SchemaError(e.to_string()))?; + .map_err(|e| DataFusionError::External(Box::new(e) as Box<_>))?; if !tsid_samples.is_empty() { return Poll::Ready(Some( - self.samples_to_record_batch(schema, tsid_samples), + self.samples_to_record_batch(schema, tsid_samples) + .map_err(|e| DataFusionError::ArrowError(e)), )); } } diff --git a/query_engine/src/logical_optimizer/type_conversion.rs b/query_engine/src/logical_optimizer/type_conversion.rs index 3dc33b4bf4..20b352c61b 100644 --- a/query_engine/src/logical_optimizer/type_conversion.rs +++ b/query_engine/src/logical_optimizer/type_conversion.rs @@ -117,6 +117,7 @@ impl OptimizerRule for TypeConversion { | LogicalPlan::CreateView(_) | LogicalPlan::CreateCatalogSchema(_) | LogicalPlan::CreateCatalog(_) + | LogicalPlan::Unnest(_) | LogicalPlan::EmptyRelation { .. } => Ok(Some(plan.clone())), } } From 48ca8b44819f6b603058063973a8d01a80688375 Mon Sep 17 00:00:00 2001 From: jiacai2050 Date: Tue, 14 Feb 2023 12:00:45 +0800 Subject: [PATCH 05/11] fix clippy --- build.rs | 2 +- components/arena/src/fixed_size.rs | 6 +++--- components/arrow_ext/src/operation.rs | 2 +- components/logger/src/lib.rs | 6 +++--- components/profile/src/lib.rs | 4 ++-- server/src/http.rs | 28 ++++++++++++++++----------- 6 files changed, 27 insertions(+), 21 deletions(-) diff --git a/build.rs b/build.rs index 30efa28554..9f99969fd8 100644 --- a/build.rs +++ b/build.rs @@ -18,7 +18,7 @@ fn main() { .expect("Convert git branch env to string"); if !branch.is_empty() { *config.git_mut().branch_mut() = false; - println!("cargo:rustc-env=VERGEN_GIT_BRANCH={}", branch); + println!("cargo:rustc-env=VERGEN_GIT_BRANCH={branch}"); } } diff --git a/components/arena/src/fixed_size.rs b/components/arena/src/fixed_size.rs index f7305e6144..8773ada58e 100644 --- a/components/arena/src/fixed_size.rs +++ b/components/arena/src/fixed_size.rs @@ -39,7 +39,7 @@ impl Core { /// - new_unchecked /// `ptr` is allocated from allocator. 
fn with_capacity(cap: usize) -> Self { - let layout = Layout::from_size_align(cap as usize, DEFAULT_ALIGN).unwrap(); + let layout = Layout::from_size_align(cap, DEFAULT_ALIGN).unwrap(); let ptr = unsafe { alloc(layout) }; Self { @@ -55,7 +55,7 @@ impl Core { let layout = layout.pad_to_align(); let size = layout.size(); - let offset = self.len.fetch_add(size, Ordering::SeqCst) as usize; + let offset = self.len.fetch_add(size, Ordering::SeqCst); if offset + size > self.cap { self.len.fetch_sub(size, Ordering::SeqCst); return None; @@ -87,7 +87,7 @@ impl Arena for FixedSizeArena { fn stats(&self) -> Self::Stats { Self::Stats { bytes_used: self.core.cap, - bytes_allocated: self.core.len.load(Ordering::SeqCst) as usize, + bytes_allocated: self.core.len.load(Ordering::SeqCst), } } } diff --git a/components/arrow_ext/src/operation.rs b/components/arrow_ext/src/operation.rs index ac5c4dc4b4..ec10eb1f85 100644 --- a/components/arrow_ext/src/operation.rs +++ b/components/arrow_ext/src/operation.rs @@ -23,7 +23,7 @@ pub fn reverse_record_batch(batch: &RecordBatch) -> Result { )) })?; // TODO(xikai): avoid this memory allocation. - let indices = UInt32Array::from_iter_values((0..num_rows).into_iter().rev()); + let indices = UInt32Array::from_iter_values((0..num_rows).rev()); let mut cols = Vec::with_capacity(batch.num_columns()); for orig_col_data in batch.columns() { diff --git a/components/logger/src/lib.rs b/components/logger/src/lib.rs index 3ecdc14588..a64e3a6a05 100644 --- a/components/logger/src/lib.rs +++ b/components/logger/src/lib.rs @@ -229,7 +229,7 @@ impl RuntimeLevel { pub fn set_level_by_str(&self, level_str: &str) -> Result<(), String> { Level::from_str(level_str) - .map_err(|_| format!("Invalid level {}", level_str)) + .map_err(|_| format!("Invalid level {level_str}")) .and_then(|level| match level { Level::Trace | Level::Debug | Level::Info => Ok(level), _ => Err("Only allow to change log level to ".to_owned()), @@ -355,7 +355,7 @@ impl<'a> slog::Serializer for Serializer<'a> { // Write key write!(self.decorator, "[")?; self.decorator.start_key()?; - write!(self.decorator, "{}", key)?; + write!(self.decorator, "{key}")?; // Write separator self.decorator.start_separator()?; @@ -363,7 +363,7 @@ impl<'a> slog::Serializer for Serializer<'a> { // Write value self.decorator.start_value()?; - write!(self.decorator, "{}", val)?; + write!(self.decorator, "{val}")?; self.decorator.reset()?; write!(self.decorator, "]")?; diff --git a/components/profile/src/lib.rs b/components/profile/src/lib.rs index 453f8262e3..524e97b7f2 100644 --- a/components/profile/src/lib.rs +++ b/components/profile/src/lib.rs @@ -23,7 +23,7 @@ pub enum Error { impl std::fmt::Display for Error { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - write!(f, "Profile Error: {:?}", self) + write!(f, "Profile Error: {self:?}") } } @@ -92,7 +92,7 @@ impl Profiler { pub fn dump_mem_prof(&self, seconds: u64) -> Result> { // concurrent profiling is disabled. 
let lock_guard = self.mem_prof_lock.try_lock().map_err(|e| Error::Internal { - msg: format!("failed to acquire mem_prof_lock, err:{}", e), + msg: format!("failed to acquire mem_prof_lock, err:{e}"), })?; info!( "Profiler::dump_mem_prof start memory profiling {} seconds", diff --git a/server/src/http.rs b/server/src/http.rs index c74b3ee505..fff8489ffc 100644 --- a/server/src/http.rs +++ b/server/src/http.rs @@ -119,7 +119,9 @@ impl Service { } impl Service { - fn routes(&self) -> impl Filter + Clone { + fn routes( + &self, + ) -> impl Filter + Clone { self.home() .or(self.metrics()) .or(self.sql()) @@ -132,7 +134,9 @@ impl Service { /// Expose `/prom/v1/read` and `/prom/v1/write` to serve Prometheus remote /// storage request - fn prom_api(&self) -> impl Filter + Clone { + fn prom_api( + &self, + ) -> impl Filter + Clone { let write_api = warp::path!("write") .and(web::warp::with_remote_storage( self.prom_remote_storage.clone(), @@ -153,7 +157,7 @@ impl Service { .and(write_api.or(query_api)) } - fn home(&self) -> impl Filter + Clone { + fn home(&self) -> impl Filter + Clone { warp::path::end().and(warp::get()).map(|| { let mut resp = HashMap::new(); resp.insert("status", "ok"); @@ -162,7 +166,7 @@ impl Service { } // TODO(yingwen): Avoid boilerplate code if there are more handlers - fn sql(&self) -> impl Filter + Clone { + fn sql(&self) -> impl Filter + Clone { // accept json or plain text let extract_request = warp::body::json() .or(warp::body::bytes().map(Request::from)) @@ -193,7 +197,7 @@ impl Service { fn flush_memtable( &self, - ) -> impl Filter + Clone { + ) -> impl Filter + Clone { warp::path!("flush_memtable") .and(warp::post()) .and(self.with_instance()) @@ -238,13 +242,15 @@ impl Service { }) } - fn metrics(&self) -> impl Filter + Clone { + fn metrics( + &self, + ) -> impl Filter + Clone { warp::path!("metrics").and(warp::get()).map(metrics::dump) } fn heap_profile( &self, - ) -> impl Filter + Clone { + ) -> impl Filter + Clone { warp::path!("debug" / "heap_profile" / ..) 
.and(warp::path::param::()) .and(warp::get()) @@ -267,7 +273,7 @@ impl Service { fn update_log_level( &self, - ) -> impl Filter + Clone { + ) -> impl Filter + Clone { warp::path!("log_level" / String) .and(warp::put()) .and(self.with_log_runtime()) @@ -346,7 +352,7 @@ impl Service { fn admin_block( &self, - ) -> impl Filter + Clone { + ) -> impl Filter + Clone { warp::path!("block") .and(warp::post()) .and(warp::body::json()) @@ -492,7 +498,7 @@ fn error_to_status_code(err: &Error) -> StatusCode { async fn handle_rejection( rejection: warp::Rejection, -) -> std::result::Result { +) -> std::result::Result<(impl warp::Reply,), Infallible> { let code; let message; @@ -514,5 +520,5 @@ async fn handle_rejection( message, }); - Ok(reply::with_status(json, code)) + Ok((reply::with_status(json, code),)) } From 53396489f0f0acc65e34794c1c5010507634c9e4 Mon Sep 17 00:00:00 2001 From: jiacai2050 Date: Tue, 14 Feb 2023 12:15:02 +0800 Subject: [PATCH 06/11] fix clippy again --- Cargo.lock | 2 +- analytic_engine/src/compaction/mod.rs | 8 ++- .../src/instance/flush_compaction.rs | 4 +- analytic_engine/src/lib.rs | 2 +- analytic_engine/src/payload.rs | 2 +- analytic_engine/src/setup.rs | 3 +- analytic_engine/src/sst/file.rs | 1 - .../src/sst/parquet/async_reader.rs | 10 ++-- analytic_engine/src/sst/parquet/encoding.rs | 4 +- analytic_engine/src/sst/parquet/hybrid.rs | 2 +- analytic_engine/src/table/sst_util.rs | 2 +- analytic_engine/src/tests/table.rs | 4 +- analytic_engine/src/tests/util.rs | 4 +- benchmarks/benches/bench.rs | 8 +-- benchmarks/src/arrow2_bench.rs | 2 +- benchmarks/src/config.rs | 3 +- benchmarks/src/parquet_bench.rs | 2 +- benchmarks/src/util.rs | 4 +- catalog_impls/src/table_based.rs | 6 +-- catalog_impls/src/volatile.rs | 4 +- cluster/src/shard_tables_cache.rs | 2 +- common_types/src/datum.rs | 2 +- common_types/src/schema.rs | 4 +- common_types/src/time.rs | 2 +- common_util/src/avro.rs | 12 ++--- common_util/src/codec/memcomparable/number.rs | 2 +- common_util/src/config.rs | 45 ++++++++-------- common_util/src/panic.rs | 4 +- common_util/src/record_batch.rs | 3 +- components/message_queue/src/tests/util.rs | 4 +- components/object_store/src/mem_cache.rs | 2 +- components/object_store/src/prefix.rs | 6 +-- components/parquet_ext/src/meta_data.rs | 6 +-- components/parquet_ext/src/tests.rs | 2 +- components/skiplist/src/list.rs | 4 +- components/skiplist/tests/tests.rs | 20 ++++---- components/table_kv/src/config.rs | 8 ++- components/table_kv/src/obkv.rs | 20 +++----- components/table_kv/src/obkv/tests.rs | 6 +-- components/table_kv/src/tests.rs | 4 +- df_operator/src/aggregate.rs | 8 +-- df_operator/src/functions.rs | 7 ++- integration_tests/Cargo.toml | 3 +- integration_tests/src/database.rs | 9 ++-- interpreters/src/show.rs | 2 +- interpreters/src/show_create.rs | 6 +-- .../src/table_manipulator/meta_based.rs | 7 ++- interpreters/src/tests.rs | 14 ++--- .../src/df_execution_extension/prom_align.rs | 4 +- .../logical_optimizer/order_by_primary_key.rs | 3 +- query_engine/src/logical_optimizer/tests.rs | 4 +- .../src/logical_optimizer/type_conversion.rs | 3 +- remote_engine_client/src/channel.rs | 2 +- router/src/cluster_based.rs | 3 +- router/src/endpoint.rs | 2 +- server/src/grpc/forward.rs | 6 +-- server/src/grpc/meta_event_service/error.rs | 2 +- server/src/grpc/meta_event_service/mod.rs | 39 +++++++------- server/src/grpc/mod.rs | 2 +- .../src/grpc/remote_engine_service/error.rs | 2 +- server/src/grpc/storage_service/error.rs | 2 +- server/src/grpc/storage_service/mod.rs | 20 
+++----- server/src/grpc/storage_service/prom_query.rs | 3 +- server/src/grpc/storage_service/write.rs | 45 +++++++--------- server/src/http.rs | 2 +- server/src/local_tables.rs | 8 +-- server/src/mysql/service.rs | 2 +- server/src/mysql/writer.rs | 2 +- sql/src/parser.rs | 51 +++++++++---------- sql/src/partition.rs | 5 +- sql/src/planner.rs | 2 +- sql/src/promql/convert.rs | 6 +-- sql/src/promql/datafusion_util.rs | 2 +- sql/src/promql/udf.rs | 5 +- sql/src/provider.rs | 6 +-- src/bin/ceresdb-server.rs | 3 +- table_engine/src/partition/mod.rs | 3 +- .../src/partition/rule/df_adapter/mod.rs | 5 +- table_engine/src/partition/rule/factory.rs | 6 +-- table_engine/src/partition/rule/key.rs | 4 +- table_engine/src/predicate.rs | 2 +- table_engine/src/provider.rs | 19 +++---- tools/src/bin/sst-convert.rs | 4 +- wal/src/message_queue_impl/encoding.rs | 4 +- wal/src/message_queue_impl/log_cleaner.rs | 2 +- wal/src/message_queue_impl/region.rs | 19 +++---- wal/src/message_queue_impl/region_context.rs | 7 ++- wal/src/table_kv_impl/encoding.rs | 12 ++--- wal/src/table_kv_impl/namespace.rs | 4 +- wal/src/table_kv_impl/table_unit.rs | 3 +- 90 files changed, 268 insertions(+), 353 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5a6a28d774..243a798b5a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1084,7 +1084,7 @@ dependencies = [ [[package]] name = "ceresdb-client-rs" version = "0.1.0" -source = "git+https://github.com/CeresDB/ceresdb-client-rs.git?rev=5fbd1a1526c3ddd25bb1f38f63f869c892052f7c#5fbd1a1526c3ddd25bb1f38f63f869c892052f7c" +source = "git+https://github.com/jiacai2050/ceresdb-client-rs.git?rev=80411cd1597105c19a1a41582b929c58b8bae11f#80411cd1597105c19a1a41582b929c58b8bae11f" dependencies = [ "arrow 23.0.0", "async-trait", diff --git a/analytic_engine/src/compaction/mod.rs b/analytic_engine/src/compaction/mod.rs index 53b2e16919..9b8506ac38 100644 --- a/analytic_engine/src/compaction/mod.rs +++ b/analytic_engine/src/compaction/mod.rs @@ -58,7 +58,9 @@ pub enum Error { } #[derive(Debug, Clone, Copy, Deserialize, PartialEq)] +#[derive(Default)] pub enum CompactionStrategy { + #[default] Default, TimeWindow(TimeWindowCompactionOptions), SizeTiered(SizeTieredCompactionOptions), @@ -113,11 +115,7 @@ impl Default for TimeWindowCompactionOptions { } } -impl Default for CompactionStrategy { - fn default() -> Self { - CompactionStrategy::Default - } -} + const BUCKET_LOW_KEY: &str = "compaction_bucket_low"; const BUCKET_HIGH_KEY: &str = "compaction_bucket_high"; diff --git a/analytic_engine/src/instance/flush_compaction.rs b/analytic_engine/src/instance/flush_compaction.rs index d4dc86f3bf..39becd595f 100644 --- a/analytic_engine/src/instance/flush_compaction.rs +++ b/analytic_engine/src/instance/flush_compaction.rs @@ -1082,7 +1082,6 @@ fn split_record_batch_with_time_ranges( timestamp_idx: usize, ) -> Result> { let mut builders: Vec = (0..time_ranges.len()) - .into_iter() .map(|_| RecordBatchWithKeyBuilder::new(record_batch.schema_with_key().clone())) .collect(); @@ -1108,8 +1107,7 @@ fn split_record_batch_with_time_ranges( .context(SplitRecordBatch)?; } else { panic!( - "Record timestamp is not in time_ranges, timestamp:{:?}, time_ranges:{:?}", - timestamp, time_ranges + "Record timestamp is not in time_ranges, timestamp:{timestamp:?}, time_ranges:{time_ranges:?}" ); } } diff --git a/analytic_engine/src/lib.rs b/analytic_engine/src/lib.rs index a389cda166..58b1d42fd2 100644 --- a/analytic_engine/src/lib.rs +++ b/analytic_engine/src/lib.rs @@ -108,7 +108,7 @@ impl Default for Config { 
db_write_buffer_size: 0, scan_batch_size: 500, sst_background_read_parallelism: 8, - wal_storage: WalStorageConfig::RocksDB(Box::new(RocksDBConfig::default())), + wal_storage: WalStorageConfig::RocksDB(Box::default()), remote_engine_client: remote_engine_client::config::Config::default(), } } diff --git a/analytic_engine/src/payload.rs b/analytic_engine/src/payload.rs index f6d39265d7..bbf5083e43 100644 --- a/analytic_engine/src/payload.rs +++ b/analytic_engine/src/payload.rs @@ -119,7 +119,7 @@ impl<'a> Payload for WritePayload<'a> { WritePayload::AlterOption(req) => req.encoded_len(), }; - HEADER_SIZE + body_size as usize + HEADER_SIZE + body_size } fn encode_to(&self, buf: &mut B) -> Result<()> { diff --git a/analytic_engine/src/setup.rs b/analytic_engine/src/setup.rs index a9f762455c..641ed8d0b9 100644 --- a/analytic_engine/src/setup.rs +++ b/analytic_engine/src/setup.rs @@ -325,8 +325,7 @@ impl EngineBuilder for KafkaWalEngineBuilder { _ => { return InvalidWalConfig { msg: format!( - "invalid wal storage config while opening kafka wal, config:{:?}", - config + "invalid wal storage config while opening kafka wal, config:{config:?}" ), } .fail(); diff --git a/analytic_engine/src/sst/file.rs b/analytic_engine/src/sst/file.rs index 3ad9a44425..9215560595 100644 --- a/analytic_engine/src/sst/file.rs +++ b/analytic_engine/src/sst/file.rs @@ -327,7 +327,6 @@ impl FileHandleSet { let seek_key = FileOrdKey::for_seek(time_range.inclusive_start()); self.file_map .range(seek_key..) - .into_iter() .filter_map(|(_key, file)| { if file.intersect_with_time_range(time_range) { Some(file.clone()) diff --git a/analytic_engine/src/sst/parquet/async_reader.rs b/analytic_engine/src/sst/parquet/async_reader.rs index a843ecdd65..70d2efb327 100644 --- a/analytic_engine/src/sst/parquet/async_reader.rs +++ b/analytic_engine/src/sst/parquet/async_reader.rs @@ -400,8 +400,7 @@ impl AsyncFileReader for ObjectStoreReader { .get_range(&self.path, range) .map_err(|e| { parquet::errors::ParquetError::General(format!( - "Failed to fetch range from object store, err:{}", - e + "Failed to fetch range from object store, err:{e}" )) }) .boxed() @@ -423,8 +422,7 @@ impl AsyncFileReader for ObjectStoreReader { .get_ranges(&self.path, &ranges) .map_err(|e| { parquet::errors::ParquetError::General(format!( - "Failed to fetch ranges from object store, err:{}", - e + "Failed to fetch ranges from object store, err:{e}" )) }) .await @@ -572,8 +570,7 @@ impl Stream for RecordBatchReceiver { let rx_group_len = self.rx_group.len(); let cur_rx = self.rx_group.get_mut(cur_rx_idx).unwrap_or_else(|| { panic!( - "cur_rx_idx is impossible to be out-of-range, cur_rx_idx:{}, rx_group len:{}", - cur_rx_idx, rx_group_len + "cur_rx_idx is impossible to be out-of-range, cur_rx_idx:{cur_rx_idx}, rx_group len:{rx_group_len}" ) }); let poll_result = cur_rx.poll_recv(cx); @@ -673,7 +670,6 @@ impl<'a> SstReader for ThreadedReader<'a> { let channel_cap_per_sub_reader = self.channel_cap / self.read_parallelism + 1; let (tx_group, rx_group): (Vec<_>, Vec<_>) = (0..read_parallelism) - .into_iter() .map(|_| mpsc::channel::>(channel_cap_per_sub_reader)) .unzip(); diff --git a/analytic_engine/src/sst/parquet/encoding.rs b/analytic_engine/src/sst/parquet/encoding.rs index 44213c107d..c199faa603 100644 --- a/analytic_engine/src/sst/parquet/encoding.rs +++ b/analytic_engine/src/sst/parquet/encoding.rs @@ -169,7 +169,7 @@ pub const META_VALUE_HEADER: u8 = 0; pub fn encode_sst_meta_data(meta_data: ParquetMetaData) -> Result { let meta_data_pb = 
sst_pb::ParquetMetaData::from(meta_data); - let mut buf = BytesMut::with_capacity(meta_data_pb.encoded_len() as usize + 1); + let mut buf = BytesMut::with_capacity(meta_data_pb.encoded_len() + 1); buf.try_put_u8(META_VALUE_HEADER) .expect("Should write header into the buffer successfully"); @@ -588,7 +588,7 @@ impl HybridRecordDecoder { if let Some(bitmap) = old_null_bitmap { if !bitmap.is_set(idx) { for i in 0..value_num { - bit_util::unset_bit(null_slice, length_so_far + i as usize); + bit_util::unset_bit(null_slice, length_so_far + i); } } } diff --git a/analytic_engine/src/sst/parquet/hybrid.rs b/analytic_engine/src/sst/parquet/hybrid.rs index 61e696d235..9a4f60cb67 100644 --- a/analytic_engine/src/sst/parquet/hybrid.rs +++ b/analytic_engine/src/sst/parquet/hybrid.rs @@ -396,7 +396,7 @@ impl ListArrayBuilder { let end = array.value_offsets()[slice_arg.offset + slice_arg.length]; for i in - (slice_arg.offset as usize)..(slice_arg.offset + slice_arg.length as usize) + slice_arg.offset..(slice_arg.offset + slice_arg.length) { inner_length_so_far += array.value_length(i); inner_offsets.push(inner_length_so_far); diff --git a/analytic_engine/src/table/sst_util.rs b/analytic_engine/src/table/sst_util.rs index fbdc36cc24..f8d3bd4aa9 100644 --- a/analytic_engine/src/table/sst_util.rs +++ b/analytic_engine/src/table/sst_util.rs @@ -14,7 +14,7 @@ const SST_FILE_SUFFIX: &str = "sst"; #[inline] /// Generate the sst file name. pub fn sst_file_name(id: FileId) -> String { - format!("{}.{}", id, SST_FILE_SUFFIX) + format!("{id}.{SST_FILE_SUFFIX}") } pub fn new_sst_file_path(space_id: SpaceId, table_id: TableId, file_id: FileId) -> Path { diff --git a/analytic_engine/src/tests/table.rs b/analytic_engine/src/tests/table.rs index a27be3f256..f7baf84b69 100644 --- a/analytic_engine/src/tests/table.rs +++ b/analytic_engine/src/tests/table.rs @@ -211,9 +211,7 @@ pub fn assert_batch_eq_to_row_group(record_batches: &[RecordBatch], row_group: & assert_eq!( &cursor.datum(column_idx), datum, - "record_batches:{:?}, row_group:{:?}", - record_batches, - row_group + "record_batches:{record_batches:?}, row_group:{row_group:?}" ); } cursor.step(); diff --git a/analytic_engine/src/tests/util.rs b/analytic_engine/src/tests/util.rs index d8507d57f3..58f1af8d5a 100644 --- a/analytic_engine/src/tests/util.rs +++ b/analytic_engine/src/tests/util.rs @@ -36,7 +36,7 @@ use crate::{ }, storage_options::{LocalOptions, ObjectStoreOptions, StorageOptions}, tests::table::{self, FixedSchemaTable, RowTuple}, - Config, ObkvWalConfig, RocksDBConfig, WalStorageConfig, + Config, RocksDBConfig, WalStorageConfig, }; const DAY_MS: i64 = 24 * 60 * 60 * 1000; @@ -544,7 +544,7 @@ impl Default for MemoryEngineContext { data_path: dir.path().to_str().unwrap().to_string(), }), }, - wal_storage: WalStorageConfig::Obkv(Box::new(ObkvWalConfig::default())), + wal_storage: WalStorageConfig::Obkv(Box::default()), ..Default::default() }; diff --git a/benchmarks/benches/bench.rs b/benchmarks/benches/bench.rs index e545a59c9c..c134cb47c3 100644 --- a/benchmarks/benches/bench.rs +++ b/benchmarks/benches/bench.rs @@ -73,14 +73,14 @@ fn bench_merge_sst(c: &mut Criterion) { for i in 0..bench.num_benches() { bench.init_for_bench(i, true); group.bench_with_input( - BenchmarkId::new("merge_sst", format!("{}/{}/dedup", sst_file_ids, i)), + BenchmarkId::new("merge_sst", format!("{sst_file_ids}/{i}/dedup")), &bench, bench_merge_sst_iter, ); bench.init_for_bench(i, false); group.bench_with_input( - BenchmarkId::new("merge_sst", format!("{}/{}/no-dedup", 
sst_file_ids, i)), + BenchmarkId::new("merge_sst", format!("{sst_file_ids}/{i}/no-dedup")), &bench, bench_merge_sst_iter, ); @@ -155,14 +155,14 @@ fn bench_merge_memtable(c: &mut Criterion) { for i in 0..bench.num_benches() { bench.init_for_bench(i, true); group.bench_with_input( - BenchmarkId::new("merge_memtable", format!("{}/{}/dedup", sst_file_ids, i)), + BenchmarkId::new("merge_memtable", format!("{sst_file_ids}/{i}/dedup")), &bench, bench_merge_memtable_iter, ); bench.init_for_bench(i, false); group.bench_with_input( - BenchmarkId::new("merge_memtable", format!("{}/{}/no-dedup", sst_file_ids, i)), + BenchmarkId::new("merge_memtable", format!("{sst_file_ids}/{i}/no-dedup")), &bench, bench_merge_memtable_iter, ); diff --git a/benchmarks/src/arrow2_bench.rs b/benchmarks/src/arrow2_bench.rs index 82c41e1e63..686f186d9c 100644 --- a/benchmarks/src/arrow2_bench.rs +++ b/benchmarks/src/arrow2_bench.rs @@ -38,7 +38,7 @@ impl Arrow2Bench { pub fn init_for_bench(&mut self, i: usize) { let projection = if i < self.max_projections { - (0..i + 1).into_iter().collect() + (0..i + 1).collect() } else { Vec::new() }; diff --git a/benchmarks/src/config.rs b/benchmarks/src/config.rs index 4f515a9ca0..0b92b61e3e 100644 --- a/benchmarks/src/config.rs +++ b/benchmarks/src/config.rs @@ -32,8 +32,7 @@ pub fn bench_config_from_env() -> BenchConfig { let path = match env::var(BENCH_CONFIG_PATH_KEY) { Ok(v) => v, Err(e) => panic!( - "Env {} is required to run benches, err:{}.", - BENCH_CONFIG_PATH_KEY, e + "Env {BENCH_CONFIG_PATH_KEY} is required to run benches, err:{e}." ), }; diff --git a/benchmarks/src/parquet_bench.rs b/benchmarks/src/parquet_bench.rs index 4753bc4a65..edbf5d82ae 100644 --- a/benchmarks/src/parquet_bench.rs +++ b/benchmarks/src/parquet_bench.rs @@ -59,7 +59,7 @@ impl ParquetBench { pub fn init_for_bench(&mut self, i: usize) { let projection = if i < self.max_projections { - (0..i + 1).into_iter().collect() + (0..i + 1).collect() } else { Vec::new() }; diff --git a/benchmarks/src/util.rs b/benchmarks/src/util.rs index 8d8af7ac62..0e0b7fd6bc 100644 --- a/benchmarks/src/util.rs +++ b/benchmarks/src/util.rs @@ -85,7 +85,7 @@ pub fn projected_schema_by_number( max_projections: usize, ) -> ProjectedSchema { if num_columns < max_projections { - let projection = (0..num_columns + 1).into_iter().collect(); + let projection = (0..num_columns + 1).collect(); ProjectedSchema::new(schema.clone(), Some(projection)).unwrap() } else { @@ -202,7 +202,7 @@ impl<'a> Payload for WritePayload<'a> { fn encode_size(&self) -> usize { let body_size = self.0.len(); - HEADER_SIZE + body_size as usize + HEADER_SIZE + body_size } fn encode_to(&self, buf: &mut B) -> Result<()> { diff --git a/catalog_impls/src/table_based.rs b/catalog_impls/src/table_based.rs index 265f4b1ca4..6c5f6e582c 100644 --- a/catalog_impls/src/table_based.rs +++ b/catalog_impls/src/table_based.rs @@ -111,7 +111,7 @@ impl Manager for TableBasedManager { } fn all_catalogs(&self) -> manager::Result> { - Ok(self.catalogs.iter().map(|(_, v)| v.clone() as _).collect()) + Ok(self.catalogs.values().map(|v| v.clone() as _).collect()) } } @@ -894,9 +894,7 @@ impl Schema for SchemaImpl { .tables .read() .unwrap() - .tables_by_name - .iter() - .map(|(_, v)| v.clone()) + .tables_by_name.values().cloned() .collect()) } } diff --git a/catalog_impls/src/volatile.rs b/catalog_impls/src/volatile.rs index 3315b03480..06b9e1a49f 100644 --- a/catalog_impls/src/volatile.rs +++ b/catalog_impls/src/volatile.rs @@ -67,9 +67,7 @@ impl Manager for ManagerImpl { fn 
all_catalogs(&self) -> manager::Result> { Ok(self - .catalogs - .iter() - .map(|(_, v)| v.clone() as CatalogRef) + .catalogs.values().map(|v| v.clone() as CatalogRef) .collect()) } } diff --git a/cluster/src/shard_tables_cache.rs b/cluster/src/shard_tables_cache.rs index 8b9000fbe5..c778aeaec9 100644 --- a/cluster/src/shard_tables_cache.rs +++ b/cluster/src/shard_tables_cache.rs @@ -225,7 +225,7 @@ impl Inner { .iter() .position(|v| v.id == new_table.id) .with_context(|| TableNotFound { - msg: format!("the table to remove is not found, table:{:?}", new_table), + msg: format!("the table to remove is not found, table:{new_table:?}"), })?; // Update tables of shard. diff --git a/common_types/src/datum.rs b/common_types/src/datum.rs index f1b8903a37..a828f7b253 100644 --- a/common_types/src/datum.rs +++ b/common_types/src/datum.rs @@ -541,7 +541,7 @@ impl Datum { Datum::Timestamp(v) => Local.timestamp_millis_opt(v.as_i64()).unwrap().to_rfc3339(), Datum::Double(v) => v.to_string(), Datum::Float(v) => v.to_string(), - Datum::Varbinary(v) => format!("{:?}", v), + Datum::Varbinary(v) => format!("{v:?}"), Datum::String(v) => v.to_string(), Datum::UInt64(v) => v.to_string(), Datum::UInt32(v) => v.to_string(), diff --git a/common_types/src/schema.rs b/common_types/src/schema.rs index 82abbcc7b3..3dcbdb4d85 100644 --- a/common_types/src/schema.rs +++ b/common_types/src/schema.rs @@ -328,7 +328,7 @@ pub struct IndexInWriterSchema(Vec>); impl IndexInWriterSchema { /// Create a index mapping for same schema with `num_columns` columns. pub fn for_same_schema(num_columns: usize) -> Self { - let indexes = (0..num_columns).into_iter().map(Some).collect(); + let indexes = (0..num_columns).map(Some).collect(); Self(indexes) } @@ -1254,7 +1254,7 @@ impl SchemaEncoder { pub fn encode(&self, schema: &Schema) -> Result> { let pb_schema = schema_pb::TableSchema::from(schema); - let mut buf = Vec::with_capacity(1 + pb_schema.encoded_len() as usize); + let mut buf = Vec::with_capacity(1 + pb_schema.encoded_len()); buf.push(self.version); pb_schema.encode(&mut buf).context(EncodeSchemaToPb)?; diff --git a/common_types/src/time.rs b/common_types/src/time.rs index 0875bce506..e785316ef5 100644 --- a/common_types/src/time.rs +++ b/common_types/src/time.rs @@ -383,6 +383,6 @@ mod test { fn test_bucket_of_negative_timestamp() { let ts = Timestamp::new(-126316800000); let range = TimeRange::bucket_of(ts, Duration::from_millis(25920000000)).unwrap(); - assert!(range.contains(ts), "range:{:?}", range); + assert!(range.contains(ts), "range:{range:?}"); } } diff --git a/common_util/src/avro.rs b/common_util/src/avro.rs index a00ecfb497..2ed4f72ac7 100644 --- a/common_util/src/avro.rs +++ b/common_util/src/avro.rs @@ -182,8 +182,7 @@ pub fn avro_rows_to_record_batch( .box_err() .context(AvroRowsToRecordBatch { msg: format!( - "parse avro raw to row failed, avro schema:{:?}, raw:{:?}", - avro_schema, raw + "parse avro raw to row failed, avro schema:{avro_schema:?}, raw:{raw:?}" ), })?; assert_eq!(row_buf.len(), column_block_builders.len()); @@ -195,8 +194,7 @@ pub fn avro_rows_to_record_batch( .box_err() .context(AvroRowsToRecordBatch { msg: format!( - "append datum to column block builder failed, datum:{:?}, builder:{:?}", - datum, column_block_builder + "append datum to column block builder failed, datum:{datum:?}, builder:{column_block_builder:?}" ), })? 
} @@ -239,8 +237,7 @@ pub fn row_group_to_avro_rows(row_group: RowGroup) -> Result>> { let row = row_group.get_row(row_idx).unwrap(); let mut avro_record = Record::new(&avro_schema).context(RowGroupToAvroRowsNoCause { msg: format!( - "new avro record with schema failed, schema:{:?}", - avro_schema + "new avro record with schema failed, schema:{avro_schema:?}" ), })?; @@ -254,8 +251,7 @@ pub fn row_group_to_avro_rows(row_group: RowGroup) -> Result>> { .box_err() .context(RowGroupToAvroRowsWithCause { msg: format!( - "new avro record with schema failed, schema:{:?}", - avro_schema + "new avro record with schema failed, schema:{avro_schema:?}" ), })?; rows.push(row_bytes); diff --git a/common_util/src/codec/memcomparable/number.rs b/common_util/src/codec/memcomparable/number.rs index ba3a2ccbff..d3dc01643a 100644 --- a/common_util/src/codec/memcomparable/number.rs +++ b/common_util/src/codec/memcomparable/number.rs @@ -301,7 +301,7 @@ mod test { ret: Ordering::Less, }, TblU64 { - arg1: u64::MAX as u64, + arg1: u64::MAX, arg2: i64::MAX as u64, ret: Ordering::Greater, }, diff --git a/common_util/src/config.rs b/common_util/src/config.rs index bd6ef35d41..dac513307f 100644 --- a/common_util/src/config.rs +++ b/common_util/src/config.rs @@ -95,7 +95,7 @@ impl FromStr for TimeUnit { fn from_str(tu_str: &str) -> Result { let tu_str = tu_str.trim(); if !tu_str.is_ascii() { - return Err(format!("unexpect ascii string: {}", tu_str)); + return Err(format!("unexpect ascii string: {tu_str}")); } match tu_str.to_lowercase().as_str() { @@ -106,7 +106,7 @@ impl FromStr for TimeUnit { "minutes" => Ok(TimeUnit::Minutes), "hours" => Ok(TimeUnit::Hours), "days" => Ok(TimeUnit::Days), - _ => Err(format!("unexpect TimeUnit: {}", tu_str)), + _ => Err(format!("unexpect TimeUnit: {tu_str}")), } } } @@ -122,7 +122,7 @@ impl fmt::Display for TimeUnit { TimeUnit::Hours => "hours", TimeUnit::Days => "days", }; - write!(f, "{}", s) + write!(f, "{s}") } } @@ -183,16 +183,16 @@ impl Serialize for ReadableSize { let size = self.0; let mut buffer = String::new(); if size == 0 { - write!(buffer, "{}KiB", size).unwrap(); + write!(buffer, "{size}KiB").unwrap(); } else if size % PIB == 0 { write!(buffer, "{}PiB", size / PIB).unwrap(); } else if size % TIB == 0 { write!(buffer, "{}TiB", size / TIB).unwrap(); - } else if size % GIB as u64 == 0 { + } else if size % GIB == 0 { write!(buffer, "{}GiB", size / GIB).unwrap(); - } else if size % MIB as u64 == 0 { + } else if size % MIB == 0 { write!(buffer, "{}MiB", size / MIB).unwrap(); - } else if size % KIB as u64 == 0 { + } else if size % KIB == 0 { write!(buffer, "{}KiB", size / KIB).unwrap(); } else { return serializer.serialize_u64(size); @@ -208,11 +208,11 @@ impl FromStr for ReadableSize { fn from_str(s: &str) -> Result { let size_str = s.trim(); if size_str.is_empty() { - return Err(format!("{:?} is not a valid size.", s)); + return Err(format!("{s:?} is not a valid size.")); } if !size_str.is_ascii() { - return Err(format!("ASCII string is expected, but got {:?}", s)); + return Err(format!("ASCII string is expected, but got {s:?}")); } // size: digits and '.' 
as decimal separator @@ -234,15 +234,14 @@ impl FromStr for ReadableSize { "B" | "" => UNIT, _ => { return Err(format!( - "only B, KB, KiB, MB, MiB, GB, GiB, TB, TiB, PB, and PiB are supported: {:?}", - s + "only B, KB, KiB, MB, MiB, GB, GiB, TB, TiB, PB, and PiB are supported: {s:?}" )); } }; match size.parse::() { Ok(n) => Ok(ReadableSize((n * unit as f64) as u64)), - Err(_) => Err(format!("invalid size string: {:?}", s)), + Err(_) => Err(format!("invalid size string: {s:?}")), } } } @@ -369,7 +368,7 @@ impl FromStr for ReadableDuration { fn from_str(dur_str: &str) -> Result { let dur_str = dur_str.trim(); if !dur_str.is_ascii() { - return Err(format!("unexpect ascii string: {}", dur_str)); + return Err(format!("unexpect ascii string: {dur_str}")); } let err_msg = "valid duration, only d, h, m, s, ms are supported.".to_owned(); let mut left = dur_str.as_bytes(); @@ -408,8 +407,8 @@ impl FromStr for ReadableDuration { if dur.is_sign_negative() { return Err("duration should be positive.".to_owned()); } - let secs = dur as u64 / SECOND as u64; - let millis = (dur as u64 % SECOND as u64) as u32 * 1_000_000; + let secs = dur as u64 / SECOND; + let millis = (dur as u64 % SECOND) as u32 * 1_000_000; Ok(ReadableDuration(Duration::new(secs, millis))) } } @@ -475,7 +474,7 @@ impl fmt::Display for ReadableDuration { } if dur > 0 { written = true; - write!(f, "{}ms", dur)?; + write!(f, "{dur}ms")?; } if !written { write!(f, "0s")?; @@ -490,7 +489,7 @@ impl Serialize for ReadableDuration { S: Serializer, { let mut buffer = String::new(); - write!(buffer, "{}", self).unwrap(); + write!(buffer, "{self}").unwrap(); serializer.serialize_str(&buffer) } } @@ -562,7 +561,7 @@ mod tests { s: ReadableSize(size), }; let res_str = toml::to_string(&c).unwrap(); - let exp_str = format!("s = {:?}\n", exp); + let exp_str = format!("s = {exp:?}\n"); assert_eq!(res_str, exp_str); let res_size: SizeHolder = toml::from_str(&exp_str).unwrap(); assert_eq!(res_size.s.0, size); @@ -612,7 +611,7 @@ mod tests { ("0e+10MB", 0), ]; for (src, exp) in decode_cases { - let src = format!("s = {:?}", src); + let src = format!("s = {src:?}"); let res: SizeHolder = toml::from_str(&src).unwrap(); assert_eq!(res.s.0, exp); } @@ -622,7 +621,7 @@ mod tests { "4B7", "5M_", ]; for src in illegal_cases { - let src_str = format!("s = {:?}", src); + let src_str = format!("s = {src:?}"); assert!(toml::from_str::(&src_str).is_err(), "{}", src); } } @@ -672,7 +671,7 @@ mod tests { d: ReadableDuration(Duration::new(secs, ms * 1_000_000)), }; let res_str = toml::to_string(&d).unwrap(); - let exp_str = format!("d = {:?}\n", exp); + let exp_str = format!("d = {exp:?}\n"); assert_eq!(res_str, exp_str); let res_dur: DurHolder = toml::from_str(&exp_str).unwrap(); assert_eq!(res_dur.d.0, d.d.0); @@ -680,14 +679,14 @@ mod tests { let decode_cases = vec![(" 0.5 h2m ", 3600 / 2 + 2 * 60, 0)]; for (src, secs, ms) in decode_cases { - let src = format!("d = {:?}", src); + let src = format!("d = {src:?}"); let res: DurHolder = toml::from_str(&src).unwrap(); assert_eq!(res.d.0, Duration::new(secs, ms * 1_000_000)); } let illegal_cases = vec!["1H", "1M", "1S", "1MS", "1h1h", "h"]; for src in illegal_cases { - let src_str = format!("d = {:?}", src); + let src_str = format!("d = {src:?}"); assert!(toml::from_str::(&src_str).is_err(), "{}", src); } assert!(toml::from_str::("d = 23").is_err()); diff --git a/common_util/src/panic.rs b/common_util/src/panic.rs index 9c45713dbe..f2a1322e86 100644 --- a/common_util/src/panic.rs +++ b/common_util/src/panic.rs @@ -95,13 
+95,13 @@ mod tests { match unsafe { fork() } { Ok(ForkResult::Parent { .. }) => match wait().unwrap() { WaitStatus::Exited(_, status) => Ok(status), - v => Err(format!("{:?}", v)), + v => Err(format!("{v:?}")), }, Ok(ForkResult::Child) => { child(); std::process::exit(0); } - Err(e) => Err(format!("Fork failed: {}", e)), + Err(e) => Err(format!("Fork failed: {e}")), } } diff --git a/common_util/src/record_batch.rs b/common_util/src/record_batch.rs index 9e9570c648..de8035e5b8 100644 --- a/common_util/src/record_batch.rs +++ b/common_util/src/record_batch.rs @@ -17,7 +17,6 @@ pub fn assert_record_batches_eq(expected: &[&str], record_batches: Vec = formatted.trim().lines().collect(); assert_eq!( expected_lines, actual_lines, - "\n\nexpected:\n\n{:#?}\nactual:\n\n{:#?}\n\n", - expected_lines, actual_lines + "\n\nexpected:\n\n{expected_lines:#?}\nactual:\n\n{actual_lines:#?}\n\n" ); } diff --git a/components/message_queue/src/tests/util.rs b/components/message_queue/src/tests/util.rs index d409886a77..75443d2b9a 100644 --- a/components/message_queue/src/tests/util.rs +++ b/components/message_queue/src/tests/util.rs @@ -12,8 +12,8 @@ pub fn generate_test_data(cnt: usize) -> Vec { let mut messages = Vec::with_capacity(cnt); let base_ts = Utc.timestamp_millis_opt(1337).unwrap(); for i in 0..cnt { - let key = format!("test_key_{}", i); - let val = format!("test_val_{}", i); + let key = format!("test_key_{i}"); + let val = format!("test_val_{i}"); let timestamp = base_ts + Duration::milliseconds(i as i64); messages.push(message(key.as_bytes(), val.as_bytes(), timestamp)); diff --git a/components/object_store/src/mem_cache.rs b/components/object_store/src/mem_cache.rs index c0a456ed0d..00fec0fd6f 100644 --- a/components/object_store/src/mem_cache.rs +++ b/components/object_store/src/mem_cache.rs @@ -140,7 +140,7 @@ impl MemCache { .await .into_iter() .enumerate() - .map(|(part_no, keys)| format!("{}: [{}]", part_no, keys)) + .map(|(part_no, keys)| format!("{part_no}: [{keys}]")) .collect::>() .join("\n") } diff --git a/components/object_store/src/prefix.rs b/components/object_store/src/prefix.rs index d43dd518af..cc6be1db09 100644 --- a/components/object_store/src/prefix.rs +++ b/components/object_store/src/prefix.rs @@ -280,7 +280,7 @@ mod tests { let prefix = prefix.map(|v| v.to_string()).unwrap_or_default(); let mut objects = Vec::with_capacity(self.file_num); for file_idx in 0..self.file_num { - let raw_filepath = format!("{}/{}", prefix, file_idx); + let raw_filepath = format!("{prefix}/{file_idx}"); let filepath = Path::from(raw_filepath); let object = ObjectMeta { location: filepath, @@ -376,7 +376,7 @@ mod tests { let prefix_store = StoreWithPrefix::new(prefix.to_string(), local_store.clone()).unwrap(); let real_loc = prefix_store.add_prefix_to_loc(&Path::from(filename)); - assert_eq!(expect_loc, real_loc.as_ref(), "prefix:{}", prefix); + assert_eq!(expect_loc, real_loc.as_ref(), "prefix:{prefix}"); } for (prefix, expect_filename, loc) in cases { @@ -385,7 +385,7 @@ mod tests { let real_filename = prefix_store .remove_prefix_from_loc(&Path::from(loc)) .unwrap(); - assert_eq!(expect_filename, real_filename.as_ref(), "prefix:{}", prefix); + assert_eq!(expect_filename, real_filename.as_ref(), "prefix:{prefix}"); } } } diff --git a/components/parquet_ext/src/meta_data.rs b/components/parquet_ext/src/meta_data.rs index 310e1902b0..1abe344df3 100644 --- a/components/parquet_ext/src/meta_data.rs +++ b/components/parquet_ext/src/meta_data.rs @@ -25,7 +25,7 @@ pub async fn fetch_parquet_metadata( 
const FOOTER_LEN: usize = 8; if file_size < FOOTER_LEN { - let err_msg = format!("file size of {} is less than footer", file_size); + let err_msg = format!("file size of {file_size} is less than footer"); return Err(ParquetError::General(err_msg)); } @@ -35,7 +35,7 @@ pub async fn fetch_parquet_metadata( .get_bytes(footer_start..file_size) .await .map_err(|e| { - let err_msg = format!("failed to get footer bytes, err:{}", e); + let err_msg = format!("failed to get footer bytes, err:{e}"); ParquetError::General(err_msg) })?; @@ -59,7 +59,7 @@ pub async fn fetch_parquet_metadata( .get_bytes(metadata_start..footer_start) .await .map_err(|e| { - let err_msg = format!("failed to get metadata bytes, err:{}", e); + let err_msg = format!("failed to get metadata bytes, err:{e}"); ParquetError::General(err_msg) })?; diff --git a/components/parquet_ext/src/tests.rs b/components/parquet_ext/src/tests.rs index 5aae9d281a..d623c1ba27 100644 --- a/components/parquet_ext/src/tests.rs +++ b/components/parquet_ext/src/tests.rs @@ -50,7 +50,7 @@ fn get_data_dir( fn parquet_test_data() -> String { match get_data_dir("PARQUET_TEST_DATA", "../parquet-testing/data") { Ok(pb) => pb.display().to_string(), - Err(err) => panic!("failed to get parquet data dir: {}", err), + Err(err) => panic!("failed to get parquet data dir: {err}"), } } diff --git a/components/skiplist/src/list.rs b/components/skiplist/src/list.rs index e2a9d70e19..77a8142163 100644 --- a/components/skiplist/src/list.rs +++ b/components/skiplist/src/list.rs @@ -655,7 +655,7 @@ mod tests { let list = Skiplist::with_arena(comp, arena); for i in 0..1000 { let key = Bytes::from(format!("{:05}{:08}", i * 10 + 5, 0)); - let value = Bytes::from(format!("{:05}", i)); + let value = Bytes::from(format!("{i:05}")); list.put(&key, &value); } let mut cases = vec![ @@ -692,7 +692,7 @@ mod tests { continue; } let e = format!("{}{:08}", exp.unwrap(), 0); - assert_eq!(unsafe { (*res).key() }, e.as_bytes(), "{}", i); + assert_eq!(unsafe { (*res).key() }, e.as_bytes(), "{i}"); } } } diff --git a/components/skiplist/tests/tests.rs b/components/skiplist/tests/tests.rs index b8e6473341..4987a32546 100644 --- a/components/skiplist/tests/tests.rs +++ b/components/skiplist/tests/tests.rs @@ -13,11 +13,11 @@ use skiplist::*; use yatp::task::callback::Handle; fn new_value(v: usize) -> Bytes { - Bytes::from(format!("{:05}", v)) + Bytes::from(format!("{v:05}")) } fn key_with_ts(key: &str, ts: u64) -> Bytes { - Bytes::from(format!("{}{:08}", key, ts)) + Bytes::from(format!("{key}{ts:08}")) } #[test] @@ -62,7 +62,7 @@ fn test_basic() { assert!(!list.is_empty()); for (key, value) in &table { let get_key = key_with_ts(key, 0); - assert_eq!(list.get(&get_key), Some(&value[..]), "{}", key); + assert_eq!(list.get(&get_key), Some(&value[..]), "{key}"); } } @@ -74,8 +74,8 @@ fn test_concurrent_basic(n: usize, value_len: usize) { let kvs: Vec<_> = (0..n) .map(|i| { ( - key_with_ts(format!("{:05}", i).as_str(), 0), - Bytes::from(format!("{1:00$}", value_len, i)), + key_with_ts(format!("{i:05}").as_str(), 0), + Bytes::from(format!("{i:0value_len$}")), ) }) .collect(); @@ -96,7 +96,7 @@ fn test_concurrent_basic(n: usize, value_len: usize) { let list = list.clone(); pool.spawn(move |_: &mut Handle<'_>| { let val = list.get(&k); - assert_eq!(val, Some(&v[..]), "{:?}", k); + assert_eq!(val, Some(&v[..]), "{k:?}"); tx.send(()).unwrap(); }); } @@ -163,7 +163,7 @@ fn test_one_key() { match rx.recv_timeout(Duration::from_secs(3)) { Ok("w") => w += 1, Ok("r") => r += 1, - Err(err) => 
panic!("timeout on receiving r{} w{} msg {:?}", r, w, err), + Err(err) => panic!("timeout on receiving r{r} w{w} msg {err:?}"), _ => panic!("unexpected value"), } } @@ -182,7 +182,7 @@ fn test_iterator_next() { iter_ref.seek_to_first(); assert!(!iter_ref.valid()); for i in (0..n).rev() { - let key = key_with_ts(format!("{:05}", i).as_str(), 0); + let key = key_with_ts(format!("{i:05}").as_str(), 0); list.put(&key, &new_value(i)); } iter_ref.seek_to_first(); @@ -206,7 +206,7 @@ fn test_iterator_prev() { iter_ref.seek_to_last(); assert!(!iter_ref.valid()); for i in (0..n).rev() { - let key = key_with_ts(format!("{:05}", i).as_str(), 0); + let key = key_with_ts(format!("{i:05}").as_str(), 0); list.put(&key, &new_value(i)); } iter_ref.seek_to_last(); @@ -231,7 +231,7 @@ fn test_iterator_seek() { assert!(!iter_ref.valid()); for i in (0..n).rev() { let v = i * 10 + 1000; - let key = key_with_ts(format!("{:05}", v).as_str(), 0); + let key = key_with_ts(format!("{v:05}").as_str(), 0); list.put(&key, &new_value(v)); } iter_ref.seek_to_first(); diff --git a/components/table_kv/src/config.rs b/components/table_kv/src/config.rs index d97d319d1e..6b0a0e4c1d 100644 --- a/components/table_kv/src/config.rs +++ b/components/table_kv/src/config.rs @@ -68,7 +68,9 @@ impl ObkvConfig { /// Obkv server log level. #[derive(Copy, Clone, Serialize, Deserialize, PartialEq, Eq, Debug)] #[serde(rename_all = "lowercase")] +#[derive(Default)] pub enum ObLogLevel { + #[default] None = 7, Error = 0, Warn = 2, @@ -77,11 +79,7 @@ pub enum ObLogLevel { Debug = 5, } -impl Default for ObLogLevel { - fn default() -> ObLogLevel { - ObLogLevel::None - } -} + impl From for ObLogLevel { fn from(level: u16) -> Self { diff --git a/components/table_kv/src/obkv.rs b/components/table_kv/src/obkv.rs index e0eb7e72b0..3b59274008 100644 --- a/components/table_kv/src/obkv.rs +++ b/components/table_kv/src/obkv.rs @@ -707,24 +707,18 @@ fn row_to_key_value( fn format_create_table_sql(table_name: &str) -> String { format!( - "CREATE TABLE IF NOT EXISTS {}( - {} VARBINARY({}), - {} {} NOT NULL, - PRIMARY KEY({}) - );", - table_name, - KEY_COLUMN_NAME, - KEY_COLUMN_LEN, - VALUE_COLUMN_NAME, - VALUE_COLUMN_TYPE, - KEY_COLUMN_NAME + "CREATE TABLE IF NOT EXISTS {table_name}( + {KEY_COLUMN_NAME} VARBINARY({KEY_COLUMN_LEN}), + {VALUE_COLUMN_NAME} {VALUE_COLUMN_TYPE} NOT NULL, + PRIMARY KEY({KEY_COLUMN_NAME}) + );" ) } fn format_drop_table_sql(table_name: &str, purge_recyclebin: bool) -> String { if purge_recyclebin { - format!("DROP TABLE IF EXISTS {}; PURGE RECYCLEBIN;", table_name) + format!("DROP TABLE IF EXISTS {table_name}; PURGE RECYCLEBIN;") } else { - format!("DROP TABLE IF EXISTS {};", table_name) + format!("DROP TABLE IF EXISTS {table_name};") } } diff --git a/components/table_kv/src/obkv/tests.rs b/components/table_kv/src/obkv/tests.rs index 3941938c2e..85c0edd4e0 100644 --- a/components/table_kv/src/obkv/tests.rs +++ b/components/table_kv/src/obkv/tests.rs @@ -134,7 +134,7 @@ fn random_table_name(prefix: &str) -> String { let mut rng = thread_rng(); let v: u32 = rng.gen_range(0, MAX_TABLE_ID); - format!("{}_{}", prefix, v) + format!("{prefix}_{v}") } fn new_scan_ctx(batch_size: i32) -> ScanContext { @@ -279,10 +279,10 @@ fn check_duplicate_primary_key(ret: Result<()>, expect_table_name: &str) { { assert_eq!(expect_table_name, table_name); } else { - panic!("Unexpected insert error, err:{:?}", err); + panic!("Unexpected insert error, err:{err:?}"); } } else { - panic!("Unexpected insert result, ret:{:?}", ret); + panic!("Unexpected insert 
result, ret:{ret:?}"); } } diff --git a/components/table_kv/src/tests.rs b/components/table_kv/src/tests.rs index 098ca2b2f5..e2add4663e 100644 --- a/components/table_kv/src/tests.rs +++ b/components/table_kv/src/tests.rs @@ -148,7 +148,7 @@ fn random_table_name(prefix: &str) -> String { let mut rng = thread_rng(); let v: u32 = rng.gen_range(0, MAX_TABLE_ID); - format!("{}_{}", prefix, v) + format!("{prefix}_{v}") } fn new_scan_ctx(batch_size: i32) -> ScanContext { @@ -293,7 +293,7 @@ fn test_insert_duplicate(tester: &TableKvTester, table_name: &str if let Err(err) = ret { assert!(err.is_primary_key_duplicate()); } else { - panic!("Unexpected insert result, ret:{:?}", ret); + panic!("Unexpected insert result, ret:{ret:?}"); } } diff --git a/df_operator/src/aggregate.rs b/df_operator/src/aggregate.rs index 07f6036909..45540c6e1a 100644 --- a/df_operator/src/aggregate.rs +++ b/df_operator/src/aggregate.rs @@ -110,7 +110,7 @@ impl ToDfAccumulator { impl DfAccumulator for ToDfAccumulator { fn state(&self) -> DfResult> { let state = self.accumulator.state().map_err(|e| { - DataFusionError::Execution(format!("Accumulator failed to get state, err:{}", e)) + DataFusionError::Execution(format!("Accumulator failed to get state, err:{e}")) })?; Ok(state.into_state()) } @@ -127,7 +127,7 @@ impl DfAccumulator for ToDfAccumulator { let input = Input(&v); self.accumulator.update(input).map_err(|e| { - DataFusionError::Execution(format!("Accumulator failed to update, err:{}", e)) + DataFusionError::Execution(format!("Accumulator failed to update, err:{e}")) }) }) } @@ -144,14 +144,14 @@ impl DfAccumulator for ToDfAccumulator { let state_ref = StateRef(Input(&v)); self.accumulator.merge(state_ref).map_err(|e| { - DataFusionError::Execution(format!("Accumulator failed to merge, err:{}", e)) + DataFusionError::Execution(format!("Accumulator failed to merge, err:{e}")) }) }) } fn evaluate(&self) -> DfResult { let value = self.accumulator.evaluate().map_err(|e| { - DataFusionError::Execution(format!("Accumulator failed to evaluate, err:{}", e)) + DataFusionError::Execution(format!("Accumulator failed to evaluate, err:{e}")) })?; Ok(value.into_df_scalar_value()) diff --git a/df_operator/src/functions.rs b/df_operator/src/functions.rs index 6478193607..a560911bd4 100644 --- a/df_operator/src/functions.rs +++ b/df_operator/src/functions.rs @@ -208,8 +208,7 @@ impl ScalarFunction { for df_arg in df_args { let value = ColumnarValue::try_from_df_columnar_value(df_arg).map_err(|e| { DataFusionError::Internal(format!( - "Failed to convert datafusion columnar value, err:{}", - e + "Failed to convert datafusion columnar value, err:{e}" )) })?; values.push(value); @@ -217,7 +216,7 @@ impl ScalarFunction { // Execute our function. let result_value = func(&values).map_err(|e| { - DataFusionError::Execution(format!("Failed to execute function, err:{}", e)) + DataFusionError::Execution(format!("Failed to execute function, err:{e}")) })?; // Convert the result value to DfColumnarValue. @@ -270,7 +269,7 @@ impl AggregateFunction { // Create accumulator. 
let df_adapter = move |data_type: &DataType| { let accumulator = accumulator_fn(data_type).map_err(|e| { - DataFusionError::Execution(format!("Failed to create accumulator, err:{}", e)) + DataFusionError::Execution(format!("Failed to create accumulator, err:{e}")) })?; let accumulator = Box::new(ToDfAccumulator::new(accumulator)); diff --git a/integration_tests/Cargo.toml b/integration_tests/Cargo.toml index 031bd0bdfe..25d26d0ee4 100644 --- a/integration_tests/Cargo.toml +++ b/integration_tests/Cargo.toml @@ -8,6 +8,7 @@ workspace = true [dependencies] anyhow = "1.0.58" async-trait = "0.1" -ceresdb-client-rs = { git = "https://github.com/CeresDB/ceresdb-client-rs.git", rev = "5fbd1a1526c3ddd25bb1f38f63f869c892052f7c" } +# ceresdb-client-rs = { git = "https://github.com/CeresDB/ceresdb-client-rs.git", rev = "5fbd1a1526c3ddd25bb1f38f63f869c892052f7c" } +ceresdb-client-rs = { git = "https://github.com/jiacai2050/ceresdb-client-rs.git", rev = "80411cd1597105c19a1a41582b929c58b8bae11f" } sqlness = "0.3" tokio = { workspace = true } diff --git a/integration_tests/src/database.rs b/integration_tests/src/database.rs index c2e2eb0e52..4ae636971d 100644 --- a/integration_tests/src/database.rs +++ b/integration_tests/src/database.rs @@ -43,21 +43,20 @@ impl CeresDB { let stdout = File::create(stdout).expect("Cannot create stdout"); let stderr = File::create(stderr).expect("Cannot create stderr"); - println!("Start {} with {}...", bin, config); + println!("Start {bin} with {config}..."); let server_process = Command::new(&bin) .args(["--config", &config]) .stdout(stdout) .stderr(stderr) .spawn() - .unwrap_or_else(|_| panic!("Failed to start server at {:?}", bin)); + .unwrap_or_else(|_| panic!("Failed to start server at {bin:?}")); // Wait for a while std::thread::sleep(std::time::Duration::from_secs(5)); let endpoint = env::var(SERVER_ENDPOINT_ENV).unwrap_or_else(|_| { panic!( - "Cannot read server endpoint from env {:?}", - SERVER_ENDPOINT_ENV + "Cannot read server endpoint from env {SERVER_ENDPOINT_ENV:?}" ) }); @@ -92,7 +91,7 @@ impl CeresDB { format!("{}", CsvFormatter { resp }) } } - Err(e) => format!("Failed to execute query, err: {:?}", e), + Err(e) => format!("Failed to execute query, err: {e:?}"), }) } } diff --git a/interpreters/src/show.rs b/interpreters/src/show.rs index 5e50fb8cdc..69bff8d104 100644 --- a/interpreters/src/show.rs +++ b/interpreters/src/show.rs @@ -176,7 +176,7 @@ fn to_pattern_re(pattern: &str) -> Result { // so replace those meta character to regexp syntax // TODO: support escape char to match exact those two chars let pattern = pattern.replace('_', ".").replace('%', ".*"); - let pattern = format!("^{}$", pattern); + let pattern = format!("^{pattern}$"); Regex::new(&pattern).context(InvalidRegexp) } diff --git a/interpreters/src/show_create.rs b/interpreters/src/show_create.rs index 85fddec34d..fa1e319023 100644 --- a/interpreters/src/show_create.rs +++ b/interpreters/src/show_create.rs @@ -91,7 +91,7 @@ impl ShowCreateInterpreter { } if let Some(expr) = &col.default_value { - res += format!(" DEFAULT {}", expr).as_str(); + res += format!(" DEFAULT {expr}").as_str(); } if !col.comment.is_empty() { @@ -101,7 +101,7 @@ impl ShowCreateInterpreter { } let keys: Vec = key_columns.iter().map(|col| col.name.to_string()).collect(); res += format!("PRIMARY KEY({}), ", keys.join(",")).as_str(); - res += format!("TIMESTAMP KEY({})", timestamp_key).as_str(); + res += format!("TIMESTAMP KEY({timestamp_key})").as_str(); res } @@ -166,7 +166,7 @@ impl ShowCreateInterpreter { if 
!opts.is_empty() { let mut v: Vec = opts .into_iter() - .map(|(k, v)| format!("{}='{}'", k, v)) + .map(|(k, v)| format!("{k}='{v}'")) .collect(); // sorted by option name v.sort(); diff --git a/interpreters/src/table_manipulator/meta_based.rs b/interpreters/src/table_manipulator/meta_based.rs index eb24ab1bce..63248e2555 100644 --- a/interpreters/src/table_manipulator/meta_based.rs +++ b/interpreters/src/table_manipulator/meta_based.rs @@ -44,8 +44,7 @@ impl TableManipulator for TableManipulatorImpl { .box_err() .with_context(|| CreateWithCause { msg: format!( - "fail to encode table schema, ctx:{:?}, plan:{:?}", - ctx, plan + "fail to encode table schema, ctx:{ctx:?}, plan:{plan:?}" ), })?; @@ -67,7 +66,7 @@ impl TableManipulator for TableManipulatorImpl { .await .box_err() .with_context(|| CreateWithCause { - msg: format!("failed to create table by meta client, req:{:?}", req), + msg: format!("failed to create table by meta client, req:{req:?}"), })?; info!( @@ -98,7 +97,7 @@ impl TableManipulator for TableManipulatorImpl { .await .box_err() .context(DropWithCause { - msg: format!("failed to create table by meta client, req:{:?}", req), + msg: format!("failed to create table by meta client, req:{req:?}"), })?; info!( diff --git a/interpreters/src/tests.rs b/interpreters/src/tests.rs index ef12347d1d..6cce5a0aa0 100644 --- a/interpreters/src/tests.rs +++ b/interpreters/src/tests.rs @@ -91,8 +91,8 @@ where .default_catalog_and_schema(DEFAULT_CATALOG.to_string(), DEFAULT_SCHEMA.to_string()) .enable_partition_table_access(enable_partition_table_access) .build(); - let sql= format!("CREATE TABLE IF NOT EXISTS {}(c1 string tag not null,ts timestamp not null, c3 string, timestamp key(ts),primary key(c1, ts)) \ - ENGINE=Analytic WITH (ttl='70d',update_mode='overwrite',arena_block_size='1KB')", table_name); + let sql= format!("CREATE TABLE IF NOT EXISTS {table_name}(c1 string tag not null,ts timestamp not null, c3 string, timestamp key(ts),primary key(c1, ts)) \ + ENGINE=Analytic WITH (ttl='70d',update_mode='overwrite',arena_block_size='1KB')"); let output = self.sql_to_output_with_context(&sql, ctx).await?; assert!( @@ -112,7 +112,7 @@ where .default_catalog_and_schema(DEFAULT_CATALOG.to_string(), DEFAULT_SCHEMA.to_string()) .enable_partition_table_access(enable_partition_table_access) .build(); - let sql = format!("INSERT INTO {}(key1, key2, field1,field2) VALUES('tagk', 1638428434000,100, 'hello3'),('tagk2', 1638428434000,100, 'hello3');", table_name); + let sql = format!("INSERT INTO {table_name}(key1, key2, field1,field2) VALUES('tagk', 1638428434000,100, 'hello3'),('tagk2', 1638428434000,100, 'hello3');"); let output = self.sql_to_output_with_context(&sql, ctx).await?; assert!( matches!(output, Output::AffectedRows(v) if v == 2), @@ -131,7 +131,7 @@ where .default_catalog_and_schema(DEFAULT_CATALOG.to_string(), DEFAULT_SCHEMA.to_string()) .enable_partition_table_access(enable_partition_table_access) .build(); - let sql = format!("select * from {}", table_name); + let sql = format!("select * from {table_name}"); let output = self.sql_to_output_with_context(&sql, ctx).await?; let records = output.try_into().unwrap(); let expected = vec![ @@ -310,13 +310,13 @@ where // Disable partition table access, all of create, insert and select about sub // table(in table partition) directly will failed. 
let res = self.create_table_and_check("__test_table", false).await; - assert!(format!("{:?}", res) + assert!(format!("{res:?}") .contains("only can process sub tables in table partition directly when enable partition table access")); let res1 = self.insert_table_and_check("__test_table", false).await; - assert!(format!("{:?}", res1) + assert!(format!("{res1:?}") .contains("only can process sub tables in table partition directly when enable partition table access")); let res2 = self.select_table_and_check("__test_table", false).await; - assert!(format!("{:?}", res2) + assert!(format!("{res2:?}") .contains("only can process sub tables in table partition directly when enable partition table access")); // Enable partition table access, operations above will success. diff --git a/query_engine/src/df_execution_extension/prom_align.rs b/query_engine/src/df_execution_extension/prom_align.rs index 09fda22b84..9bc6f12647 100644 --- a/query_engine/src/df_execution_extension/prom_align.rs +++ b/query_engine/src/df_execution_extension/prom_align.rs @@ -501,7 +501,7 @@ impl Stream for PromAlignReader { if !tsid_samples.is_empty() { Poll::Ready(Some( self.samples_to_record_batch(schema, tsid_samples) - .map_err(|e| DataFusionError::ArrowError(e)), + .map_err(DataFusionError::ArrowError), )) } else { Poll::Ready(Some(Ok(RecordBatch::new_empty(schema)))) @@ -516,7 +516,7 @@ impl Stream for PromAlignReader { if !tsid_samples.is_empty() { return Poll::Ready(Some( self.samples_to_record_batch(schema, tsid_samples) - .map_err(|e| DataFusionError::ArrowError(e)), + .map_err(DataFusionError::ArrowError), )); } } diff --git a/query_engine/src/logical_optimizer/order_by_primary_key.rs b/query_engine/src/logical_optimizer/order_by_primary_key.rs index d1977b6166..3932babe5f 100644 --- a/query_engine/src/logical_optimizer/order_by_primary_key.rs +++ b/query_engine/src/logical_optimizer/order_by_primary_key.rs @@ -66,8 +66,7 @@ impl OrderByPrimaryKeyRule { { let schema = Schema::try_from(source.schema()).map_err(|e| { let err_msg = format!( - "fail to convert arrow schema to schema, table:{}, err:{:?}", - table_name, e + "fail to convert arrow schema to schema, table:{table_name}, err:{e:?}" ); datafusion::error::DataFusionError::Plan(err_msg) })?; diff --git a/query_engine/src/logical_optimizer/tests.rs b/query_engine/src/logical_optimizer/tests.rs index ece6fcfa7b..950ec03f5f 100644 --- a/query_engine/src/logical_optimizer/tests.rs +++ b/query_engine/src/logical_optimizer/tests.rs @@ -163,7 +163,7 @@ impl LogicalPlanNodeBuilder { /// Check whether the logical plans are equal. 
pub fn assert_logical_plan_eq(left: &LogicalPlan, right: &LogicalPlan) { - let left_plan_str = format!("{:#?}", left); - let right_plan_str = format!("{:#?}", right); + let left_plan_str = format!("{left:#?}"); + let right_plan_str = format!("{right:#?}"); assert_eq!(left_plan_str, right_plan_str) } diff --git a/query_engine/src/logical_optimizer/type_conversion.rs b/query_engine/src/logical_optimizer/type_conversion.rs index 20b352c61b..77f2579903 100644 --- a/query_engine/src/logical_optimizer/type_conversion.rs +++ b/query_engine/src/logical_optimizer/type_conversion.rs @@ -169,8 +169,7 @@ impl<'a> TypeRewriter<'a> { ); if casted_right.is_null() { return Err(DataFusionError::Plan(format!( - "column:{:?} value:{:?} is invalid", - col, value + "column:{col:?} value:{value:?} is invalid" ))); } if reverse { diff --git a/remote_engine_client/src/channel.rs b/remote_engine_client/src/channel.rs index af5f3432ac..5776986d4b 100644 --- a/remote_engine_client/src/channel.rs +++ b/remote_engine_client/src/channel.rs @@ -86,5 +86,5 @@ impl ChannelBuilder { } fn make_formatted_endpoint(endpoint: &str) -> String { - format!("http://{}", endpoint) + format!("http://{endpoint}") } diff --git a/router/src/cluster_based.rs b/router/src/cluster_based.rs index e437e5cf67..757f96f84b 100644 --- a/router/src/cluster_based.rs +++ b/router/src/cluster_based.rs @@ -47,8 +47,7 @@ impl Router for ClusterBasedRouter { .box_err() .with_context(|| OtherWithCause { msg: format!( - "Failed to route tables by cluster, req:{:?}", - route_tables_req + "Failed to route tables by cluster, req:{route_tables_req:?}" ), })?; diff --git a/router/src/endpoint.rs b/router/src/endpoint.rs index 5d3214cc77..ef9c98bc58 100644 --- a/router/src/endpoint.rs +++ b/router/src/endpoint.rs @@ -38,7 +38,7 @@ impl FromStr for Endpoint { } }; let port = raw_port.parse().map_err(|e| { - let err_msg = format!("Fail to parse port:{}, err:{}", raw_port, e); + let err_msg = format!("Fail to parse port:{raw_port}, err:{e}"); Self::Err::from(err_msg) })?; diff --git a/server/src/grpc/forward.rs b/server/src/grpc/forward.rs index dd06936def..5ae458e2cd 100644 --- a/server/src/grpc/forward.rs +++ b/server/src/grpc/forward.rs @@ -493,15 +493,13 @@ mod tests { assert!(forwarder.is_local_endpoint(endpoint)); assert!( matches!(forward_res, ForwardResult::Original), - "endpoint is:{:?}", - endpoint + "endpoint is:{endpoint:?}" ); } else { assert!(!forwarder.is_local_endpoint(endpoint)); assert!( matches!(forward_res, ForwardResult::Forwarded(_)), - "endpoint is:{:?}", - endpoint + "endpoint is:{endpoint:?}" ); } } diff --git a/server/src/grpc/meta_event_service/error.rs b/server/src/grpc/meta_event_service/error.rs index 3f21697b1c..ff74f3d30c 100644 --- a/server/src/grpc/meta_event_service/error.rs +++ b/server/src/grpc/meta_event_service/error.rs @@ -40,7 +40,7 @@ impl Error { Error::ErrWithCause { msg, source, .. } => { let err_string = source.to_string(); let first_line = error_util::remove_backtrace_from_err(&err_string); - format!("{}. Caused by: {}", msg, first_line) + format!("{msg}. 
Caused by: {first_line}") } } } diff --git a/server/src/grpc/meta_event_service/mod.rs b/server/src/grpc/meta_event_service/mod.rs index 99052f0452..73c9910fc2 100644 --- a/server/src/grpc/meta_event_service/mod.rs +++ b/server/src/grpc/meta_event_service/mod.rs @@ -186,8 +186,7 @@ async fn handle_open_shard(ctx: HandlerContext, request: OpenShardRequest) -> Re .with_context(|| ErrWithCause { code: StatusCode::Internal, msg: format!( - "fail to get topology while opening shard, request:{:?}", - request + "fail to get topology while opening shard, request:{request:?}" ), })?; @@ -216,11 +215,11 @@ async fn handle_open_shard(ctx: HandlerContext, request: OpenShardRequest) -> Re .box_err() .with_context(|| ErrWithCause { code: StatusCode::Internal, - msg: format!("fail to open table, open_request:{:?}", open_request), + msg: format!("fail to open table, open_request:{open_request:?}"), })? .with_context(|| ErrNoCause { code: StatusCode::Internal, - msg: format!("no table is opened, open_request:{:?}", open_request), + msg: format!("no table is opened, open_request:{open_request:?}"), })?; } @@ -260,7 +259,7 @@ async fn handle_close_shard(ctx: HandlerContext, request: CloseShardRequest) -> .box_err() .with_context(|| ErrWithCause { code: StatusCode::Internal, - msg: format!("fail to close table, close_request:{:?}", close_request), + msg: format!("fail to close table, close_request:{close_request:?}"), })?; } @@ -278,8 +277,7 @@ async fn handle_create_table_on_shard( .with_context(|| ErrWithCause { code: StatusCode::Internal, msg: format!( - "fail to create table on shard in cluster, req:{:?}", - request + "fail to create table on shard in cluster, req:{request:?}" ), })?; @@ -291,8 +289,7 @@ async fn handle_create_table_on_shard( .with_context(|| ErrWithCause { code: StatusCode::Internal, msg: format!( - "fail to get topology while creating table, request:{:?}", - request + "fail to get topology while creating table, request:{request:?}" ), })?; @@ -334,7 +331,7 @@ async fn handle_create_table_on_shard( .box_err() .with_context(|| ErrWithCause { code: StatusCode::BadRequest, - msg: format!("fail to parse partition info, partition_info:{:?}", v), + msg: format!("fail to parse partition info, partition_info:{v:?}"), })?, ), None => None, @@ -365,8 +362,7 @@ async fn handle_create_table_on_shard( .with_context(|| ErrWithCause { code: StatusCode::Internal, msg: format!( - "fail to create table with request:{:?}", - create_table_request + "fail to create table with request:{create_table_request:?}" ), })?; @@ -383,7 +379,7 @@ async fn handle_drop_table_on_shard( .box_err() .with_context(|| ErrWithCause { code: StatusCode::Internal, - msg: format!("fail to drop table on shard in cluster, req:{:?}", request), + msg: format!("fail to drop table on shard in cluster, req:{request:?}"), })?; let table = request.table_info.context(ErrNoCause { @@ -414,7 +410,7 @@ async fn handle_drop_table_on_shard( .box_err() .with_context(|| ErrWithCause { code: StatusCode::Internal, - msg: format!("fail to drop table with request:{:?}", drop_table_request), + msg: format!("fail to drop table with request:{drop_table_request:?}"), })?; Ok(()) @@ -430,7 +426,7 @@ async fn handle_open_table_on_shard( .box_err() .with_context(|| ErrWithCause { code: StatusCode::Internal, - msg: format!("fail to open table on shard in cluster, req:{:?}", request), + msg: format!("fail to open table on shard in cluster, req:{request:?}"), })?; let topology = ctx @@ -441,8 +437,7 @@ async fn handle_open_table_on_shard( .with_context(|| 
ErrWithCause { code: StatusCode::Internal, msg: format!( - "fail to get topology while opening table, request:{:?}", - request + "fail to get topology while opening table, request:{request:?}" ), })?; @@ -488,7 +483,7 @@ async fn handle_open_table_on_shard( .box_err() .with_context(|| ErrWithCause { code: StatusCode::Internal, - msg: format!("fail to open table with request:{:?}", open_table_request), + msg: format!("fail to open table with request:{open_table_request:?}"), })?; Ok(()) @@ -504,7 +499,7 @@ async fn handle_close_table_on_shard( .box_err() .with_context(|| ErrWithCause { code: StatusCode::Internal, - msg: format!("fail to close table on shard in cluster, req:{:?}", request), + msg: format!("fail to close table on shard in cluster, req:{request:?}"), })?; let table = request.table_info.context(ErrNoCause { @@ -536,7 +531,7 @@ async fn handle_close_table_on_shard( .box_err() .with_context(|| ErrWithCause { code: StatusCode::Internal, - msg: format!("fail to close table with request:{:?}", close_table_request), + msg: format!("fail to close table with request:{close_table_request:?}"), })?; Ok(()) @@ -549,11 +544,11 @@ fn find_schema(catalog: CatalogRef, schema_name: NameRef) -> Result { .box_err() .with_context(|| ErrWithCause { code: StatusCode::Internal, - msg: format!("fail to get schema, schema:{:?}", schema_name), + msg: format!("fail to get schema, schema:{schema_name:?}"), })? .with_context(|| ErrNoCause { code: StatusCode::NotFound, - msg: format!("schema is not found, schema:{:?}", schema_name), + msg: format!("schema is not found, schema:{schema_name:?}"), }) } diff --git a/server/src/grpc/mod.rs b/server/src/grpc/mod.rs index 7d4844f50f..4ccdb89688 100644 --- a/server/src/grpc/mod.rs +++ b/server/src/grpc/mod.rs @@ -175,7 +175,7 @@ impl RpcServices { .serve_with_shutdown(serve_addr, stop_rx.map(drop)) .await .unwrap_or_else(|e| { - panic!("Grpc server listens failed, err:{:?}", e); + panic!("Grpc server listens failed, err:{e:?}"); }); }); self.join_handle = Some(join_handle); diff --git a/server/src/grpc/remote_engine_service/error.rs b/server/src/grpc/remote_engine_service/error.rs index 3f21697b1c..ff74f3d30c 100644 --- a/server/src/grpc/remote_engine_service/error.rs +++ b/server/src/grpc/remote_engine_service/error.rs @@ -40,7 +40,7 @@ impl Error { Error::ErrWithCause { msg, source, .. } => { let err_string = source.to_string(); let first_line = error_util::remove_backtrace_from_err(&err_string); - format!("{}. Caused by: {}", msg, first_line) + format!("{msg}. Caused by: {first_line}") } } } diff --git a/server/src/grpc/storage_service/error.rs b/server/src/grpc/storage_service/error.rs index c5c538f5c2..97cc262aeb 100644 --- a/server/src/grpc/storage_service/error.rs +++ b/server/src/grpc/storage_service/error.rs @@ -41,7 +41,7 @@ impl Error { Error::ErrWithCause { msg, source, .. } => { let err_string = source.to_string(); let first_line = error_util::remove_backtrace_from_err(&err_string); - format!("{}. Caused by: {}", msg, first_line) + format!("{msg}. 
Caused by: {first_line}") } } } diff --git a/server/src/grpc/storage_service/mod.rs b/server/src/grpc/storage_service/mod.rs index 591267209d..a46f98f430 100644 --- a/server/src/grpc/storage_service/mod.rs +++ b/server/src/grpc/storage_service/mod.rs @@ -293,7 +293,7 @@ impl StorageServiceImpl { if !has_err { resp.header = Some(error::build_ok_header()); - resp.success = total_success as u32; + resp.success = total_success; } GRPC_HANDLER_DURATION_HISTOGRAM_VEC @@ -498,7 +498,7 @@ fn build_schema_from_write_table_request( !write_entries.is_empty(), ErrNoCause { code: StatusCode::BAD_REQUEST, - msg: format!("empty write entires to write table:{}", table), + msg: format!("empty write entires to write table:{table}"), } ); @@ -512,8 +512,7 @@ fn build_schema_from_write_table_request( ErrNoCause { code: StatusCode::BAD_REQUEST, msg: format!( - "tag index {} is not found in tag_names:{:?}, table:{}", - name_index, tag_names, table, + "tag index {name_index} is not found in tag_names:{tag_names:?}, table:{table}", ), } ); @@ -525,15 +524,14 @@ fn build_schema_from_write_table_request( .as_ref() .with_context(|| ErrNoCause { code: StatusCode::BAD_REQUEST, - msg: format!("Tag({}) value is needed, table_name:{} ", tag_name, table), + msg: format!("Tag({tag_name}) value is needed, table_name:{table} "), })? .value .as_ref() .with_context(|| ErrNoCause { code: StatusCode::BAD_REQUEST, msg: format!( - "Tag({}) value type is not supported, table_name:{}", - tag_name, table + "Tag({tag_name}) value type is not supported, table_name:{table}" ), })?; @@ -556,15 +554,14 @@ fn build_schema_from_write_table_request( .as_ref() .with_context(|| ErrNoCause { code: StatusCode::BAD_REQUEST, - msg: format!("Field({}) value is needed, table:{}", field_name, table), + msg: format!("Field({field_name}) value is needed, table:{table}"), })? 
.value .as_ref() .with_context(|| ErrNoCause { code: StatusCode::BAD_REQUEST, msg: format!( - "Field({}) value type is not supported, table:{}", - field_name, table + "Field({field_name}) value type is not supported, table:{table}" ), })?; @@ -653,8 +650,7 @@ fn ensure_data_type_compatible( ErrNoCause { code: StatusCode::BAD_REQUEST, msg: format!( - "Duplicated column: {} in fields and tags for table: {}", - column_name, table_name, + "Duplicated column: {column_name} in fields and tags for table: {table_name}", ), } ); diff --git a/server/src/grpc/storage_service/prom_query.rs b/server/src/grpc/storage_service/prom_query.rs index 96f64b67af..8912ab4f43 100644 --- a/server/src/grpc/storage_service/prom_query.rs +++ b/server/src/grpc/storage_service/prom_query.rs @@ -262,8 +262,7 @@ impl RecordConverter { ErrNoCause { code: StatusCode::BAD_REQUEST, msg: format!( - "Field type must be f64-compatibile type, current:{}", - field_type + "Field type must be f64-compatibile type, current:{field_type}" ) } ); diff --git a/server/src/grpc/storage_service/write.rs b/server/src/grpc/storage_service/write.rs index e4850c2574..c57d51b01a 100644 --- a/server/src/grpc/storage_service/write.rs +++ b/server/src/grpc/storage_service/write.rs @@ -53,7 +53,7 @@ pub(crate) async fn handle_write( .box_err() .with_context(|| ErrWithCause { code: StatusCode::INTERNAL_SERVER_ERROR, - msg: format!("fail to fetch schema config, schema_name:{}", schema), + msg: format!("fail to fetch schema config, schema_name:{schema}"), })?; debug!( @@ -207,7 +207,7 @@ pub async fn write_request_to_insert_plan( None => { return ErrNoCause { code: StatusCode::BAD_REQUEST, - msg: format!("Table not found, schema:{}, table:{}", schema, table_name), + msg: format!("Table not found, schema:{schema}, table:{table_name}"), } .fail(); } @@ -229,27 +229,27 @@ fn try_get_table( .box_err() .with_context(|| ErrWithCause { code: StatusCode::INTERNAL_SERVER_ERROR, - msg: format!("Failed to find catalog, catalog_name:{}", catalog), + msg: format!("Failed to find catalog, catalog_name:{catalog}"), })? .with_context(|| ErrNoCause { code: StatusCode::BAD_REQUEST, - msg: format!("Catalog not found, catalog_name:{}", catalog), + msg: format!("Catalog not found, catalog_name:{catalog}"), })? .schema_by_name(schema) .box_err() .with_context(|| ErrWithCause { code: StatusCode::INTERNAL_SERVER_ERROR, - msg: format!("Failed to find schema, schema_name:{}", schema), + msg: format!("Failed to find schema, schema_name:{schema}"), })? .with_context(|| ErrNoCause { code: StatusCode::BAD_REQUEST, - msg: format!("Schema not found, schema_name:{}", schema), + msg: format!("Schema not found, schema_name:{schema}"), })? 
.table_by_name(table_name) .box_err() .with_context(|| ErrWithCause { code: StatusCode::INTERNAL_SERVER_ERROR, - msg: format!("Failed to find table, table:{}", table_name), + msg: format!("Failed to find table, table:{table_name}"), }) } @@ -386,8 +386,7 @@ fn write_entry_to_rows( ErrNoCause { code: StatusCode::BAD_REQUEST, msg: format!( - "tag index {} is not found in tag_names:{:?}, table:{}", - name_index, tag_names, table_name, + "tag index {name_index} is not found in tag_names:{tag_names:?}, table:{table_name}", ), } ); @@ -396,8 +395,7 @@ fn write_entry_to_rows( let tag_index_in_schema = schema.index_of(tag_name).with_context(|| ErrNoCause { code: StatusCode::BAD_REQUEST, msg: format!( - "Can't find tag({}) in schema, table:{}", - tag_name, table_name + "Can't find tag({tag_name}) in schema, table:{table_name}" ), })?; @@ -407,8 +405,7 @@ fn write_entry_to_rows( ErrNoCause { code: StatusCode::BAD_REQUEST, msg: format!( - "column({}) is a field rather than a tag, table:{}", - tag_name, table_name + "column({tag_name}) is a field rather than a tag, table:{table_name}" ), } ); @@ -417,14 +414,13 @@ fn write_entry_to_rows( .value .with_context(|| ErrNoCause { code: StatusCode::BAD_REQUEST, - msg: format!("Tag({}) value is needed, table:{}", tag_name, table_name), + msg: format!("Tag({tag_name}) value is needed, table:{table_name}"), })? .value .with_context(|| ErrNoCause { code: StatusCode::BAD_REQUEST, msg: format!( - "Tag({}) value type is not supported, table_name:{}", - tag_name, table_name + "Tag({tag_name}) value type is not supported, table_name:{table_name}" ), })?; for row in &mut rows { @@ -455,8 +451,7 @@ fn write_entry_to_rows( schema.index_of(field_name).with_context(|| ErrNoCause { code: StatusCode::BAD_REQUEST, msg: format!( - "Can't find field in schema, table:{}, field_name:{}", - table_name, field_name + "Can't find field in schema, table:{table_name}, field_name:{field_name}" ), })?; field_name_index.insert(field_name.to_string(), index_in_schema); @@ -468,8 +463,7 @@ fn write_entry_to_rows( ErrNoCause { code: StatusCode::BAD_REQUEST, msg: format!( - "Column {} is a tag rather than a field, table:{}", - field_name, table_name + "Column {field_name} is a tag rather than a field, table:{table_name}" ) } ); @@ -477,14 +471,13 @@ fn write_entry_to_rows( .value .with_context(|| ErrNoCause { code: StatusCode::BAD_REQUEST, - msg: format!("Field({}) is needed, table:{}", field_name, table_name), + msg: format!("Field({field_name}) is needed, table:{table_name}"), })? 
.value .with_context(|| ErrNoCause { code: StatusCode::BAD_REQUEST, msg: format!( - "Field({}) value type is not supported, table:{}", - field_name, table_name + "Field({field_name}) value type is not supported, table:{table_name}" ), })?; @@ -526,11 +519,7 @@ fn convert_proto_value_to_datum( (v, _) => ErrNoCause { code: StatusCode::BAD_REQUEST, msg: format!( - "Value type is not same, table:{}, value_name:{}, schema_type:{:?}, actual_value:{:?}", - table_name, - name, - data_type, - v + "Value type is not same, table:{table_name}, value_name:{name}, schema_type:{data_type:?}, actual_value:{v:?}" ), } .fail(), diff --git a/server/src/http.rs b/server/src/http.rs index fff8489ffc..128c499761 100644 --- a/server/src/http.rs +++ b/server/src/http.rs @@ -512,7 +512,7 @@ async fn handle_rejection( } else { error!("handle error: {:?}", rejection); code = StatusCode::INTERNAL_SERVER_ERROR; - message = format!("UNKNOWN_ERROR: {:?}", rejection); + message = format!("UNKNOWN_ERROR: {rejection:?}"); } let json = reply::json(&ErrorResponse { diff --git a/server/src/local_tables.rs b/server/src/local_tables.rs index 1c6810830e..b8ec20c852 100644 --- a/server/src/local_tables.rs +++ b/server/src/local_tables.rs @@ -56,10 +56,10 @@ impl LocalTablesRecoverer { .schema_by_name(&table_info.schema_name) .box_err() .context(RecoverWithCause { - msg: format!("failed to get schema of table, table_info:{:?}", table_info), + msg: format!("failed to get schema of table, table_info:{table_info:?}"), })? .with_context(|| RecoverNoCause { - msg: format!("schema of table not found, table_info:{:?}", table_info), + msg: format!("schema of table not found, table_info:{table_info:?}"), })?; let open_request = OpenTableRequest::from(table_info.clone()); @@ -68,10 +68,10 @@ impl LocalTablesRecoverer { .await .box_err() .context(RecoverWithCause { - msg: format!("failed to open table, open_request:{:?}", open_request), + msg: format!("failed to open table, open_request:{open_request:?}"), })? .with_context(|| RecoverNoCause { - msg: format!("no table is opened, open_request:{:?}", open_request), + msg: format!("no table is opened, open_request:{open_request:?}"), })?; } diff --git a/server/src/mysql/service.rs b/server/src/mysql/service.rs index 14c9769925..1c481bb8aa 100644 --- a/server/src/mysql/service.rs +++ b/server/src/mysql/service.rs @@ -76,7 +76,7 @@ impl MysqlService { let listener = tokio::net::TcpListener::bind(socket_addr) .await .unwrap_or_else(|e| { - panic!("Mysql server listens failed, err:{}", e); + panic!("Mysql server listens failed, err:{e}"); }); loop { tokio::select! { diff --git a/server/src/mysql/writer.rs b/server/src/mysql/writer.rs index 1bee891457..5694e2d477 100644 --- a/server/src/mysql/writer.rs +++ b/server/src/mysql/writer.rs @@ -84,7 +84,7 @@ impl<'a, W: std::io::Write> MysqlQueryResultWriter<'a, W> { } (_, v) => Err(std::io::Error::new( std::io::ErrorKind::Other, - format!("Unsupported column type, val: {:?}", v), + format!("Unsupported column type, val: {v:?}"), )), }? 
} diff --git a/sql/src/parser.rs b/sql/src/parser.rs index 06c171fc46..98fd7ea7b3 100644 --- a/sql/src/parser.rs +++ b/sql/src/parser.rs @@ -150,7 +150,7 @@ impl<'a> Parser<'a> { // Report unexpected token fn expected(&self, expected: &str, found: Token) -> Result { - parser_err!(format!("Expected {}, found: {}", expected, found)) + parser_err!(format!("Expected {expected}, found: {found}")) } // Parse a new expression @@ -258,8 +258,7 @@ impl<'a> Parser<'a> { let obj_type = match self.parser.expect_one_of_keywords(&[Keyword::TABLE])? { Keyword::TABLE => Ok(ShowCreateObject::Table), keyword => parser_err!(format!( - "Unable to map keyword to ShowCreateObject: {:?}", - keyword + "Unable to map keyword to ShowCreateObject: {keyword:?}" )), }?; @@ -599,7 +598,7 @@ impl<'a> Parser<'a> { .parser .parse_parenthesized_column_list(Mandatory, false) .map_err(|e| { - ParserError::ParserError(format!("Fail to parse partition key, err:{}", e)) + ParserError::ParserError(format!("Fail to parse partition key, err:{e}")) })?; // Ensure at least one column for partition key. @@ -650,10 +649,10 @@ impl<'a> Parser<'a> { sqlparser::ast::Value::Number(v, _) => match v.parse::() { Ok(v) => v, Err(e) => { - return parser_err!(format!("invalid partition num, raw:{}, err:{}", v, e)) + return parser_err!(format!("invalid partition num, raw:{v}, err:{e}")) } }, - v => return parser_err!(format!("expect partition number, found:{}", v)), + v => return parser_err!(format!("expect partition number, found:{v}")), } } else { 1 @@ -677,16 +676,16 @@ impl<'a> Parser<'a> { if check_column_expr_validity_in_hash(id, columns) { Ok(*inner) } else { - parser_err!(format!("Expect column(tag, type: int, tiny int, small int, big int), search by column name:{}", id)) + parser_err!(format!("Expect column(tag, type: int, tiny int, small int, big int), search by column name:{id}")) } }, other => parser_err!( - format!("Only column expr in hash partition now, example: HASH(column name), found:{:?}", other) + format!("Only column expr in hash partition now, example: HASH(column name), found:{other:?}") ), } } else { - parser_err!(format!("Expect nested expr, found:{:?}", expr)) + parser_err!(format!("Expect nested expr, found:{expr:?}")) } } @@ -827,17 +826,14 @@ mod tests { match Parser::parse_sql(sql) { Ok(statements) => { panic!( - "Expected parse error for '{}', but was successful: {:?}", - sql, statements + "Expected parse error for '{sql}', but was successful: {statements:?}" ); } Err(e) => { let error_message = e.to_string(); assert!( error_message.contains(expected_error), - "Expected error '{}' not found in actual error '{}'", - expected_error, - error_message + "Expected error '{expected_error}' not found in actual error '{error_message}'" ); } } @@ -1212,7 +1208,7 @@ mod tests { let statements = Parser::parse_sql(sql).unwrap(); assert!( if let Statement::Standard(standard_statement) = &statements[0] { - let standard_statement_str = format!("{}", standard_statement); + let standard_statement_str = format!("{standard_statement}"); assert!(standard_statement_str.contains("`testa`")); true @@ -1227,7 +1223,7 @@ mod tests { let statements = Parser::parse_sql(sql).unwrap(); assert!( if let Statement::Standard(standard_statement) = &statements[0] { - let standard_statement_str = format!("{}", standard_statement); + let standard_statement_str = format!("{standard_statement}"); assert!(standard_statement_str.contains("`testa`")); true @@ -1242,7 +1238,7 @@ mod tests { let statements = Parser::parse_sql(sql).unwrap(); assert!( if let 
Statement::Standard(standard_statement) = &statements[0] { - let standard_statement_str = format!("{}", standard_statement); + let standard_statement_str = format!("{standard_statement}"); assert!(standard_statement_str.contains("`testa`")); assert!(standard_statement_str.contains("`TEstB`")); assert!(standard_statement_str.contains("`TESTC`")); @@ -1337,22 +1333,22 @@ mod tests { // Unsupported expr let sql = r#"CREATE TABLE t(c1 string, c2 int TAG, c3 bigint TAG) PARTITION BY HASH(c2, c3) PARTITIONS 4"#; assert!( - matches!(Parser::parse_sql(sql), Err(e) if format!("{:?}", e).contains("ParserError") - && format!("{:?}", e).contains("Expect nested expr")) + matches!(Parser::parse_sql(sql), Err(e) if format!("{e:?}").contains("ParserError") + && format!("{e:?}").contains("Expect nested expr")) ); // Column of invalid type let sql = r#"CREATE TABLE t(c1 string, c2 int, c3 bigint) PARTITION BY HASH(c1) PARTITIONS 4"#; assert!( - matches!(Parser::parse_sql(sql), Err(e) if format!("{:?}", e).contains("ParserError") - && format!("{:?}", e).contains("Expect column")) + matches!(Parser::parse_sql(sql), Err(e) if format!("{e:?}").contains("ParserError") + && format!("{e:?}").contains("Expect column")) ); // Column not tag let sql = r#"CREATE TABLE t(c1 string, c2 int, c3 bigint) PARTITION BY HASH(c2) PARTITIONS 4"#; assert!( - matches!(Parser::parse_sql(sql), Err(e) if format!("{:?}", e).contains("ParserError") - && format!("{:?}", e).contains("Expect column")) + matches!(Parser::parse_sql(sql), Err(e) if format!("{e:?}").contains("ParserError") + && format!("{e:?}").contains("Expect column")) ); } @@ -1360,8 +1356,8 @@ mod tests { fn invalid_partitions_num() { let sql = r#"CREATE TABLE t(c1 string, c2 int TAG, c3 bigint) PARTITION BY HASH(c2) PARTITIONS 'string'"#; assert!( - matches!(Parser::parse_sql(sql), Err(e) if format!("{:?}", e).contains("ParserError") - && format!("{:?}", e).contains("Expected literal number")) + matches!(Parser::parse_sql(sql), Err(e) if format!("{e:?}").contains("ParserError") + && format!("{e:?}").contains("Expected literal number")) ); } } @@ -1440,9 +1436,8 @@ mod tests { fn create_sql_with_partition_num(partition_num: u64) -> String { format!( r#"CREATE TABLE `demo` (`name` string TAG, `value` double NOT NULL, - `t` timestamp NOT NULL, TIMESTAMP KEY(t)) PARTITION BY KEY(name) PARTITIONS {} - ENGINE=Analytic with (enable_ttl="false")"#, - partition_num + `t` timestamp NOT NULL, TIMESTAMP KEY(t)) PARTITION BY KEY(name) PARTITIONS {partition_num} + ENGINE=Analytic with (enable_ttl="false")"# ) } } diff --git a/sql/src/partition.rs b/sql/src/partition.rs index 06fc8050fa..7c6e7300b3 100644 --- a/sql/src/partition.rs +++ b/sql/src/partition.rs @@ -42,7 +42,7 @@ impl PartitionParser { if let SqlExpr::Identifier(id) = expr { let expr = Expr::Column(Column::from_name(id.value)); let expr = expr.to_bytes().box_err().context(ParsePartitionWithCause { - msg: format!("found invalid expr in hash, expr:{}", expr), + msg: format!("found invalid expr in hash, expr:{expr}"), })?; Ok(HashPartitionInfo { @@ -53,7 +53,7 @@ impl PartitionParser { }) } else { UnsupportedPartition { - msg: format!("unsupported expr:{}", expr), + msg: format!("unsupported expr:{expr}"), } .fail() } @@ -79,7 +79,6 @@ impl PartitionParser { fn make_partition_definitions(partition_num: u64) -> Vec { (0..partition_num) - .into_iter() .map(|p| PartitionDefinition { name: p.to_string(), origin_name: None, diff --git a/sql/src/planner.rs b/sql/src/planner.rs index a7d8eb292a..3ee78a7ab4 100644 --- 
a/sql/src/planner.rs +++ b/sql/src/planner.rs @@ -1071,7 +1071,7 @@ mod tests { let mut statements = Parser::parse_sql(sql).unwrap(); assert_eq!(statements.len(), 1); let plan = planner.statement_to_plan(statements.remove(0))?; - assert_eq!(format!("{:#?}", plan), expected); + assert_eq!(format!("{plan:#?}"), expected); Ok(()) } diff --git a/sql/src/promql/convert.rs b/sql/src/promql/convert.rs index ce600e02f6..f324249995 100644 --- a/sql/src/promql/convert.rs +++ b/sql/src/promql/convert.rs @@ -385,7 +385,7 @@ impl Expr { "avg" => avg(col(field)), _ => { return InvalidExpr { - msg: format!("aggr {} not supported now", aggr_op), + msg: format!("aggr {aggr_op} not supported now"), } .fail() } @@ -642,7 +642,7 @@ impl Selector { ); } else { return InvalidExpr { - msg: format!("field:{} not found", field), + msg: format!("field:{field} not found"), } .fail(); }; @@ -665,7 +665,7 @@ impl Selector { .tsid_column() .map(|c| col(&c.name)) .context(InvalidExpr { - msg: format!("{} not found", TSID_COLUMN), + msg: format!("{TSID_COLUMN} not found"), })?; let field_expr = col(field); projection.extend(vec![timestamp_expr, tsid_expr, field_expr]); diff --git a/sql/src/promql/datafusion_util.rs b/sql/src/promql/datafusion_util.rs index b1a28115a5..525b285c35 100644 --- a/sql/src/promql/datafusion_util.rs +++ b/sql/src/promql/datafusion_util.rs @@ -64,7 +64,7 @@ impl UserDefinedLogicalNode for PromAlignNode { } fn expressions(&self) -> Vec { - let qualified_name = |n| col(&format!("{}.{}", self.table_name, n)); + let qualified_name = |n| col(format!("{}.{}", self.table_name, n)); let mut exprs = self .column_name diff --git a/sql/src/promql/udf.rs b/sql/src/promql/udf.rs index 31ac1bb657..636bb1695e 100644 --- a/sql/src/promql/udf.rs +++ b/sql/src/promql/udf.rs @@ -42,7 +42,7 @@ pub fn regex_match_expr(input: Expr, pattern: String, matches: bool) -> Expr { let input_arr = &args[0].as_any().downcast_ref::().unwrap(); let pattern = regex::Regex::new(&pattern).map_err(|e| { - DataFusionError::Internal(format!("error compiling regex pattern: {}", e)) + DataFusionError::Internal(format!("error compiling regex pattern: {e}")) })?; let results = input_arr @@ -257,8 +257,7 @@ mod tests { assert_eq!( expected, actual, - "\n\nEXPECTED:\n{:#?}\nACTUAL:\n{:#?}\n", - expected, actual + "\n\nEXPECTED:\n{expected:#?}\nACTUAL:\n{actual:#?}\n" ); } } diff --git a/sql/src/provider.rs b/sql/src/provider.rs index 40e8456f04..81eb931720 100644 --- a/sql/src/provider.rs +++ b/sql/src/provider.rs @@ -389,13 +389,13 @@ impl CatalogProvider for CatalogProviderAdapter { /// [`Debug`] or implement [`std::fmt::Display`]. 
fn format_table_reference(table_ref: TableReference) -> String { match table_ref { - TableReference::Bare { table } => format!("table:{}", table), - TableReference::Partial { schema, table } => format!("schema:{}, table:{}", schema, table), + TableReference::Bare { table } => format!("table:{table}"), + TableReference::Partial { schema, table } => format!("schema:{schema}, table:{table}"), TableReference::Full { catalog, schema, table, - } => format!("catalog:{}, schema:{}, table:{}", catalog, schema, table), + } => format!("catalog:{catalog}, schema:{schema}, table:{table}"), } } diff --git a/src/bin/ceresdb-server.rs b/src/bin/ceresdb-server.rs index 9619dc1845..2696440591 100644 --- a/src/bin/ceresdb-server.rs +++ b/src/bin/ceresdb-server.rs @@ -21,8 +21,7 @@ fn fetch_version() -> String { let build_time = option_env!("VERGEN_BUILD_TIMESTAMP").unwrap_or("NONE"); format!( - "\nCeresDB Version: {}\nGit branch: {}\nGit commit: {}\nBuild: {}", - build_version, git_branch, git_commit_id, build_time + "\nCeresDB Version: {build_version}\nGit branch: {git_branch}\nGit commit: {git_commit_id}\nBuild: {build_time}" ) } diff --git a/table_engine/src/partition/mod.rs b/table_engine/src/partition/mod.rs index 48527b0fa0..6ae9d5fc96 100644 --- a/table_engine/src/partition/mod.rs +++ b/table_engine/src/partition/mod.rs @@ -234,8 +234,7 @@ impl TryFrom for PartitionInfo { pub fn format_sub_partition_table_name(table_name: &str, partition_name: &str) -> String { format!( - "{}{}_{}", - PARTITION_TABLE_PREFIX, table_name, partition_name + "{PARTITION_TABLE_PREFIX}{table_name}_{partition_name}" ) } diff --git a/table_engine/src/partition/rule/df_adapter/mod.rs b/table_engine/src/partition/rule/df_adapter/mod.rs index 50958755c7..65328aa113 100644 --- a/table_engine/src/partition/rule/df_adapter/mod.rs +++ b/table_engine/src/partition/rule/df_adapter/mod.rs @@ -54,8 +54,7 @@ impl DfPartitionRuleAdapter { PartitionInfo::Key(_) => Ok(Box::new(KeyExtractor)), PartitionInfo::Hash(_) => BuildPartitionRule { msg: format!( - "unsupported partition strategy, strategy:{:?}", - partition_info + "unsupported partition strategy, strategy:{partition_info:?}" ), } .fail(), @@ -158,7 +157,7 @@ mod tests { .unwrap(); // Expected - let all_partitions = (0..partition_num).into_iter().collect::>(); + let all_partitions = (0..partition_num).collect::>(); assert_eq!(partitions_1, all_partitions); assert_eq!(partitions_2, all_partitions); } diff --git a/table_engine/src/partition/rule/factory.rs b/table_engine/src/partition/rule/factory.rs index 132f008fb1..ec5ed9bf63 100644 --- a/table_engine/src/partition/rule/factory.rs +++ b/table_engine/src/partition/rule/factory.rs @@ -21,8 +21,7 @@ impl PartitionRuleFactory { PartitionInfo::Key(key_info) => Self::create_key_rule(key_info, schema), _ => BuildPartitionRule { msg: format!( - "unsupported partition strategy, strategy:{:?}", - partition_info + "unsupported partition strategy, strategy:{partition_info:?}" ), } .fail(), @@ -47,8 +46,7 @@ impl PartitionRuleFactory { .column_with_name(col.as_str()) .with_context(|| BuildPartitionRule { msg: format!( - "column in key partition info not found in schema, column:{}", - col + "column in key partition info not found in schema, column:{col}" ), }) .map(|col_schema| ColumnWithType::new(col, col_schema.data_type)) diff --git a/table_engine/src/partition/rule/key.rs b/table_engine/src/partition/rule/key.rs index ed1eba891a..1718f351c4 100644 --- a/table_engine/src/partition/rule/key.rs +++ b/table_engine/src/partition/rule/key.rs @@ 
-164,7 +164,7 @@ impl PartitionRule for KeyRule { } fn locate_partitions_for_read(&self, filters: &[PartitionFilter]) -> Result> { - let all_partitions = (0..self.partition_num as usize).into_iter().collect(); + let all_partitions = (0..self.partition_num).collect(); // Filters are empty. if filters.is_empty() { @@ -221,7 +221,7 @@ fn expand_partition_keys_group( PartitionCondition::In(datums) => datums.clone(), _ => { return Internal { - msg: format!("invalid partition filter found, filter:{:?},", filter), + msg: format!("invalid partition filter found, filter:{filter:?},"), } .fail() } diff --git a/table_engine/src/predicate.rs b/table_engine/src/predicate.rs index f76cff013e..34f9fc8f58 100644 --- a/table_engine/src/predicate.rs +++ b/table_engine/src/predicate.rs @@ -94,7 +94,7 @@ impl TryFrom<&Predicate> for ceresdbproto::remote_engine::Predicate { let expr = expr .to_bytes() .context(PredicateToPb { - msg: format!("convert expr failed, expr:{}", expr), + msg: format!("convert expr failed, expr:{expr}"), })? .to_vec(); exprs.push(expr); diff --git a/table_engine/src/provider.rs b/table_engine/src/provider.rs index d57c48d21c..bfe7b06a10 100644 --- a/table_engine/src/provider.rs +++ b/table_engine/src/provider.rs @@ -61,7 +61,7 @@ impl ExtensionOptions for CeresdbOptions { "request_id" => { self.request_id = value.parse::().map_err(|e| { DataFusionError::External( - format!("could not parse request_id, input:{}, err:{:?}", value, e).into(), + format!("could not parse request_id, input:{value}, err:{e:?}").into(), ) })? } @@ -69,15 +69,14 @@ impl ExtensionOptions for CeresdbOptions { self.request_timeout = Some(value.parse::().map_err(|e| { DataFusionError::External( format!( - "could not parse request_timeout, input:{}, err:{:?}", - value, e + "could not parse request_timeout, input:{value}, err:{e:?}" ) .into(), ) })?) } _ => Err(DataFusionError::External( - format!("could not find key, key:{}", key).into(), + format!("could not find key, key:{key}").into(), ))?, } Ok(()) @@ -163,8 +162,7 @@ impl TableProviderAdapter { projected_schema: ProjectedSchema::new(self.read_schema.clone(), projection.cloned()) .map_err(|e| { DataFusionError::Internal(format!( - "Invalid projection, plan:{:?}, projection:{:?}, err:{:?}", - self, projection, e + "Invalid projection, plan:{self:?}, projection:{projection:?}, err:{e:?}" )) })?, table: self.table.clone(), @@ -278,16 +276,14 @@ impl ScanStreamState { fn take_stream(&mut self, index: usize) -> Result { if let Some(e) = &self.err { return Err(DataFusionError::Execution(format!( - "Failed to read table, partition:{}, err:{}", - index, e + "Failed to read table, partition:{index}, err:{e}" ))); } // TODO(yingwen): Return an empty stream if index is out of bound. 
self.streams[index].take().ok_or_else(|| { DataFusionError::Execution(format!( - "Read partition multiple times is not supported, partition:{}", - index + "Read partition multiple times is not supported, partition:{index}" )) }) } @@ -369,8 +365,7 @@ impl ExecutionPlan for ScanTable { _: Vec>, ) -> Result> { Err(DataFusionError::Internal(format!( - "Children cannot be replaced in {:?}", - self + "Children cannot be replaced in {self:?}" ))) } diff --git a/tools/src/bin/sst-convert.rs b/tools/src/bin/sst-convert.rs index 41ca453a2f..cc1076deb3 100644 --- a/tools/src/bin/sst-convert.rs +++ b/tools/src/bin/sst-convert.rs @@ -63,7 +63,7 @@ fn main() { let rt2 = rt.clone(); rt.block_on(async move { if let Err(e) = run(args, rt2).await { - eprintln!("Convert failed, err:{}", e); + eprintln!("Convert failed, err:{e}"); } }); } @@ -119,7 +119,7 @@ async fn run(args: Args, runtime: Arc) -> Result<()> { .write(RequestId::next_id(), &sst_meta, sst_stream) .await?; - println!("Write success, info:{:?}", sst_info); + println!("Write success, info:{sst_info:?}"); Ok(()) } diff --git a/wal/src/message_queue_impl/encoding.rs b/wal/src/message_queue_impl/encoding.rs index 3a285b584c..8613981978 100644 --- a/wal/src/message_queue_impl/encoding.rs +++ b/wal/src/message_queue_impl/encoding.rs @@ -92,13 +92,13 @@ define_result!(Error); /// Generate wal data topic name #[allow(unused)] pub fn format_wal_data_topic_name(namespace: &str, region_id: u64) -> String { - format!("{}_data_{}", namespace, region_id) + format!("{namespace}_data_{region_id}") } /// Generate wal meta topic name #[allow(unused)] pub fn format_wal_meta_topic_name(namespace: &str, region_id: u64) -> String { - format!("{}_meta_{}", namespace, region_id) + format!("{namespace}_meta_{region_id}") } #[allow(unused)] diff --git a/wal/src/message_queue_impl/log_cleaner.rs b/wal/src/message_queue_impl/log_cleaner.rs index dd2d495642..b1c4f05a85 100644 --- a/wal/src/message_queue_impl/log_cleaner.rs +++ b/wal/src/message_queue_impl/log_cleaner.rs @@ -79,7 +79,7 @@ impl LogCleaner { region_id: self.region_id, topic: self.log_topic.clone(), msg: format!("the new safe delete offset should be larger than the last deleted offset, inner state inconsistent, - safe delete offset:{}, last deleted offset:{}", safe_delete_offset, last_deleted_offset), + safe delete offset:{safe_delete_offset}, last deleted offset:{last_deleted_offset}"), }); if safe_delete_offset == last_deleted_offset { diff --git a/wal/src/message_queue_impl/region.rs b/wal/src/message_queue_impl/region.rs index aa3eb720fc..7175d62c2d 100644 --- a/wal/src/message_queue_impl/region.rs +++ b/wal/src/message_queue_impl/region.rs @@ -259,7 +259,7 @@ impl Region { ensure!(returned_high_watermark == high_watermark, OpenNoCause { namespace , region_id, msg: format!( "failed while recover from meta, high watermark shouldn't changed while opening region, - origin high watermark:{}, returned high watermark:{}", high_watermark, returned_high_watermark) + origin high watermark:{high_watermark}, returned high watermark:{returned_high_watermark}") }); // Decode and apply it to builder. 
@@ -347,8 +347,7 @@ impl Region { })?; ensure!(start_offset <= high_watermark, OpenNoCause { namespace , region_id, msg: format!( - "failed while recover from log, start offset should be less than or equal to high watermark, now are:{} and {}", - start_offset, high_watermark) + "failed while recover from log, start offset should be less than or equal to high watermark, now are:{start_offset} and {high_watermark}") }); if start_offset == high_watermark { @@ -377,7 +376,7 @@ impl Region { ensure!(returned_high_watermark == high_watermark, OpenNoCause { namespace , region_id, msg: format!( "failed while recover from log, high watermark shouldn't changed while opening region, - origin high watermark:{}, returned high watermark:{}", high_watermark, returned_high_watermark) + origin high watermark:{high_watermark}, returned high watermark:{returned_high_watermark}") }); // Decode and apply it to builder. @@ -418,8 +417,7 @@ impl Region { namespace, region_id, msg: format!( - "failed while recover from log, region meta delta:{:?}", - region_meta_delta + "failed while recover from log, region meta delta:{region_meta_delta:?}" ), })?; @@ -502,8 +500,7 @@ impl Region { region_id: inner.region_context.region_id(), table_id: None, msg: format!( - "failed while creating iterator, scan range:{:?}", - scan_range + "failed while creating iterator, scan range:{scan_range:?}" ), })?, )) @@ -562,8 +559,7 @@ impl Region { region_id: inner.region_context.region_id(), table_id: Some(table_id), msg: format!( - "failed while creating iterator, scan range:{:?}", - scan_range + "failed while creating iterator, scan range:{scan_range:?}" ), })?, )) @@ -820,8 +816,7 @@ impl MessageQueueLogIterator { ensure!(*terminate_offset <= high_watermark, ScanNoCause { region_id: self.region_id, table_id: self.table_id, - msg: format!("the setting terminate offset is invalid, it should be less than or equals to high watermark, terminate offset:{}, high watermark:{}", - terminate_offset, high_watermark), + msg: format!("the setting terminate offset is invalid, it should be less than or equals to high watermark, terminate offset:{terminate_offset}, high watermark:{high_watermark}"), }); if message_and_offset.offset + 1 == *terminate_offset { diff --git a/wal/src/message_queue_impl/region_context.rs b/wal/src/message_queue_impl/region_context.rs index d2423fe6d7..3b611e5997 100644 --- a/wal/src/message_queue_impl/region_context.rs +++ b/wal/src/message_queue_impl/region_context.rs @@ -145,8 +145,7 @@ impl RegionContext { region_id: self.region_id, table_id, msg: format!( - "table not found while mark it delete to, table id:{}, sequence number:{}", - table_id, sequence_num + "table not found while mark it delete to, table id:{table_id}, sequence number:{sequence_num}" ), })?; @@ -494,7 +493,7 @@ impl RegionContextBuilder { .table_metas .insert(entry.table_id, entry.clone().into()); ensure!(old_meta.is_none(), - Build { msg: format!("apply snapshot failed, shouldn't exist duplicated entry in snapshot, duplicated entry:{:?}", entry) } + Build { msg: format!("apply snapshot failed, shouldn't exist duplicated entry in snapshot, duplicated entry:{entry:?}") } ); } @@ -679,7 +678,7 @@ impl TableWriter { WriteNoCause { region_id, table_id, - msg: format!("invalid offset range, offset range:{:?}", offset_range), + msg: format!("invalid offset range, offset range:{offset_range:?}"), } ); diff --git a/wal/src/table_kv_impl/encoding.rs b/wal/src/table_kv_impl/encoding.rs index 66c128c6d3..7c44f10f6d 100644 --- 
a/wal/src/table_kv_impl/encoding.rs +++ b/wal/src/table_kv_impl/encoding.rs @@ -38,12 +38,12 @@ pub fn scan_request_for_prefix(prefix: &str) -> ScanRequest { #[inline] pub fn format_namespace_key(namespace: &str) -> String { - format!("{}/{}", META_NAMESPACE_PREFIX, namespace) + format!("{META_NAMESPACE_PREFIX}/{namespace}") } #[inline] pub fn bucket_key_prefix(namespace: &str) -> String { - format!("{}/{}/", META_BUCKET_PREFIX, namespace) + format!("{META_BUCKET_PREFIX}/{namespace}/") } pub fn format_timed_bucket_key( @@ -69,12 +69,12 @@ pub fn format_timed_bucket_key( } pub fn format_permanent_bucket_key(namespace: &str) -> String { - format!("{}/{}/permanent", META_BUCKET_PREFIX, namespace) + format!("{META_BUCKET_PREFIX}/{namespace}/permanent") } #[inline] pub fn format_table_unit_meta_name(namespace: &str, shard_id: usize) -> String { - format!("table_unit_meta_{}_{:0>6}", namespace, shard_id) + format!("table_unit_meta_{namespace}_{shard_id:0>6}") } #[inline] @@ -100,12 +100,12 @@ pub fn format_timed_wal_name( #[inline] pub fn format_permanent_wal_name(namespace: &str, shard_id: usize) -> String { - format!("wal_{}_permanent_{:0>6}", namespace, shard_id) + format!("wal_{namespace}_permanent_{shard_id:0>6}") } #[inline] pub fn format_table_unit_key(table_id: TableId) -> String { - format!("{}/{}", TABLE_UNIT_META_PREFIX, table_id) + format!("{TABLE_UNIT_META_PREFIX}/{table_id}") } #[cfg(test)] diff --git a/wal/src/table_kv_impl/namespace.rs b/wal/src/table_kv_impl/namespace.rs index 2b43300e7e..07a8fc5fb1 100644 --- a/wal/src/table_kv_impl/namespace.rs +++ b/wal/src/table_kv_impl/namespace.rs @@ -1223,9 +1223,7 @@ impl BucketSet { assert!( old_bucket.is_none(), - "Try to overwrite old bucket, old_bucket:{:?}, new_bucket:{:?}", - old_bucket, - bucket, + "Try to overwrite old bucket, old_bucket:{old_bucket:?}, new_bucket:{bucket:?}", ); } diff --git a/wal/src/table_kv_impl/table_unit.rs b/wal/src/table_kv_impl/table_unit.rs index c7b5f471c0..56357423bd 100644 --- a/wal/src/table_kv_impl/table_unit.rs +++ b/wal/src/table_kv_impl/table_unit.rs @@ -458,8 +458,7 @@ impl TableUnit { table_id, msg: format!( "found last sequence, but last sequence + 1 < start sequence, - last sequence:{}, start sequence:{}", - sequence, start_sequence, + last sequence:{sequence}, start sequence:{start_sequence}", ) } ); From 572455e54b1bc62e7b216e8e4af34c6a38c59e63 Mon Sep 17 00:00:00 2001 From: jiacai2050 Date: Tue, 14 Feb 2023 13:57:21 +0800 Subject: [PATCH 07/11] auto fix --- Makefile | 4 +++- analytic_engine/src/manifest/details.rs | 2 +- analytic_engine/src/sampler.rs | 3 +-- analytic_engine/src/sst/parquet/async_reader.rs | 2 -- analytic_engine/src/sst/parquet/hybrid.rs | 2 +- server/src/grpc/remote_engine_service/mod.rs | 2 +- server/src/grpc/storage_service/mod.rs | 15 ++++++++------- sql/src/planner.rs | 2 +- 8 files changed, 16 insertions(+), 16 deletions(-) diff --git a/Makefile b/Makefile index 3254aef115..61af139a16 100644 --- a/Makefile +++ b/Makefile @@ -54,7 +54,9 @@ check-license: cd $(DIR); sh scripts/check-license.sh clippy: - cd $(DIR); cargo clippy --all-targets --all-features --workspace -- -D warnings + cd $(DIR); cargo clippy --all-targets --all-features --workspace -- -D warnings \ + -A clippy::result_large_err -A clippy::box_default -A clippy::extra-unused-lifetimes \ + -A clippy::only-used-in-recursion # test with address sanitizer asan-test: diff --git a/analytic_engine/src/manifest/details.rs b/analytic_engine/src/manifest/details.rs index fabcaa9514..06efb17390 100644 --- 
a/analytic_engine/src/manifest/details.rs +++ b/analytic_engine/src/manifest/details.rs @@ -786,7 +786,7 @@ mod tests { } fn table_name_from_id(table_id: TableId) -> String { - format!("table_{:?}", table_id) + format!("table_{table_id:?}") } async fn open_manifest(&self) -> ManifestImpl { diff --git a/analytic_engine/src/sampler.rs b/analytic_engine/src/sampler.rs index 304d052327..86729dba2b 100644 --- a/analytic_engine/src/sampler.rs +++ b/analytic_engine/src/sampler.rs @@ -257,8 +257,7 @@ mod tests { assert_eq!( *expect, pick_duration(*interval).as_millis() as u64, - "Case {}", - i + "Case {i}" ); } } diff --git a/analytic_engine/src/sst/parquet/async_reader.rs b/analytic_engine/src/sst/parquet/async_reader.rs index 70d2efb327..9753028723 100644 --- a/analytic_engine/src/sst/parquet/async_reader.rs +++ b/analytic_engine/src/sst/parquet/async_reader.rs @@ -752,7 +752,6 @@ mod tests { fn gen_test_data(amount: usize) -> Vec { (0..amount) - .into_iter() .map(|_| rand::random::()) .collect() } @@ -768,7 +767,6 @@ mod tests { let channel_cap_per_sub_reader = 10; let reader_num = 5; let (tx_group, rx_group): (Vec<_>, Vec<_>) = (0..reader_num) - .into_iter() .map(|_| mpsc::channel::(channel_cap_per_sub_reader)) .unzip(); diff --git a/analytic_engine/src/sst/parquet/hybrid.rs b/analytic_engine/src/sst/parquet/hybrid.rs index 9a4f60cb67..bc01017fbb 100644 --- a/analytic_engine/src/sst/parquet/hybrid.rs +++ b/analytic_engine/src/sst/parquet/hybrid.rs @@ -740,7 +740,7 @@ mod tests { true, )))) .len(2) - .add_buffer(Buffer::from_slice_ref(&offsets)) + .add_buffer(Buffer::from_slice_ref(offsets)) .add_child_data(string_data.data().to_owned()) .build() .unwrap(); diff --git a/server/src/grpc/remote_engine_service/mod.rs b/server/src/grpc/remote_engine_service/mod.rs index 5b1675b461..1bdc655b6f 100644 --- a/server/src/grpc/remote_engine_service/mod.rs +++ b/server/src/grpc/remote_engine_service/mod.rs @@ -70,7 +70,7 @@ impl RemoteEngineServiceImpl { }) }); let tx = tx.clone(); - let _ = self.runtimes.read_runtime.spawn(async move { + self.runtimes.read_runtime.spawn(async move { while let Some(batch) = stream.next().await { if let Err(e) = tx.send(batch).await { error!("Failed to send handler result, err:{}.", e); diff --git a/server/src/grpc/storage_service/mod.rs b/server/src/grpc/storage_service/mod.rs index a46f98f430..fb54258a87 100644 --- a/server/src/grpc/storage_service/mod.rs +++ b/server/src/grpc/storage_service/mod.rs @@ -21,7 +21,7 @@ use common_types::{ datum::DatumKind, schema::{Builder as SchemaBuilder, Schema, TSID_COLUMN}, }; -use common_util::{error::BoxError, runtime::JoinHandle, time::InstantExt}; +use common_util::{error::BoxError, time::InstantExt}; use futures::stream::{self, BoxStream, StreamExt}; use http::StatusCode; use interpreters::interpreter::Output; @@ -36,7 +36,10 @@ use tokio::sync::mpsc; use tokio_stream::wrappers::ReceiverStream; use tonic::metadata::{KeyAndValueRef, MetadataMap}; -use self::sql_query::{QueryResponseBuilder, QueryResponseWriter}; +use self::{ + error::Error, + sql_query::{QueryResponseBuilder, QueryResponseWriter}, +}; use crate::{ grpc::{ forward::ForwarderRef, @@ -317,7 +320,7 @@ impl StorageServiceImpl { let resp_compress_min_length = self.resp_compress_min_length; let (tx, rx) = mpsc::channel(STREAM_QUERY_CHANNEL_LEN); - let _: JoinHandle> = self.runtimes.read_runtime.spawn(async move { + self.runtimes.read_runtime.spawn(async move { let handler_ctx = HandlerContext::new( header, router, @@ -357,7 +360,7 @@ impl StorageServiceImpl { } } - 
Ok(()) + Ok::<(), Error>(()) }); GRPC_HANDLER_DURATION_HISTOGRAM_VEC @@ -530,9 +533,7 @@ fn build_schema_from_write_table_request( .as_ref() .with_context(|| ErrNoCause { code: StatusCode::BAD_REQUEST, - msg: format!( - "Tag({tag_name}) value type is not supported, table_name:{table}" - ), + msg: format!("Tag({tag_name}) value type is not supported, table_name:{table}"), })?; let data_type = try_get_data_type_from_value(tag_value)?; diff --git a/sql/src/planner.rs b/sql/src/planner.rs index 3ee78a7ab4..72fa98e0c1 100644 --- a/sql/src/planner.rs +++ b/sql/src/planner.rs @@ -990,7 +990,7 @@ fn parse_column(col: &ColumnDef) -> Result { // Ensure default value option of columns are valid. fn ensure_column_default_value_valid<'a, P: MetaProvider>( columns: &[ColumnSchema], - meta_provider: &ContextProviderAdapter<'a, P>, + meta_provider: &ContextProviderAdapter<'_, P>, ) -> Result<()> { let df_planner = SqlToRel::new(meta_provider); let mut df_schema = DFSchema::empty(); From 7c43380067aedb592b33f394bbc14e67c0082d49 Mon Sep 17 00:00:00 2001 From: jiacai2050 Date: Tue, 14 Feb 2023 14:10:50 +0800 Subject: [PATCH 08/11] create SqlToRel with false ident --- .../cases/local/00_dummy/select_1.result | 2 +- sql/src/planner.rs | 26 ++++++++++++++++--- 2 files changed, 23 insertions(+), 5 deletions(-) diff --git a/integration_tests/cases/local/00_dummy/select_1.result b/integration_tests/cases/local/00_dummy/select_1.result index 52be9a215e..efda875069 100644 --- a/integration_tests/cases/local/00_dummy/select_1.result +++ b/integration_tests/cases/local/00_dummy/select_1.result @@ -22,7 +22,7 @@ Boolean(false), SELECT NOT(1); -Failed to execute query, err: Server(ServerError { code: 500, msg: "Failed to execute interpreter, sql: SELECT NOT(1);. Caused by: Failed to execute select, err:Failed to execute logical plan, err:Failed to collect record batch stream, err:Stream error, msg:convert from arrow record batch, err:External error: Internal error: NOT 'Literal { value: Int64(1) }' can't be evaluated because the expression's type is Int64, not boolean or NULL. This was likely caused by a bug in DataFusion's code and we would welcome that you file an bug report in our issue tracker" }) +Failed to execute query, err: Server(ServerError { code: 500, msg: "Failed to execute interpreter, sql: SELECT NOT(1);. Caused by: Failed to execute select, err:Failed to execute logical plan, err:Failed to collect record batch stream, err:Stream error, msg:convert from arrow record batch, err:Internal error: NOT 'Literal { value: Int64(1) }' can't be evaluated because the expression's type is Int64, not boolean or NULL. 
This was likely caused by a bug in DataFusion's code and we would welcome that you file an bug report in our issue tracker" }) SELECT TRUE; diff --git a/sql/src/planner.rs b/sql/src/planner.rs index 72fa98e0c1..f33d229715 100644 --- a/sql/src/planner.rs +++ b/sql/src/planner.rs @@ -27,7 +27,7 @@ use datafusion::{ error::DataFusionError, optimizer::simplify_expressions::{ExprSimplifier, SimplifyContext}, physical_expr::{create_physical_expr, execution_props::ExecutionProps}, - sql::planner::{PlannerContext, SqlToRel}, + sql::planner::{ParserOptions, PlannerContext, SqlToRel}, }; use log::{debug, trace}; use snafu::{ensure, Backtrace, OptionExt, ResultExt, Snafu}; @@ -334,7 +334,13 @@ impl<'a, P: MetaProvider> PlannerDelegate<'a, P> { } fn sql_statement_to_datafusion_plan(self, sql_stmt: SqlStatement) -> Result { - let df_planner = SqlToRel::new(&self.meta_provider); + let df_planner = SqlToRel::new_with_options( + &self.meta_provider, + ParserOptions { + parse_float_as_decimal: false, + enable_ident_normalization: false, + }, + ); let df_plan = df_planner .sql_statement_to_plan(sql_stmt) @@ -642,7 +648,13 @@ impl<'a, P: MetaProvider> PlannerDelegate<'a, P> { .collect::>(); let df_schema = DFSchema::new_with_metadata(df_fields, HashMap::new()) .context(CreateDatafusionSchema)?; - let df_planner = SqlToRel::new(&self.meta_provider); + let df_planner = SqlToRel::new_with_options( + &self.meta_provider, + ParserOptions { + parse_float_as_decimal: false, + enable_ident_normalization: false, + }, + ); // Index in insert values stmt of each column in table schema let mut column_index_in_insert = Vec::with_capacity(schema.num_columns()); @@ -992,7 +1004,13 @@ fn ensure_column_default_value_valid<'a, P: MetaProvider>( columns: &[ColumnSchema], meta_provider: &ContextProviderAdapter<'_, P>, ) -> Result<()> { - let df_planner = SqlToRel::new(meta_provider); + let df_planner = SqlToRel::new_with_options( + meta_provider, + ParserOptions { + parse_float_as_decimal: false, + enable_ident_normalization: false, + }, + ); let mut df_schema = DFSchema::empty(); let mut arrow_schema = ArrowSchema::empty(); From c18579703a34ffeed07880793babf9af35254e4d Mon Sep 17 00:00:00 2001 From: jiacai2050 Date: Tue, 14 Feb 2023 21:12:18 +0800 Subject: [PATCH 09/11] fix table reference --- server/src/limiter.rs | 3 +-- sql/src/planner.rs | 35 +++++++++++++++++++++++++++-------- 2 files changed, 28 insertions(+), 10 deletions(-) diff --git a/server/src/limiter.rs b/server/src/limiter.rs index 98d25960ca..b47723f255 100644 --- a/server/src/limiter.rs +++ b/server/src/limiter.rs @@ -2,7 +2,6 @@ use std::{collections::HashSet, sync::RwLock}; -use datafusion::catalog::TableReference; use datafusion_expr::logical_plan::LogicalPlan; use serde::Serialize; use serde_derive::Deserialize; @@ -106,7 +105,7 @@ impl Limiter { .try_for_each(|blocked_table| { if query .tables - .get(TableReference::from(blocked_table.as_str())) + .get(sql::planner::get_table_ref(blocked_table)) .is_some() { BlockedTable { diff --git a/sql/src/planner.rs b/sql/src/planner.rs index f33d229715..3c879cd9c7 100644 --- a/sql/src/planner.rs +++ b/sql/src/planner.rs @@ -3,6 +3,7 @@ //! 
Planner converts a SQL AST into logical plans use std::{ + borrow::Cow, collections::{BTreeMap, HashMap}, convert::TryFrom, mem, @@ -14,6 +15,7 @@ use arrow::{ datatypes::{DataType as ArrowDataType, Field as ArrowField, Schema as ArrowSchema}, error::ArrowError, }; +use catalog::consts::{DEFAULT_CATALOG, DEFAULT_SCHEMA}; use common_types::{ column_schema::{self, ColumnSchema}, datum::{Datum, DatumKind}, @@ -27,7 +29,10 @@ use datafusion::{ error::DataFusionError, optimizer::simplify_expressions::{ExprSimplifier, SimplifyContext}, physical_expr::{create_physical_expr, execution_props::ExecutionProps}, - sql::planner::{ParserOptions, PlannerContext, SqlToRel}, + sql::{ + planner::{ParserOptions, PlannerContext, SqlToRel}, + ResolvedTableReference, + }, }; use log::{debug, trace}; use snafu::{ensure, Backtrace, OptionExt, ResultExt, Snafu}; @@ -550,10 +555,9 @@ impl<'a, P: MetaProvider> PlannerDelegate<'a, P> { // ensure default value options are valid ensure_column_default_value_valid(table_schema.columns(), &self.meta_provider)?; - // TODO(yingwen): Maybe support create table on other schema? + // TODO: support create table on other catalog/schema let table_name = stmt.table_name.to_string(); - let table_ref = TableReference::from(table_name.as_str()); - // Now we only takes the table name and ignore the schema and catalog name + let table_ref = get_table_ref(&table_name); let table = table_ref.table().to_string(); let plan = CreateTablePlan { @@ -572,6 +576,8 @@ impl<'a, P: MetaProvider> PlannerDelegate<'a, P> { } fn drop_table_to_plan(&self, stmt: DropTable) -> Result { + debug!("Drop table to plan, stmt:{:?}", stmt); + let (table_name, partition_info) = if let Some(table) = self.find_table(&stmt.table_name.to_string())? { let table_name = table.name().to_string(); @@ -587,12 +593,15 @@ impl<'a, P: MetaProvider> PlannerDelegate<'a, P> { .fail(); }; - Ok(Plan::Drop(DropTablePlan { + let plan = DropTablePlan { engine: stmt.engine, if_exists: stmt.if_exists, table: table_name, partition_info, - })) + }; + debug!("Drop table to plan, plan:{:?}", plan); + + Ok(Plan::Drop(plan)) } fn describe_table_to_plan(&self, stmt: DescribeTable) -> Result { @@ -773,8 +782,7 @@ impl<'a, P: MetaProvider> PlannerDelegate<'a, P> { } fn find_table(&self, table_name: &str) -> Result> { - let table_ref = TableReference::from(table_name); - + let table_ref = get_table_ref(table_name); self.meta_provider .table(table_ref) .context(MetaProviderFindTable) @@ -1071,6 +1079,17 @@ fn ensure_column_default_value_valid<'a, P: MetaProvider>( Ok(()) } +// Workaroud for TableReference::from(&str) +// it will always convert table to lowercase when not quoted +// TODO: support catalog/schema +pub fn get_table_ref(table_name: &str) -> TableReference { + TableReference::from(ResolvedTableReference { + catalog: Cow::from(DEFAULT_CATALOG), + schema: Cow::from(DEFAULT_SCHEMA), + table: Cow::from(table_name), + }) +} + #[cfg(test)] mod tests { From 709870f2994824f20e1eba8b3f604f4761752ecd Mon Sep 17 00:00:00 2001 From: jiacai2050 Date: Tue, 14 Feb 2023 21:19:44 +0800 Subject: [PATCH 10/11] fix rust client --- Cargo.lock | 2 +- integration_tests/Cargo.toml | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 243a798b5a..3f30ee184c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1084,7 +1084,7 @@ dependencies = [ [[package]] name = "ceresdb-client-rs" version = "0.1.0" -source = 
"git+https://github.com/jiacai2050/ceresdb-client-rs.git?rev=80411cd1597105c19a1a41582b929c58b8bae11f#80411cd1597105c19a1a41582b929c58b8bae11f" +source = "git+https://github.com/CeresDB/ceresdb-client-rs.git?rev=69948b9963597ccdb7c73756473393972dfdebd3#69948b9963597ccdb7c73756473393972dfdebd3" dependencies = [ "arrow 23.0.0", "async-trait", diff --git a/integration_tests/Cargo.toml b/integration_tests/Cargo.toml index 25d26d0ee4..7bd7dd9769 100644 --- a/integration_tests/Cargo.toml +++ b/integration_tests/Cargo.toml @@ -8,7 +8,6 @@ workspace = true [dependencies] anyhow = "1.0.58" async-trait = "0.1" -# ceresdb-client-rs = { git = "https://github.com/CeresDB/ceresdb-client-rs.git", rev = "5fbd1a1526c3ddd25bb1f38f63f869c892052f7c" } -ceresdb-client-rs = { git = "https://github.com/jiacai2050/ceresdb-client-rs.git", rev = "80411cd1597105c19a1a41582b929c58b8bae11f" } +ceresdb-client-rs = { git = "https://github.com/CeresDB/ceresdb-client-rs.git", rev = "69948b9963597ccdb7c73756473393972dfdebd3" } sqlness = "0.3" tokio = { workspace = true } From ee489d9eb0403d6a285c5f4bc4b21e18de409e8a Mon Sep 17 00:00:00 2001 From: jiacai2050 Date: Wed, 15 Feb 2023 07:25:31 +0800 Subject: [PATCH 11/11] fix fmt --- analytic_engine/src/compaction/mod.rs | 5 +- .../src/sst/parquet/async_reader.rs | 4 +- analytic_engine/src/sst/parquet/hybrid.rs | 4 +- benchmarks/src/config.rs | 4 +- catalog_impls/src/table_based.rs | 4 +- catalog_impls/src/volatile.rs | 4 +- common_util/src/avro.rs | 8 +- components/table_kv/src/config.rs | 2 - .../local/03_dml/case_insensitive.result | 99 ------------------- .../cases/local/03_dml/case_insensitive.sql | 51 ---------- .../cases/local/03_dml/case_sensitive.result | 99 +++++++++++++++++++ .../cases/local/03_dml/case_sensitive.sql | 51 ++++++++++ integration_tests/src/database.rs | 4 +- .../src/table_manipulator/meta_based.rs | 4 +- router/src/cluster_based.rs | 4 +- server/src/grpc/meta_event_service/mod.rs | 20 +--- server/src/grpc/storage_service/prom_query.rs | 4 +- server/src/grpc/storage_service/write.rs | 8 +- sql/src/parser.rs | 4 +- sql/src/planner.rs | 29 ++---- table_engine/src/partition/mod.rs | 4 +- .../src/partition/rule/df_adapter/mod.rs | 4 +- table_engine/src/partition/rule/factory.rs | 4 +- table_engine/src/provider.rs | 5 +- 24 files changed, 186 insertions(+), 243 deletions(-) delete mode 100644 integration_tests/cases/local/03_dml/case_insensitive.result delete mode 100644 integration_tests/cases/local/03_dml/case_insensitive.sql create mode 100644 integration_tests/cases/local/03_dml/case_sensitive.result create mode 100644 integration_tests/cases/local/03_dml/case_sensitive.sql diff --git a/analytic_engine/src/compaction/mod.rs b/analytic_engine/src/compaction/mod.rs index 9b8506ac38..8b5979c269 100644 --- a/analytic_engine/src/compaction/mod.rs +++ b/analytic_engine/src/compaction/mod.rs @@ -57,8 +57,7 @@ pub enum Error { InvalidOption { error: String, backtrace: Backtrace }, } -#[derive(Debug, Clone, Copy, Deserialize, PartialEq)] -#[derive(Default)] +#[derive(Debug, Clone, Copy, Deserialize, PartialEq, Default)] pub enum CompactionStrategy { #[default] Default, @@ -115,8 +114,6 @@ impl Default for TimeWindowCompactionOptions { } } - - const BUCKET_LOW_KEY: &str = "compaction_bucket_low"; const BUCKET_HIGH_KEY: &str = "compaction_bucket_high"; const MIN_THRESHOLD_KEY: &str = "compaction_min_threshold"; diff --git a/analytic_engine/src/sst/parquet/async_reader.rs b/analytic_engine/src/sst/parquet/async_reader.rs index 9753028723..3e22842161 100644 --- 
a/analytic_engine/src/sst/parquet/async_reader.rs +++ b/analytic_engine/src/sst/parquet/async_reader.rs @@ -751,9 +751,7 @@ mod tests { } fn gen_test_data(amount: usize) -> Vec { - (0..amount) - .map(|_| rand::random::()) - .collect() + (0..amount).map(|_| rand::random::()).collect() } // We mock a thread model same as the one in `ThreadedReader` to check its diff --git a/analytic_engine/src/sst/parquet/hybrid.rs b/analytic_engine/src/sst/parquet/hybrid.rs index bc01017fbb..6331a6239e 100644 --- a/analytic_engine/src/sst/parquet/hybrid.rs +++ b/analytic_engine/src/sst/parquet/hybrid.rs @@ -395,9 +395,7 @@ impl ListArrayBuilder { let start = array.value_offsets()[slice_arg.offset]; let end = array.value_offsets()[slice_arg.offset + slice_arg.length]; - for i in - slice_arg.offset..(slice_arg.offset + slice_arg.length) - { + for i in slice_arg.offset..(slice_arg.offset + slice_arg.length) { inner_length_so_far += array.value_length(i); inner_offsets.push(inner_length_so_far); } diff --git a/benchmarks/src/config.rs b/benchmarks/src/config.rs index 0b92b61e3e..5b38ee7d82 100644 --- a/benchmarks/src/config.rs +++ b/benchmarks/src/config.rs @@ -31,9 +31,7 @@ pub struct BenchConfig { pub fn bench_config_from_env() -> BenchConfig { let path = match env::var(BENCH_CONFIG_PATH_KEY) { Ok(v) => v, - Err(e) => panic!( - "Env {BENCH_CONFIG_PATH_KEY} is required to run benches, err:{e}." - ), + Err(e) => panic!("Env {BENCH_CONFIG_PATH_KEY} is required to run benches, err:{e}."), }; let mut toml_buf = String::new(); diff --git a/catalog_impls/src/table_based.rs b/catalog_impls/src/table_based.rs index 6c5f6e582c..71f9874e6b 100644 --- a/catalog_impls/src/table_based.rs +++ b/catalog_impls/src/table_based.rs @@ -894,7 +894,9 @@ impl Schema for SchemaImpl { .tables .read() .unwrap() - .tables_by_name.values().cloned() + .tables_by_name + .values() + .cloned() .collect()) } } diff --git a/catalog_impls/src/volatile.rs b/catalog_impls/src/volatile.rs index 06b9e1a49f..ff8fb7dfb5 100644 --- a/catalog_impls/src/volatile.rs +++ b/catalog_impls/src/volatile.rs @@ -67,7 +67,9 @@ impl Manager for ManagerImpl { fn all_catalogs(&self) -> manager::Result> { Ok(self - .catalogs.values().map(|v| v.clone() as CatalogRef) + .catalogs + .values() + .map(|v| v.clone() as CatalogRef) .collect()) } } diff --git a/common_util/src/avro.rs b/common_util/src/avro.rs index 2ed4f72ac7..de7cfe325a 100644 --- a/common_util/src/avro.rs +++ b/common_util/src/avro.rs @@ -236,9 +236,7 @@ pub fn row_group_to_avro_rows(row_group: RowGroup) -> Result>> { // Convert `Row` to `Record` in avro. 
let row = row_group.get_row(row_idx).unwrap(); let mut avro_record = Record::new(&avro_schema).context(RowGroupToAvroRowsNoCause { - msg: format!( - "new avro record with schema failed, schema:{avro_schema:?}" - ), + msg: format!("new avro record with schema failed, schema:{avro_schema:?}"), })?; for (col_idx, column_schema) in column_schemas.iter().enumerate() { @@ -250,9 +248,7 @@ pub fn row_group_to_avro_rows(row_group: RowGroup) -> Result>> { let row_bytes = avro_rs::to_avro_datum(&avro_schema, avro_record) .box_err() .context(RowGroupToAvroRowsWithCause { - msg: format!( - "new avro record with schema failed, schema:{avro_schema:?}" - ), + msg: format!("new avro record with schema failed, schema:{avro_schema:?}"), })?; rows.push(row_bytes); } diff --git a/components/table_kv/src/config.rs b/components/table_kv/src/config.rs index 6b0a0e4c1d..bc4323f955 100644 --- a/components/table_kv/src/config.rs +++ b/components/table_kv/src/config.rs @@ -79,8 +79,6 @@ pub enum ObLogLevel { Debug = 5, } - - impl From for ObLogLevel { fn from(level: u16) -> Self { match level { diff --git a/integration_tests/cases/local/03_dml/case_insensitive.result b/integration_tests/cases/local/03_dml/case_insensitive.result deleted file mode 100644 index 9104e25a37..0000000000 --- a/integration_tests/cases/local/03_dml/case_insensitive.result +++ /dev/null @@ -1,99 +0,0 @@ -DROP TABLE IF EXISTS case_insensitive_table1; - -affected_rows: 0 - -CREATE TABLE case_insensitive_table1 ( - ts timestamp NOT NULL, - value1 double, - timestamp KEY (ts)) ENGINE=Analytic -WITH( - enable_ttl='false' -); - -affected_rows: 0 - -INSERT INTO case_insensitive_table1 (ts, value1) - VALUES (1, 10), (2, 20), (3, 30); - -affected_rows: 3 - -SELECT - * -FROM - case_insensitive_table1; - -tsid,ts,value1, -UInt64(0),Timestamp(1),Double(10.0), -UInt64(0),Timestamp(2),Double(20.0), -UInt64(0),Timestamp(3),Double(30.0), - - -SELECT - * -FROM - CASE_INSENSITIVE_TABLE1; - -Failed to execute query, err: Server(ServerError { code: 500, msg: "Failed to create plan, query: SELECT \n * \n FROM \n CASE_INSENSITIVE_TABLE1;. Caused by: Failed to create plan, err:Failed to generate datafusion plan, err:Execution error: Table is not found, \"table:CASE_INSENSITIVE_TABLE1\"" }) - -SELECT - * -FROM - `case_insensitive_table1`; - -tsid,ts,value1, -UInt64(0),Timestamp(1),Double(10.0), -UInt64(0),Timestamp(2),Double(20.0), -UInt64(0),Timestamp(3),Double(30.0), - - -SELECT - * -FROM - `CASE_INSENSITIVE_TABLE1`; - -Failed to execute query, err: Server(ServerError { code: 500, msg: "Failed to create plan, query: SELECT \n * \n FROM \n `CASE_INSENSITIVE_TABLE1`;. Caused by: Failed to create plan, err:Failed to generate datafusion plan, err:Execution error: Table is not found, \"table:CASE_INSENSITIVE_TABLE1\"" }) - -SHOW CREATE TABLE case_insensitive_table1; - -Table,Create Table, -String("case_insensitive_table1"),String("CREATE TABLE `case_insensitive_table1` (`tsid` uint64 NOT NULL, `ts` timestamp NOT NULL, `value1` double, PRIMARY KEY(tsid,ts), TIMESTAMP KEY(ts)) ENGINE=Analytic WITH(arena_block_size='2097152', compaction_strategy='default', compression='ZSTD', enable_ttl='false', num_rows_per_row_group='8192', segment_duration='', storage_format='AUTO', ttl='7d', update_mode='OVERWRITE', write_buffer_size='33554432')"), - - -SHOW CREATE TABLE CASE_INSENSITIVE_TABLE1; - -Failed to execute query, err: Server(ServerError { code: 500, msg: "Failed to create plan, query: SHOW CREATE TABLE CASE_INSENSITIVE_TABLE1;. 
Caused by: Failed to create plan, err:Table not found, table:CASE_INSENSITIVE_TABLE1" }) - -SHOW CREATE TABLE `case_insensitive_table1`; - -Table,Create Table, -String("case_insensitive_table1"),String("CREATE TABLE `case_insensitive_table1` (`tsid` uint64 NOT NULL, `ts` timestamp NOT NULL, `value1` double, PRIMARY KEY(tsid,ts), TIMESTAMP KEY(ts)) ENGINE=Analytic WITH(arena_block_size='2097152', compaction_strategy='default', compression='ZSTD', enable_ttl='false', num_rows_per_row_group='8192', segment_duration='', storage_format='AUTO', ttl='7d', update_mode='OVERWRITE', write_buffer_size='33554432')"), - - -SHOW CREATE TABLE `CASE_INSENSITIVE_TABLE1`; - -Failed to execute query, err: Server(ServerError { code: 500, msg: "Failed to create plan, query: SHOW CREATE TABLE `CASE_INSENSITIVE_TABLE1`;. Caused by: Failed to create plan, err:Table not found, table:CASE_INSENSITIVE_TABLE1" }) - -DESC case_insensitive_table1; - -name,type,is_primary,is_nullable,is_tag, -String("tsid"),String("uint64"),Boolean(true),Boolean(false),Boolean(false), -String("ts"),String("timestamp"),Boolean(true),Boolean(false),Boolean(false), -String("value1"),String("double"),Boolean(false),Boolean(true),Boolean(false), - - -DESC CASE_INSENSITIVE_TABLE1; - -Failed to execute query, err: Server(ServerError { code: 500, msg: "Failed to create plan, query: DESC CASE_INSENSITIVE_TABLE1;. Caused by: Failed to create plan, err:Table not found, table:CASE_INSENSITIVE_TABLE1" }) - -DESC `case_insensitive_table1`; - -name,type,is_primary,is_nullable,is_tag, -String("tsid"),String("uint64"),Boolean(true),Boolean(false),Boolean(false), -String("ts"),String("timestamp"),Boolean(true),Boolean(false),Boolean(false), -String("value1"),String("double"),Boolean(false),Boolean(true),Boolean(false), - - -DESC `CASE_INSENSITIVE_TABLE1`; - -Failed to execute query, err: Server(ServerError { code: 500, msg: "Failed to create plan, query: DESC `CASE_INSENSITIVE_TABLE1`;. 
Caused by: Failed to create plan, err:Table not found, table:CASE_INSENSITIVE_TABLE1" })
-
diff --git a/integration_tests/cases/local/03_dml/case_insensitive.sql b/integration_tests/cases/local/03_dml/case_insensitive.sql
deleted file mode 100644
index 823f0244af..0000000000
--- a/integration_tests/cases/local/03_dml/case_insensitive.sql
+++ /dev/null
@@ -1,51 +0,0 @@
-
-DROP TABLE IF EXISTS case_insensitive_table1;
-
-
-CREATE TABLE case_insensitive_table1 (
-    ts timestamp NOT NULL,
-    value1 double,
-    timestamp KEY (ts)) ENGINE=Analytic
-WITH(
-    enable_ttl='false'
-);
-
-INSERT INTO case_insensitive_table1 (ts, value1)
-    VALUES (1, 10), (2, 20), (3, 30);
-
-
-SELECT
-    *
-FROM
-    case_insensitive_table1;
-
-SELECT
-    *
-FROM
-    CASE_INSENSITIVE_TABLE1;
-
-SELECT
-    *
-FROM
-    `case_insensitive_table1`;
-
-SELECT
-    *
-FROM
-    `CASE_INSENSITIVE_TABLE1`;
-
-SHOW CREATE TABLE case_insensitive_table1;
-
-SHOW CREATE TABLE CASE_INSENSITIVE_TABLE1;
-
-SHOW CREATE TABLE `case_insensitive_table1`;
-
-SHOW CREATE TABLE `CASE_INSENSITIVE_TABLE1`;
-
-DESC case_insensitive_table1;
-
-DESC CASE_INSENSITIVE_TABLE1;
-
-DESC `case_insensitive_table1`;
-
-DESC `CASE_INSENSITIVE_TABLE1`;
diff --git a/integration_tests/cases/local/03_dml/case_sensitive.result b/integration_tests/cases/local/03_dml/case_sensitive.result
new file mode 100644
index 0000000000..ef39e3ef14
--- /dev/null
+++ b/integration_tests/cases/local/03_dml/case_sensitive.result
@@ -0,0 +1,99 @@
+DROP TABLE IF EXISTS case_SENSITIVE_table1;
+
+affected_rows: 0
+
+CREATE TABLE case_SENSITIVE_table1 (
+    ts timestamp NOT NULL,
+    VALUE1 double,
+    timestamp KEY (ts)) ENGINE=Analytic
+WITH(
+    enable_ttl='false'
+);
+
+affected_rows: 0
+
+INSERT INTO case_SENSITIVE_table1 (ts, VALUE1)
+    VALUES (1, 10), (2, 20), (3, 30);
+
+affected_rows: 3
+
+SELECT
+    *
+FROM
+    case_SENSITIVE_table1;
+
+tsid,ts,VALUE1,
+UInt64(0),Timestamp(1),Double(10.0),
+UInt64(0),Timestamp(2),Double(20.0),
+UInt64(0),Timestamp(3),Double(30.0),
+
+
+SELECT
+    *
+FROM
+    CASE_SENSITIVE_TABLE1;
+
+Failed to execute query, err: Server(ServerError { code: 500, msg: "Failed to create plan, query: SELECT \n * \n FROM \n CASE_SENSITIVE_TABLE1;. Caused by: Failed to create plan, err:Failed to generate datafusion plan, err:Execution error: Table is not found, \"table:CASE_SENSITIVE_TABLE1\"" })
+
+SELECT
+    *
+FROM
+    `case_SENSITIVE_table1`;
+
+tsid,ts,VALUE1,
+UInt64(0),Timestamp(1),Double(10.0),
+UInt64(0),Timestamp(2),Double(20.0),
+UInt64(0),Timestamp(3),Double(30.0),
+
+
+SELECT
+    *
+FROM
+    `CASE_SENSITIVE_TABLE1`;
+
+Failed to execute query, err: Server(ServerError { code: 500, msg: "Failed to create plan, query: SELECT \n * \n FROM \n `CASE_SENSITIVE_TABLE1`;. Caused by: Failed to create plan, err:Failed to generate datafusion plan, err:Execution error: Table is not found, \"table:CASE_SENSITIVE_TABLE1\"" })
+
+SHOW CREATE TABLE case_SENSITIVE_table1;
+
+Table,Create Table,
+String("case_SENSITIVE_table1"),String("CREATE TABLE `case_SENSITIVE_table1` (`tsid` uint64 NOT NULL, `ts` timestamp NOT NULL, `VALUE1` double, PRIMARY KEY(tsid,ts), TIMESTAMP KEY(ts)) ENGINE=Analytic WITH(arena_block_size='2097152', compaction_strategy='default', compression='ZSTD', enable_ttl='false', num_rows_per_row_group='8192', segment_duration='', storage_format='AUTO', ttl='7d', update_mode='OVERWRITE', write_buffer_size='33554432')"),
+
+
+SHOW CREATE TABLE CASE_SENSITIVE_TABLE1;
+
+Failed to execute query, err: Server(ServerError { code: 500, msg: "Failed to create plan, query: SHOW CREATE TABLE CASE_SENSITIVE_TABLE1;. Caused by: Failed to create plan, err:Table not found, table:CASE_SENSITIVE_TABLE1" })
+
+SHOW CREATE TABLE `case_SENSITIVE_table1`;
+
+Table,Create Table,
+String("case_SENSITIVE_table1"),String("CREATE TABLE `case_SENSITIVE_table1` (`tsid` uint64 NOT NULL, `ts` timestamp NOT NULL, `VALUE1` double, PRIMARY KEY(tsid,ts), TIMESTAMP KEY(ts)) ENGINE=Analytic WITH(arena_block_size='2097152', compaction_strategy='default', compression='ZSTD', enable_ttl='false', num_rows_per_row_group='8192', segment_duration='', storage_format='AUTO', ttl='7d', update_mode='OVERWRITE', write_buffer_size='33554432')"),
+
+
+SHOW CREATE TABLE `CASE_SENSITIVE_TABLE1`;
+
+Failed to execute query, err: Server(ServerError { code: 500, msg: "Failed to create plan, query: SHOW CREATE TABLE `CASE_SENSITIVE_TABLE1`;. Caused by: Failed to create plan, err:Table not found, table:CASE_SENSITIVE_TABLE1" })
+
+DESC case_SENSITIVE_table1;
+
+name,type,is_primary,is_nullable,is_tag,
+String("tsid"),String("uint64"),Boolean(true),Boolean(false),Boolean(false),
+String("ts"),String("timestamp"),Boolean(true),Boolean(false),Boolean(false),
+String("VALUE1"),String("double"),Boolean(false),Boolean(true),Boolean(false),
+
+
+DESC CASE_SENSITIVE_TABLE1;
+
+Failed to execute query, err: Server(ServerError { code: 500, msg: "Failed to create plan, query: DESC CASE_SENSITIVE_TABLE1;. Caused by: Failed to create plan, err:Table not found, table:CASE_SENSITIVE_TABLE1" })
+
+DESC `case_SENSITIVE_table1`;
+
+name,type,is_primary,is_nullable,is_tag,
+String("tsid"),String("uint64"),Boolean(true),Boolean(false),Boolean(false),
+String("ts"),String("timestamp"),Boolean(true),Boolean(false),Boolean(false),
+String("VALUE1"),String("double"),Boolean(false),Boolean(true),Boolean(false),
+
+
+DESC `CASE_SENSITIVE_TABLE1`;
+
+Failed to execute query, err: Server(ServerError { code: 500, msg: "Failed to create plan, query: DESC `CASE_SENSITIVE_TABLE1`;. Caused by: Failed to create plan, err:Table not found, table:CASE_SENSITIVE_TABLE1" })
+
diff --git a/integration_tests/cases/local/03_dml/case_sensitive.sql b/integration_tests/cases/local/03_dml/case_sensitive.sql
new file mode 100644
index 0000000000..ecde3ab659
--- /dev/null
+++ b/integration_tests/cases/local/03_dml/case_sensitive.sql
@@ -0,0 +1,51 @@
+
+DROP TABLE IF EXISTS case_SENSITIVE_table1;
+
+
+CREATE TABLE case_SENSITIVE_table1 (
+    ts timestamp NOT NULL,
+    VALUE1 double,
+    timestamp KEY (ts)) ENGINE=Analytic
+WITH(
+    enable_ttl='false'
+);
+
+INSERT INTO case_SENSITIVE_table1 (ts, VALUE1)
+    VALUES (1, 10), (2, 20), (3, 30);
+
+
+SELECT
+    *
+FROM
+    case_SENSITIVE_table1;
+
+SELECT
+    *
+FROM
+    CASE_SENSITIVE_TABLE1;
+
+SELECT
+    *
+FROM
+    `case_SENSITIVE_table1`;
+
+SELECT
+    *
+FROM
+    `CASE_SENSITIVE_TABLE1`;
+
+SHOW CREATE TABLE case_SENSITIVE_table1;
+
+SHOW CREATE TABLE CASE_SENSITIVE_TABLE1;
+
+SHOW CREATE TABLE `case_SENSITIVE_table1`;
+
+SHOW CREATE TABLE `CASE_SENSITIVE_TABLE1`;
+
+DESC case_SENSITIVE_table1;
+
+DESC CASE_SENSITIVE_TABLE1;
+
+DESC `case_SENSITIVE_table1`;
+
+DESC `CASE_SENSITIVE_TABLE1`;
diff --git a/integration_tests/src/database.rs b/integration_tests/src/database.rs
index 4ae636971d..8b0103b271 100644
--- a/integration_tests/src/database.rs
+++ b/integration_tests/src/database.rs
@@ -55,9 +55,7 @@ impl CeresDB {
         // Wait for a while
         std::thread::sleep(std::time::Duration::from_secs(5));
         let endpoint = env::var(SERVER_ENDPOINT_ENV).unwrap_or_else(|_| {
-            panic!(
-                "Cannot read server endpoint from env {SERVER_ENDPOINT_ENV:?}"
-            )
+            panic!("Cannot read server endpoint from env {SERVER_ENDPOINT_ENV:?}")
         });
 
         let db_client = Builder::new(endpoint, Mode::Proxy).build();
diff --git a/interpreters/src/table_manipulator/meta_based.rs b/interpreters/src/table_manipulator/meta_based.rs
index 63248e2555..c18e8fbb3b 100644
--- a/interpreters/src/table_manipulator/meta_based.rs
+++ b/interpreters/src/table_manipulator/meta_based.rs
@@ -43,9 +43,7 @@ impl TableManipulator for TableManipulatorImpl {
             .encode(&plan.table_schema)
             .box_err()
             .with_context(|| CreateWithCause {
-                msg: format!(
-                    "fail to encode table schema, ctx:{ctx:?}, plan:{plan:?}"
-                ),
+                msg: format!("fail to encode table schema, ctx:{ctx:?}, plan:{plan:?}"),
             })?;
 
         let partition_table_info = create_partition_table_info(&plan.table, &plan.partition_info);
diff --git a/router/src/cluster_based.rs b/router/src/cluster_based.rs
index 757f96f84b..d3be086dea 100644
--- a/router/src/cluster_based.rs
+++ b/router/src/cluster_based.rs
@@ -46,9 +46,7 @@ impl Router for ClusterBasedRouter {
             .await
             .box_err()
             .with_context(|| OtherWithCause {
-                msg: format!(
-                    "Failed to route tables by cluster, req:{route_tables_req:?}"
-                ),
+                msg: format!("Failed to route tables by cluster, req:{route_tables_req:?}"),
             })?;
 
         let mut routes = Vec::with_capacity(route_resp.entries.len());
diff --git a/server/src/grpc/meta_event_service/mod.rs b/server/src/grpc/meta_event_service/mod.rs
index 73c9910fc2..14372d6b5a 100644
--- a/server/src/grpc/meta_event_service/mod.rs
+++ b/server/src/grpc/meta_event_service/mod.rs
@@ -185,9 +185,7 @@ async fn handle_open_shard(ctx: HandlerContext, request: OpenShardRequest) -> Re
         .box_err()
         .with_context(|| ErrWithCause {
             code: StatusCode::Internal,
-            msg: format!(
-                "fail to get topology while opening shard, request:{request:?}"
-            ),
+            msg: format!("fail to get topology while opening shard, request:{request:?}"),
         })?;
 
     let shard_info = tables_of_shard.shard_info;
@@ -276,9 +274,7 @@ async fn handle_create_table_on_shard(
         .box_err()
         .with_context(|| ErrWithCause {
             code: StatusCode::Internal,
-            msg: format!(
-                "fail to create table on shard in cluster, req:{request:?}"
-            ),
+            msg: format!("fail to create table on shard in cluster, req:{request:?}"),
         })?;
 
     let topology = ctx
@@ -288,9 +284,7 @@ async fn handle_create_table_on_shard(
         .box_err()
         .with_context(|| ErrWithCause {
             code: StatusCode::Internal,
-            msg: format!(
-                "fail to get topology while creating table, request:{request:?}"
-            ),
+            msg: format!("fail to get topology while creating table, request:{request:?}"),
         })?;
 
     let shard_info = request
@@ -361,9 +355,7 @@ async fn handle_create_table_on_shard(
         .box_err()
         .with_context(|| ErrWithCause {
             code: StatusCode::Internal,
-            msg: format!(
-                "fail to create table with request:{create_table_request:?}"
-            ),
+            msg: format!("fail to create table with request:{create_table_request:?}"),
         })?;
 
     Ok(())
@@ -436,9 +428,7 @@ async fn handle_open_table_on_shard(
         .box_err()
         .with_context(|| ErrWithCause {
             code: StatusCode::Internal,
-            msg: format!(
-                "fail to get topology while opening table, request:{request:?}"
-            ),
+            msg: format!("fail to get topology while opening table, request:{request:?}"),
         })?;
 
     let shard_info = request
diff --git a/server/src/grpc/storage_service/prom_query.rs b/server/src/grpc/storage_service/prom_query.rs
index 8912ab4f43..843e7a96d1 100644
--- a/server/src/grpc/storage_service/prom_query.rs
+++ b/server/src/grpc/storage_service/prom_query.rs
@@ -261,9 +261,7 @@ impl RecordConverter {
             field_type.is_f64_castable(),
             ErrNoCause {
                 code: StatusCode::BAD_REQUEST,
-                msg: format!(
-                    "Field type must be f64-compatibile type, current:{field_type}"
-                )
+                msg: format!("Field type must be f64-compatibile type, current:{field_type}")
             }
         );
 
diff --git a/server/src/grpc/storage_service/write.rs b/server/src/grpc/storage_service/write.rs
index c57d51b01a..ae2bae6e23 100644
--- a/server/src/grpc/storage_service/write.rs
+++ b/server/src/grpc/storage_service/write.rs
@@ -394,9 +394,7 @@ fn write_entry_to_rows(
             let tag_name = &tag_names[name_index];
             let tag_index_in_schema = schema.index_of(tag_name).with_context(|| ErrNoCause {
                 code: StatusCode::BAD_REQUEST,
-                msg: format!(
-                    "Can't find tag({tag_name}) in schema, table:{table_name}"
-                ),
+                msg: format!("Can't find tag({tag_name}) in schema, table:{table_name}"),
             })?;
 
             let column_schema = schema.column(tag_index_in_schema);
@@ -404,9 +402,7 @@ fn write_entry_to_rows(
                 column_schema.is_tag,
                 ErrNoCause {
                     code: StatusCode::BAD_REQUEST,
-                    msg: format!(
-                        "column({tag_name}) is a field rather than a tag, table:{table_name}"
-                    ),
+                    msg: format!("column({tag_name}) is a field rather than a tag, table:{table_name}"),
                 }
             );
 
diff --git a/sql/src/parser.rs b/sql/src/parser.rs
index 98fd7ea7b3..6a50063200 100644
--- a/sql/src/parser.rs
+++ b/sql/src/parser.rs
@@ -825,9 +825,7 @@ mod tests {
     fn expect_parse_error(sql: &str, expected_error: &str) {
         match Parser::parse_sql(sql) {
             Ok(statements) => {
-                panic!(
-                    "Expected parse error for '{sql}', but was successful: {statements:?}"
-                );
+                panic!("Expected parse error for '{sql}', but was successful: {statements:?}");
             }
             Err(e) => {
                 let error_message = e.to_string();
diff --git a/sql/src/planner.rs b/sql/src/planner.rs
index 3c879cd9c7..68f403a5fd 100644
--- a/sql/src/planner.rs
+++ b/sql/src/planner.rs
@@ -252,6 +252,10 @@ pub enum Error {
 define_result!(Error);
 
 const DEFAULT_QUOTE_CHAR: char = '`';
+const DEFAULT_PARSER_OPTS: ParserOptions = ParserOptions {
+    parse_float_as_decimal: false,
+    enable_ident_normalization: false,
+};
 
 /// Planner produces logical plans from SQL AST
 // TODO(yingwen): Rewrite Planner instead of using datafusion's planner
@@ -339,13 +343,7 @@ impl<'a, P: MetaProvider> PlannerDelegate<'a, P> {
     }
 
     fn sql_statement_to_datafusion_plan(self, sql_stmt: SqlStatement) -> Result {
-        let df_planner = SqlToRel::new_with_options(
-            &self.meta_provider,
-            ParserOptions {
-                parse_float_as_decimal: false,
-                enable_ident_normalization: false,
-            },
-        );
+        let df_planner = SqlToRel::new_with_options(&self.meta_provider, DEFAULT_PARSER_OPTS);
 
         let df_plan = df_planner
             .sql_statement_to_plan(sql_stmt)
@@ -657,13 +655,8 @@ impl<'a, P: MetaProvider> PlannerDelegate<'a, P> {
             .collect::>();
         let df_schema = DFSchema::new_with_metadata(df_fields, HashMap::new())
            .context(CreateDatafusionSchema)?;
-        let df_planner = SqlToRel::new_with_options(
-            &self.meta_provider,
-            ParserOptions {
-                parse_float_as_decimal: false,
-                enable_ident_normalization: false,
-            },
-        );
+        let df_planner =
+            SqlToRel::new_with_options(&self.meta_provider, DEFAULT_PARSER_OPTS);
 
         // Index in insert values stmt of each column in table schema
         let mut column_index_in_insert = Vec::with_capacity(schema.num_columns());
@@ -1012,13 +1005,7 @@ fn ensure_column_default_value_valid<'a, P: MetaProvider>(
     columns: &[ColumnSchema],
     meta_provider: &ContextProviderAdapter<'_, P>,
 ) -> Result<()> {
-    let df_planner = SqlToRel::new_with_options(
-        meta_provider,
-        ParserOptions {
-            parse_float_as_decimal: false,
-            enable_ident_normalization: false,
-        },
-    );
+    let df_planner = SqlToRel::new_with_options(meta_provider, DEFAULT_PARSER_OPTS);
     let mut df_schema = DFSchema::empty();
     let mut arrow_schema = ArrowSchema::empty();
diff --git a/table_engine/src/partition/mod.rs b/table_engine/src/partition/mod.rs
index 6ae9d5fc96..68f3bc23a2 100644
--- a/table_engine/src/partition/mod.rs
+++ b/table_engine/src/partition/mod.rs
@@ -233,9 +233,7 @@ impl TryFrom for PartitionInfo {
 }
 
 pub fn format_sub_partition_table_name(table_name: &str, partition_name: &str) -> String {
-    format!(
-        "{PARTITION_TABLE_PREFIX}{table_name}_{partition_name}"
-    )
+    format!("{PARTITION_TABLE_PREFIX}{table_name}_{partition_name}")
 }
 
 pub fn is_sub_partition_table(table_name: &str) -> bool {
diff --git a/table_engine/src/partition/rule/df_adapter/mod.rs b/table_engine/src/partition/rule/df_adapter/mod.rs
index 65328aa113..f6a47c4d44 100644
--- a/table_engine/src/partition/rule/df_adapter/mod.rs
+++ b/table_engine/src/partition/rule/df_adapter/mod.rs
@@ -53,9 +53,7 @@ impl DfPartitionRuleAdapter {
         match partition_info {
             PartitionInfo::Key(_) => Ok(Box::new(KeyExtractor)),
             PartitionInfo::Hash(_) => BuildPartitionRule {
-                msg: format!(
-                    "unsupported partition strategy, strategy:{partition_info:?}"
-                ),
+                msg: format!("unsupported partition strategy, strategy:{partition_info:?}"),
             }
             .fail(),
         }
diff --git a/table_engine/src/partition/rule/factory.rs b/table_engine/src/partition/rule/factory.rs
index ec5ed9bf63..64692dcdf9 100644
--- a/table_engine/src/partition/rule/factory.rs
+++ b/table_engine/src/partition/rule/factory.rs
@@ -20,9 +20,7 @@ impl PartitionRuleFactory {
         match partition_info {
             PartitionInfo::Key(key_info) => Self::create_key_rule(key_info, schema),
             _ => BuildPartitionRule {
-                msg: format!(
-                    "unsupported partition strategy, strategy:{partition_info:?}"
-                ),
+                msg: format!("unsupported partition strategy, strategy:{partition_info:?}"),
            }
            .fail(),
        }
diff --git a/table_engine/src/provider.rs b/table_engine/src/provider.rs
index bfe7b06a10..d966017da5 100644
--- a/table_engine/src/provider.rs
+++ b/table_engine/src/provider.rs
@@ -68,10 +68,7 @@ impl ExtensionOptions for CeresdbOptions {
             "request_timeout" => {
                 self.request_timeout = Some(value.parse::().map_err(|e| {
                     DataFusionError::External(
-                        format!(
-                            "could not parse request_timeout, input:{value}, err:{e:?}"
-                        )
-                        .into(),
+                        format!("could not parse request_timeout, input:{value}, err:{e:?}").into(),
                     )
                 })?)
             }
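The provider.rs hunk above only reflows the error message: `request_timeout` is parsed from its string form and a parse failure is wrapped into `DataFusionError::External` together with the offending input. A minimal stand-alone sketch of that pattern follows; it assumes the timeout parses to an integer (`u64` here, since the concrete type is not visible in this hunk) and uses a plain `ConfigError` as a stand-in for `DataFusionError`.

// Stand-alone sketch of the request_timeout parsing pattern; ConfigError and
// u64 are assumptions, not the actual CeresDB/DataFusion types.
#[derive(Debug)]
struct ConfigError(String);

fn parse_request_timeout(value: &str) -> Result<u64, ConfigError> {
    value
        .parse::<u64>()
        .map_err(|e| ConfigError(format!("could not parse request_timeout, input:{value}, err:{e:?}")))
}

fn main() {
    assert_eq!(parse_request_timeout("300").unwrap(), 300);
    // A malformed value surfaces both the input and the parse error in one message.
    assert!(parse_request_timeout("3s").is_err());
}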
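Most of the Rust hunks in this patch are a mechanical reflow: because the messages use inline format arguments (the `{request:?}` / `{tag_name}` captures), each `format!`/`panic!` call now fits on a single line with identical output. A minimal sketch using one of the messages from the patch:

fn main() {
    let table_name = "case_SENSITIVE_table1";
    let tag_name = "host";

    // Multi-line layout, as the call sites looked before the reflow.
    let before = format!(
        "Can't find tag({tag_name}) in schema, table:{table_name}"
    );
    // Single-line layout, as in this patch; the captured identifiers keep the
    // call short enough to fit on one line.
    let after = format!("Can't find tag({tag_name}) in schema, table:{table_name}");
    assert_eq!(before, after);
}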
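The swap from the case_insensitive to the case_sensitive integration tests and the new `DEFAULT_PARSER_OPTS` in sql/src/planner.rs both hinge on `enable_ident_normalization: false`: unquoted identifiers are no longer folded to lowercase, so `CASE_SENSITIVE_TABLE1` and `case_SENSITIVE_table1` are different names, while backquoted identifiers are taken verbatim as before. The following self-contained Rust sketch only models that lookup rule; `resolve` and the inline catalogs are illustrative stand-ins, not datafusion or CeresDB APIs.

// Model of identifier resolution with and without normalization; this is an
// illustration of the behaviour change, not the datafusion implementation.
fn resolve<'a>(tables: &'a [&'a str], ident: &str, quoted: bool, normalize: bool) -> Option<&'a str> {
    // Quoted (`...`) identifiers are always taken verbatim; unquoted ones are
    // lowercased only when normalization is enabled.
    let lookup = if !quoted && normalize {
        ident.to_lowercase()
    } else {
        ident.to_string()
    };
    tables.iter().copied().find(|t| *t == lookup.as_str())
}

fn main() {
    // Old tests: an all-lowercase table, so folding unquoted idents made
    // CASE_INSENSITIVE_TABLE1 resolve.
    let old = ["case_insensitive_table1"];
    assert_eq!(
        resolve(&old, "CASE_INSENSITIVE_TABLE1", false, true),
        Some("case_insensitive_table1")
    );
    // With normalization disabled the same query misses, which is why that
    // test file was removed.
    assert_eq!(resolve(&old, "CASE_INSENSITIVE_TABLE1", false, false), None);

    // New tests: a mixed-case table only resolves under its exact spelling,
    // matching the errors recorded in case_sensitive.result.
    let new = ["case_SENSITIVE_table1"];
    assert_eq!(resolve(&new, "CASE_SENSITIVE_TABLE1", false, false), None);
    assert_eq!(
        resolve(&new, "case_SENSITIVE_table1", true, false),
        Some("case_SENSITIVE_table1")
    );
}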