diff --git a/docs/example.toml b/docs/example.toml
deleted file mode 100644
index f906a1c456..0000000000
--- a/docs/example.toml
+++ /dev/null
@@ -1,27 +0,0 @@
-bind_addr = "0.0.0.0"
-http_port = 5440
-grpc_port = 8831
-log_level = "info"
-enable_cluster = true
-
-[query]
-read_parallelism = 8
-
-[analytic]
-wal_path = "/tmp/ceresdb"
-sst_data_cache_cap = 10000
-sst_meta_cache_cap = 10000
-
-[analytic.storage]
-type = "Local"
-data_path = "/tmp/ceresdb"
-
-[[meta_client.cluster_view.schema_shards]]
-schema = 'public'
-
-[[meta_client.cluster_view.schema_shards.shard_views]]
-shard_id = 0
-
-[meta_client.cluster_view.schema_shards.shard_views.node]
-addr = "127.0.0.1"
-port = 8831
diff --git a/docs/guides/src/deploy/static_routing.md b/docs/guides/src/deploy/static_routing.md
index 4950961617..5d77cafc08 100644
--- a/docs/guides/src/deploy/static_routing.md
+++ b/docs/guides/src/deploy/static_routing.md
@@ -63,29 +63,29 @@ Then we should define the common part -- schema&shard declaration and routing ru
 Here is the config for schema&shard declaration:
 
 ```toml
-[[meta_client.cluster_view.schema_shards]]
+[[static_route.topology.schema_shards]]
 schema = 'public_0'
-[[meta_client.cluster_view.schema_shards.shard_views]]
+[[static_route.topology.schema_shards.shard_views]]
 shard_id = 0
-[meta_client.cluster_view.schema_shards.shard_views.node]
+[static_route.topology.schema_shards.shard_views.endpoint]
 addr = '127.0.0.1'
 port = 8831
-[[meta_client.cluster_view.schema_shards.shard_views]]
+[[static_route.topology.schema_shards.shard_views]]
 shard_id = 1
-[meta_client.cluster_view.schema_shards.shard_views.node]
+[static_route.topology.schema_shards.shard_views.endpoint]
 addr = '127.0.0.1'
 port = 8831
 
-[[meta_client.cluster_view.schema_shards]]
+[[static_route.topology.schema_shards]]
 schema = 'public_1'
-[[meta_client.cluster_view.schema_shards.shard_views]]
+[[static_route.topology.schema_shards.shard_views]]
 shard_id = 0
-[meta_client.cluster_view.schema_shards.shard_views.node]
+[static_route.topology.schema_shards.shard_views.endpoint]
 addr = '127.0.0.1'
 port = 8831
-[[meta_client.cluster_view.schema_shards.shard_views]]
+[[static_route.topology.schema_shards.shard_views]]
 shard_id = 1
-[meta_client.cluster_view.schema_shards.shard_views.node]
+[static_route.topology.schema_shards.shard_views.endpoint]
 addr = '127.0.0.1'
 port = 18831
 ```
@@ -127,38 +127,38 @@ wal_path = "/tmp/ceresdb_0"
 type = "Local"
 data_path = "/tmp/ceresdb_0"
 
-[[meta_client.cluster_view.schema_shards]]
+[[static_route.topology.schema_shards]]
 schema = 'public_0'
-[[meta_client.cluster_view.schema_shards.shard_views]]
+[[static_route.topology.schema_shards.shard_views]]
 shard_id = 0
-[meta_client.cluster_view.schema_shards.shard_views.node]
+[static_route.topology.schema_shards.shard_views.endpoint]
 addr = '127.0.0.1'
 port = 8831
-[[meta_client.cluster_view.schema_shards.shard_views]]
+[[static_route.topology.schema_shards.shard_views]]
 shard_id = 1
-[meta_client.cluster_view.schema_shards.shard_views.node]
+[static_route.topology.schema_shards.shard_views.endpoint]
 addr = '127.0.0.1'
 port = 8831
 
-[[meta_client.cluster_view.schema_shards]]
+[[static_route.topology.schema_shards]]
 schema = 'public_1'
-[[meta_client.cluster_view.schema_shards.shard_views]]
+[[static_route.topology.schema_shards.shard_views]]
 shard_id = 0
-[meta_client.cluster_view.schema_shards.shard_views.node]
+[static_route.topology.schema_shards.shard_views.endpoint]
 addr = '127.0.0.1'
 port = 8831
-[[meta_client.cluster_view.schema_shards.shard_views]]
+[[static_route.topology.schema_shards.shard_views]]
 shard_id = 1
-[meta_client.cluster_view.schema_shards.shard_views.node]
+[static_route.topology.schema_shards.shard_views.endpoint]
 addr = '127.0.0.1'
 port = 18831
 
-[[route_rules.prefix_rules]]
+[[static_route.rules.prefix_rules]]
 schema = 'public_0'
 prefix = 'prod_'
 shard = 0
 
-[[route_rules.hash_rules]]
+[[static_route.rules.hash_rules]]
 schema = 'public_1'
 shards = [0, 1]
 ```
@@ -177,38 +177,38 @@ wal_path = "/tmp/ceresdb_1"
 type = "Local"
 data_path = "/tmp/ceresdb_1"
 
-[[meta_client.cluster_view.schema_shards]]
+[[static_route.topology.schema_shards]]
 schema = 'public_0'
-[[meta_client.cluster_view.schema_shards.shard_views]]
+[[static_route.topology.schema_shards.shard_views]]
 shard_id = 0
-[meta_client.cluster_view.schema_shards.shard_views.node]
+[static_route.topology.schema_shards.shard_views.endpoint]
 addr = '127.0.0.1'
 port = 8831
-[[meta_client.cluster_view.schema_shards.shard_views]]
+[[static_route.topology.schema_shards.shard_views]]
 shard_id = 1
-[meta_client.cluster_view.schema_shards.shard_views.node]
+[static_route.topology.schema_shards.shard_views.endpoint]
 addr = '127.0.0.1'
 port = 8831
 
-[[meta_client.cluster_view.schema_shards]]
+[[static_route.topology.schema_shards]]
 schema = 'public_1'
-[[meta_client.cluster_view.schema_shards.shard_views]]
+[[static_route.topology.schema_shards.shard_views]]
 shard_id = 0
-[meta_client.cluster_view.schema_shards.shard_views.node]
+[static_route.topology.schema_shards.shard_views.endpoint]
 addr = '127.0.0.1'
 port = 8831
-[[meta_client.cluster_view.schema_shards.shard_views]]
+[[static_route.topology.schema_shards.shard_views]]
 shard_id = 1
-[meta_client.cluster_view.schema_shards.shard_views.node]
+[static_route.topology.schema_shards.shard_views.endpoint]
 addr = '127.0.0.1'
 port = 18831
 
-[[route_rules.prefix_rules]]
+[[static_route.rules.prefix_rules]]
 schema = 'public_0'
 prefix = 'prod_'
 shard = 0
 
-[[route_rules.hash_rules]]
+[[static_route.rules.hash_rules]]
 schema = 'public_1'
 shards = [0, 1]
 ```
@@ -224,4 +224,4 @@ sudo docker run -d -t --name ceresdb_0 -p 5440:5440 -p 8831:8831 -v $(pwd)/confi
 sudo docker run -d -t --name ceresdb_1 -p 15440:15440 -p 18831:18831 -v $(pwd)/config_1.toml:/etc/ceresdb/ceresdb.toml ceresdb/ceresdb-server:v0.1.0-alpha
 ```
 
-After the two containers are created and starting running, read and write requests can be served by the two-instances CeresDB cluster.
\ No newline at end of file
+After the two containers are created and start running, read and write requests can be served by the two-instance CeresDB cluster.
diff --git a/server/src/config.rs b/server/src/config.rs
index 7c34fa0048..dc69df11a5 100644
--- a/server/src/config.rs
+++ b/server/src/config.rs
@@ -43,7 +43,7 @@ pub struct RuntimeConfig {
 #[derive(Clone, Debug, Default, Deserialize)]
 #[serde(default)]
 pub struct StaticRouteConfig {
-    pub rule_list: RuleList,
+    pub rules: RuleList,
     pub topology: StaticTopologyConfig,
 }
 
diff --git a/src/setup.rs b/src/setup.rs
index 015e21c193..76eea40c7a 100644
--- a/src/setup.rs
+++ b/src/setup.rs
@@ -195,7 +195,7 @@ async fn build_in_standalone_mode(
     let schema_configs = cluster_view.schema_configs.clone();
     let router = Arc::new(RuleBasedRouter::new(
         cluster_view,
-        config.static_route.rule_list.clone(),
+        config.static_route.rules.clone(),
     ));
 
     let schema_config_provider = Arc::new(ConfigBasedProvider::new(schema_configs));
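Note: the sketch below is not code from this repository; it is a minimal, self-contained illustration of why the `rule_list` -> `rules` rename also changes the config file keys. With `#[derive(Deserialize)]`, serde maps struct field names directly to TOML keys, so the routing rules move from `static_route.rule_list.*` to the `[[static_route.rules.prefix_rules]]` / `[[static_route.rules.hash_rules]]` tables shown in the docs above. The `PrefixRule`, `HashRule`, `RuleList`, and `Config` types here are simplified stand-ins, assuming only the `serde` (with `derive`) and `toml` crates:

```rust
// Minimal sketch, NOT the actual CeresDB source: PrefixRule, HashRule,
// RuleList, and Config are simplified stand-ins used only to show how
// serde maps the renamed `rules` field to the `static_route.rules.*`
// TOML keys from the documentation above.
use serde::Deserialize;

#[derive(Clone, Debug, Default, Deserialize)]
#[serde(default)]
struct PrefixRule {
    schema: String,
    prefix: String,
    shard: u32,
}

#[derive(Clone, Debug, Default, Deserialize)]
#[serde(default)]
struct HashRule {
    schema: String,
    shards: Vec<u32>,
}

#[derive(Clone, Debug, Default, Deserialize)]
#[serde(default)]
struct RuleList {
    prefix_rules: Vec<PrefixRule>,
    hash_rules: Vec<HashRule>,
}

#[derive(Clone, Debug, Default, Deserialize)]
#[serde(default)]
struct StaticRouteConfig {
    // Renamed from `rule_list`, so the expected TOML key becomes
    // `static_route.rules` instead of `static_route.rule_list`.
    rules: RuleList,
}

#[derive(Debug, Default, Deserialize)]
#[serde(default)]
struct Config {
    static_route: StaticRouteConfig,
}

fn main() {
    let toml_src = r#"
        [[static_route.rules.prefix_rules]]
        schema = 'public_0'
        prefix = 'prod_'
        shard = 0

        [[static_route.rules.hash_rules]]
        schema = 'public_1'
        shards = [0, 1]
    "#;

    // Parse the routing-rule section the same way a config file would be read.
    let config: Config = toml::from_str(toml_src).expect("valid static_route config");
    println!("{:#?}", config.static_route.rules);
}
```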