From 7fba9616eacd33f7c8dbad7052c24321cb555070 Mon Sep 17 00:00:00 2001 From: John Spray Date: Tue, 19 Nov 2024 16:50:05 +0000 Subject: [PATCH 1/2] split 2-4 instead of 4-8 --- test_runner/regress/test_sharding.py | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/test_runner/regress/test_sharding.py b/test_runner/regress/test_sharding.py index 0a4a53356d94..3a7878e9ab1b 100644 --- a/test_runner/regress/test_sharding.py +++ b/test_runner/regress/test_sharding.py @@ -517,8 +517,8 @@ def test_sharding_split_smoke( # We will start with 4 shards and split into 8, then migrate all those # 8 shards onto separate pageservers - shard_count = 4 - split_shard_count = 8 + shard_count = 2 + split_shard_count = 4 neon_env_builder.num_pageservers = split_shard_count * 2 # 1MiB stripes: enable getting some meaningful data distribution without @@ -591,7 +591,7 @@ def test_sharding_split_smoke( workload.validate() - assert len(pre_split_pageserver_ids) == 4 + assert len(pre_split_pageserver_ids) == shard_count def shards_on_disk(shard_ids): for pageserver in env.pageservers: @@ -728,14 +728,6 @@ def check_effective_tenant_config(): 6: 1, 7: 1, 8: 1, - 9: 1, - 10: 1, - 11: 1, - 12: 1, - 13: 1, - 14: 1, - 15: 1, - 16: 1, } # The controller is not required to lay out the attached locations in any particular way, but From d470a03b4ea72acf9ed1bea402ea1ba7b19da897 Mon Sep 17 00:00:00 2001 From: John Spray Date: Tue, 19 Nov 2024 16:55:39 +0000 Subject: [PATCH 2/2] two shards per pageserver --- test_runner/regress/test_sharding.py | 25 +++++++++++-------------- 1 file changed, 11 insertions(+), 14 deletions(-) diff --git a/test_runner/regress/test_sharding.py b/test_runner/regress/test_sharding.py index 3a7878e9ab1b..84737fc81e2b 100644 --- a/test_runner/regress/test_sharding.py +++ b/test_runner/regress/test_sharding.py @@ -515,11 +515,12 @@ def test_sharding_split_smoke( """ - # We will start with 4 shards and split into 8, then migrate all those - # 8 
shards onto separate pageservers + # Shard count we start with shard_count = 2 + # Shard count we split into split_shard_count = 4 - neon_env_builder.num_pageservers = split_shard_count * 2 + # We will have 2 shards per pageserver once done (including secondaries) + neon_env_builder.num_pageservers = split_shard_count # 1MiB stripes: enable getting some meaningful data distribution without # writing large quantities of data in this test. The stripe size is given @@ -654,9 +655,9 @@ def shards_on_disk(shard_ids): # - shard_count reconciles for the original setup of the tenant # - shard_count reconciles for detaching the original secondary locations during split # - split_shard_count reconciles during shard splitting, for setting up secondaries. - # - shard_count of the child shards will need to fail over to their secondaries - # - shard_count of the child shard secondary locations will get moved to emptier nodes - expect_reconciles = shard_count * 2 + split_shard_count + shard_count * 2 + # - split_shard_count/2 of the child shards will need to fail over to their secondaries (since we have 4 shards and 4 pageservers, only 2 will move) + expect_reconciles = shard_count * 2 + split_shard_count + split_shard_count / 2 + reconcile_ok = env.storage_controller.get_metric_value( "storage_controller_reconcile_complete_total", filter={"status": "ok"} ) @@ -720,14 +721,10 @@ def check_effective_tenant_config(): # dominated by shard count. log.info(f"total: {total}") assert total == { - 1: 1, - 2: 1, - 3: 1, - 4: 1, - 5: 1, - 6: 1, - 7: 1, - 8: 1, + 1: 2, + 2: 2, + 3: 2, + 4: 2, } # The controller is not required to lay out the attached locations in any particular way, but