From 6c6b1ac56b8a42eeb16a30daf5ab4add674e6d9c Mon Sep 17 00:00:00 2001 From: Piotr Sarna Date: Mon, 16 Oct 2023 12:18:44 +0200 Subject: [PATCH] bottomless: increase the max batch size to 10000 The reasoning is as follows: 10000 uncompressed frames weigh 40MiB. Gzip is expected to create a ~20MiB file from them, while xz can compress it down to ~800KiB. The previous limit would make xz create a 50KiB file, which is less than the minimum 128KiB that S3-like services charge for when writing to an object store. --- bottomless/src/replicator.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bottomless/src/replicator.rs b/bottomless/src/replicator.rs index 08c0b561..ec08f1a2 100644 --- a/bottomless/src/replicator.rs +++ b/bottomless/src/replicator.rs @@ -171,7 +171,7 @@ impl Options { let secret_access_key = env_var("LIBSQL_BOTTOMLESS_AWS_SECRET_ACCESS_KEY").ok(); let region = env_var("LIBSQL_BOTTOMLESS_AWS_DEFAULT_REGION").ok(); let max_frames_per_batch = - env_var_or("LIBSQL_BOTTOMLESS_BATCH_MAX_FRAMES", 500).parse::<usize>()?; + env_var_or("LIBSQL_BOTTOMLESS_BATCH_MAX_FRAMES", 10000).parse::<usize>()?; let s3_upload_max_parallelism = env_var_or("LIBSQL_BOTTOMLESS_S3_PARALLEL_MAX", 32).parse::<usize>()?; let restore_transaction_page_swap_after =