From 24bb6f598daee23f0c23da0d4bda125b542d4d20 Mon Sep 17 00:00:00 2001
From: tomg10
Date: Tue, 18 Jun 2024 21:22:21 +0200
Subject: [PATCH 01/10] pruning and snapshots recovery documentation

Signed-off-by: tomg10
---
 core/bin/external_node/src/config/mod.rs     |  2 +-
 .../external-node/07_snapshots_recovery.md   | 21 ++++++++++
 docs/guides/external-node/08_pruning.md      | 38 +++++++++++++++++++
 3 files changed, 60 insertions(+), 1 deletion(-)
 create mode 100644 docs/guides/external-node/07_snapshots_recovery.md
 create mode 100644 docs/guides/external-node/08_pruning.md

diff --git a/core/bin/external_node/src/config/mod.rs b/core/bin/external_node/src/config/mod.rs
index e329150721c0..9956b6476aee 100644
--- a/core/bin/external_node/src/config/mod.rs
+++ b/core/bin/external_node/src/config/mod.rs
@@ -541,7 +541,7 @@ impl OptionalENConfig {
     }

     fn default_pruning_data_retention_sec() -> u64 {
-        3_600 // 1 hour
+        3_600 * 24 * 7 // 7 days
     }

     fn from_env() -> anyhow::Result<Self> {
diff --git a/docs/guides/external-node/07_snapshots_recovery.md b/docs/guides/external-node/07_snapshots_recovery.md
new file mode 100644
index 000000000000..c955512625ff
--- /dev/null
+++ b/docs/guides/external-node/07_snapshots_recovery.md
@@ -0,0 +1,21 @@
+# Snapshots Recovery
+
+Instead of starting the node using DB snapshots, it's possible to configure it to start from a protocol-level snapshot.
+This process is much faster and requires way less storage. The Postgres database of a mainnet node recovered from a snapshot is only about 300GB.
+> [!NOTE]
+> Nodes recovered from a snapshot don't have any historical data from before the recovery!
+
+## Configuration
+To enable snapshots-recovery on mainnet, you need to set:
+
+    EN_SNAPSHOTS_RECOVERY_ENABLED: "true"
+    EN_SNAPSHOTS_OBJECT_STORE_BUCKET_BASE_URL: "zksync-era-mainnet-external-node-snapshots"
+    EN_SNAPSHOTS_OBJECT_STORE_MODE: "GCSAnonymousReadOnly"
+
+For the Sepolia testnet, use:
+
+    EN_SNAPSHOTS_RECOVERY_ENABLED: "true"
+    EN_SNAPSHOTS_OBJECT_STORE_BUCKET_BASE_URL: "zksync-era-boojnet-external-node-snapshots"
+    EN_SNAPSHOTS_OBJECT_STORE_MODE: "GCSAnonymousReadOnly"
+
+For working examples of fully configured nodes recovering from snapshots, see the docker-compose-examples directory and 00_quick_start.md
diff --git a/docs/guides/external-node/08_pruning.md b/docs/guides/external-node/08_pruning.md
new file mode 100644
index 000000000000..2cede38e58ed
--- /dev/null
+++ b/docs/guides/external-node/08_pruning.md
@@ -0,0 +1,38 @@
+# Pruning
+
+It is possible to configure Node to periodically remove all data from batches older than a threshold, both from Postgres
+and from tree.
+
+> [!NOTE]
+> If you need a node with a data retention period of up to a few days, please set up a node from
+> a snapshot (see previous chapter) and wait for it to have enough data. Pruning an archival node can take
+> an impractical amount of time. In the future we will be offering pre-pruned DB snapshots with a few months of data.
+
+
+You can enable pruning by setting
+
+```
+EN_PRUNING_ENABLED=true
+```
+
+By default, it will keep history for 7 days. You can configure the retention period using:
+
+```
+EN_PRUNING_DATA_RETENTION_SEC: '259200' // 3 days
+```
+
+The data retention can be set to any value, but values under 21h will be ignored as the batch can only be pruned once it has
+been executed on Ethereum.
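+
+For example, a minimal sketch of passing these variables to a docker-compose based node (the `external-node` service
+name here is an assumption for illustration, not taken from the actual compose files):
+
+```yaml
+services:
+  external-node:
+    environment:
+      EN_PRUNING_ENABLED: 'true' # enable periodic pruning
+      EN_PRUNING_DATA_RETENTION_SEC: '259200' # keep 3 days = 3 * 24 * 3600 seconds
+```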
+
+## Storage requirements for pruned nodes
+
+The storage requirements depend on how long you configure to retain the data, but are roughly:
+
+    40GB + ~5GB/data-retention-day space needed on the machine that runs the node
+    300GB + ~15GB/day-retention-day for Postgres
+
+> [!NOTE]
+> When pruning an existing archival node, Postgre will be unable to reclaim disk space automatically.
+> To reclaim disk space, you need to manually run VACUUM FULL, which requires an ACCESS EXCLUSIVE lock.
+> You can read more about it in https://www.postgresql.org/docs/current/sql-vacuum.html

From 73410f94087fdfa74d12f04d3884151efb33ec9e Mon Sep 17 00:00:00 2001
From: tomg10
Date: Tue, 18 Jun 2024 21:26:54 +0200
Subject: [PATCH 02/10] fmt

Signed-off-by: tomg10
---
 .../external-node/07_snapshots_recovery.md | 11 +++++++---
 docs/guides/external-node/08_pruning.md    | 20 +++++++++----------
 2 files changed, 18 insertions(+), 13 deletions(-)

diff --git a/docs/guides/external-node/07_snapshots_recovery.md b/docs/guides/external-node/07_snapshots_recovery.md
index c955512625ff..0b5e3cbfe9cc 100644
--- a/docs/guides/external-node/07_snapshots_recovery.md
+++ b/docs/guides/external-node/07_snapshots_recovery.md
@@ -1,11 +1,15 @@
 # Snapshots Recovery

 Instead of starting the node using DB snapshots, it's possible to configure it to start from a protocol-level snapshot.
-This process is much faster and requires way less storage. The Postgres database of a mainnet node recovered from a snapshot is only about 300GB.
-> [!NOTE]
+This process is much faster and requires way less storage. The Postgres database of a mainnet node recovered from a snapshot
+is only about 300GB.
+
+> [!NOTE]
+>
 > Nodes recovered from a snapshot don't have any historical data from before the recovery!

 ## Configuration
+
 To enable snapshots-recovery on mainnet, you need to set:

     EN_SNAPSHOTS_RECOVERY_ENABLED: "true"
@@ -18,4 +22,5 @@ For the Sepolia testnet, use:
     EN_SNAPSHOTS_OBJECT_STORE_BUCKET_BASE_URL: "zksync-era-boojnet-external-node-snapshots"
     EN_SNAPSHOTS_OBJECT_STORE_MODE: "GCSAnonymousReadOnly"

-For working examples of fully configured nodes recovering from snapshots, see the docker-compose-examples directory and 00_quick_start.md
+For working examples of fully configured nodes recovering from snapshots, see the docker-compose-examples directory and
+00_quick_start.md
diff --git a/docs/guides/external-node/08_pruning.md b/docs/guides/external-node/08_pruning.md
index 2cede38e58ed..2e34537bf052 100644
--- a/docs/guides/external-node/08_pruning.md
+++ b/docs/guides/external-node/08_pruning.md
@@ -3,11 +3,11 @@
 It is possible to configure Node to periodically remove all data from batches older than a threshold, both from Postgres
 and from tree.

-> [!NOTE]
-> If you need a node with a data retention period of up to a few days, please set up a node from
-> a snapshot (see previous chapter) and wait for it to have enough data. Pruning an archival node can take
-> an impractical amount of time. In the future we will be offering pre-pruned DB snapshots with a few months of data.
-
+> [!NOTE]
+>
+> If you need a node with a data retention period of up to a few days, please set up a node from a snapshot (see
+> previous chapter) and wait for it to have enough data. Pruning an archival node can take an impractical amount of time.
+> In the future we will be offering pre-pruned DB snapshots with a few months of data.
 You can enable pruning by setting
@@ -31,8 +31,8 @@ The storage requirements depend on how long you configure to retain the data, bu
     40GB + ~5GB/data-retention-day space needed on the machine that runs the node
     300GB + ~15GB/day-retention-day for Postgres

-> [!NOTE]
-> When pruning an existing archival node, Postgre will be unable to reclaim disk space automatically.
-> To reclaim disk space, you need to manually run VACUUM FULL, which requires an ACCESS EXCLUSIVE lock.
-> You can read more about it in https://www.postgresql.org/docs/current/sql-vacuum.html
-
+> [!NOTE]
+>
+> When pruning an existing archival node, Postgre will be unable to reclaim disk space automatically. To reclaim
+> disk space, you need to manually run VACUUM FULL, which requires an ACCESS EXCLUSIVE lock. You can read more about it
+> in https://www.postgresql.org/docs/current/sql-vacuum.html

From a2a3176241ce8ac44a83175541b3f11d7d4cd565 Mon Sep 17 00:00:00 2001
From: tomg10
Date: Tue, 18 Jun 2024 21:27:34 +0200
Subject: [PATCH 03/10] fmt

Signed-off-by: tomg10
---
 .../external-node/07_snapshots_recovery.md |  4 ++--
 docs/guides/external-node/08_pruning.md    | 20 +++++++++----------
 2 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/docs/guides/external-node/07_snapshots_recovery.md b/docs/guides/external-node/07_snapshots_recovery.md
index 0b5e3cbfe9cc..54ff29be0370 100644
--- a/docs/guides/external-node/07_snapshots_recovery.md
+++ b/docs/guides/external-node/07_snapshots_recovery.md
@@ -4,8 +4,8 @@ Instead of starting the node using DB snapshots, it's possible to configure it
 This process is much faster and requires way less storage. The Postgres database of a mainnet node recovered from a snapshot
 is only about 300GB.

-> [!NOTE] 
-> 
+> [!NOTE]
+>
 > Nodes recovered from a snapshot don't have any historical data from before the recovery!

 ## Configuration
diff --git a/docs/guides/external-node/08_pruning.md b/docs/guides/external-node/08_pruning.md
index 2e34537bf052..c0314e92444b 100644
--- a/docs/guides/external-node/08_pruning.md
+++ b/docs/guides/external-node/08_pruning.md
@@ -3,11 +3,11 @@
 It is possible to configure Node to periodically remove all data from batches older than a threshold, both from Postgres
 and from tree.

-> [!NOTE]
-> 
-> If you need a node with a data retention period of up to a few days, please set up a node from a snapshot (see
-> previous chapter) and wait for it to have enough data. Pruning an archival node can take an impractical amount of time.
-> In the future we will be offering pre-pruned DB snapshots with a few months of data.
+> [!NOTE]
+>
+> If you need a node with a data retention period of up to a few days, please set up a node from a snapshot (see previous
+> chapter) and wait for it to have enough data. Pruning an archival node can take an impractical amount of time. In the
+> future we will be offering pre-pruned DB snapshots with a few months of data.
 You can enable pruning by setting
@@ -31,8 +31,8 @@ The storage requirements depend on how long you configure to retain the data, bu
     40GB + ~5GB/data-retention-day space needed on the machine that runs the node
     300GB + ~15GB/day-retention-day for Postgres

-> [!NOTE]
-> 
-> When pruning an existing archival node, Postgre will be unable to reclaim disk space automatically. To reclaim
-> disk space, you need to manually run VACUUM FULL, which requires an ACCESS EXCLUSIVE lock. You can read more about it
-> in https://www.postgresql.org/docs/current/sql-vacuum.html
+> [!NOTE]
+>
+> When pruning an existing archival node, Postgre will be unable to reclaim disk space automatically. To reclaim disk
+> space, you need to manually run VACUUM FULL, which requires an ACCESS EXCLUSIVE lock. You can read more about it in
+> https://www.postgresql.org/docs/current/sql-vacuum.html

From 19002836bc38ad0fc82e37bb9e5dc1355e410d7a Mon Sep 17 00:00:00 2001
From: tomg10
Date: Wed, 19 Jun 2024 10:44:11 +0200
Subject: [PATCH 04/10] PR feedback

Signed-off-by: tomg10
---
 .../external-node/07_snapshots_recovery.md | 16 ++++++------
 docs/guides/external-node/08_pruning.md    | 26 ++++++++++---------
 2 files changed, 22 insertions(+), 20 deletions(-)

diff --git a/docs/guides/external-node/07_snapshots_recovery.md b/docs/guides/external-node/07_snapshots_recovery.md
index 54ff29be0370..4653af4e05a6 100644
--- a/docs/guides/external-node/07_snapshots_recovery.md
+++ b/docs/guides/external-node/07_snapshots_recovery.md
@@ -2,7 +2,7 @@
 Instead of starting the node using DB snapshots, it's possible to configure it to start from a protocol-level snapshot.
 This process is much faster and requires way less storage. The Postgres database of a mainnet node recovered from a snapshot
-is only about 300GB.
+is only about 300GB. Without [*pruning*](08_pruning.md) enabled, the state will continuously grow about 15GB per day.

 > [!NOTE]
 >
@@ -10,17 +10,17 @@ is only about 300GB.

 ## Configuration

-To enable snapshots-recovery on mainnet, you need to set:
-
+To enable snapshots-recovery on mainnet, you need to set environment variables:
+```yaml
     EN_SNAPSHOTS_RECOVERY_ENABLED: "true"
     EN_SNAPSHOTS_OBJECT_STORE_BUCKET_BASE_URL: "zksync-era-mainnet-external-node-snapshots"
     EN_SNAPSHOTS_OBJECT_STORE_MODE: "GCSAnonymousReadOnly"
-
+```
 For the Sepolia testnet, use:
-
+```yaml
     EN_SNAPSHOTS_RECOVERY_ENABLED: "true"
     EN_SNAPSHOTS_OBJECT_STORE_BUCKET_BASE_URL: "zksync-era-boojnet-external-node-snapshots"
     EN_SNAPSHOTS_OBJECT_STORE_MODE: "GCSAnonymousReadOnly"
-
-For working examples of fully configured nodes recovering from snapshots, see the docker-compose-examples directory and
-00_quick_start.md
+```
+For working examples of fully configured nodes recovering from snapshots, see the [*docker compose examples*](docker-compose-examples) directory and
+[*Quick Start*](00_quick_start.md)
diff --git a/docs/guides/external-node/08_pruning.md b/docs/guides/external-node/08_pruning.md
index c0314e92444b..a061068f4615 100644
--- a/docs/guides/external-node/08_pruning.md
+++ b/docs/guides/external-node/08_pruning.md
@@ -1,35 +1,37 @@
 # Pruning

-It is possible to configure Node to periodically remove all data from batches older than a threshold, both from Postgres
-and from tree.
+It is possible to configure ZKSync Node to periodically remove all data from batches older than a configurable
+threshold. Data is pruned both from Postgres and from tree(RocksDB).

 > [!NOTE]
 >
-> If you need a node with a data retention period of up to a few days, please set up a node from a snapshot (see previous
-> chapter) and wait for it to have enough data. Pruning an archival node can take an impractical amount of time. In the
-> future we will be offering pre-pruned DB snapshots with a few months of data.
+> If you need a node with a data retention period of up to a few days, please set up a node from a
+> [_snapshot_](07_snapshots_recovery.md) and wait for it to have enough data. Pruning an archival node can take
+> an impractical amount of time. In the future we will be offering pre-pruned DB snapshots with a few months of data.

-You can enable pruning by setting
+## Configuration
+
+You can enable pruning by setting the environment variable

-```
-EN_PRUNING_ENABLED=true
+``` yaml
+EN_PRUNING_ENABLED: "true"
 ```

 By default, it will keep history for 7 days. You can configure the retention period using:

-```
-EN_PRUNING_DATA_RETENTION_SEC: '259200' // 3 days
+``` yaml
+EN_PRUNING_DATA_RETENTION_SEC: "259200" // 3 days
 ```

-The data retention can be set to any value, but values under 21h will be ignored as the batch can only be pruned once it has
+The data retention can be set to any value, but for mainnet values under 21h will be ignored as the batch can only be
+pruned once it has
 been executed on Ethereum.

 ## Storage requirements for pruned nodes

 The storage requirements depend on how long you configure to retain the data, but are roughly:

-    40GB + ~5GB/data-retention-day space needed on the machine that runs the node
-    300GB + ~15GB/day-retention-day for Postgres
++ **40GB + ~5GB/day of retained data** of disk space needed on the machine that runs the node
++ **300GB + ~15GB/day of retained data** of disk space for Postgres

 > [!NOTE]
 >

From d5f7068e62a29bc6071e4e3d063c824c1a98c3d6 Mon Sep 17 00:00:00 2001
From: tomg10
Date: Wed, 19 Jun 2024 10:45:02 +0200
Subject: [PATCH 05/10] zk fmt

Signed-off-by: tomg10
---
 .../external-node/07_snapshots_recovery.md | 22 +++++++++++--------
 docs/guides/external-node/08_pruning.md    | 22 +++++++++----------
 2 files changed, 24 insertions(+), 20 deletions(-)

diff --git a/docs/guides/external-node/07_snapshots_recovery.md b/docs/guides/external-node/07_snapshots_recovery.md
index 4653af4e05a6..94d279e358de 100644
--- a/docs/guides/external-node/07_snapshots_recovery.md
+++ b/docs/guides/external-node/07_snapshots_recovery.md
@@ -2,7 +2,7 @@
 Instead of starting the node using DB snapshots, it's possible to configure it to start from a protocol-level snapshot.
 This process is much faster and requires way less storage. The Postgres database of a mainnet node recovered from a snapshot
-is only about 300GB. Without [*pruning*](08_pruning.md) enabled, the state will continuously grow about 15GB per day.
+is only about 300GB. Without [_pruning_](08_pruning.md) enabled, the state will continuously grow about 15GB per day.

 > [!NOTE]
 >
 > Nodes recovered from a snapshot don't have any historical data from before the recovery!

@@ -11,16 +11,20 @@ is only about 300GB.

 ## Configuration

 To enable snapshots-recovery on mainnet, you need to set environment variables:
+
 ```yaml
 EN_SNAPSHOTS_RECOVERY_ENABLED: 'true'
 EN_SNAPSHOTS_OBJECT_STORE_BUCKET_BASE_URL: 'zksync-era-mainnet-external-node-snapshots'
 EN_SNAPSHOTS_OBJECT_STORE_MODE: 'GCSAnonymousReadOnly'
 ```
+
 For the Sepolia testnet, use:
+
 ```yaml
 EN_SNAPSHOTS_RECOVERY_ENABLED: 'true'
 EN_SNAPSHOTS_OBJECT_STORE_BUCKET_BASE_URL: 'zksync-era-boojnet-external-node-snapshots'
 EN_SNAPSHOTS_OBJECT_STORE_MODE: 'GCSAnonymousReadOnly'
 ```

-For working examples of fully configured nodes recovering from snapshots, see the [*docker compose examples*](docker-compose-examples) directory and
-[*Quick Start*](00_quick_start.md)
+For working examples of fully configured nodes recovering from snapshots, see the
+[_docker compose examples_](docker-compose-examples) directory and [_Quick Start_](00_quick_start.md)
diff --git a/docs/guides/external-node/08_pruning.md b/docs/guides/external-node/08_pruning.md
index a061068f4615..a846bf7ee182 100644
--- a/docs/guides/external-node/08_pruning.md
+++ b/docs/guides/external-node/08_pruning.md
@@ -1,37 +1,37 @@
 # Pruning

 It is possible to configure ZKSync Node to periodically remove all data from batches older than a configurable
-threshold. Data is pruned both from Postgres and from tree(RocksDB). 
+threshold. Data is pruned both from Postgres and from tree(RocksDB).

-> [!NOTE] 
-> 
+> [!NOTE]
+>
 > If you need a node with a data retention period of up to a few days, please set up a node from a
 > [_snapshot_](07_snapshots_recovery.md) and wait for it to have enough data. Pruning an archival node can take
 > an impractical amount of time. In the future we will be offering pre-pruned DB snapshots with a few months of data.

 ## Configuration

 You can enable pruning by setting the environment variable

-``` yaml
-EN_PRUNING_ENABLED: "true"
+```yaml
+EN_PRUNING_ENABLED: 'true'
 ```

 By default, it will keep history for 7 days. You can configure the retention period using:

-``` yaml
+```yaml
 EN_PRUNING_DATA_RETENTION_SEC: "259200" // 3 days
 ```

 The data retention can be set to any value, but for mainnet values under 21h will be ignored as the batch can only be
-pruned once it has
-been executed on Ethereum.
+pruned once it has been executed on Ethereum.
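+
+For example, to keep roughly two weeks of history (a sketch — the value is simply 14 × 24 × 3600 seconds):
+
+```yaml
+EN_PRUNING_DATA_RETENTION_SEC: '1209600' # 14 days
+```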

 ## Storage requirements for pruned nodes

 The storage requirements depend on how long you configure to retain the data, but are roughly:

-+ **40GB + ~5GB/day of retained data** of disk space needed on the machine that runs the node
-+ **300GB + ~15GB/day of retained data** of disk space for Postgres
+- **40GB + ~5GB/day of retained data** of disk space needed on the machine that runs the node
+- **300GB + ~15GB/day of retained data** of disk space for Postgres

-> [!NOTE] 
-> 
-> When pruning an existing archival node, Postgre will be unable to reclaim disk space automatically. To reclaim disk
-> space, you need to manually run VACUUM FULL, which requires an ACCESS EXCLUSIVE lock. You can read more about it in
-> https://www.postgresql.org/docs/current/sql-vacuum.html
+> [!NOTE]
+>
+> When pruning an existing archival node, Postgre will be unable to reclaim disk space automatically. To reclaim disk
+> space, you need to manually run VACUUM FULL, which requires an ACCESS EXCLUSIVE lock. You can read more about it in
+> https://www.postgresql.org/docs/current/sql-vacuum.html

From 4a4f72795703ebd70583186ce5e71a3a326292f0 Mon Sep 17 00:00:00 2001
From: tomg10
Date: Thu, 20 Jun 2024 10:33:56 +0200
Subject: [PATCH 06/10] PR feedback

Signed-off-by: tomg10
---
 docs/guides/external-node/08_pruning.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/guides/external-node/08_pruning.md b/docs/guides/external-node/08_pruning.md
index a846bf7ee182..b8e4fed522ae 100644
--- a/docs/guides/external-node/08_pruning.md
+++ b/docs/guides/external-node/08_pruning.md
@@ -1,7 +1,7 @@
 # Pruning

 It is possible to configure ZKSync Node to periodically remove all data from batches older than a configurable
-threshold. Data is pruned both from Postgres and from tree(RocksDB).
+threshold. Data is pruned both from Postgres and from tree (RocksDB).

 > [!NOTE]
 >
@@ -20,7 +20,7 @@ EN_PRUNING_ENABLED: 'true'
 ```

 By default, it will keep history for 7 days. You can configure the retention period using:

 ```yaml
-EN_PRUNING_DATA_RETENTION_SEC: "259200" // 3 days
+EN_PRUNING_DATA_RETENTION_SEC: "259200" # 3 days
 ```

From 3704e65e5daf8985283d8dfb357da8cdb1409d79 Mon Sep 17 00:00:00 2001
From: tomg10
Date: Thu, 20 Jun 2024 10:44:08 +0200
Subject: [PATCH 07/10] fix

Signed-off-by: tomg10
---
 docs/guides/external-node/08_pruning.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/guides/external-node/08_pruning.md b/docs/guides/external-node/08_pruning.md
index b8e4fed522ae..ef888444668b 100644
--- a/docs/guides/external-node/08_pruning.md
+++ b/docs/guides/external-node/08_pruning.md
@@ -35,6 +35,6 @@ The storage requirements depend on how long you configure to retain the data, bu

 > [!NOTE]
 >
-> When pruning an existing archival node, Postgre will be unable to reclaim disk space automatically. To reclaim disk
+> When pruning an existing archival node, Postgres will be unable to reclaim disk space automatically. To reclaim disk
 > space, you need to manually run VACUUM FULL, which requires an ACCESS EXCLUSIVE lock. You can read more about it in
 > https://www.postgresql.org/docs/current/sql-vacuum.html

From 7d66c37a1d0cc31fc1cbd15de8f670761069914a Mon Sep 17 00:00:00 2001
From: tomg10
Date: Thu, 20 Jun 2024 10:59:21 +0200
Subject: [PATCH 08/10] fix

Signed-off-by: tomg10
---
 docs/guides/external-node/08_pruning.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/guides/external-node/08_pruning.md b/docs/guides/external-node/08_pruning.md
index ef888444668b..7d6e006f7937 100644
--- a/docs/guides/external-node/08_pruning.md
+++ b/docs/guides/external-node/08_pruning.md
@@ -37,4 +37,4 @@ The storage requirements depend on how long you configure to retain the data, bu

 > When pruning an existing archival node, Postgres will be unable to reclaim disk space automatically. To reclaim disk
 > space, you need to manually run VACUUM FULL, which requires an ACCESS EXCLUSIVE lock. You can read more about it in
-> https://www.postgresql.org/docs/current/sql-vacuum.html
+> [_postgres docs_](https://www.postgresql.org/docs/current/sql-vacuum.html)

From 2a00fd69cade99046863c77945ee1e529befd763 Mon Sep 17 00:00:00 2001
From: tomg10
Date: Thu, 20 Jun 2024 10:59:40 +0200
Subject: [PATCH 09/10] fix

Signed-off-by: tomg10
---
 docs/guides/external-node/08_pruning.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/guides/external-node/08_pruning.md b/docs/guides/external-node/08_pruning.md
index 7d6e006f7937..bcd2ee0944bb 100644
--- a/docs/guides/external-node/08_pruning.md
+++ b/docs/guides/external-node/08_pruning.md
@@ -20,7 +20,7 @@ EN_PRUNING_ENABLED: 'true'
 ```

 By default, it will keep history for 7 days. You can configure the retention period using:

 ```yaml
-EN_PRUNING_DATA_RETENTION_SEC: "259200" # 3 days
+EN_PRUNING_DATA_RETENTION_SEC: '259200' # 3 days
 ```

 The data retention can be set to any value, but for mainnet values under 21h will be ignored as the batch can only be

From 8bfac09a050a4ad5b3cbbdc5e8a7406d75dc2c92 Mon Sep 17 00:00:00 2001
From: tomg10
Date: Thu, 20 Jun 2024 13:23:38 +0200
Subject: [PATCH 10/10] PR feedback

Signed-off-by: tomg10
---
 docs/guides/external-node/08_pruning.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/guides/external-node/08_pruning.md b/docs/guides/external-node/08_pruning.md
index bcd2ee0944bb..c7f834214ae7 100644
--- a/docs/guides/external-node/08_pruning.md
+++ b/docs/guides/external-node/08_pruning.md
@@ -1,7 +1,7 @@
 # Pruning

-It is possible to configure ZKSync Node to periodically remove all data from batches older than a configurable
+It is possible to configure ZKsync Node to periodically remove all data from batches older than a configurable
 threshold. Data is pruned both from Postgres and from tree (RocksDB).

 > [!NOTE]