diff --git a/postgresql/postgres.nix b/postgresql/postgres.nix index 1a39e39..fbbb787 100644 --- a/postgresql/postgres.nix +++ b/postgresql/postgres.nix @@ -1,4 +1,27 @@ { + "t3a.nano" = { + max_connections = 40; + shared_buffers = "128MB"; + work_mem = "655kB"; + maintenance_work_mem = "32MB"; + max_worker_processes = 2; + max_parallel_workers_per_gather = 1; + max_parallel_workers = 2; + max_parallel_maintenance_workers = 1; + + effective_cache_size = "384MB"; + default_statistics_target = 100; + effective_io_concurrency = 200; + + wal_level = "minimal"; + archive_mode = "off"; + max_wal_senders = 0; + wal_buffers = "3932kB"; + min_wal_size = "2GB"; + max_wal_size = "8GB"; + random_page_cost = 1.1; + checkpoint_completion_target = 0.9; + }; "t3a.micro" = { max_connections = 60; shared_buffers = "256MB"; @@ -43,6 +66,58 @@ max_wal_size = "4GB"; min_wal_size = "1GB"; }; + "t3a.xlarge" = { + max_connections = 200; + shared_buffers = "4GB"; + effective_cache_size = "12GB"; + maintenance_work_mem = "1GB"; + checkpoint_completion_target = 0.9; + wal_buffers = "16MB"; + random_page_cost = 1.1; + effective_io_concurrency = 200; + work_mem = "10485kB"; + max_worker_processes = 4; + max_parallel_workers_per_gather = 2; + max_parallel_workers = 4; + max_parallel_maintenance_workers = 2; + }; + "t3a.2xlarge" = { + max_connections = 200; + shared_buffers = "8GB"; + effective_cache_size = "24GB"; + maintenance_work_mem = "2GB"; + checkpoint_completion_target = 0.9; + wal_buffers = "16MB"; + random_page_cost = 1.1; + effective_io_concurrency = 200; + work_mem = "10485kB"; + max_worker_processes = 8; + max_parallel_workers_per_gather = 4; + max_parallel_workers = 8; + max_parallel_maintenance_workers = 4; + }; + "m5a.large" = { + max_connections = 200; + shared_buffers = "2GB"; + work_mem = "10485kB"; + maintenance_work_mem = "512MB"; + max_worker_processes = 2; + max_parallel_maintenance_workers = 1; + max_parallel_workers_per_gather = 1; + max_parallel_workers = 2; + + 
effective_cache_size = "6GB"; + default_statistics_target = 100; + effective_io_concurrency = 200; + + wal_level = "minimal"; + archive_mode = "off"; + max_wal_senders = 0; + wal_compression = "on"; + wal_buffers = "16MB"; + min_wal_size = "1GB"; + max_wal_size = "4GB"; + }; "m5a.xlarge" = { max_connections = 240; shared_buffers = "4GB"; @@ -101,6 +176,72 @@ default_statistics_target = 100; effective_io_concurrency = 200; + wal_level = "minimal"; + archive_mode = "off"; + max_wal_senders = 0; + wal_compression = "on"; + wal_buffers = "16MB"; + max_wal_size = "4GB"; + min_wal_size = "2GB"; + }; + "m5a.8xlarge" = { + max_connections = 600; + shared_buffers = "32GB"; + work_mem = "27962kB"; + maintenance_work_mem = "2GB"; + max_worker_processes = 32; + max_parallel_maintenance_workers = 4; + max_parallel_workers_per_gather = 4; + max_parallel_workers = 32; + + effective_cache_size = "96GB"; + default_statistics_target = 100; + effective_io_concurrency = 200; + + wal_level = "minimal"; + archive_mode = "off"; + max_wal_senders = 0; + wal_compression = "on"; + wal_buffers = "16MB"; + min_wal_size = "2GB"; + max_wal_size = "8GB"; + }; + "m5a.12xlarge" = { + max_connections = 800; + shared_buffers = "48GB"; + work_mem = "41943kB"; + maintenance_work_mem = "2GB"; + max_worker_processes = 48; + max_parallel_maintenance_workers = 4; + max_parallel_workers_per_gather = 4; + max_parallel_workers = 48; + + effective_cache_size = "144GB"; + default_statistics_target = 100; + effective_io_concurrency = 200; + + wal_level = "minimal"; + archive_mode = "off"; + max_wal_senders = 0; + wal_compression = "on"; + wal_buffers = "16MB"; + max_wal_size = "4GB"; + min_wal_size = "2GB"; + }; + "m5a.16xlarge" = { + max_connections = 1000; + shared_buffers = "64GB"; + work_mem = "83886kB"; + maintenance_work_mem = "2GB"; + max_worker_processes = 64; + max_parallel_maintenance_workers = 4; + max_parallel_workers_per_gather = 4; + max_parallel_workers = 64; + + effective_cache_size = "192GB"; + 
default_statistics_target = 100; + effective_io_concurrency = 200; + wal_level = "minimal"; archive_mode = "off"; max_wal_senders = 0; diff --git a/postgrest/PGBENCH_RESULTS.md b/postgrest/PGBENCH_RESULTS.md new file mode 100644 index 0000000..f3ae17f --- /dev/null +++ b/postgrest/PGBENCH_RESULTS.md @@ -0,0 +1,211 @@ + + +Running on a m5a.large + + +Run 1: + +transaction type: +scaling factor: 50 +query mode: prepared +number of clients: 100 +number of threads: 8 +duration: 30 s +number of transactions actually processed: 45492 +latency average = 66.235 ms +tps = 1509.766608 (including connections establishing) +tps = 1510.166881 (excluding connections establishing) + +Run 2: + +transaction type: +scaling factor: 50 +query mode: prepared +number of clients: 100 +number of threads: 8 +duration: 30 s +number of transactions actually processed: 50828 +latency average = 59.331 ms +tps = 1685.471195 (including connections establishing) +tps = 1686.528635 (excluding connections establishing) + +Run 3: + +transaction type: +scaling factor: 50 +query mode: prepared +number of clients: 100 +number of threads: 8 +duration: 30 s +number of transactions actually processed: 53997 +latency average = 55.817 ms +tps = 1791.559195 (including connections establishing) +tps = 1792.247708 (excluding connections establishing) + +Running on a m5a.xlarge + + +Run 1: + +transaction type: +scaling factor: 50 +query mode: prepared +number of clients: 100 +number of threads: 8 +duration: 30 s +number of transactions actually processed: 92804 +latency average = 32.448 ms +tps = 3081.890468 (including connections establishing) +tps = 3082.342735 (excluding connections establishing) + +Run 2: + +transaction type: +scaling factor: 50 +query mode: prepared +number of clients: 100 +number of threads: 8 +duration: 30 s +number of transactions actually processed: 114382 +latency average = 26.316 ms +tps = 3799.897694 (including connections establishing) +tps = 3800.595158 (excluding connections 
establishing) + +Run 3: + +transaction type: +scaling factor: 50 +query mode: prepared +number of clients: 100 +number of threads: 8 +duration: 30 s +number of transactions actually processed: 115656 +latency average = 26.068 ms +tps = 3836.188151 (including connections establishing) +tps = 3836.812069 (excluding connections establishing) + +Running on a m5a.2xlarge + + +Run 1: + +transaction type: +scaling factor: 50 +query mode: prepared +number of clients: 100 +number of threads: 8 +duration: 30 s +number of transactions actually processed: 142970 +latency average = 21.036 ms +tps = 4753.743995 (including connections establishing) +tps = 4754.249103 (excluding connections establishing) + +Run 2: + +transaction type: +scaling factor: 50 +query mode: prepared +number of clients: 100 +number of threads: 8 +duration: 30 s +number of transactions actually processed: 233928 +latency average = 12.860 ms +tps = 7775.855491 (including connections establishing) +tps = 7776.714704 (excluding connections establishing) + +Run 3: + +transaction type: +scaling factor: 50 +query mode: prepared +number of clients: 100 +number of threads: 8 +duration: 30 s +number of transactions actually processed: 235969 +latency average = 12.761 ms +tps = 7836.620631 (including connections establishing) +tps = 7837.585665 (excluding connections establishing) + +Running on a m5a.4xlarge + + +Run 1: + +transaction type: +scaling factor: 50 +query mode: prepared +number of clients: 100 +number of threads: 8 +duration: 30 s +number of transactions actually processed: 171468 +latency average = 17.536 ms +tps = 5702.618662 (including connections establishing) +tps = 5703.228351 (excluding connections establishing) + +Run 2: + +transaction type: +scaling factor: 50 +query mode: prepared +number of clients: 100 +number of threads: 8 +duration: 30 s +number of transactions actually processed: 351608 +latency average = 8.555 ms +tps = 11688.693390 (including connections establishing) +tps = 11690.031243 
(excluding connections establishing) + +Run 3: + +transaction type: +scaling factor: 50 +query mode: prepared +number of clients: 100 +number of threads: 8 +duration: 30 s +number of transactions actually processed: 355595 +latency average = 8.454 ms +tps = 11828.751758 (including connections establishing) +tps = 11830.103914 (excluding connections establishing) + +Running on a m5a.8xlarge + + +Run 1: + +transaction type: +scaling factor: 50 +query mode: prepared +number of clients: 100 +number of threads: 8 +duration: 30 s +number of transactions actually processed: 176047 +latency average = 17.082 ms +tps = 5854.134383 (including connections establishing) +tps = 5854.820377 (excluding connections establishing) + +Run 2: + +transaction type: +scaling factor: 50 +query mode: prepared +number of clients: 100 +number of threads: 8 +duration: 30 s +number of transactions actually processed: 372931 +latency average = 8.062 ms +tps = 12403.998820 (including connections establishing) +tps = 12405.350491 (excluding connections establishing) + +Run 3: + +transaction type: +scaling factor: 50 +query mode: prepared +number of clients: 100 +number of threads: 8 +duration: 30 s +number of transactions actually processed: 377333 +latency average = 7.968 ms +tps = 12550.028498 (including connections establishing) +tps = 12551.435376 (excluding connections establishing) diff --git a/postgrest/deploy.nix b/postgrest/deploy.nix index 71f7f43..b40fcd1 100644 --- a/postgrest/deploy.nix +++ b/postgrest/deploy.nix @@ -11,6 +11,7 @@ let pgrstPool = builtins.getEnv "PGRBENCH_PGRST_POOL"; }; pkgs = import {}; + postgresConfigs = import ../postgresql/postgres.nix; in { network.description = "postgrest benchmark"; @@ -187,12 +188,13 @@ in { client = {nodes, resources, ...}: { environment.systemPackages = [ pkgs.k6 + pkgs.postgresql_12 # only used for getting pgbench, no postgresql is started here ]; deployment = { targetEnv = "ec2"; ec2 = { inherit region accessKeyId; - instanceType = 
"t3a.xlarge"; + instanceType = "t3a.2xlarge"; associatePublicIpAddress = true; keyPair = resources.ec2KeyPairs.pgrstBenchKeyPair; subnetId = resources.vpcSubnets.pgrstBenchSubnet; @@ -208,12 +210,13 @@ in { ]; networking.hosts = { "${nodes.pgrstServer.config.networking.privateIPv4}" = [ "pgrst" ]; + "${nodes.pg.config.networking.privateIPv4}" = [ "pg" ]; }; }; } // pkgs.lib.optionalAttrs env.withSeparatePg { - pg = {resources, ...}: rec { + pg = {resources, config, ...}: rec { deployment = { targetEnv = "ec2"; ec2 = { @@ -242,55 +245,24 @@ in { ''; enableTCPIP = true; # listen_adresses = * # Tuned according to https://pgtune.leopard.in.ua - settings = - if deployment.ec2.instanceType == "t3a.nano" then { - max_connections = 200; - shared_buffers = "128MB"; - effective_cache_size = "384MB"; - maintenance_work_mem = "32MB"; - checkpoint_completion_target = 0.9; - wal_buffers = "3932kB"; - random_page_cost = 1.1; - effective_io_concurrency = 200; - work_mem = "655kB"; - max_worker_processes = 2; - max_parallel_workers_per_gather = 1; - max_parallel_workers = 2; - max_parallel_maintenance_workers = 1; - } - else if deployment.ec2.instanceType == "t3a.xlarge" then { - max_connections = 200; - shared_buffers = "4GB"; - effective_cache_size = "12GB"; - maintenance_work_mem = "1GB"; - checkpoint_completion_target = 0.9; - wal_buffers = "16MB"; - random_page_cost = 1.1; - effective_io_concurrency = 200; - work_mem = "10485kB"; - max_worker_processes = 4; - max_parallel_workers_per_gather = 2; - max_parallel_workers = 4; - max_parallel_maintenance_workers = 2; - } - else if deployment.ec2.instanceType == "t3a.2xlarge" then { - max_connections = 200; - shared_buffers = "8GB"; - effective_cache_size = "24GB"; - maintenance_work_mem = "2GB"; - checkpoint_completion_target = 0.9; - wal_buffers = "16MB"; - random_page_cost = 1.1; - effective_io_concurrency = 200; - work_mem = "10485kB"; - max_worker_processes = 8; - max_parallel_workers_per_gather = 4; - max_parallel_workers = 8; 
- max_parallel_maintenance_workers = 4; - } - else {}; + settings = builtins.getAttr config.deployment.ec2.instanceType postgresConfigs; initialScript = ../schemas/chinook/chinook.sql; # Here goes the sample db }; + # initialize the pgbench db by prepending to the default postgresql systemd post start + # https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/services/databases/postgresql.nix#L342-L353 + systemd.services.postgresql.postStart = + with config.services.postgresql; + pkgs.lib.mkBefore '' + PSQL="psql --port=${toString port}" + while ! $PSQL -d postgres -c "" 2> /dev/null; do + if ! kill -0 "$MAINPID"; then exit 1; fi + sleep 0.1 + done + if test -e "${dataDir}/.first_startup"; then + createdb example + pgbench example -i -s 50 --foreign-keys + fi + ''; networking.firewall.allowedTCPPorts = [ 5432 ]; }; diff --git a/postgrest/shell.nix b/postgrest/shell.nix index b05d69d..bdbfa0d 100644 --- a/postgrest/shell.nix +++ b/postgrest/shell.nix @@ -36,6 +36,41 @@ let nixops ssh -d pgrbench client k6 run --summary-export=$filename.json - < $1 ''; + clientPgBench = + pkgs.writeShellScriptBin "pgrbench-pgbench" + '' + set -euo pipefail + + # uses the full cores of the instance and prepared statements + nixops ssh -d pgrbench client pgbench example -h pg -U postgres -j 8 -T 30 -M prepared "$@" + ''; + repeat = + pkgs.writeShellScriptBin "repeat" + '' + set -euo pipefail + + number=$1 + shift + + for i in $(seq "$number"); do + echo -e "\nRun $i:\n" + "$@" + done + ''; + pgBenchAllPgInstances = + pkgs.writeShellScriptBin "pgrbench-all-pg-instances" + '' + set -euo pipefail + + for instance in 'm5a.large' 'm5a.xlarge' 'm5a.2xlarge' 'm5a.4xlarge' 'm5a.8xlarge'; do + export PGRBENCH_PG_INSTANCE_TYPE="$instance" + + pgrbench-deploy + + echo -e "\nRunning on a $PGRBENCH_PG_INSTANCE_TYPE\n" + "$@" + done + ''; ssh = pkgs.writeShellScriptBin "pgrbench-ssh" '' @@ -63,6 +98,9 @@ pkgs.mkShell { k6 ssh destroy + clientPgBench + repeat + pgBenchAllPgInstances ]; shellHook = ''
export NIX_PATH="nixpkgs=${nixpkgs}:." @@ -71,5 +109,8 @@ pkgs.mkShell { export PGRBENCH_WITH_NGINX="true" export PGRBENCH_WITH_UNIX_SOCKET="true" export PGRBENCH_SEPARATE_PG="true" + + export PGRBENCH_PG_INSTANCE_TYPE="t3a.nano" + export PGRBENCH_PGRST_INSTANCE_TYPE="t3a.nano" ''; }