From f7e91539db881176ec604e8c522e76829ae63735 Mon Sep 17 00:00:00 2001
From: Tobias Pfeiffer
Date: Mon, 2 Apr 2018 18:15:14 +0200
Subject: [PATCH 1/9] First version of separating memory and runtime
 measurements

Introduces a new `Measurer` behaviour that can be adopted by `Time` and
`Memory` and in the future `Reduction`. Also switches over to memory
measurement having its own time configuration.

I like how this ended up in general. This isn't merge-ready yet; it's a
first implementation already up for review and CI.

TODO:

- [ ] README/documentation adjustments
- [ ] respect `memory_time` during calculation of estimated total runtime
- [ ] allow measurer to return `nil` when a measurement "failed" and
      handle it appropriately (memory measurement is less than 0 case)
---
 lib/benchee/benchmark/measurer.ex             |  10 ++
 .../measurer/memory.ex}                       |  12 +-
 lib/benchee/benchmark/measurer/time.ex        |  11 ++
 lib/benchee/benchmark/runner.ex               | 111 +++++++++---------
 lib/benchee/configuration.ex                  |  17 ++-
 lib/benchee/formatters/console.ex             |  16 +--
 lib/benchee/formatters/console/memory.ex      |  16 ++-
 lib/benchee/statistics.ex                     |   8 +-
 samples/macro_benchmark.exs                   |   2 +-
 samples/measure_memory.exs                    |   2 +-
 .../measurer/memory.ex}                       |  11 +-
 test/benchee/benchmark/runner_test.exs        |   8 +-
 .../formatters/console/memory_test.exs        |  36 ++++--
 test/benchee/formatters/console_test.exs      |  20 ++--
 test/benchee/statistics_test.exs              |  10 +-
 test/benchee_test.exs                         |   2 +-
 16 files changed, 165 insertions(+), 127 deletions(-)
 create mode 100644 lib/benchee/benchmark/measurer.ex
 rename lib/benchee/{memory_measure.ex => benchmark/measurer/memory.ex} (87%)
 create mode 100644 lib/benchee/benchmark/measurer/time.ex
 rename test/benchee/{memory_measure_test.exs => benchmark/measurer/memory.ex} (90%)

diff --git a/lib/benchee/benchmark/measurer.ex b/lib/benchee/benchmark/measurer.ex
new file mode 100644
index 00000000..1b66077b
--- /dev/null
+++ b/lib/benchee/benchmark/measurer.ex
@@ -0,0 +1,10 @@
+defmodule Benchee.Benchmark.Measurer do
+  @moduledoc """
+  A thing that measures something about a function execution - like time or
+  memory needed.
+
+  Callback is `measure` which takes an anonymous 0 arity function to measure
+  and returns the measurement and the return value of the function in a tuple.
+  """
+  @callback measure((() -> any)) :: {non_neg_integer, any}
+end
diff --git a/lib/benchee/memory_measure.ex b/lib/benchee/benchmark/measurer/memory.ex
similarity index 87%
rename from lib/benchee/memory_measure.ex
rename to lib/benchee/benchmark/measurer/memory.ex
index 308834a0..657a1314 100644
--- a/lib/benchee/memory_measure.ex
+++ b/lib/benchee/benchmark/measurer/memory.ex
@@ -1,13 +1,11 @@
-defmodule Benchee.MemoryMeasure do
+defmodule Benchee.Benchmark.Measurer.Memory do
   @moduledoc """
-  This exposes two functions, apply/1 and apply/3. Both execute a given function
-  and report on the memory used by monitoring the garbage collection process for
-  a single process.
+  Measure memory consumption of a function.
   """
-  import Kernel, except: [apply: 3, apply: 2]
 
-  @spec apply(fun) :: no_return() | tuple()
-  def apply(fun) do
+  @behaviour Benchee.Benchmark.Measurer
+
+  def measure(fun) do
     ref = make_ref()
     Process.flag(:trap_exit, true)
     start_runner(fun, ref)
diff --git a/lib/benchee/benchmark/measurer/time.ex b/lib/benchee/benchmark/measurer/time.ex
new file mode 100644
index 00000000..f05a4b2d
--- /dev/null
+++ b/lib/benchee/benchmark/measurer/time.ex
@@ -0,0 +1,11 @@
+defmodule Benchee.Benchmark.Measurer.Time do
+  @moduledoc """
+  Measure the time consumed by an executing function.
+  """
+
+  @behaviour Benchee.Benchmark.Measurer
+
+  def measure(function) do
+    :timer.tc(function)
+  end
+end
diff --git a/lib/benchee/benchmark/runner.ex b/lib/benchee/benchmark/runner.ex
index c1ec9af6..12e53da6 100644
--- a/lib/benchee/benchmark/runner.ex
+++ b/lib/benchee/benchmark/runner.ex
@@ -4,10 +4,11 @@ defmodule Benchee.Benchmark.Runner do
   run time and memory usage to each scenario.
   """
 
-  alias Benchee.{Benchmark, MemoryMeasure}
+  alias Benchee.Benchmark
   alias Benchee.Benchmark.{Scenario, ScenarioContext}
   alias Benchee.Utility.{RepeatN, Parallel}
   alias Benchee.Configuration
+  alias Benchee.Benchmark.Measurer
 
   @doc """
   Executes the benchmarks defined before by first running the defined functions
@@ -34,7 +35,7 @@ defmodule Benchee.Benchmark.Runner do
   defp pre_check(scenario, scenario_context = %ScenarioContext{config: %{pre_check: true}}) do
     scenario_input = run_before_scenario(scenario, scenario_context)
     scenario_context = %ScenarioContext{scenario_context | scenario_input: scenario_input}
-    _ = measure_iteration(scenario, scenario_context)
+    _ = measure_iteration(scenario, scenario_context, Measurer.Time)
     _ = run_after_scenario(scenario, scenario_context)
     nil
   end
@@ -58,7 +59,6 @@ defmodule Benchee.Benchmark.Runner do
   defp measure_scenario_parallel(config, scenario, scenario_context) do
     1..config.parallel
     |> Parallel.map(fn _ -> measure_scenario(scenario, scenario_context) end)
-    |> List.flatten()
   end
 
   defp add_measurements_to_scenario(measurements, scenario) do
@@ -72,9 +72,11 @@ defmodule Benchee.Benchmark.Runner do
     scenario_input = run_before_scenario(scenario, scenario_context)
     scenario_context = %ScenarioContext{scenario_context | scenario_input: scenario_input}
     _ = run_warmup(scenario, scenario_context)
-    measurements = run_benchmark(scenario, scenario_context)
+    runtimes = run_runtime_benchmark(scenario, scenario_context)
+    memory_usages = run_memory_benchmark(scenario, scenario_context)
     run_after_scenario(scenario, scenario_context)
-    measurements
+
+    {runtimes, memory_usages}
   end
 
   defp run_before_scenario(
@@ -103,7 +105,7 @@ defmodule Benchee.Benchmark.Runner do
     measure_runtimes(scenario, scenario_context, warmup, false)
   end
 
-  defp run_benchmark(
+  defp run_runtime_benchmark(
         scenario,
         scenario_context = %ScenarioContext{
           config: %Configuration{
@@ -115,6 +117,29 @@ defmodule Benchee.Benchmark.Runner do
     measure_runtimes(scenario, scenario_context, run_time, fast_warning)
   end
 
+  defp run_memory_benchmark(_, %ScenarioContext{config: %{memory_time: 0}}) do
+    []
+  end
+  defp run_memory_benchmark(
+         scenario,
+         scenario_context = %ScenarioContext{
+           config: %Configuration{
+             memory_time: memory_time
+           }
+         }
+       ) do
+
+    end_time = current_time() + memory_time
+
+    new_context = %ScenarioContext{
+      scenario_context
+      | current_time: current_time(),
+        end_time: end_time
+    }
+
+    do_benchmark(scenario, new_context, Measurer.Memory, [])
+  end
+
   defp run_after_scenario(
          %{
            after_scenario: local_after_scenario
@@ 
-135,7 +160,7 @@ defmodule Benchee.Benchmark.Runner do end_time = current_time() + run_time :erlang.garbage_collect() - {num_iterations, initial_run_time, initial_memory_usage} = + {num_iterations, initial_run_time} = determine_n_times(scenario, scenario_context, fast_warning) new_context = %ScenarioContext{ @@ -145,10 +170,7 @@ defmodule Benchee.Benchmark.Runner do num_iterations: num_iterations } - do_benchmark( - scenario, - new_context, - {[initial_run_time], updated_memory_usages(initial_memory_usage, [])}) + do_benchmark(scenario, new_context, Measurer.Time, [initial_run_time]) end defp current_time, do: :erlang.system_time(:micro_seconds) @@ -166,12 +188,10 @@ defmodule Benchee.Benchmark.Runner do }, fast_warning ) do - {run_time, memory_usage} = measure_iteration(scenario, scenario_context) + run_time = measure_iteration(scenario, scenario_context, Measurer.Time) if run_time >= @minimum_execution_time do - {adjusted_run_time, adjusted_memory_usage} = - adjust_for_iterations(run_time, memory_usage, num_iterations) - {num_iterations, adjusted_run_time, adjusted_memory_usage} + {num_iterations, adjust_for_iterations(run_time, num_iterations)} else if fast_warning, do: printer.fast_warning() @@ -195,22 +215,19 @@ defmodule Benchee.Benchmark.Runner do current_time: current_time, end_time: end_time }, - {run_times, memory_usages} + _measurer, + measurements ) when current_time > end_time do # restore correct order - important for graphing - {Enum.reverse(run_times), Enum.reverse(memory_usages)} + Enum.reverse(measurements) end - defp do_benchmark(scenario, scenario_context, {run_times, memory_usages}) do - {run_time, memory_usage} = iteration_measurements(scenario, scenario_context) + defp do_benchmark(scenario, scenario_context, measurer, measurements) do + measurement = iteration_measurements(scenario, scenario_context, measurer) updated_context = %ScenarioContext{scenario_context | current_time: current_time()} - do_benchmark( - scenario, - updated_context, - {[run_time | run_times], updated_memory_usages(memory_usage, memory_usages)} - ) + do_benchmark(scenario, updated_context, measurer, [measurement | measurements]) end # We return nil if no memory measurement is performed so keep it empty @@ -222,43 +239,39 @@ defmodule Benchee.Benchmark.Runner do scenario, scenario_context = %ScenarioContext{ num_iterations: num_iterations - } + }, + measurer ) do - {run_time, memory_usage} = measure_iteration(scenario, scenario_context) - - adjust_for_iterations(run_time, memory_usage, num_iterations) - end + measurement = measure_iteration(scenario, scenario_context, measurer) - defp adjust_for_iterations(run_time, nil, num_iterations) do - {run_time / num_iterations, nil} + adjust_for_iterations(measurement, num_iterations) end - defp adjust_for_iterations(run_time, memory_usage, num_iterations) do - {run_time / num_iterations, memory_usage / num_iterations} - end + defp adjust_for_iterations(measurement, 1), do: measurement + defp adjust_for_iterations(measurement, num_iterations), do: measurement / num_iterations defp measure_iteration( scenario = %Scenario{function: function}, scenario_context = %ScenarioContext{ num_iterations: 1, - config: %{measure_memory: measure_memory} - } + }, + measurer ) do new_input = run_before_each(scenario, scenario_context) function = main_function(function, new_input) - {microseconds, memory_usage, return_value} = measure_time_and_memory(function, measure_memory) + {measurement, return_value} = measurer.measure(function) run_after_each(return_value, scenario, 
scenario_context) - {microseconds, memory_usage} + measurement end defp measure_iteration( scenario, scenario_context = %ScenarioContext{ - num_iterations: iterations, - config: %{measure_memory: measure_memory} - } + num_iterations: iterations + }, + measurer ) when iterations > 1 do # When we have more than one iteration, then the repetition and calling @@ -266,21 +279,9 @@ defmodule Benchee.Benchmark.Runner do # `build_benchmarking_function/2` function = build_benchmarking_function(scenario, scenario_context) - {microseconds, memory_usage, _return_value} = - measure_time_and_memory(function, measure_memory) - - {microseconds, memory_usage} - end - - defp measure_time_and_memory(function, true) do - {microseconds, return_value} = :timer.tc(function) - {memory_usage, _} = MemoryMeasure.apply(function) - {microseconds, memory_usage, return_value} - end + {measurement, _return_value} = measurer.measure(function) - defp measure_time_and_memory(function, false) do - {microseconds, return_value} = :timer.tc(function) - {microseconds, nil, return_value} + measurement end @no_input Benchmark.no_input() diff --git a/lib/benchee/configuration.ex b/lib/benchee/configuration.ex index 706e5090..dbcb2f5c 100644 --- a/lib/benchee/configuration.ex +++ b/lib/benchee/configuration.ex @@ -13,9 +13,9 @@ defmodule Benchee.Configuration do } defstruct parallel: 1, - measure_memory: false, time: 5, warmup: 2, + memory_time: 0, pre_check: false, formatters: [Console], print: %{ @@ -44,9 +44,9 @@ defmodule Benchee.Configuration do @type t :: %__MODULE__{ parallel: integer, - measure_memory: boolean, time: number, warmup: number, + memory_time: number, pre_check: boolean, formatters: [(Suite.t() -> Suite.t())], print: map, @@ -63,7 +63,7 @@ defmodule Benchee.Configuration do } @type user_configuration :: map | keyword - @time_keys [:time, :warmup] + @time_keys [:time, :warmup, :memory_time] @doc """ Returns the initial benchmark configuration for Benchee, composed of defaults @@ -327,10 +327,15 @@ defmodule Benchee.Configuration do end) end - defp update_measure_memory(config = %{measure_memory: measure_memory}) do + defp update_measure_memory(config = %{memory_time: memory_time}) do otp_version = List.to_integer(:erlang.system_info(:otp_release)) - if measure_memory and otp_version <= 18, do: print_memory_measure_warning() - Map.put(config, :measure_memory, measure_memory and otp_version > 18) + + if (memory_time > 0) and otp_version <= 18 do + print_memory_measure_warning() + Map.put(config, :memory_time, 0) + else + config + end end defp print_memory_measure_warning do diff --git a/lib/benchee/formatters/console.ex b/lib/benchee/formatters/console.ex index 1b34be9b..8bddb27d 100644 --- a/lib/benchee/formatters/console.ex +++ b/lib/benchee/formatters/console.ex @@ -40,7 +40,6 @@ defmodule Benchee.Formatters.Console do ...> console: %{comparison: false, extended_statistics: false} ...> }, ...> unit_scaling: :best, - ...> measure_memory: false ...> } ...> } iex> Benchee.Formatters.Console.format(suite) @@ -53,8 +52,6 @@ defmodule Benchee.Formatters.Console do """ @spec format(Suite.t()) :: [any] def format(%Suite{scenarios: scenarios, configuration: config}) do - %{measure_memory: measure_memory} = config - config = console_configuration(config) scenarios @@ -62,7 +59,7 @@ defmodule Benchee.Formatters.Console do |> Enum.map(fn {input, scenarios} -> scenarios |> Statistics.sort() - |> generate_output(config, input, measure_memory) + |> generate_output(config, input) end) end @@ -92,9 +89,7 @@ defmodule 
Benchee.Formatters.Console do ) end - defp generate_output(scenarios, config, input, measure_memory) - - defp generate_output(scenarios, config, input, true) do + defp generate_output(scenarios, config, input) do [ input_header(input) | RunTime.format_scenarios(scenarios, config) ++ @@ -102,13 +97,6 @@ defmodule Benchee.Formatters.Console do ] end - defp generate_output(scenarios, config, input, false) do - [ - input_header(input) | - RunTime.format_scenarios(scenarios, config) - ] - end - @no_input_marker Benchee.Benchmark.no_input() defp input_header(input) when input == @no_input_marker, do: "" defp input_header(input), do: "\n##### With input #{input} #####" diff --git a/lib/benchee/formatters/console/memory.ex b/lib/benchee/formatters/console/memory.ex index 33a44892..d9a1c259 100644 --- a/lib/benchee/formatters/console/memory.ex +++ b/lib/benchee/formatters/console/memory.ex @@ -35,7 +35,21 @@ defmodule Benchee.Formatters.Console.Memory do """ @spec format_scenarios([Scenario.t()], map) :: [String.t(), ...] def format_scenarios(scenarios, config) do - %{unit_scaling: scaling_strategy} = config + if memory_measurements_present?(scenarios) do + render(scenarios, config) + else + [] + end + end + + defp memory_measurements_present?(scenarios) do + Enum.any?(scenarios, fn scenario -> + scenario.memory_usage_statistics.sample_size > 0 + end) + end + + defp render(scenarios, config) do + scaling_strategy = config.unit_scaling units = Conversion.units(scenarios, scaling_strategy) label_width = Helpers.label_width(scenarios) hide_statistics = all_have_deviation_of_0?(scenarios) diff --git a/lib/benchee/statistics.ex b/lib/benchee/statistics.ex index 862e2110..c4a2d2f1 100644 --- a/lib/benchee/statistics.ex +++ b/lib/benchee/statistics.ex @@ -18,7 +18,7 @@ defmodule Benchee.Statistics do :mode, :minimum, :maximum, - :sample_size + sample_size: 0 ] @type mode :: [number] | number | nil @@ -107,10 +107,9 @@ defmodule Benchee.Statistics do ...> input: "Input" ...> } ...> ] - iex> suite = %Benchee.Suite{scenarios: scenarios, configuration: %{measure_memory: true}} + iex> suite = %Benchee.Suite{scenarios: scenarios} iex> Benchee.Statistics.statistics(suite) %Benchee.Suite{ - configuration: %{measure_memory: true}, scenarios: [ %Benchee.Benchmark.Scenario{ job_name: "My Job", @@ -270,11 +269,10 @@ defmodule Benchee.Statistics do ...> input: "Input" ...> } ...> ] - iex> %Benchee.Suite{scenarios: scenarios, configuration: %{measure_memory: true}} + iex> %Benchee.Suite{scenarios: scenarios} ...> |> Benchee.Statistics.statistics ...> |> Benchee.Statistics.add_percentiles([25, 75]) %Benchee.Suite{ - configuration: %{measure_memory: true}, scenarios: [ %Benchee.Benchmark.Scenario{ job_name: "My Job", diff --git a/samples/macro_benchmark.exs b/samples/macro_benchmark.exs index 773e26c4..b7869d62 100644 --- a/samples/macro_benchmark.exs +++ b/samples/macro_benchmark.exs @@ -7,7 +7,7 @@ end Benchee.run(%{ "35 fibonacci numbers" => fn -> Fib.fib(35) end, "43 fibonacci numbers" => fn -> Fib.fib(43) end -}, time: 10, warmup: 0, measure_memory: true) +}, time: 10, warmup: 0, memory_time: 10) # Operating System: macOS # CPU Information: Intel(R) Core(TM) i5-4260U CPU @ 1.40GHz diff --git a/samples/measure_memory.exs b/samples/measure_memory.exs index 5ff43b04..1304f861 100644 --- a/samples/measure_memory.exs +++ b/samples/measure_memory.exs @@ -7,7 +7,7 @@ Benchee.run(%{ inputs: %{ "Small" => Enum.to_list(1..1000), "Bigger" => Enum.to_list(1..100_000) -}, measure_memory: true) +}, memory_time: 2) #Operating 
System: macOS #CPU Information: Intel(R) Core(TM) i5-4260U CPU @ 1.40GHz diff --git a/test/benchee/memory_measure_test.exs b/test/benchee/benchmark/measurer/memory.ex similarity index 90% rename from test/benchee/memory_measure_test.exs rename to test/benchee/benchmark/measurer/memory.ex index 0ad72635..5876ec7b 100644 --- a/test/benchee/memory_measure_test.exs +++ b/test/benchee/benchmark/measurer/memory.ex @@ -3,14 +3,15 @@ defmodule Benchee.MemoryMeasureTest do # there aren't any leaked processes if functions fail while we're tracing # them. use ExUnit.Case - @moduletag :memory_measure - alias Benchee.MemoryMeasure + alias Benchee.Benchmark.Measurer.Memory import ExUnit.CaptureIO - describe "apply/1" do + @moduletag :memory_measure + + describe "measure/1" do test "returns the result of the function and the memory used (in bytes)" do fun_to_run = fn -> Enum.to_list(1..10) end - assert {memory_used, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]} = MemoryMeasure.apply(fun_to_run) + assert {memory_used, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]} = Memory.measure(fun_to_run) # We need to have some wiggle room here because memory used varies from # system to system. It's consistent in an environment, but changes # between environments. @@ -27,7 +28,7 @@ defmodule Benchee.MemoryMeasureTest do # a separate process, so we need to wait for that to emit so we can # capture it. capture_io(fn -> - MemoryMeasure.apply(fn -> exit(:kill) end) + Memory.measure(fn -> exit(:kill) end) Process.sleep(1) end) diff --git a/test/benchee/benchmark/runner_test.exs b/test/benchee/benchmark/runner_test.exs index aa9344fc..2e2cf00f 100644 --- a/test/benchee/benchmark/runner_test.exs +++ b/test/benchee/benchmark/runner_test.exs @@ -100,7 +100,7 @@ defmodule Benchee.Benchmark.RunnerTest do @tag :memory_measure test "measures the memory usage of a scenario" do suite = - test_suite(%Suite{configuration: %{time: 60_000, warmup: 10_000, measure_memory: true}}) + test_suite(%Suite{configuration: %{time: 60_000, warmup: 10_000, memory_time: 10_000}}) new_suite = suite @@ -117,7 +117,7 @@ defmodule Benchee.Benchmark.RunnerTest do @tag :memory_measure test "records memory when the function only runs once" do suite = - test_suite(%Suite{configuration: %{time: 0.001, warmup: 0, measure_memory: true}}) + test_suite(%Suite{configuration: %{time: 0.001, warmup: 0, memory_time: 0.001}}) new_suite = suite @@ -134,7 +134,7 @@ defmodule Benchee.Benchmark.RunnerTest do @tag :memory_measure test "correctly scales down memory usage of very fast functions" do suite = - test_suite(%Suite{configuration: %{time: 1_000, warmup: 1, measure_memory: true}}) + test_suite(%Suite{configuration: %{time: 1_000, warmup: 1, memory_time: 1_000}}) new_suite = suite @@ -146,7 +146,7 @@ defmodule Benchee.Benchmark.RunnerTest do assert [memory_consumption] = Enum.uniq(memory_usages) assert memory_consumption >= 1 # depending on the number iterations determined, there can be spikes/changes - assert memory_consumption <= 100 + assert memory_consumption <= 1_000 end test "very fast functions print a warning" do diff --git a/test/benchee/formatters/console/memory_test.exs b/test/benchee/formatters/console/memory_test.exs index 1edd7ec7..4c6a7d5a 100644 --- a/test/benchee/formatters/console/memory_test.exs +++ b/test/benchee/formatters/console/memory_test.exs @@ -20,7 +20,8 @@ defmodule Benchee.Formatters.Console.MemoryTest do ips: 5_000.0, std_dev_ratio: 0.1, median: 195.5, - percentiles: %{99 => 300.1} + percentiles: %{99 => 300.1}, + sample_size: 10 }, run_time_statistics: 
%Statistics{average: 100.0, ips: 1_000.0} }, @@ -31,7 +32,8 @@ defmodule Benchee.Formatters.Console.MemoryTest do ips: 2_500.0, std_dev_ratio: 0.1, median: 375.0, - percentiles: %{99 => 400.1} + percentiles: %{99 => 400.1}, + sample_size: 10 }, run_time_statistics: %Statistics{average: 100.0, ips: 1_000.0} } @@ -56,7 +58,8 @@ defmodule Benchee.Formatters.Console.MemoryTest do ips: 2_500.0, std_dev_ratio: 0.1, median: 375.0, - percentiles: %{99 => 500.1} + percentiles: %{99 => 500.1}, + sample_size: 10 }, run_time_statistics: %Statistics{average: 100.0, ips: 1_000.0} } @@ -82,7 +85,8 @@ defmodule Benchee.Formatters.Console.MemoryTest do ips: 10_000.0, std_dev_ratio: 0.1, median: 90.0, - percentiles: %{99 => 500.1} + percentiles: %{99 => 500.1}, + sample_size: 10 }, run_time_statistics: %Statistics{average: 100.0, ips: 1_000.0} }, @@ -93,7 +97,8 @@ defmodule Benchee.Formatters.Console.MemoryTest do ips: 5_000.0, std_dev_ratio: 0.1, median: 195.5, - percentiles: %{99 => 500.1} + percentiles: %{99 => 500.1}, + sample_size: 10 }, run_time_statistics: %Statistics{average: 100.0, ips: 1_000.0} } @@ -116,7 +121,8 @@ defmodule Benchee.Formatters.Console.MemoryTest do ips: 5_000.0, std_dev_ratio: 0.1, median: 195.5, - percentiles: %{99 => 300.1} + percentiles: %{99 => 300.1}, + sample_size: 10 }, run_time_statistics: %Statistics{average: 100.0, ips: 1_000.0} }, @@ -127,7 +133,8 @@ defmodule Benchee.Formatters.Console.MemoryTest do ips: 10_000.0, std_dev_ratio: 0.1, median: 90.0, - percentiles: %{99 => 200.1} + percentiles: %{99 => 200.1}, + sample_size: 10 }, run_time_statistics: %Statistics{average: 100.0, ips: 1_000.0} } @@ -158,7 +165,8 @@ defmodule Benchee.Formatters.Console.MemoryTest do ips: 10_000.0, std_dev_ratio: 0.1, median: 90.0, - percentiles: %{99 => 200.1} + percentiles: %{99 => 200.1}, + sample_size: 10 }, run_time_statistics: %Statistics{average: 100.0, ips: 1_000.0} }, @@ -169,7 +177,8 @@ defmodule Benchee.Formatters.Console.MemoryTest do ips: 5_000.0, std_dev_ratio: 0.1, median: 195.5, - percentiles: %{99 => 300.1} + percentiles: %{99 => 300.1}, + sample_size: 10 }, run_time_statistics: %Statistics{average: 100.0, ips: 1_000.0} } @@ -193,7 +202,8 @@ defmodule Benchee.Formatters.Console.MemoryTest do ips: 10_000.0, std_dev_ratio: 0.1, median: 90.0, - percentiles: %{99 => 200.1} + percentiles: %{99 => 200.1}, + sample_size: 10 }, run_time_statistics: %Statistics{average: 100.0, ips: 1_000.0} } @@ -212,7 +222,8 @@ defmodule Benchee.Formatters.Console.MemoryTest do std_dev: 0.0, std_dev_ratio: 0.0, median: 100.0, - percentiles: %{99 => 100.0} + percentiles: %{99 => 100.0}, + sample_size: 10 }, run_time_statistics: %Statistics{average: 100.0, ips: 1_000.0} }, @@ -223,7 +234,8 @@ defmodule Benchee.Formatters.Console.MemoryTest do std_dev: 0.0, std_dev_ratio: 0.0, median: 200.0, - percentiles: %{99 => 200.0} + percentiles: %{99 => 200.0}, + sample_size: 10 }, run_time_statistics: %Statistics{average: 100.0, ips: 1_000.0} } diff --git a/test/benchee/formatters/console_test.exs b/test/benchee/formatters/console_test.exs index fb68d716..f2bf5a58 100644 --- a/test/benchee/formatters/console_test.exs +++ b/test/benchee/formatters/console_test.exs @@ -29,7 +29,7 @@ defmodule Benchee.Formatters.ConsoleTest do median: 195.5, percentiles: %{99 => 400.1} }, - memory_usage_statistics: %Statistics{average: 200.0} + memory_usage_statistics: %Statistics{} }, %Scenario{ name: "First", @@ -42,7 +42,7 @@ defmodule Benchee.Formatters.ConsoleTest do median: 90.0, percentiles: %{99 => 300.1} }, - 
memory_usage_statistics: %Statistics{average: 200.0} + memory_usage_statistics: %Statistics{} } ] @@ -77,7 +77,7 @@ defmodule Benchee.Formatters.ConsoleTest do median: 195.5, percentiles: %{99 => 400.1} }, - memory_usage_statistics: %Statistics{average: 200.0} + memory_usage_statistics: %Statistics{} }, %Scenario{ name: "Job", @@ -90,7 +90,7 @@ defmodule Benchee.Formatters.ConsoleTest do median: 395.0, percentiles: %{99 => 500.1} }, - memory_usage_statistics: %Statistics{average: 200.0} + memory_usage_statistics: %Statistics{} } ] @@ -120,7 +120,7 @@ defmodule Benchee.Formatters.ConsoleTest do median: 195.5, percentiles: %{99 => 300.1} }, - memory_usage_statistics: %Statistics{average: 200.0} + memory_usage_statistics: %Statistics{} }, %Scenario{ name: "Other Job", @@ -133,7 +133,7 @@ defmodule Benchee.Formatters.ConsoleTest do median: 98.0, percentiles: %{99 => 200.1} }, - memory_usage_statistics: %Statistics{average: 200.0} + memory_usage_statistics: %Statistics{} }, %Scenario{ name: "Job", @@ -146,7 +146,7 @@ defmodule Benchee.Formatters.ConsoleTest do median: 395.0, percentiles: %{99 => 500.1} }, - memory_usage_statistics: %Statistics{average: 200.0} + memory_usage_statistics: %Statistics{} }, %Scenario{ name: "Other Job", @@ -159,7 +159,7 @@ defmodule Benchee.Formatters.ConsoleTest do median: 225.5, percentiles: %{99 => 300.1} }, - memory_usage_statistics: %Statistics{average: 200.0} + memory_usage_statistics: %Statistics{} } ] @@ -193,7 +193,7 @@ defmodule Benchee.Formatters.ConsoleTest do median: 195.5, percentiles: %{99 => 300.1} }, - memory_usage_statistics: %Statistics{average: 200.0} + memory_usage_statistics: %Statistics{} }, %Scenario{ name: "job (improved)", @@ -206,7 +206,7 @@ defmodule Benchee.Formatters.ConsoleTest do median: 90.0, percentiles: %{99 => 200.1} }, - memory_usage_statistics: %Statistics{average: 200.0} + memory_usage_statistics: %Statistics{} } ] diff --git a/test/benchee/statistics_test.exs b/test/benchee/statistics_test.exs index 0371350d..6041c4d3 100644 --- a/test/benchee/statistics_test.exs +++ b/test/benchee/statistics_test.exs @@ -12,7 +12,7 @@ defmodule Benchee.StatistcsTest do %Scenario{input: "Input", input_name: "Input", job_name: "Job 2", run_times: @sample_2, memory_usages: @sample_2} ] - suite = %Suite{scenarios: scenarios, configuration: %{measure_memory: true}} + suite = %Suite{scenarios: scenarios} new_suite = Statistics.statistics(suite) stats_1 = stats_for(new_suite, "Job 1", "Input") @@ -28,7 +28,7 @@ defmodule Benchee.StatistcsTest do %Scenario{input: "Input 2", input_name: "Input 2", job_name: "Job", run_times: @sample_2, memory_usages: @sample_2} ] - suite = %Suite{scenarios: scenarios, configuration: %{measure_memory: true}} + suite = %Suite{scenarios: scenarios} new_suite = Statistics.statistics(suite) stats_1 = stats_for(new_suite, "Job", "Input 1") @@ -41,7 +41,7 @@ defmodule Benchee.StatistcsTest do @mode_sample [55, 40, 67, 55, 44, 40, 10, 8, 55, 90, 67] test "mode is calculated correctly" do scenarios = [%Scenario{run_times: @mode_sample, memory_usages: @mode_sample}] - suite = Statistics.statistics(%Suite{scenarios: scenarios, configuration: %{measure_memory: false}}) + suite = Statistics.statistics(%Suite{scenarios: scenarios}) [%Scenario{run_time_statistics: stats}] = suite.scenarios assert stats.mode == 55 @@ -50,7 +50,7 @@ defmodule Benchee.StatistcsTest do @standard_deviation_sample [600, 470, 170, 430, 300] test "statistical standard deviation is calculated correctly" do scenarios = [%Scenario{run_times: 
@standard_deviation_sample, memory_usages: @standard_deviation_sample}] - suite = Statistics.statistics(%Suite{scenarios: scenarios, configuration: %{measure_memory: false}}) + suite = Statistics.statistics(%Suite{scenarios: scenarios}) [%Scenario{run_time_statistics: stats}] = suite.scenarios assert_in_delta stats.std_dev, 164.7, 0.1 @@ -60,7 +60,7 @@ defmodule Benchee.StatistcsTest do test "preserves all other keys in the map handed to it" do suite = %Suite{ scenarios: [], - configuration: %{formatters: [], measure_memory: false} + configuration: %{formatters: []} } assert %Suite{configuration: %{formatters: []}} = Statistics.statistics(suite) diff --git a/test/benchee_test.exs b/test/benchee_test.exs index 78e1622f..7b567293 100644 --- a/test/benchee_test.exs +++ b/test/benchee_test.exs @@ -484,7 +484,7 @@ defmodule BencheeTest do test "measures memory usage when instructed to do so" do output = capture_io fn -> Benchee.run(%{"To List" => fn -> Enum.to_list(1..100) end}, - time: 0.01, warmup: 0.005, measure_memory: true) + time: 0.01, warmup: 0.005, memory_time: 0.001) end assert Regex.match?(~r/Memory usage statistics:/, output) From b95b1981a97a3866e15a3da49be459fac89cffbf Mon Sep 17 00:00:00 2001 From: Tobias Pfeiffer Date: Mon, 2 Apr 2018 18:28:03 +0200 Subject: [PATCH 2/9] temporarily comment out function until I need it again (see TODO) --- lib/benchee/benchmark/runner.ex | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/lib/benchee/benchmark/runner.ex b/lib/benchee/benchmark/runner.ex index 12e53da6..c7586055 100644 --- a/lib/benchee/benchmark/runner.ex +++ b/lib/benchee/benchmark/runner.ex @@ -230,10 +230,11 @@ defmodule Benchee.Benchmark.Runner do do_benchmark(scenario, updated_context, measurer, [measurement | measurements]) end + # Hush now until I need you again # We return nil if no memory measurement is performed so keep it empty - defp updated_memory_usages(nil, memory_usages), do: memory_usages - defp updated_memory_usages(memory_usage, memory_usages) when memory_usage < 0, do: memory_usages - defp updated_memory_usages(memory_usage, memory_usages), do: [memory_usage | memory_usages] + # defp updated_memory_usages(nil, memory_usages), do: memory_usages + # defp updated_memory_usages(memory_usage, memory_usages) when memory_usage < 0, do: memory_usages + # defp updated_memory_usages(memory_usage, memory_usages), do: [memory_usage | memory_usages] defp iteration_measurements( scenario, From eec8a42106cdebad48354bca8060719e3865f347 Mon Sep 17 00:00:00 2001 From: Tobias Pfeiffer Date: Mon, 2 Apr 2018 19:20:22 +0200 Subject: [PATCH 3/9] Drop bad memory measurements and fix test --- lib/benchee/benchmark/measurer.ex | 11 ++++++++--- lib/benchee/benchmark/measurer/memory.ex | 7 ++++++- lib/benchee/benchmark/runner.ex | 15 +++++++++------ test/benchee/benchmark/runner_test.exs | 2 +- 4 files changed, 24 insertions(+), 11 deletions(-) diff --git a/lib/benchee/benchmark/measurer.ex b/lib/benchee/benchmark/measurer.ex index 1b66077b..ae3ab929 100644 --- a/lib/benchee/benchmark/measurer.ex +++ b/lib/benchee/benchmark/measurer.ex @@ -2,9 +2,14 @@ defmodule Benchee.Benchmark.Measurer do @moduledoc """ A thing that measures something about a function execution - like time or memory needed. + """ + + @doc """ + Takes an anonymous 0 arity function to measure and returns the measurement + and the return value of the function in a tuple. 
- Callback is `measure` which takes an anonymous 0 arity function to measure - and returns the measurement and the return value of the function in a tuple. + The returned measurement may be `nil` if the measurement failed for some + reason - it will then be ignored and not counted. """ - @callback measure((() -> any)) :: {non_neg_integer, any} + @callback measure((() -> any)) :: {non_neg_integer | nil, any} end diff --git a/lib/benchee/benchmark/measurer/memory.ex b/lib/benchee/benchmark/measurer/memory.ex index 657a1314..7854aa05 100644 --- a/lib/benchee/benchmark/measurer/memory.ex +++ b/lib/benchee/benchmark/measurer/memory.ex @@ -1,6 +1,8 @@ defmodule Benchee.Benchmark.Measurer.Memory do @moduledoc """ Measure memory consumption of a function. + + Returns `{nil, return_value}` in case the memory measurement went bad. """ @behaviour Benchee.Benchmark.Measurer @@ -11,7 +13,7 @@ defmodule Benchee.Benchmark.Measurer.Memory do start_runner(fun, ref) receive do - {^ref, memory_usage_info} -> memory_usage_info + {^ref, memory_usage_info} -> return_memory(memory_usage_info) :shutdown -> nil end end @@ -33,6 +35,9 @@ defmodule Benchee.Benchmark.Measurer.Memory do end) end + defp return_memory({memory_usage, result}) when memory_usage < 0, do: {nil, result} + defp return_memory({memory_usage, result}), do: {memory_usage, result} + defp measure_memory(fun, tracer) do word_size = :erlang.system_info(:wordsize) {:garbage_collection_info, info_before} = Process.info(self(), :garbage_collection_info) diff --git a/lib/benchee/benchmark/runner.ex b/lib/benchee/benchmark/runner.ex index c7586055..c36d0f0f 100644 --- a/lib/benchee/benchmark/runner.ex +++ b/lib/benchee/benchmark/runner.ex @@ -227,14 +227,17 @@ defmodule Benchee.Benchmark.Runner do measurement = iteration_measurements(scenario, scenario_context, measurer) updated_context = %ScenarioContext{scenario_context | current_time: current_time()} - do_benchmark(scenario, updated_context, measurer, [measurement | measurements]) + do_benchmark( + scenario, + updated_context, + measurer, + updated_measurements(measurement, measurements) + ) end - # Hush now until I need you again - # We return nil if no memory measurement is performed so keep it empty - # defp updated_memory_usages(nil, memory_usages), do: memory_usages - # defp updated_memory_usages(memory_usage, memory_usages) when memory_usage < 0, do: memory_usages - # defp updated_memory_usages(memory_usage, memory_usages), do: [memory_usage | memory_usages] + # We return `nil` if memory measurement failed so keep it empty + defp updated_measurements(nil, measurements), do: measurements + defp updated_measurements(measurement, measurements), do: [measurement | measurements] defp iteration_measurements( scenario, diff --git a/test/benchee/benchmark/runner_test.exs b/test/benchee/benchmark/runner_test.exs index 2e2cf00f..26fdb6ea 100644 --- a/test/benchee/benchmark/runner_test.exs +++ b/test/benchee/benchmark/runner_test.exs @@ -117,7 +117,7 @@ defmodule Benchee.Benchmark.RunnerTest do @tag :memory_measure test "records memory when the function only runs once" do suite = - test_suite(%Suite{configuration: %{time: 0.001, warmup: 0, memory_time: 0.001}}) + test_suite(%Suite{configuration: %{time: 0, warmup: 0, memory_time: 1_000}}) new_suite = suite From fc2974d59a0ee4154a24d187c92286b87c6d8f07 Mon Sep 17 00:00:00 2001 From: Tobias Pfeiffer Date: Wed, 4 Apr 2018 20:08:31 +0200 Subject: [PATCH 4/9] Feedback from Eric! 
:)
---
 lib/benchee/benchmark/measurer.ex         |  4 ++--
 lib/benchee/benchmark/measurer/memory.ex  |  4 ++--
 lib/benchee/benchmark/measurer/time.ex    |  4 ++--
 lib/benchee/benchmark/runner.ex           | 11 +++++------
 test/benchee/benchmark/measurer/memory.ex |  4 ++--
 5 files changed, 13 insertions(+), 14 deletions(-)

diff --git a/lib/benchee/benchmark/measurer.ex b/lib/benchee/benchmark/measurer.ex
index ae3ab929..464514f4 100644
--- a/lib/benchee/benchmark/measurer.ex
+++ b/lib/benchee/benchmark/measurer.ex
@@ -1,4 +1,4 @@
-defmodule Benchee.Benchmark.Measurer do
+defmodule Benchee.Benchmark.Measure do
   @moduledoc """
   A thing that measures something about a function execution - like time or
   memory needed.
@@ -8,7 +8,7 @@ defmodule Benchee.Benchmark.Measurer do
   Takes an anonymous 0 arity function to measure and returns the measurement
   and the return value of the function in a tuple.
 
-  The returned measurement may be `nil` if the measurement failed for some 
+  The returned measurement may be `nil` if the measurement failed for some
   reason - it will then be ignored and not counted.
   """
   @callback measure((() -> any)) :: {non_neg_integer | nil, any}
 end
diff --git a/lib/benchee/benchmark/measurer/memory.ex b/lib/benchee/benchmark/measurer/memory.ex
index 7854aa05..3d6beb31 100644
--- a/lib/benchee/benchmark/measurer/memory.ex
+++ b/lib/benchee/benchmark/measurer/memory.ex
@@ -1,11 +1,11 @@
-defmodule Benchee.Benchmark.Measurer.Memory do
+defmodule Benchee.Benchmark.Measure.Memory do
   @moduledoc """
   Measure memory consumption of a function.
 
   Returns `{nil, return_value}` in case the memory measurement went bad.
   """
 
-  @behaviour Benchee.Benchmark.Measurer
+  @behaviour Benchee.Benchmark.Measure
 
   def measure(fun) do
     ref = make_ref()
diff --git a/lib/benchee/benchmark/measurer/time.ex b/lib/benchee/benchmark/measurer/time.ex
index f05a4b2d..2623caf8 100644
--- a/lib/benchee/benchmark/measurer/time.ex
+++ b/lib/benchee/benchmark/measurer/time.ex
@@ -1,9 +1,9 @@
-defmodule Benchee.Benchmark.Measurer.Time do
+defmodule Benchee.Benchmark.Measure.Time do
   @moduledoc """
   Measure the time consumed by an executing function.
""" - @behaviour Benchee.Benchmark.Measurer + @behaviour Benchee.Benchmark.Measure def measure(function) do :timer.tc(function) diff --git a/lib/benchee/benchmark/runner.ex b/lib/benchee/benchmark/runner.ex index c36d0f0f..3b1c0ef8 100644 --- a/lib/benchee/benchmark/runner.ex +++ b/lib/benchee/benchmark/runner.ex @@ -5,10 +5,9 @@ defmodule Benchee.Benchmark.Runner do """ alias Benchee.Benchmark - alias Benchee.Benchmark.{Scenario, ScenarioContext} + alias Benchee.Benchmark.{Scenario, ScenarioContext, Measure} alias Benchee.Utility.{RepeatN, Parallel} alias Benchee.Configuration - alias Benchee.Benchmark.Measurer @doc """ Executes the benchmarks defined before by first running the defined functions @@ -35,7 +34,7 @@ defmodule Benchee.Benchmark.Runner do defp pre_check(scenario, scenario_context = %ScenarioContext{config: %{pre_check: true}}) do scenario_input = run_before_scenario(scenario, scenario_context) scenario_context = %ScenarioContext{scenario_context | scenario_input: scenario_input} - _ = measure_iteration(scenario, scenario_context, Measurer.Time) + _ = measure_iteration(scenario, scenario_context, Measure.Time) _ = run_after_scenario(scenario, scenario_context) nil end @@ -137,7 +136,7 @@ defmodule Benchee.Benchmark.Runner do end_time: end_time } - do_benchmark(scenario, new_context, Measurer.Memory, []) + do_benchmark(scenario, new_context, Measure.Memory, []) end defp run_after_scenario( @@ -170,7 +169,7 @@ defmodule Benchee.Benchmark.Runner do num_iterations: num_iterations } - do_benchmark(scenario, new_context, Measurer.Time, [initial_run_time]) + do_benchmark(scenario, new_context, Measure.Time, [initial_run_time]) end defp current_time, do: :erlang.system_time(:micro_seconds) @@ -188,7 +187,7 @@ defmodule Benchee.Benchmark.Runner do }, fast_warning ) do - run_time = measure_iteration(scenario, scenario_context, Measurer.Time) + run_time = measure_iteration(scenario, scenario_context, Measure.Time) if run_time >= @minimum_execution_time do {num_iterations, adjust_for_iterations(run_time, num_iterations)} diff --git a/test/benchee/benchmark/measurer/memory.ex b/test/benchee/benchmark/measurer/memory.ex index 5876ec7b..3a0965cc 100644 --- a/test/benchee/benchmark/measurer/memory.ex +++ b/test/benchee/benchmark/measurer/memory.ex @@ -3,11 +3,11 @@ defmodule Benchee.MemoryMeasureTest do # there aren't any leaked processes if functions fail while we're tracing # them. use ExUnit.Case - alias Benchee.Benchmark.Measurer.Memory + alias Benchee.Benchmark.Measure.Memory import ExUnit.CaptureIO @moduletag :memory_measure - + describe "measure/1" do test "returns the result of the function and the memory used (in bytes)" do fun_to_run = fn -> Enum.to_list(1..10) end From c3b82c7ce2fd4d5221d988037704782a957907cd Mon Sep 17 00:00:00 2001 From: Tobias Pfeiffer Date: Sat, 7 Apr 2018 11:59:26 +0200 Subject: [PATCH 5/9] take memory time into account during calculation of estimated times etc. 
---
 lib/benchee/output/benchmark_printer.ex       | 29 +++++++++------
 .../benchee/output/benchmark_printer_test.exs | 35 +++++++++++++------
 2 files changed, 42 insertions(+), 22 deletions(-)

diff --git a/lib/benchee/output/benchmark_printer.ex b/lib/benchee/output/benchmark_printer.ex
index d136f384..b39b23ea 100644
--- a/lib/benchee/output/benchmark_printer.ex
+++ b/lib/benchee/output/benchmark_printer.ex
@@ -30,26 +30,33 @@ defmodule Benchee.Output.BenchmarkPrinter do
                            num_cores: num_cores,
                            cpu_speed: cpu_speed,
                            available_memory: available_memory}) do
-    IO.puts "Operating System: #{os}"
-    IO.puts "CPU Information: #{cpu_speed}"
-    IO.puts "Number of Available Cores: #{num_cores}"
-    IO.puts "Available memory: #{available_memory}"
-    IO.puts "Elixir #{elixir_version}"
-    IO.puts "Erlang #{erlang_version}"
+    IO.puts """
+    Operating System: #{os}
+    CPU Information: #{cpu_speed}
+    Number of Available Cores: #{num_cores}
+    Available memory: #{available_memory}
+    Elixir #{elixir_version}
+    Erlang #{erlang_version}
+    """
   end
 
-  defp suite_information(scenarios, %{parallel: parallel,
-                                      time: time,
-                                      warmup: warmup,
-                                      inputs: inputs}) do
+  defp suite_information(scenarios,
+                         %{
+                           parallel: parallel,
+                           time: time,
+                           warmup: warmup,
+                           inputs: inputs,
+                           memory_time: memory_time
+                         }) do
     scenario_count = length(scenarios)
-    exec_time = warmup + time
+    exec_time = warmup + time + memory_time
     total_time = scenario_count * exec_time
 
     IO.puts """
     Benchmark suite executing with the following configuration:
     warmup: #{Duration.format(warmup)}
     time: #{Duration.format(time)}
+    memory time: #{Duration.format(memory_time)}
     parallel: #{parallel}
     inputs: #{inputs_out(inputs)}
     Estimated total run time: #{Duration.format(total_time)}
diff --git a/test/benchee/output/benchmark_printer_test.exs b/test/benchee/output/benchmark_printer_test.exs
index 23bf63b2..1730636a 100644
--- a/test/benchee/output/benchmark_printer_test.exs
+++ b/test/benchee/output/benchmark_printer_test.exs
@@ -4,13 +4,16 @@ defmodule Benchee.Output.BenchmarkPrintertest do
   import Benchee.Output.BenchmarkPrinter
   alias Benchee.Benchmark.Scenario
   alias Benchee.Benchmark
+  alias Benchee.Configuration
 
-  @system_info %{elixir: "1.4",
-                 erlang: "19.2",
-                 os: :macOS,
-                 num_cores: 4,
-                 cpu_speed: "Intel(R) Core(TM) i5-4260U CPU @ 1.40GHz",
-                 available_memory: 8568392814}
+  @system_info %{
+    elixir: "1.4",
+    erlang: "19.2",
+    os: :macOS,
+    num_cores: 4,
+    cpu_speed: "Intel(R) Core(TM) i5-4260U CPU @ 1.40GHz",
+    available_memory: 8568392814
+  }
 
   test ".duplicate_benchmark_warning" do
     output = capture_io fn ->
@@ -25,7 +28,7 @@ defmodule Benchee.Output.BenchmarkPrintertest do
   test "sys information" do
     output = capture_io fn ->
       %{
-        configuration: %{parallel: 2, time: 10_000, warmup: 0, inputs: nil},
+        configuration: %Configuration{parallel: 2, time: 10_000, warmup: 0, inputs: nil},
         scenarios: [%Scenario{job_name: "one"}, %Scenario{job_name: "two"}],
         system: @system_info
       }
@@ -41,6 +44,7 @@ defmodule Benchee.Output.BenchmarkPrintertest do
     assert output =~ ~r/following configuration/i
     assert output =~ "warmup: 0 μs"
     assert output =~ "time: 10 ms"
+    assert output =~ "memory time: 0 μs"
     assert output =~ "parallel: 2"
     assert output =~ "Estimated total run time: 20 ms"
   end
@@ -48,10 +52,11 @@ defmodule Benchee.Output.BenchmarkPrintertest do
   test "it scales times appropriately" do
     output = capture_io fn ->
       %{
-        configuration: %Benchee.Configuration{
+        configuration: %Configuration{
           parallel: 1,
           time: 60_000_000,
           warmup: 10_000_000,
+          memory_time: 5_000_000,
           inputs: nil
         },
scenarios: [%Scenario{job_name: "one"}, %Scenario{job_name: "two"}], @@ -62,15 +67,22 @@ defmodule Benchee.Output.BenchmarkPrintertest do assert output =~ "warmup: 10 s" assert output =~ "time: 1 min" + assert output =~ "memory time: 5 s" assert output =~ "parallel: 1" - assert output =~ "Estimated total run time: 2.33 min" + assert output =~ "Estimated total run time: 2.50 min" end @inputs %{"Arg 1" => 1, "Arg 2" => 2} test "multiple inputs" do output = capture_io fn -> %{ - configuration: %{parallel: 2, time: 10_000, warmup: 0, inputs: @inputs}, + configuration: %{ + parallel: 2, + time: 10_000, + warmup: 0, + memory_time: 1_000, + inputs: @inputs + }, scenarios: [ %Scenario{job_name: "one", input_name: "Arg 1", input: 1}, %Scenario{job_name: "one", input_name: "Arg 2", input: 2}, @@ -83,9 +95,10 @@ defmodule Benchee.Output.BenchmarkPrintertest do end assert output =~ "time: 10 ms" + assert output =~ "memory time: 1 ms" assert output =~ "parallel: 2" assert output =~ "inputs: Arg 1, Arg 2" - assert output =~ "Estimated total run time: 40 ms" + assert output =~ "Estimated total run time: 44 ms" end test "does not print if disabled" do From 8de1e024d43d3569d87a3b73f8a286b02cdb8bae Mon Sep 17 00:00:00 2001 From: Tobias Pfeiffer Date: Sat, 7 Apr 2018 12:05:33 +0200 Subject: [PATCH 6/9] mix format on the benchmark printer (not too many changes) --- lib/benchee/output/benchmark_printer.ex | 60 ++++++++++++++----------- 1 file changed, 33 insertions(+), 27 deletions(-) diff --git a/lib/benchee/output/benchmark_printer.ex b/lib/benchee/output/benchmark_printer.ex index b39b23ea..27c232fe 100644 --- a/lib/benchee/output/benchmark_printer.ex +++ b/lib/benchee/output/benchmark_printer.ex @@ -9,7 +9,9 @@ defmodule Benchee.Output.BenchmarkPrinter do How would you want to discern those anyhow? """ def duplicate_benchmark_warning(name) do - IO.puts "You already have a job defined with the name \"#{name}\", you can't add two jobs with the same name!" + IO.puts( + "You already have a job defined with the name \"#{name}\", you can't add two jobs with the same name!" 
+    )
   end
 
   @doc """
@@ -19,40 +21,42 @@ defmodule Benchee.Output.BenchmarkPrinter do
   def configuration_information(%{configuration: %{print: %{configuration: false}}}) do
     nil
   end
+
   def configuration_information(%{scenarios: scenarios, system: sys, configuration: config}) do
     system_information(sys)
     suite_information(scenarios, config)
   end
 
-  defp system_information(%{erlang: erlang_version,
-                            elixir: elixir_version,
-                            os: os,
-                            num_cores: num_cores,
-                            cpu_speed: cpu_speed,
-                            available_memory: available_memory}) do
-    IO.puts """
+  defp system_information(%{
+         erlang: erlang_version,
+         elixir: elixir_version,
+         os: os,
+         num_cores: num_cores,
+         cpu_speed: cpu_speed,
+         available_memory: available_memory
+       }) do
+    IO.puts("""
     Operating System: #{os}
     CPU Information: #{cpu_speed}
     Number of Available Cores: #{num_cores}
     Available memory: #{available_memory}
     Elixir #{elixir_version}
     Erlang #{erlang_version}
-    """
+    """)
   end
 
-  defp suite_information(scenarios,
-                         %{
-                           parallel: parallel,
-                           time: time,
-                           warmup: warmup,
-                           inputs: inputs,
-                           memory_time: memory_time
-                         }) do
+  defp suite_information(scenarios, %{
+         parallel: parallel,
+         time: time,
+         warmup: warmup,
+         inputs: inputs,
+         memory_time: memory_time
+       }) do
     scenario_count = length(scenarios)
-    exec_time = warmup + time + memory_time
-    total_time = scenario_count * exec_time
+    exec_time = warmup + time + memory_time
+    total_time = scenario_count * exec_time
 
-    IO.puts """
+    IO.puts("""
     Benchmark suite executing with the following configuration:
     warmup: #{Duration.format(warmup)}
     time: #{Duration.format(time)}
@@ -61,13 +65,14 @@ defmodule Benchee.Output.BenchmarkPrinter do
     inputs: #{inputs_out(inputs)}
     Estimated total run time: #{Duration.format(total_time)}
 
-    """
+    """)
   end
 
   defp inputs_out(nil), do: "none specified"
+
   defp inputs_out(inputs) do
     inputs
-    |> Map.keys
+    |> Map.keys()
     |> Enum.join(", ")
   end
 
@@ -75,24 +80,25 @@ defmodule Benchee.Output.BenchmarkPrinter do
   Prints a notice which job is currently being benchmarked.
   """
   def benchmarking(_, _, %{print: %{benchmarking: false}}), do: nil
+
   def benchmarking(name, input_name, _config) do
-    IO.puts "Benchmarking #{name}#{input_information(input_name)}..."
+    IO.puts("Benchmarking #{name}#{input_information(input_name)}...")
   end
 
-  @no_input Benchmark.no_input
-  defp input_information(@no_input), do: ""
+  @no_input Benchmark.no_input()
+  defp input_information(@no_input), do: ""
   defp input_information(input_name), do: " with input #{input_name}"
 
   @doc """
   Prints a warning about accuracy of benchmarks when the function is super fast.
   """
   def fast_warning do
-    IO.puts """
+    IO.puts("""
     Warning: The function you are trying to benchmark is super fast, making measurements more unreliable!
     This holds especially true for memory measurements.
     See: https://github.com/PragTob/benchee/wiki/Benchee-Warnings#fast-execution-warning
 
     You may disable this warning by passing print: [fast_warning: false] as
     configuration options.
-    """
+    """)
   end
 end

From 7fb559373e6baa7ff33f8da0529b25f84f52b81b Mon Sep 17 00:00:00 2001
From: Tobias Pfeiffer
Date: Sat, 7 Apr 2018 12:34:37 +0200
Subject: [PATCH 7/9] and the documentation

---
 README.md                    | 13 +++++++++----
 lib/benchee/configuration.ex |  2 ++
 2 files changed, 11 insertions(+), 4 deletions(-)

diff --git a/README.md b/README.md
index dd933354..e7eb4550 100644
--- a/README.md
+++ b/README.md
@@ -48,6 +48,7 @@ The aforementioned [plugins](#plugins) like [benchee_html](https://github.com/Pr
 ## Features
 
 * first runs the functions for a given warmup time without recording the results, to simulate a _"warm"_ running system
+* [measures memory](#measuring-memory-consumption)
 * provides you with lots of statistics - check the next list
 * plugin/extensible friendly architecture so you can use different formatters to generate [CSV, HTML and more](#plugins)
 * nicely formatted console output with units scaled to appropriate units
@@ -64,7 +65,8 @@ Provides you with the following **statistical data**:
 * **median** - when all measured times are sorted, this is the middle value (or average of the two middle values when the number of samples is even). More stable than the average and somewhat more likely to be a typical value you see. (the lower the better)
 * **99th %** - 99th percentile, 99% of all run times are less than this
 
-In addition, you can optionally output an extended set of statistics.
+In addition, you can optionally output an extended set of statistics:
+
 * **minimum** - the smallest (fastest) run time measured for the job
 * **maximum** - the biggest (slowest) run time measured for the job
 * **sample size** - the number of run time measurements taken
@@ -143,6 +145,7 @@ The available options are the following (also documented in [hexdocs](https://he
 * `warmup` - the time in seconds for which a benchmarking job should be run without measuring times before real measurements start. This simulates a _"warm"_ running system. Defaults to 2.
 * `time` - the time in seconds for how long each individual benchmarking job should be run and measured. Defaults to 5.
+* `memory_time` - the time in seconds for how long [memory measurements](#measuring-memory-consumption) should be conducted. Defaults to 0 (turned off).
 * `pre_check` - whether or not to run each job with each input - including all given before or after scenario or each hooks - before the benchmarks are measured to ensure that your code executes without error. This can save time while developing your suites. Defaults to `false`.
 * `inputs` - a map from descriptive input names to some different input, your benchmarking jobs will then be run with each of these inputs. For this to work your benchmarking function gets the current input passed in as an argument into the function. Defaults to `nil`, aka no input specified and functions are called without an argument. See [Inputs](#inputs).
 * `parallel` - the function of each benchmarking job will be executed in `parallel` number processes. If `parallel: 4` then 4 processes will be spawned that all execute the _same_ function for the given time. When these finish/the time is up 4 new processes will be spawned for the next job/function. This gives you more data in the same time, but also puts a load on the system interfering with benchmark results. For more on the pros and cons of parallel benchmarking [check the wiki](https://github.com/PragTob/benchee/wiki/Parallel-Benchmarking). Defaults to 1 (no parallel execution).
@@ -586,20 +589,22 @@ In the more verbose API this is triggered via `Benchee.load/1`. ### Measuring memory consumption -Starting with version 0.13, users can now get measurements of how much memory their benchmarks use. This measurement is **not** the actual effect on the size of the BEAM VM size, but the total amount of memory that was allocated during the execution of a given scenario. This includes all memory that was garbage collected during the execution of that scenario. It **does not** include any memory used in any process other than the original one in which the scenario is run. +Starting with version 0.13, users can now get measurements of how much memory their benchmarks use. This measurement is **not** the actual effect on the size of the BEAM VM size, but the total amount of memory that was allocated during the execution of a given scenario. This includes all memory that was garbage collected during the execution of that scenario. It **does not** include any memory used in any process other than the original one in which the scenario is run. This measurement of memory does not affect the measurement of run times. In cases where all measurements of memory consumption are identical, which happens very frequently, the full statistics will be omitted from the standard console formatter. If your function is deterministic, this will always be the case. Only in functions with some amount of randomness will there be variation in memory usage. -Memory measurement is disabled by default, and you can choose to enable it by passing the following configuration option to `Benchee.run/2`. +Memory measurement is disabled by default, and you can choose to enable it by passing `memory_time: your_seconds` option to `Benchee.run/2`: ```elixir Benchee.run(%{ "something_great" => fn -> cool_stuff end -}, measure_memory: true) +}, memory_time: 2) ``` +Memory time can be specified separately as it will often be constant - so it might not need as much measuring time. + A full example, including an example of the console output, can be found [here](samples/measure_memory.exs). diff --git a/lib/benchee/configuration.ex b/lib/benchee/configuration.ex index dbcb2f5c..8d8a4d29 100644 --- a/lib/benchee/configuration.ex +++ b/lib/benchee/configuration.ex @@ -78,6 +78,8 @@ defmodule Benchee.Configuration do how often it is executed). Defaults to 5. * `warmup` - the time in seconds for which the benchmarking function should be run without gathering results. Defaults to 2. + * `memory_time` - the time in seconds for how long memory measurements + should be conducted. Defaults to 0 (turned off). * `pre_check` - whether or not to run each job with each input - including all given before or after scenario or each hooks - before the benchmarks are measured to ensure that your code executes without error. This can save time From 8ae652b9b824f7cd7a8c27cc714f73583fdd5662 Mon Sep 17 00:00:00 2001 From: Tobias Pfeiffer Date: Sun, 8 Apr 2018 12:42:37 +0200 Subject: [PATCH 8/9] Make sure we can opt out of running any type (time, memory etc.) 
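
With run time and memory measurements separated, either kind can be skipped
by giving it no time. A small sketch of the sort of configuration this
commit is meant to support - the exact option values here are illustrative,
not taken from this diff:

```elixir
# A memory-only run: no warmup, no run time measurement, 1 s of memory
# measurement. The runner and printer now tolerate the zeroed-out parts.
Benchee.run(
  %{"to list" => fn -> Enum.to_list(1..100) end},
  time: 0,
  warmup: 0,
  memory_time: 1
)
```

The console formatters below simply omit any section whose statistics have a
`sample_size` of 0.
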
* Fixes #206 * Also make sure that we're not printing that we are "benchmarking" --- lib/benchee/formatters/console.ex | 18 +++-- lib/benchee/formatters/console/run_time.ex | 14 ++++ lib/benchee/output/benchmark_printer.ex | 1 + .../formatters/console/memory_test.exs | 6 ++ .../formatters/console/run_time_test.exs | 69 ++++++++++++------- test/benchee/formatters/console_test.exs | 30 +++++--- .../benchee/output/benchmark_printer_test.exs | 8 +++ test/benchee_test.exs | 13 ++++ 8 files changed, 119 insertions(+), 40 deletions(-) diff --git a/lib/benchee/formatters/console.ex b/lib/benchee/formatters/console.ex index 8bddb27d..63dafdfe 100644 --- a/lib/benchee/formatters/console.ex +++ b/lib/benchee/formatters/console.ex @@ -22,15 +22,25 @@ defmodule Benchee.Formatters.Console do iex> scenarios = [ ...> %Benchee.Benchmark.Scenario{ ...> name: "My Job", input_name: "My input", run_time_statistics: %Benchee.Statistics{ - ...> average: 200.0,ips: 5000.0,std_dev_ratio: 0.1, median: 190.0, percentiles: %{99 => 300.1} + ...> average: 200.0, + ...> ips: 5000.0, + ...> std_dev_ratio: 0.1, + ...> median: 190.0, + ...> percentiles: %{99 => 300.1}, + ...> sample_size: 200 ...> }, - ...> memory_usage_statistics: %Benchee.Statistics{average: 100.0} + ...> memory_usage_statistics: %Benchee.Statistics{} ...> }, ...> %Benchee.Benchmark.Scenario{ ...> name: "Job 2", input_name: "My input", run_time_statistics: %Benchee.Statistics{ - ...> average: 400.0, ips: 2500.0, std_dev_ratio: 0.2, median: 390.0, percentiles: %{99 => 500.1} + ...> average: 400.0, + ...> ips: 2500.0, + ...> std_dev_ratio: 0.2, + ...> median: 390.0, + ...> percentiles: %{99 => 500.1}, + ...> sample_size: 200 ...> }, - ...> memory_usage_statistics: %Benchee.Statistics{average: 100.0} + ...> memory_usage_statistics: %Benchee.Statistics{} ...> } ...> ] iex> suite = %Benchee.Suite{ diff --git a/lib/benchee/formatters/console/run_time.ex b/lib/benchee/formatters/console/run_time.ex index 74f4c9e3..0c18e18e 100644 --- a/lib/benchee/formatters/console/run_time.ex +++ b/lib/benchee/formatters/console/run_time.ex @@ -61,6 +61,20 @@ defmodule Benchee.Formatters.Console.RunTime do """ @spec format_scenarios([Scenario.t()], map) :: [String.t(), ...] def format_scenarios(scenarios, config) do + if run_time_measurements_present?(scenarios) do + render(scenarios, config) + else + [] + end + end + + defp run_time_measurements_present?(scenarios) do + Enum.any?(scenarios, fn scenario -> + scenario.run_time_statistics.sample_size > 0 + end) + end + + defp render(scenarios, config) do %{unit_scaling: scaling_strategy} = config units = Conversion.units(scenarios, scaling_strategy) label_width = Helpers.label_width(scenarios) diff --git a/lib/benchee/output/benchmark_printer.ex b/lib/benchee/output/benchmark_printer.ex index 27c232fe..de0caeda 100644 --- a/lib/benchee/output/benchmark_printer.ex +++ b/lib/benchee/output/benchmark_printer.ex @@ -80,6 +80,7 @@ defmodule Benchee.Output.BenchmarkPrinter do Prints a notice which job is currently being benchmarked. 
""" def benchmarking(_, _, %{print: %{benchmarking: false}}), do: nil + def benchmarking(_, _, %{time: 0, warmup: 0, memory_time: 0}), do: nil def benchmarking(name, input_name, _config) do IO.puts("Benchmarking #{name}#{input_information(input_name)}...") diff --git a/test/benchee/formatters/console/memory_test.exs b/test/benchee/formatters/console/memory_test.exs index 4c6a7d5a..a5662367 100644 --- a/test/benchee/formatters/console/memory_test.exs +++ b/test/benchee/formatters/console/memory_test.exs @@ -293,6 +293,12 @@ defmodule Benchee.Formatters.Console.MemoryTest do assert result2 =~ ~r/50 K/ assert result2 =~ ~r/201.20/ end + + test "does nothing when there's no statistics to format" do + scenarios = [%Scenario{memory_usage_statistics: %Statistics{sample_size: 0}}] + + assert [] = Memory.format_scenarios(scenarios, %{}) + end end defp assert_column_width(name, string, expected_width) do diff --git a/test/benchee/formatters/console/run_time_test.exs b/test/benchee/formatters/console/run_time_test.exs index 98072ab8..9610d35e 100644 --- a/test/benchee/formatters/console/run_time_test.exs +++ b/test/benchee/formatters/console/run_time_test.exs @@ -31,7 +31,7 @@ defmodule Benchee.Formatters.Console.RunTimeTest do mode: 201.2, sample_size: 50_000 }, - memory_usage_statistics: %Statistics{average: 150.0} + memory_usage_statistics: %Statistics{} } ] @@ -65,7 +65,7 @@ defmodule Benchee.Formatters.Console.RunTimeTest do mode: [201.2, 205.55], sample_size: 50_000 }, - memory_usage_statistics: %Statistics{average: 150.0} + memory_usage_statistics: %Statistics{} } ] @@ -90,7 +90,7 @@ defmodule Benchee.Formatters.Console.RunTimeTest do maximum: 333.3, sample_size: 50_000 }, - memory_usage_statistics: %Statistics{average: 150.0} + memory_usage_statistics: %Statistics{} } ] @@ -109,9 +109,10 @@ defmodule Benchee.Formatters.Console.RunTimeTest do ips: 5_000.0, std_dev_ratio: 0.1, median: 195.5, - percentiles: %{99 => 300.1} + percentiles: %{99 => 300.1}, + sample_size: 200 }, - memory_usage_statistics: %Statistics{average: 150.0} + memory_usage_statistics: %Statistics{} }, %Scenario{ name: "Second", @@ -120,9 +121,10 @@ defmodule Benchee.Formatters.Console.RunTimeTest do ips: 2_500.0, std_dev_ratio: 0.1, median: 375.0, - percentiles: %{99 => 400.1} + percentiles: %{99 => 400.1}, + sample_size: 300 }, - memory_usage_statistics: %Statistics{average: 150.0} + memory_usage_statistics: %Statistics{} } ] @@ -145,9 +147,10 @@ defmodule Benchee.Formatters.Console.RunTimeTest do ips: 2_500.0, std_dev_ratio: 0.1, median: 375.0, - percentiles: %{99 => 500.1} + percentiles: %{99 => 500.1}, + sample_size: 200 }, - memory_usage_statistics: %Statistics{average: 150.0} + memory_usage_statistics: %Statistics{} } longer_scenarios = scenarios ++ [long_scenario] @@ -171,9 +174,10 @@ defmodule Benchee.Formatters.Console.RunTimeTest do ips: 10_000.0, std_dev_ratio: 0.1, median: 90.0, - percentiles: %{99 => 500.1} + percentiles: %{99 => 500.1}, + sample_size: 400 }, - memory_usage_statistics: %Statistics{average: 150.0} + memory_usage_statistics: %Statistics{} }, %Scenario{ name: "Second", @@ -182,9 +186,10 @@ defmodule Benchee.Formatters.Console.RunTimeTest do ips: 5_000.0, std_dev_ratio: 0.1, median: 195.5, - percentiles: %{99 => 500.1} + percentiles: %{99 => 500.1}, + sample_size: 200 }, - memory_usage_statistics: %Statistics{average: 150.0} + memory_usage_statistics: %Statistics{} } ] @@ -205,9 +210,10 @@ defmodule Benchee.Formatters.Console.RunTimeTest do ips: 5_000.0, std_dev_ratio: 0.1, median: 195.5, - 
percentiles: %{99 => 300.1} + percentiles: %{99 => 300.1}, + sample_size: 200 }, - memory_usage_statistics: %Statistics{average: 150.0} + memory_usage_statistics: %Statistics{} }, %Scenario{ name: "First", @@ -218,7 +224,7 @@ defmodule Benchee.Formatters.Console.RunTimeTest do median: 90.0, percentiles: %{99 => 200.1} }, - memory_usage_statistics: %Statistics{average: 150.0} + memory_usage_statistics: %Statistics{} } ] @@ -247,9 +253,10 @@ defmodule Benchee.Formatters.Console.RunTimeTest do ips: 10_000.0, std_dev_ratio: 0.1, median: 90.0, - percentiles: %{99 => 200.1} + percentiles: %{99 => 200.1}, + sample_size: 200 }, - memory_usage_statistics: %Statistics{average: 150.0} + memory_usage_statistics: %Statistics{} }, %Scenario{ name: second_name, @@ -258,9 +265,10 @@ defmodule Benchee.Formatters.Console.RunTimeTest do ips: 5_000.0, std_dev_ratio: 0.1, median: 195.5, - percentiles: %{99 => 300.1} + percentiles: %{99 => 300.1}, + sample_size: 200 }, - memory_usage_statistics: %Statistics{average: 150.0} + memory_usage_statistics: %Statistics{} } ] @@ -282,9 +290,10 @@ defmodule Benchee.Formatters.Console.RunTimeTest do ips: 10_000.0, std_dev_ratio: 0.1, median: 90.0, - percentiles: %{99 => 200.1} + percentiles: %{99 => 200.1}, + sample_size: 200 }, - memory_usage_statistics: %Statistics{average: 150.0} + memory_usage_statistics: %Statistics{} } ] @@ -301,9 +310,10 @@ defmodule Benchee.Formatters.Console.RunTimeTest do ips: 10_000.0, std_dev_ratio: 0.1, median: 0.0125, - percentiles: %{99 => 0.0234} + percentiles: %{99 => 0.0234}, + sample_size: 200 }, - memory_usage_statistics: %Statistics{average: 150.0} + memory_usage_statistics: %Statistics{} } ] @@ -322,9 +332,10 @@ defmodule Benchee.Formatters.Console.RunTimeTest do ips: 12000.0, std_dev_ratio: 13000.0, median: 140_000.0, - percentiles: %{99 => 200_000.1} + percentiles: %{99 => 200_000.1}, + sample_size: 200 }, - memory_usage_statistics: %Statistics{average: 150.0} + memory_usage_statistics: %Statistics{} } ] @@ -337,6 +348,12 @@ defmodule Benchee.Formatters.Console.RunTimeTest do assert result =~ "140 ms" assert result =~ "200.00 ms" end + + test "does nothing when there's no statistics to format" do + scenarios = [%Scenario{run_time_statistics: %Statistics{sample_size: 0}}] + + assert [] = RunTime.format_scenarios(scenarios, %{}) + end end defp assert_column_width(name, string, expected_width) do diff --git a/test/benchee/formatters/console_test.exs b/test/benchee/formatters/console_test.exs index f2bf5a58..db3f4a83 100644 --- a/test/benchee/formatters/console_test.exs +++ b/test/benchee/formatters/console_test.exs @@ -27,7 +27,8 @@ defmodule Benchee.Formatters.ConsoleTest do ips: 5_000.0, std_dev_ratio: 0.1, median: 195.5, - percentiles: %{99 => 400.1} + percentiles: %{99 => 400.1}, + sample_size: 200 }, memory_usage_statistics: %Statistics{} }, @@ -40,7 +41,8 @@ defmodule Benchee.Formatters.ConsoleTest do ips: 10_000.0, std_dev_ratio: 0.1, median: 90.0, - percentiles: %{99 => 300.1} + percentiles: %{99 => 300.1}, + sample_size: 200 }, memory_usage_statistics: %Statistics{} } @@ -75,7 +77,8 @@ defmodule Benchee.Formatters.ConsoleTest do ips: 5_000.0, std_dev_ratio: 0.1, median: 195.5, - percentiles: %{99 => 400.1} + percentiles: %{99 => 400.1}, + sample_size: 200 }, memory_usage_statistics: %Statistics{} }, @@ -88,7 +91,8 @@ defmodule Benchee.Formatters.ConsoleTest do ips: 2_500.0, std_dev_ratio: 0.15, median: 395.0, - percentiles: %{99 => 500.1} + percentiles: %{99 => 500.1}, + sample_size: 200 }, memory_usage_statistics: 
%Statistics{} } @@ -118,7 +122,8 @@ defmodule Benchee.Formatters.ConsoleTest do ips: 5_000.0, std_dev_ratio: 0.1, median: 195.5, - percentiles: %{99 => 300.1} + percentiles: %{99 => 300.1}, + sample_size: 200 }, memory_usage_statistics: %Statistics{} }, @@ -131,7 +136,8 @@ defmodule Benchee.Formatters.ConsoleTest do ips: 10_000.0, std_dev_ratio: 0.3, median: 98.0, - percentiles: %{99 => 200.1} + percentiles: %{99 => 200.1}, + sample_size: 200 }, memory_usage_statistics: %Statistics{} }, @@ -144,7 +150,8 @@ defmodule Benchee.Formatters.ConsoleTest do ips: 2_500.0, std_dev_ratio: 0.15, median: 395.0, - percentiles: %{99 => 500.1} + percentiles: %{99 => 500.1}, + sample_size: 200 }, memory_usage_statistics: %Statistics{} }, @@ -157,7 +164,8 @@ defmodule Benchee.Formatters.ConsoleTest do ips: 4_000.0, std_dev_ratio: 0.31, median: 225.5, - percentiles: %{99 => 300.1} + percentiles: %{99 => 300.1}, + sample_size: 200 }, memory_usage_statistics: %Statistics{} } @@ -191,7 +199,8 @@ defmodule Benchee.Formatters.ConsoleTest do ips: 5_000.0, std_dev_ratio: 0.1, median: 195.5, - percentiles: %{99 => 300.1} + percentiles: %{99 => 300.1}, + sample_size: 200 }, memory_usage_statistics: %Statistics{} }, @@ -204,7 +213,8 @@ defmodule Benchee.Formatters.ConsoleTest do ips: 10_000.0, std_dev_ratio: 0.1, median: 90.0, - percentiles: %{99 => 200.1} + percentiles: %{99 => 200.1}, + sample_size: 200 }, memory_usage_statistics: %Statistics{} } diff --git a/test/benchee/output/benchmark_printer_test.exs b/test/benchee/output/benchmark_printer_test.exs index 1730636a..bcca6edd 100644 --- a/test/benchee/output/benchmark_printer_test.exs +++ b/test/benchee/output/benchmark_printer_test.exs @@ -137,6 +137,14 @@ defmodule Benchee.Output.BenchmarkPrintertest do assert output == "" end + + test "doesn't print if all times are set to 0" do + output = capture_io fn -> + benchmarking "Never", "don't care", %Configuration{time: 0, warmup: 0, memory_time: 0} + end + + assert output == "" + end end test ".fast_warning warns with reference to more information" do diff --git a/test/benchee_test.exs b/test/benchee_test.exs index 7b567293..8df5aef3 100644 --- a/test/benchee_test.exs +++ b/test/benchee_test.exs @@ -402,6 +402,19 @@ defmodule BencheeTest do end end + test "does not blow up setting all times to 0 and never executes a function" do + output = capture_io fn -> + Benchee.run( + %{ + "never execute me" => fn -> raise "BOOOOM" end + }, + time: 0, warmup: 0, memory_time: 0 + ) + end + + refute output =~ "never execute me" + end + describe "save & load" do test "saving the suite to disk and restoring it" do save = [save: [path: "save.benchee", tag: "master"]] From fb0aa72668a38432224e2c16733d5138dd12af14 Mon Sep 17 00:00:00 2001 From: Tobias Pfeiffer Date: Wed, 11 Apr 2018 19:59:15 +0200 Subject: [PATCH 9/9] Improve docs to make clear run time vs. memory time --- README.md | 6 +++--- lib/benchee/configuration.ex | 10 ++++++---- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index e7eb4550..1ef83667 100644 --- a/README.md +++ b/README.md @@ -143,8 +143,8 @@ Benchee.run(%{"some function" => fn -> magic end}, print: [benchmarking: false]) The available options are the following (also documented in [hexdocs](https://hexdocs.pm/benchee/Benchee.Configuration.html#init/1)). -* `warmup` - the time in seconds for which a benchmarking job should be run without measuring times before real measurements start. This simulates a _"warm"_ running system. Defaults to 2. 
-* `time` - the time in seconds for how long each individual benchmarking job should be run and measured. Defaults to 5. +* `warmup` - the time in seconds for which a benchmarking job should be run without measuring times before "real" measurements start. This simulates a _"warm"_ running system. Defaults to 2. +* `time` - the time in seconds for how long each individual benchmarking job should be run for measuring the execution times (run time performance). Defaults to 5. * `memory_time` - the time in seconds for how long [memory measurements](measuring-memory-consumption) should be conducted. Defaults to 0 (turned off). * `pre_check` - whether or not to run each job with each input - including all given before or after scenario or each hooks - before the benchmarks are measured to ensure that your code executes without error. This can save time while developing your suites. Defaults to `false`. * `inputs` - a map from descriptive input names to some different input, your benchmarking jobs will then be run with each of these inputs. For this to work your benchmarking function gets the current input passed in as an argument into the function. Defaults to `nil`, aka no input specified and functions are called without an argument. See [Inputs](#inputs). @@ -589,7 +589,7 @@ In the more verbose API this is triggered via `Benchee.load/1`. ### Measuring memory consumption -Starting with version 0.13, users can now get measurements of how much memory their benchmarks use. This measurement is **not** the actual effect on the size of the BEAM VM size, but the total amount of memory that was allocated during the execution of a given scenario. This includes all memory that was garbage collected during the execution of that scenario. It **does not** include any memory used in any process other than the original one in which the scenario is run. +Starting with version 0.13, users can now get measurements of how much memory their benchmarks use. This measurement is **not** the actual effect on the size of the BEAM VM size, but the total amount of memory that was allocated during the execution of a given scenario. This includes all memory that was garbage collected during the execution of that scenario. It **does not** include any memory used in any process other than the original one in which the scenario is run. This measurement of memory does not affect the measurement of run times. diff --git a/lib/benchee/configuration.ex b/lib/benchee/configuration.ex index 8d8a4d29..e249293e 100644 --- a/lib/benchee/configuration.ex +++ b/lib/benchee/configuration.ex @@ -74,10 +74,12 @@ defmodule Benchee.Configuration do Possible options: - * `time` - total run time in seconds of a single benchmark (determines - how often it is executed). Defaults to 5. - * `warmup` - the time in seconds for which the benchmarking function - should be run without gathering results. Defaults to 2. + * `warmup` - the time in seconds for which a benchmarking job should be run + without measuring times before "real" measurements start. This simulates a + _"warm"_ running system. Defaults to 2. + * `time` - the time in seconds for how long each individual benchmarking job + should be run for measuring the execution times (run time performance). + Defaults to 5. * `memory_time` - the time in seconds for how long memory measurements should be conducted. Defaults to 0 (turned off). * `pre_check` - whether or not to run each job with each input - including all
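
To see how the three timing options interact, here is a small usage sketch (hypothetical job names; per the docs above, `memory_time` can stay short because memory consumption is often constant, and any of the three can be set to 0 to skip that phase):

```elixir
Benchee.run(
  %{
    "flat_map" => fn -> Enum.flat_map(1..100, fn i -> [i, i * i] end) end,
    "map.flatten" => fn -> 1..100 |> Enum.map(fn i -> [i, i * i] end) |> List.flatten() end
  },
  warmup: 2,      # 2 seconds of warmup, measurements discarded
  time: 5,        # 5 seconds of run time measurements
  memory_time: 1  # 1 second of memory measurements (0 turns them off)
)
```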