First version of separating memory and runtime measurements #204

Merged · 9 commits · Apr 11, 2018
10 changes: 10 additions & 0 deletions lib/benchee/benchmark/measurer.ex
@@ -0,0 +1,10 @@
defmodule Benchee.Benchmark.Measurer do
@moduledoc """
A measurer measures a certain aspect of a function's execution, such as
the time it takes or the memory it consumes.

The single callback is `measure/1`, which takes an anonymous 0-arity
function to measure and returns the measurement and the function's return
value as a tuple.
"""
@callback measure((() -> any)) :: {non_neg_integer, any}
end
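
For illustration only (not part of this diff), here is a minimal sketch of a custom measurer implementing the behaviour above. The module name and the use of reduction counts are assumptions made for the example, not code from this PR:

defmodule Benchee.Benchmark.Measurer.Reductions do
  @moduledoc """
  Hypothetical measurer counting the reductions executing a function takes.
  """

  @behaviour Benchee.Benchmark.Measurer

  def measure(function) do
    # Reduction count of the current process before and after the call;
    # the difference approximates the work the function performed.
    {:reductions, reductions_before} = :erlang.process_info(self(), :reductions)
    return_value = function.()
    {:reductions, reductions_after} = :erlang.process_info(self(), :reductions)
    {reductions_after - reductions_before, return_value}
  end
end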
@@ -1,13 +1,11 @@
defmodule Benchee.MemoryMeasure do
defmodule Benchee.Benchmark.Measurer.Memory do
@moduledoc """
This exposes two functions, apply/1 and apply/3. Both execute a given function
and report on the memory used by monitoring the garbage collection process for
a single process.
Measure memory consumption of a function.
"""
import Kernel, except: [apply: 3, apply: 2]

@spec apply(fun) :: no_return() | tuple()
def apply(fun) do
@behaviour Benchee.Benchmark.Measurer

def measure(fun) do
ref = make_ref()
Process.flag(:trap_exit, true)
start_runner(fun, ref)
11 changes: 11 additions & 0 deletions lib/benchee/benchmark/measurer/time.ex
@@ -0,0 +1,11 @@
defmodule Benchee.Benchmark.Measurer.Time do
@moduledoc """
Measure the time consumed by executing a function.
"""

@behaviour Benchee.Benchmark.Measurer

def measure(function) do
:timer.tc(function)
end
end
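
A quick usage sketch (not from the diff): every measurer returns the measurement together with the measured function's return value, here via `:timer.tc/1`:

{microseconds, return_value} =
  Benchee.Benchmark.Measurer.Time.measure(fn -> Enum.sum(1..1_000) end)
# return_value is 500500; microseconds depends on the machine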
118 changes: 60 additions & 58 deletions lib/benchee/benchmark/runner.ex
@@ -4,10 +4,11 @@ defmodule Benchee.Benchmark.Runner do
run time and memory usage to each scenario.
"""

alias Benchee.{Benchmark, MemoryMeasure}
alias Benchee.Benchmark
alias Benchee.Benchmark.{Scenario, ScenarioContext}
alias Benchee.Utility.{RepeatN, Parallel}
alias Benchee.Configuration
alias Benchee.Benchmark.Measurer
Collaborator:

Maybe place this alias with the others from Benchmark

Member Author:

ah damn me, totally - thanks Eric!

@doc """
Executes the benchmarks defined before by first running the defined functions
@@ -34,7 +35,7 @@
defp pre_check(scenario, scenario_context = %ScenarioContext{config: %{pre_check: true}}) do
scenario_input = run_before_scenario(scenario, scenario_context)
scenario_context = %ScenarioContext{scenario_context | scenario_input: scenario_input}
_ = measure_iteration(scenario, scenario_context)
_ = measure_iteration(scenario, scenario_context, Measurer.Time)
_ = run_after_scenario(scenario, scenario_context)
nil
end
@@ -58,7 +59,6 @@
defp measure_scenario_parallel(config, scenario, scenario_context) do
1..config.parallel
|> Parallel.map(fn _ -> measure_scenario(scenario, scenario_context) end)
|> List.flatten()
end

defp add_measurements_to_scenario(measurements, scenario) do
@@ -72,9 +72,11 @@
scenario_input = run_before_scenario(scenario, scenario_context)
scenario_context = %ScenarioContext{scenario_context | scenario_input: scenario_input}
_ = run_warmup(scenario, scenario_context)
measurements = run_benchmark(scenario, scenario_context)
runtimes = run_runtime_benchmark(scenario, scenario_context)
memory_usages = run_memory_benchmark(scenario, scenario_context)
run_after_scenario(scenario, scenario_context)
measurements

{runtimes, memory_usages}
end

defp run_before_scenario(
@@ -103,7 +105,7 @@
measure_runtimes(scenario, scenario_context, warmup, false)
end

defp run_benchmark(
defp run_runtime_benchmark(
scenario,
scenario_context = %ScenarioContext{
config: %Configuration{
@@ -115,6 +117,29 @@
measure_runtimes(scenario, scenario_context, run_time, fast_warning)
end

defp run_memory_benchmark(_, %ScenarioContext{config: %{memory_time: 0}}) do
[]
end
defp run_memory_benchmark(
scenario,
scenario_context = %ScenarioContext{
config: %Configuration{
memory_time: memory_time
}
}
) do

end_time = current_time() + memory_time

new_context = %ScenarioContext{
scenario_context
| current_time: current_time(),
end_time: end_time
}

do_benchmark(scenario, new_context, Measurer.Memory, [])
end

defp run_after_scenario(
%{
after_scenario: local_after_scenario
@@ -135,7 +160,7 @@
end_time = current_time() + run_time
:erlang.garbage_collect()

{num_iterations, initial_run_time, initial_memory_usage} =
{num_iterations, initial_run_time} =
determine_n_times(scenario, scenario_context, fast_warning)

new_context = %ScenarioContext{
@@ -145,10 +170,7 @@
num_iterations: num_iterations
}

do_benchmark(
scenario,
new_context,
{[initial_run_time], updated_memory_usages(initial_memory_usage, [])})
do_benchmark(scenario, new_context, Measurer.Time, [initial_run_time])
end

defp current_time, do: :erlang.system_time(:micro_seconds)
@@ -166,12 +188,10 @@
},
fast_warning
) do
{run_time, memory_usage} = measure_iteration(scenario, scenario_context)
run_time = measure_iteration(scenario, scenario_context, Measurer.Time)

if run_time >= @minimum_execution_time do
{adjusted_run_time, adjusted_memory_usage} =
adjust_for_iterations(run_time, memory_usage, num_iterations)
{num_iterations, adjusted_run_time, adjusted_memory_usage}
{num_iterations, adjust_for_iterations(run_time, num_iterations)}
else
if fast_warning, do: printer.fast_warning()

@@ -195,92 +215,74 @@
current_time: current_time,
end_time: end_time
},
{run_times, memory_usages}
_measurer,
measurements
)
when current_time > end_time do
# restore correct order - important for graphing
{Enum.reverse(run_times), Enum.reverse(memory_usages)}
Enum.reverse(measurements)
end

defp do_benchmark(scenario, scenario_context, {run_times, memory_usages}) do
{run_time, memory_usage} = iteration_measurements(scenario, scenario_context)
defp do_benchmark(scenario, scenario_context, measurer, measurements) do
measurement = iteration_measurements(scenario, scenario_context, measurer)
updated_context = %ScenarioContext{scenario_context | current_time: current_time()}

do_benchmark(
scenario,
updated_context,
{[run_time | run_times], updated_memory_usages(memory_usage, memory_usages)}
)
do_benchmark(scenario, updated_context, measurer, [measurement | measurements])
end

# Hush now until I need you again
# We return nil if no memory measurement is performed so keep it empty
defp updated_memory_usages(nil, memory_usages), do: memory_usages
defp updated_memory_usages(memory_usage, memory_usages) when memory_usage < 0, do: memory_usages
defp updated_memory_usages(memory_usage, memory_usages), do: [memory_usage | memory_usages]
# defp updated_memory_usages(nil, memory_usages), do: memory_usages
# defp updated_memory_usages(memory_usage, memory_usages) when memory_usage < 0, do: memory_usages
# defp updated_memory_usages(memory_usage, memory_usages), do: [memory_usage | memory_usages]

defp iteration_measurements(
scenario,
scenario_context = %ScenarioContext{
num_iterations: num_iterations
}
},
measurer
) do
{run_time, memory_usage} = measure_iteration(scenario, scenario_context)

adjust_for_iterations(run_time, memory_usage, num_iterations)
end
measurement = measure_iteration(scenario, scenario_context, measurer)

defp adjust_for_iterations(run_time, nil, num_iterations) do
{run_time / num_iterations, nil}
adjust_for_iterations(measurement, num_iterations)
end

defp adjust_for_iterations(run_time, memory_usage, num_iterations) do
{run_time / num_iterations, memory_usage / num_iterations}
end
defp adjust_for_iterations(measurement, 1), do: measurement
defp adjust_for_iterations(measurement, num_iterations), do: measurement / num_iterations

defp measure_iteration(
scenario = %Scenario{function: function},
scenario_context = %ScenarioContext{
num_iterations: 1,
config: %{measure_memory: measure_memory}
}
},
measurer
) do
new_input = run_before_each(scenario, scenario_context)
function = main_function(function, new_input)

{microseconds, memory_usage, return_value} = measure_time_and_memory(function, measure_memory)
{measurement, return_value} = measurer.measure(function)

run_after_each(return_value, scenario, scenario_context)
{microseconds, memory_usage}
measurement
end

defp measure_iteration(
scenario,
scenario_context = %ScenarioContext{
num_iterations: iterations,
config: %{measure_memory: measure_memory}
}
num_iterations: iterations
},
measurer
)
when iterations > 1 do
# When we have more than one iteration, the repetition and the calling
# of hooks are already included in the function; for the reasoning see
# `build_benchmarking_function/2`
function = build_benchmarking_function(scenario, scenario_context)

{microseconds, memory_usage, _return_value} =
measure_time_and_memory(function, measure_memory)

{microseconds, memory_usage}
end

defp measure_time_and_memory(function, true) do
{microseconds, return_value} = :timer.tc(function)
{memory_usage, _} = MemoryMeasure.apply(function)
{microseconds, memory_usage, return_value}
end
{measurement, _return_value} = measurer.measure(function)

defp measure_time_and_memory(function, false) do
{microseconds, return_value} = :timer.tc(function)
{microseconds, nil, return_value}
measurement
end

@no_input Benchmark.no_input()
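The runner above now threads the measurer module through as an argument and calls `measurer.measure/1`, so time and memory measurements share a single benchmarking loop. A stripped-down sketch of that dispatch follows; the module and function names here are illustrative, not from the PR:

defmodule MeasurerLoopSketch do
  # Collects `n` measurements of `fun` with whichever measurer module is
  # passed in, e.g. Benchee.Benchmark.Measurer.Time or Measurer.Memory.
  def collect(fun, measurer, n) do
    for _ <- 1..n do
      {measurement, _return_value} = measurer.measure(fun)
      measurement
    end
  end
end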
17 changes: 11 additions & 6 deletions lib/benchee/configuration.ex
@@ -13,9 +13,9 @@ defmodule Benchee.Configuration do
}

defstruct parallel: 1,
measure_memory: false,
time: 5,
warmup: 2,
memory_time: 0,
pre_check: false,
formatters: [Console],
print: %{
@@ -44,9 +44,9 @@

@type t :: %__MODULE__{
parallel: integer,
measure_memory: boolean,
time: number,
warmup: number,
memory_time: number,
pre_check: boolean,
formatters: [(Suite.t() -> Suite.t())],
print: map,
@@ -63,7 +63,7 @@
}

@type user_configuration :: map | keyword
@time_keys [:time, :warmup]
@time_keys [:time, :warmup, :memory_time]

@doc """
Returns the initial benchmark configuration for Benchee, composed of defaults
@@ -327,10 +327,15 @@
end)
end

defp update_measure_memory(config = %{measure_memory: measure_memory}) do
defp update_measure_memory(config = %{memory_time: memory_time}) do
otp_version = List.to_integer(:erlang.system_info(:otp_release))
if measure_memory and otp_version <= 18, do: print_memory_measure_warning()
Map.put(config, :measure_memory, measure_memory and otp_version > 18)

if (memory_time > 0) and otp_version <= 18 do
print_memory_measure_warning()
Map.put(config, :memory_time, 0)
else
config
end
end

defp print_memory_measure_warning do
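With this change, memory measurement is driven by its own time budget (`memory_time`, defaulting to 0) instead of the old `measure_memory` boolean. A hedged usage sketch, where the job and the option values are illustrative:

Benchee.run(
  %{"flat_map" => fn -> Enum.flat_map(1..100, fn i -> [i, i * i] end) end},
  time: 5,
  warmup: 2,
  # run memory measurements for 2 seconds; 0 (the default) disables them
  memory_time: 2
)

On OTP 18 or older, `update_measure_memory/1` prints a warning and resets `memory_time` to 0, since the memory instrumentation requires a newer OTP.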
16 changes: 2 additions & 14 deletions lib/benchee/formatters/console.ex
@@ -40,7 +40,6 @@ defmodule Benchee.Formatters.Console do
...> console: %{comparison: false, extended_statistics: false}
...> },
...> unit_scaling: :best,
...> measure_memory: false
...> }
...> }
iex> Benchee.Formatters.Console.format(suite)
@@ -53,16 +52,14 @@
"""
@spec format(Suite.t()) :: [any]
def format(%Suite{scenarios: scenarios, configuration: config}) do
%{measure_memory: measure_memory} = config

config = console_configuration(config)

scenarios
|> Enum.group_by(fn scenario -> scenario.input_name end)
|> Enum.map(fn {input, scenarios} ->
scenarios
|> Statistics.sort()
|> generate_output(config, input, measure_memory)
|> generate_output(config, input)
end)
end

@@ -92,23 +89,14 @@
)
end

defp generate_output(scenarios, config, input, measure_memory)

defp generate_output(scenarios, config, input, true) do
defp generate_output(scenarios, config, input) do
[
input_header(input) |
RunTime.format_scenarios(scenarios, config) ++
Memory.format_scenarios(scenarios, config)
]
end

defp generate_output(scenarios, config, input, false) do
[
input_header(input) |
RunTime.format_scenarios(scenarios, config)
]
end

@no_input_marker Benchee.Benchmark.no_input()
defp input_header(input) when input == @no_input_marker, do: ""
defp input_header(input), do: "\n##### With input #{input} #####"