
Commit

Merge pull request #10 from AutoResearch/9-chore-rename-input-arguments-of-runners-to-us-with-state

feat: make synthetic runners use dataframes and rename inputs so stat…
younesStrittmatter authored Sep 1, 2023
2 parents 6c3b5da + f095cb8 commit 1136529
Showing 11 changed files with 379 additions and 207 deletions.
155 changes: 140 additions & 15 deletions docs/Example.ipynb

Large diffs are not rendered by default.

@@ -15,7 +15,7 @@
>>> s.name
'Template Experiment'
>>> s.variables
>>> s.variables # doctest: +ELLIPSIS
VariableCollection(...)
>>> s.domain()
@@ -25,8 +25,8 @@
[3]])
>>> s.ground_truth # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
functools.partial(<function template_experiment.<locals>.experiment_runner at 0x...>,
added_noise_=0.0)
functools.partial(<function template_experiment.<locals>.run at 0x...>,
added_noise=0.0)
>>> s.ground_truth(1.)
2.0
@@ -38,30 +38,31 @@
[4.]])
>>> s.experiment_runner # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<function template_experiment.<locals>.experiment_runner at 0x...>
>>> s.run # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<function template_experiment.<locals>.run at 0x...>
>>> s.experiment_runner(1.)
1.8697820493137682
>>> s.run(1., random_state=42)
2.003047170797544
>>> s.experiment_runner(s.domain())
array([[1.01278404],
[1.96837574],
[2.99831988],
[3.91469561]])
>>> s.run(s.domain(), random_state=42)
array([[1.00304717],
[1.98960016],
[3.00750451],
[4.00940565]])
>>> s.plotter()
>>> plt.show() # doctest: +SKIP
Generate a new version of the experiment with different parameters:
>>> new_params = dict(s.params, **dict(random_state=190))
>>> new_params = dict(s.params)
>>> s.factory_function(**new_params) # doctest: +ELLIPSIS
SyntheticExperimentCollection(..., params={..., 'random_state': 190}, ...)
SyntheticExperimentCollection(...)
"""


from functools import partial
from typing import Optional

import numpy as np
from numpy.typing import ArrayLike
@@ -73,22 +74,17 @@
def template_experiment(
# Add any configurable parameters with their defaults here:
name: str = "Template Experiment",
added_noise: float = 0.1,
random_state: int = 42,
):
"""
A template for synthetic experiments.
Parameters:
added_noise: standard deviation of gaussian noise added to output
random_state: seed for random number generator
name: name of the experiment
"""

params = dict(
# Include all parameters here:
name=name,
added_noise=added_noise,
random_state=random_state,
)

# Define variables
@@ -100,15 +96,19 @@ def template_experiment(
)

# Define experiment runner
rng = np.random.default_rng(random_state)

def experiment_runner(x: ArrayLike, added_noise_=added_noise):
def run(
conditions: ArrayLike,
added_noise: float = 0.01,
random_state: Optional[int] = None,
):
"""A function which simulates noisy observations."""
x_ = np.array(x)
y = x_ + 1.0 + rng.normal(0, added_noise_, size=x_.shape)
rng = np.random.default_rng(random_state)
x_ = np.array(conditions)
y = x_ + 1.0 + rng.normal(0, added_noise, size=x_.shape)
return y

ground_truth = partial(experiment_runner, added_noise_=0.0)
ground_truth = partial(run, added_noise=0.0)
"""A function which simulates perfect observations"""

def domain():
@@ -137,7 +137,7 @@ def plotter(model=None):
name=name,
description=template_experiment.__doc__,
variables=variables,
experiment_runner=experiment_runner,
run=run,
ground_truth=ground_truth,
domain=domain,
plotter=plotter,
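
The net effect of the template changes above: the runner is exposed as run rather than experiment_runner, it takes conditions as its first argument, and it seeds its noise generator per call via random_state instead of once at factory time. A standalone sketch of that seeding pattern, mirroring the new template code rather than importing it from autora:

    from typing import Optional

    import numpy as np
    from numpy.typing import ArrayLike

    def run(conditions: ArrayLike, added_noise: float = 0.01,
            random_state: Optional[int] = None):
        """Simulate noisy observations y = x + 1, reproducible when seeded."""
        rng = np.random.default_rng(random_state)  # fresh generator on every call
        x = np.array(conditions)
        return x + 1.0 + rng.normal(0, added_noise, size=x.shape)

    x = np.array([[1.0], [2.0]])
    # Same seed, same draws: the caller decides whether a run is reproducible.
    assert np.array_equal(run(x, random_state=42), run(x, random_state=42))

Under the old factory-level rng, two identical calls returned different values; with the per-call seed, reproducibility is controlled at the call site, which is what the updated doctests (random_state=42) rely on.
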
src/autora/experiment_runner/synthetic/economics/expected_value_theory.py
@@ -1,6 +1,8 @@
from functools import partial
from typing import Optional, Union

import numpy as np
import pandas as pd

from autora.experiment_runner.synthetic.utilities import SyntheticExperimentCollection
from autora.variable import DV, IV, ValueType, VariableCollection
@@ -73,8 +75,6 @@ def expected_value_theory(
resolution=10,
minimum_value=-1,
maximum_value=1,
added_noise: float = 0.01,
random_state: int = 180,
):
"""
Expected Value Theory
@@ -86,9 +86,6 @@
resolution:
minimum_value:
maximum_value:
added_noise:
random_state:
"""

params = dict(
@@ -98,17 +95,19 @@
resolution=resolution,
choice_temperature=choice_temperature,
value_lambda=value_lambda,
added_noise=added_noise,
random_state=random_state,
)

rng = np.random.default_rng(random_state)

variables = get_variables(
minimum_value=minimum_value, maximum_value=maximum_value, resolution=resolution
)

def experiment_runner(X: np.ndarray, added_noise_=added_noise):
def run(
conditions: Union[pd.DataFrame, np.ndarray, np.recarray],
added_noise: float = 0.01,
random_state: Optional[int] = None,
):
rng = np.random.default_rng(random_state)
X = np.array(conditions)
Y = np.zeros((X.shape[0], 1))
for idx, x in enumerate(X):
value_A = value_lambda * x[0]
@@ -117,8 +116,8 @@ def experiment_runner(X: np.ndarray, added_noise_=added_noise):
probability_a = x[1]
probability_b = x[3]

expected_value_A = value_A * probability_a + rng.normal(0, added_noise_)
expected_value_B = value_B * probability_b + rng.normal(0, added_noise_)
expected_value_A = value_A * probability_a + rng.normal(0, added_noise)
expected_value_B = value_B * probability_b + rng.normal(0, added_noise)

# compute probability of choosing option A
p_choose_A = np.exp(expected_value_A / choice_temperature) / (
@@ -128,9 +127,12 @@ def experiment_runner(X: np.ndarray, added_noise_=added_noise):

Y[idx] = p_choose_A

return Y
experiment_data = pd.DataFrame(conditions)
experiment_data.columns = [v.name for v in variables.independent_variables]
experiment_data[variables.dependent_variables[0].name] = Y
return experiment_data

ground_truth = partial(experiment_runner, added_noise_=0.0)
ground_truth = partial(run, added_noise=0.0)

def domain():
X = np.array(
@@ -186,7 +188,7 @@ def plotter(model=None):
name=name,
description=expected_value_theory.__doc__,
variables=variables,
experiment_runner=experiment_runner,
run=run,
ground_truth=ground_truth,
domain=domain,
plotter=plotter,
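
Besides the rename, the expected-value runner now wraps its output in a pandas DataFrame: the conditions become columns named after the independent variables, and the computed choice probability is appended under the dependent variable's name. A hedged usage sketch, assuming the package is installed and the module path matches the import that prospect_theory.py uses below:

    from autora.experiment_runner.synthetic.economics.expected_value_theory import (
        expected_value_theory,
    )

    s = expected_value_theory()                # collection built by the factory above
    conditions = s.domain()                    # full crossing of the independent variables
    data = s.run(conditions, random_state=42)  # DataFrame: IV columns plus choice probability
    print(data.head())

Because run accepts a DataFrame, an ndarray, or a recarray, callers can also pass a hand-built pandas DataFrame of conditions and get the same frame back with the dependent-variable column added.
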
27 changes: 17 additions & 10 deletions src/autora/experiment_runner/synthetic/economics/prospect_theory.py
@@ -1,6 +1,8 @@
from functools import partial
from typing import Optional, Union

import numpy as np
import pandas as pd

from autora.experiment_runner.synthetic.economics.expected_value_theory import (
get_variables,
@@ -10,7 +12,6 @@

def prospect_theory(
name="Prospect Theory",
added_noise=0.01,
choice_temperature=0.1,
value_alpha=0.88,
value_beta=0.88,
@@ -20,7 +21,6 @@ def prospect_theory(
resolution=10,
minimum_value=-1,
maximum_value=1,
rng=np.random.default_rng(),
):
"""
Parameters from
@@ -44,7 +44,6 @@
"""

params = dict(
added_noise=added_noise,
choice_temperature=choice_temperature,
value_alpha=value_alpha,
value_beta=value_beta,
@@ -54,15 +53,20 @@
resolution=resolution,
minimum_value=minimum_value,
maximum_value=maximum_value,
rng=rng,
name=name,
)

variables = get_variables(
minimum_value=minimum_value, maximum_value=maximum_value, resolution=resolution
)

def experiment_runner(X: np.ndarray, added_noise_=added_noise):
def run(
conditions: Union[pd.DataFrame, np.ndarray, np.recarray],
added_noise=0.01,
random_state: Optional[int] = None,
):
rng = np.random.default_rng(random_state)
X = np.array(conditions)
Y = np.zeros((X.shape[0], 1))
for idx, x in enumerate(X):
# power value function according to:
@@ -113,8 +117,8 @@ def experiment_runner(X: np.ndarray, added_noise_=added_noise):
x[3] ** coefficient + (1 - x[3]) ** coefficient
) ** (1 / coefficient)

expected_value_A = value_A * probability_a + rng.normal(0, added_noise_)
expected_value_B = value_B * probability_b + rng.normal(0, added_noise_)
expected_value_A = value_A * probability_a + rng.normal(0, added_noise)
expected_value_B = value_B * probability_b + rng.normal(0, added_noise)

# compute probability of choosing option A
p_choose_A = np.exp(expected_value_A / choice_temperature) / (
@@ -124,9 +128,12 @@ def experiment_runner(X: np.ndarray, added_noise_=added_noise):

Y[idx] = p_choose_A

return Y
experiment_data = pd.DataFrame(conditions)
experiment_data.columns = [v.name for v in variables.independent_variables]
experiment_data[variables.dependent_variables[0].name] = Y
return experiment_data

ground_truth = partial(experiment_runner, added_noise_=0.0)
ground_truth = partial(run, added_noise=0.0)

def domain():
v_a = variables.independent_variables[0].allowed_values
@@ -188,7 +195,7 @@ def plotter(model=None):
params=params,
variables=variables,
domain=domain,
experiment_runner=experiment_runner,
run=run,
ground_truth=ground_truth,
plotter=plotter,
factory_function=prospect_theory,
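
prospect_theory gets the same treatment: per-call seeding, DataFrame output, and a ground_truth that is simply functools.partial(run, added_noise=0.0). A hedged usage sketch, assuming the import path implied by the file header above:

    from autora.experiment_runner.synthetic.economics.prospect_theory import prospect_theory

    s = prospect_theory()
    conditions = s.domain()
    noisy = s.run(conditions, added_noise=0.01, random_state=42)  # reproducible, noisy DataFrame
    exact = s.ground_truth(conditions)                            # same runner with added_noise=0.0

Since ground_truth shares run's signature, consumers that previously special-cased experiment_runner versus ground_truth only need the keyword rename (run= in the SyntheticExperimentCollection) and the new conditions/random_state argument names.
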
