
chore/bug: rename back to added_noise, move random_state to run, rename experiment_runner to run in new synthetic models/fixed wrong input statements, added new models to test
younesStrittmatter committed Sep 1, 2023
1 parent 3cdf129 commit f095cb8
Showing 4 changed files with 118 additions and 97 deletions.
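The central interface change in this commit: the per-experiment callable is named run instead of experiment_runner, the noise argument is added_noise again, and random_state is now seeded per call rather than at factory construction. A minimal usage sketch of the new interface, assuming the factory returns the SyntheticExperimentCollection built at the bottom of the diff (the condition values are made up for illustration):

import numpy as np
from autora.experiment_runner.synthetic.neuroscience.task_switching import task_switching

experiment = task_switching()
# columns: current task strength, alternative task strength, is_switch
conditions = np.array([[0.7, 0.3, 0.0], [0.7, 0.3, 1.0]])
# noise level and seed now travel with the call, not with the factory
observations = experiment.run(conditions, added_noise=0.01, random_state=42)
noise_free = experiment.ground_truth(conditions)  # partial(run, added_noise=0.0)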
120 changes: 71 additions & 49 deletions src/autora/experiment_runner/synthetic/neuroscience/task_switching.py
@@ -15,10 +15,9 @@ def task_switching(
temperature=0.2,
minimum_task_control=0.15,
constant=1.5,
random_state: Optional[int] = None,
):
"""
Weber-Fechner Law
Task Switching
Args:
name: name of the experiment
@@ -27,7 +26,6 @@
temperature: temperature for softmax when computing performance of current task
constant: constant for task activation
minimum_task_control: minimum task control
random_state: integer used to seed the random number generator
"""

params = dict(
@@ -37,7 +35,6 @@
temperature=temperature,
minimum_task_control=minimum_task_control,
constant=constant,
random_state=random_state,
)

current_task_strength = IV(
@@ -46,7 +43,7 @@
value_range=(0, 1),
units="intensity",
variable_label="Strength of Current Task",
type=ValueType.REAL
type=ValueType.REAL,
)

alt_task_strength = IV(
@@ -55,7 +52,7 @@
value_range=(0, 1),
units="intensity",
variable_label="Strength of Alternative Task",
type=ValueType.REAL
type=ValueType.REAL,
)

is_switch = IV(
@@ -64,34 +61,32 @@
value_range=(0, 1),
units="indicator",
variable_label="Is Switch",
type=ValueType.PROBABILITY_SAMPLE
type=ValueType.PROBABILITY_SAMPLE,
)

cur_task_performance = DV(
name="cur_task_performance",
value_range=(0, 1),
units="performance",
variable_label="Accuray of Current Task",
type=ValueType.PROBABILITY
type=ValueType.PROBABILITY,
)

variables = VariableCollection(
independent_variables=[current_task_strength,
alt_task_strength,
is_switch],
independent_variables=[current_task_strength, alt_task_strength, is_switch],
dependent_variables=[cur_task_performance],
)

rng = np.random.default_rng(random_state)

def inverse(x, A, B):
y = 1 / (A * x + B)
return y

def experiment_runner(
def run(
conditions: Union[pd.DataFrame, np.ndarray, np.recarray],
observation_noise: float = 0.01,
added_noise: float = 0.01,
random_state: Optional[int] = None,
):
rng = np.random.default_rng(random_state)
X = np.array(conditions)
Y = np.zeros((X.shape[0], 1))
for idx, x in enumerate(X):
@@ -101,38 +96,39 @@ def experiment_runner(

# determine current task control

input_ratio = (cur_task_strength + priming_default * (1 - is_switch)) / \
(alt_task_strength + priming_default * (is_switch))
input_ratio = (cur_task_strength + priming_default * (1 - is_switch)) / (
alt_task_strength + priming_default * (is_switch)
)

cur_task_control = inverse(input_ratio, 2.61541389, 0.7042097)
cur_task_control = np.max([cur_task_control, minimum_task_control])

cur_task_input = cur_task_strength + \
priming_default * (1 - is_switch) + \
cur_task_control + \
rng.random.normal(0, std)
cur_task_input = (
cur_task_strength
+ priming_default * (1 - is_switch)
+ cur_task_control
+ rng.normal(0, added_noise)
)

alt_task_input = alt_task_strength + \
priming_default * (is_switch) + \
rng.random.normal(0, std)
alt_task_input = (
alt_task_strength
+ priming_default * (is_switch)
+ rng.normal(0, added_noise)
)

cur_task_activation = 1 - np.exp(-constant * cur_task_input)
alt_task_activation = 1 - np.exp(-constant * alt_task_input)

cur_task_performance = np.exp(cur_task_activation * 1 / temperature) / \
(np.exp(cur_task_activation * 1 / temperature) +
np.exp(alt_task_activation * 1 / temperature))

# word switch
# word nonswitch
# color switch
# color nonswitch
cur_task_performance = np.exp(cur_task_activation * 1 / temperature) / (
np.exp(cur_task_activation * 1 / temperature)
+ np.exp(alt_task_activation * 1 / temperature)
)

Y[idx] = cur_task_performance

return Y

ground_truth = partial(experiment_runner, observation_noise=0.0)
ground_truth = partial(run, added_noise=0.0)

def domain():
s1_values = variables.independent_variables[0].allowed_values
@@ -181,50 +177,76 @@ def plotter(
color_repetition_performance = y[3, 0]

x_data = [1, 2]
word_performance = (1 - np.array([word_repetition_performance,
word_switch_performance])) * 100
color_performance = (1 - np.array([color_repetition_performance,
color_switch_performance])) * 100
word_performance = (
1 - np.array([word_repetition_performance, word_switch_performance])
) * 100
color_performance = (
1 - np.array([color_repetition_performance, color_switch_performance])
) * 100

if model is not None:
y_pred = model.predict(X)
word_switch_performance_pred = y_pred[0][0]
word_repetition_performance_pred = y_pred[1][0]
color_switch_performance_pred = y_pred[2][0]
color_repetition_performance_pred = y_pred[3][0]
word_performance_recovered = (1 - np.array([word_repetition_performance_pred,
word_switch_performance_pred])) * 100
color_performance_recovered = (1 - np.array([color_repetition_performance_pred,
color_switch_performance_pred])) * 100

legend = ('Word Task (Original)', 'Color Task (Original)',
'Word Task (Recovered)', 'Color Task (Recovered)',)
word_performance_recovered = (
1
- np.array(
[word_repetition_performance_pred, word_switch_performance_pred]
)
) * 100
color_performance_recovered = (
1
- np.array(
[color_repetition_performance_pred, color_switch_performance_pred]
)
) * 100

legend = (
"Word Task (Original)",
"Color Task (Original)",
"Word Task (Recovered)",
"Color Task (Recovered)",
)

# plot
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
import matplotlib.pyplot as plt

colors = mcolors.TABLEAU_COLORS
col_keys = list(colors.keys())

plt.plot(x_data, word_performance, label=legend[0], c=colors[col_keys[0]])
plt.plot(x_data, color_performance, label=legend[1], c=colors[col_keys[1]])
if model is not None:
plt.plot(x_data, word_performance_recovered, '--', label=legend[2], c=colors[col_keys[0]])
plt.plot(x_data, color_performance_recovered, '--', label=legend[3], c=colors[col_keys[1]])
plt.plot(
x_data,
word_performance_recovered,
"--",
label=legend[2],
c=colors[col_keys[0]],
)
plt.plot(
x_data,
color_performance_recovered,
"--",
label=legend[3],
c=colors[col_keys[1]],
)
plt.xlim([0.5, 2.5])
plt.ylim([0, 50])
plt.ylabel("Error Rate (%)", fontsize="large")
plt.legend(loc=2, fontsize="large")
plt.title("Task Switching", fontsize="large")
plt.xticks(x_data, ['Repetition', 'Switch'], rotation='horizontal')
plt.xticks(x_data, ["Repetition", "Switch"], rotation="horizontal")
plt.show()

collection = SyntheticExperimentCollection(
name=name,
description=task_switching.__doc__,
variables=variables,
experiment_runner=experiment_runner,
run=run,
ground_truth=ground_truth,
domain=domain,
plotter=plotter,
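For reference, the rule run implements above is a softmax over exponentially saturating task activations, with task control decaying as the inverse of the current-to-alternative input ratio. A noise-free, single-condition restatement of that computation; priming_default is left as a required argument because its default lies outside the shown hunk, and the two constants are the fitted values from the diff:

import numpy as np

def task_switch_performance(cur_strength, alt_strength, is_switch, priming_default,
                            temperature=0.2, minimum_task_control=0.15, constant=1.5):
    # control over the current task falls off with the current/alternative input ratio
    input_ratio = (cur_strength + priming_default * (1 - is_switch)) / (
        alt_strength + priming_default * is_switch
    )
    control = max(1 / (2.61541389 * input_ratio + 0.7042097), minimum_task_control)
    # saturating activations (the added_noise terms are omitted here)
    cur_act = 1 - np.exp(-constant * (cur_strength + priming_default * (1 - is_switch) + control))
    alt_act = 1 - np.exp(-constant * (alt_strength + priming_default * is_switch))
    # temperature-scaled softmax gives the accuracy of the current task
    return np.exp(cur_act / temperature) / (
        np.exp(cur_act / temperature) + np.exp(alt_act / temperature)
    )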
49 changes: 24 additions & 25 deletions src/autora/experiment_runner/synthetic/psychology/exp_learning.py
@@ -1,7 +1,8 @@
from functools import partial
from typing import Optional
from typing import Optional, Union

import numpy as np
import pandas as pd

from autora.experiment_runner.synthetic.utilities import SyntheticExperimentCollection
from autora.variable import DV, IV, ValueType, VariableCollection
@@ -15,7 +16,6 @@ def exp_learning(
maximum_initial_value=0.5,
lr=0.03,
p_asymptotic=1.0,
random_state: Optional[int] = None,
):
"""
Exponential Learning
@@ -28,7 +28,6 @@
minimum_trial: upper bound for exponential constant
name: name of the experiment
resolution: number of allowed values for stimulus
random_state: integer used to seed the random number generator
"""

maximum_trial = resolution
@@ -42,70 +41,70 @@
maximum_initial_value=maximum_initial_value,
lr=lr,
p_asymptotic=p_asymptotic,
random_state=random_state,
)

p_initial = IV(
name="P_asymptotic",
allowed_values=np.linspace(minimum_initial_value,
maximum_initial_value,
resolution),
value_range=(minimum_initial_value,
maximum_initial_value),
allowed_values=np.linspace(
minimum_initial_value, maximum_initial_value, resolution
),
value_range=(minimum_initial_value, maximum_initial_value),
units="performance",
variable_label="Asymptotic Performance",
type=ValueType.REAL
type=ValueType.REAL,
)

trial = IV(
name="trial",
allowed_values=np.linspace(minimum_trial,
maximum_trial,
resolution),
value_range=(minimum_trial,
maximum_trial),
allowed_values=np.linspace(minimum_trial, maximum_trial, resolution),
value_range=(minimum_trial, maximum_trial),
units="trials",
variable_label="Trials",
type=ValueType.REAL
type=ValueType.REAL,
)

performance = DV(
name="performance",
value_range=(0, p_asymptotic),
units="performance",
variable_label="Performance",
type=ValueType.REAL
type=ValueType.REAL,
)

variables = VariableCollection(
independent_variables=[p_initial, trial],
dependent_variables=[performance],
)

rng = np.random.default_rng(random_state)

def experiment_runner(
def run(
conditions: Union[pd.DataFrame, np.ndarray, np.recarray],
observation_noise: float = 0.01,
added_noise: float = 0.01,
random_state: Optional[int] = None,
):
rng = np.random.default_rng(random_state)
X = np.array(conditions)
Y = np.zeros((X.shape[0], 1))

# exp learning function according to
# Heathcote, A., Brown, S., & Mewhort, D. J. (2000). The power law repealed:
# The case for an exponential law of practice. Psychonomic bulletin & review, 7(2), 185–207.

# Thurstone, L. L. (1919). The learning curve equation. Psy- chological Monographs, 26(3), i.
# Thurstone, L. L. (1919). The learning curve equation.
# Psychological Monographs, 26(3), i.

for idx, x in enumerate(X):
p_initial_exp = x[0]
trial_exp = x[1]
y = p_asymptotic - (p_asymptotic - p_initial_exp) * np.exp(- lr * trial_exp) + rng.random.normal(0, std)
y = (
p_asymptotic
- (p_asymptotic - p_initial_exp) * np.exp(-lr * trial_exp)
+ rng.normal(0, added_noise)
)
Y[idx] = y

return Y

ground_truth = partial(experiment_runner, observation_noise=0.0)
ground_truth = partial(run, added_noise=0.0)

def domain():
p_initial_values = variables.independent_variables[0].allowed_values
@@ -149,7 +148,7 @@ def plotter(
name=name,
description=exp_learning.__doc__,
variables=variables,
experiment_runner=experiment_runner,
run=run,
ground_truth=ground_truth,
domain=domain,
plotter=plotter,
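The model in this file is the exponential law of practice cited in the comments above: performance relaxes from p_initial toward p_asymptotic at rate lr. A noise-free sketch using the defaults from the diff:

import numpy as np

def exp_learning_curve(p_initial, trial, p_asymptotic=1.0, lr=0.03):
    # Heathcote, Brown & Mewhort (2000): exponential law of practice
    return p_asymptotic - (p_asymptotic - p_initial) * np.exp(-lr * trial)

# e.g. starting at 0.2: after 50 trials, 1.0 - 0.8 * exp(-1.5) ≈ 0.82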
