diff --git a/src/autora/experiment_runner/synthetic/neuroscience/task_switching.py b/src/autora/experiment_runner/synthetic/neuroscience/task_switching.py
index d3fb9441..a132b22e 100644
--- a/src/autora/experiment_runner/synthetic/neuroscience/task_switching.py
+++ b/src/autora/experiment_runner/synthetic/neuroscience/task_switching.py
@@ -15,10 +15,9 @@ def task_switching(
     temperature=0.2,
     minimum_task_control=0.15,
     constant=1.5,
-    random_state: Optional[int] = None,
 ):
     """
-    Weber-Fechner Law
+    Task Switching
 
     Args:
         name: name of the experiment
@@ -27,7 +26,6 @@ def task_switching(
         temperature: temperature for softmax when computing performance of current task
         constant: constant for task activation
         minimum_task_control: minimum task control
-        random_state: integer used to seed the random number generator
     """
 
     params = dict(
@@ -37,7 +35,6 @@ def task_switching(
         temperature=temperature,
         minimum_task_control=minimum_task_control,
         constant=constant,
-        random_state=random_state,
     )
 
     current_task_strength = IV(
@@ -46,7 +43,7 @@ def task_switching(
         value_range=(0, 1),
         units="intensity",
         variable_label="Strength of Current Task",
-        type=ValueType.REAL
+        type=ValueType.REAL,
     )
 
     alt_task_strength = IV(
@@ -55,7 +52,7 @@
         value_range=(0, 1),
         units="intensity",
         variable_label="Strength of Alternative Task",
-        type=ValueType.REAL
+        type=ValueType.REAL,
     )
 
     is_switch = IV(
@@ -64,7 +61,7 @@
         value_range=(0, 1),
         units="indicator",
         variable_label="Is Switch",
-        type=ValueType.PROBABILITY_SAMPLE
+        type=ValueType.PROBABILITY_SAMPLE,
     )
 
     cur_task_performance = DV(
@@ -72,26 +69,24 @@
         value_range=(0, 1),
         units="performance",
-        variable_label="Accuray of Current Task",
-        type=ValueType.PROBABILITY
+        variable_label="Accuracy of Current Task",
+        type=ValueType.PROBABILITY,
     )
 
     variables = VariableCollection(
-        independent_variables=[current_task_strength,
-                               alt_task_strength,
-                               is_switch],
+        independent_variables=[current_task_strength, alt_task_strength, is_switch],
         dependent_variables=[cur_task_performance],
     )
 
-    rng = np.random.default_rng(random_state)
-
     def inverse(x, A, B):
         y = 1 / (A * x + B)
         return y
 
-    def experiment_runner(
+    def run(
         conditions: Union[pd.DataFrame, np.ndarray, np.recarray],
-        observation_noise: float = 0.01,
+        added_noise: float = 0.01,
+        random_state: Optional[int] = None,
     ):
+        rng = np.random.default_rng(random_state)
         X = np.array(conditions)
         Y = np.zeros((X.shape[0], 1))
         for idx, x in enumerate(X):
@@ -101,38 +96,39 @@ def experiment_runner(
             is_switch = x[2]
 
             # determine current task control
-            input_ratio = (cur_task_strength + priming_default * (1 - is_switch)) / \
-                          (alt_task_strength + priming_default * (is_switch))
+            input_ratio = (cur_task_strength + priming_default * (1 - is_switch)) / (
+                alt_task_strength + priming_default * (is_switch)
+            )
 
             cur_task_control = inverse(input_ratio, 2.61541389, 0.7042097)
             cur_task_control = np.max([cur_task_control, minimum_task_control])
 
-            cur_task_input = cur_task_strength + \
-                             priming_default * (1 - is_switch) + \
-                             cur_task_control + \
-                             rng.random.normal(0, std)
+            cur_task_input = (
+                cur_task_strength
+                + priming_default * (1 - is_switch)
+                + cur_task_control
+                + rng.normal(0, added_noise)
+            )
 
-            alt_task_input = alt_task_strength + \
-                             priming_default * (is_switch) + \
-                             rng.random.normal(0, std)
+            alt_task_input = (
+                alt_task_strength
+                + priming_default * (is_switch)
+                + rng.normal(0, added_noise)
+            )
 
             cur_task_activation = 1 - np.exp(-constant * cur_task_input)
             alt_task_activation = 1 - np.exp(-constant * alt_task_input)
 
-            cur_task_performance = np.exp(cur_task_activation * 1 / temperature) / \
-                (np.exp(cur_task_activation * 1 / temperature) +
-                 np.exp(alt_task_activation * 1 / temperature))
-
-            # word switch
-            # word nonswitch
-            # color switch
-            # color nonswitch
+            cur_task_performance = np.exp(cur_task_activation * 1 / temperature) / (
+                np.exp(cur_task_activation * 1 / temperature)
+                + np.exp(alt_task_activation * 1 / temperature)
+            )
 
             Y[idx] = cur_task_performance
 
         return Y
 
-    ground_truth = partial(experiment_runner, observation_noise=0.0)
+    ground_truth = partial(run, added_noise=0.0)
 
     def domain():
         s1_values = variables.independent_variables[0].allowed_values
@@ -181,10 +177,12 @@
         color_repetition_performance = y[3, 0]
 
         x_data = [1, 2]
-        word_performance = (1 - np.array([word_repetition_performance,
-                                          word_switch_performance])) * 100
-        color_performance = (1 - np.array([color_repetition_performance,
-                                           color_switch_performance])) * 100
+        word_performance = (
+            1 - np.array([word_repetition_performance, word_switch_performance])
+        ) * 100
+        color_performance = (
+            1 - np.array([color_repetition_performance, color_switch_performance])
+        ) * 100
 
         if model is not None:
             y_pred = model.predict(X)
@@ -192,17 +190,29 @@
             word_repetition_performance_pred = y_pred[1][0]
             color_switch_performance_pred = y_pred[2][0]
             color_repetition_performance_pred = y_pred[3][0]
-            word_performance_recovered = (1 - np.array([word_repetition_performance_pred,
-                                                        word_switch_performance_pred])) * 100
-            color_performance_recovered = (1 - np.array([color_repetition_performance_pred,
-                                                         color_switch_performance_pred])) * 100
-
-        legend = ('Word Task (Original)', 'Color Task (Original)',
-                  'Word Task (Recovered)', 'Color Task (Recovered)',)
+            word_performance_recovered = (
+                1
+                - np.array(
+                    [word_repetition_performance_pred, word_switch_performance_pred]
+                )
+            ) * 100
+            color_performance_recovered = (
+                1
+                - np.array(
+                    [color_repetition_performance_pred, color_switch_performance_pred]
+                )
+            ) * 100
+
+        legend = (
+            "Word Task (Original)",
+            "Color Task (Original)",
+            "Word Task (Recovered)",
+            "Color Task (Recovered)",
+        )
 
         # plot
-        import matplotlib.pyplot as plt
         import matplotlib.colors as mcolors
+        import matplotlib.pyplot as plt
 
         colors = mcolors.TABLEAU_COLORS
         col_keys = list(colors.keys())
@@ -210,21 +220,33 @@
         plt.plot(x_data, word_performance, label=legend[0], c=colors[col_keys[0]])
         plt.plot(x_data, color_performance, label=legend[1], c=colors[col_keys[1]])
         if model is not None:
-            plt.plot(x_data, word_performance_recovered, '--', label=legend[2], c=colors[col_keys[0]])
-            plt.plot(x_data, color_performance_recovered, '--', label=legend[3], c=colors[col_keys[1]])
+            plt.plot(
+                x_data,
+                word_performance_recovered,
+                "--",
+                label=legend[2],
+                c=colors[col_keys[0]],
+            )
+            plt.plot(
+                x_data,
+                color_performance_recovered,
+                "--",
+                label=legend[3],
+                c=colors[col_keys[1]],
+            )
         plt.xlim([0.5, 2.5])
         plt.ylim([0, 50])
         plt.ylabel("Error Rate (%)", fontsize="large")
         plt.legend(loc=2, fontsize="large")
         plt.title("Task Switching", fontsize="large")
-        plt.xticks(x_data, ['Repetition', 'Switch'], rotation='horizontal')
+        plt.xticks(x_data, ["Repetition", "Switch"], rotation="horizontal")
         plt.show()
 
     collection = SyntheticExperimentCollection(
         name=name,
         description=task_switching.__doc__,
         variables=variables,
-        experiment_runner=experiment_runner,
+        run=run,
         ground_truth=ground_truth,
         domain=domain,
         plotter=plotter,
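Note: this diff moves seeding from the factory into the runner itself. run() now takes a per-call random_state, and observation_noise is renamed added_noise. A minimal usage sketch of the updated interface; the condition values and the attribute access on the returned collection are illustrative assumptions, not part of this diff:

    import numpy as np

    from autora.experiment_runner.synthetic.neuroscience.task_switching import task_switching

    collection = task_switching()
    # columns: current task strength, alternative task strength, is_switch
    conditions = np.array(
        [
            [0.5, 0.5, 0.0],  # repetition trial (hypothetical values)
            [0.5, 0.5, 1.0],  # switch trial (hypothetical values)
        ]
    )
    observations = collection.run(conditions, added_noise=0.01, random_state=42)
    noise_free = collection.ground_truth(conditions)  # added_noise bound to 0.0 via partial
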
diff --git a/src/autora/experiment_runner/synthetic/psychology/exp_learning.py b/src/autora/experiment_runner/synthetic/psychology/exp_learning.py
index b2654dd3..24ed25dc 100644
--- a/src/autora/experiment_runner/synthetic/psychology/exp_learning.py
+++ b/src/autora/experiment_runner/synthetic/psychology/exp_learning.py
@@ -1,7 +1,8 @@
 from functools import partial
-from typing import Optional
+from typing import Optional, Union
 
 import numpy as np
+import pandas as pd
 
 from autora.experiment_runner.synthetic.utilities import SyntheticExperimentCollection
 from autora.variable import DV, IV, ValueType, VariableCollection
@@ -15,7 +16,6 @@ def exp_learning(
     maximum_initial_value=0.5,
     lr=0.03,
     p_asymptotic=1.0,
-    random_state: Optional[int] = None,
 ):
     """
     Exponential Learning
@@ -28,7 +28,6 @@ def exp_learning(
         minimum_trial: upper bound for exponential constant
         name: name of the experiment
         resolution: number of allowed values for stimulus
-        random_state: integer used to seed the random number generator
     """
 
     maximum_trial = resolution
@@ -42,31 +41,26 @@ def exp_learning(
         maximum_initial_value=maximum_initial_value,
         lr=lr,
         p_asymptotic=p_asymptotic,
-        random_state=random_state,
     )
 
     p_initial = IV(
         name="P_asymptotic",
-        allowed_values=np.linspace(minimum_initial_value,
-                                   maximum_initial_value,
-                                   resolution),
-        value_range=(minimum_initial_value,
-                     maximum_initial_value),
+        allowed_values=np.linspace(
+            minimum_initial_value, maximum_initial_value, resolution
+        ),
+        value_range=(minimum_initial_value, maximum_initial_value),
         units="performance",
         variable_label="Asymptotic Performance",
-        type=ValueType.REAL
+        type=ValueType.REAL,
     )
 
     trial = IV(
         name="trial",
-        allowed_values=np.linspace(minimum_trial,
-                                   maximum_trial,
-                                   resolution),
-        value_range=(minimum_trial,
-                     maximum_trial),
+        allowed_values=np.linspace(minimum_trial, maximum_trial, resolution),
+        value_range=(minimum_trial, maximum_trial),
         units="trials",
         variable_label="Trials",
-        type=ValueType.REAL
+        type=ValueType.REAL,
     )
 
     performance = DV(
@@ -74,7 +68,7 @@ def exp_learning(
         value_range=(0, p_asymptotic),
         units="performance",
         variable_label="Performance",
-        type=ValueType.REAL
+        type=ValueType.REAL,
     )
 
     variables = VariableCollection(
@@ -82,12 +76,12 @@
         dependent_variables=[performance],
     )
 
-    rng = np.random.default_rng(random_state)
-
-    def experiment_runner(
+    def run(
        conditions: Union[pd.DataFrame, np.ndarray, np.recarray],
-        observation_noise: float = 0.01,
+        added_noise: float = 0.01,
+        random_state: Optional[int] = None,
     ):
+        rng = np.random.default_rng(random_state)
         X = np.array(conditions)
         Y = np.zeros((X.shape[0], 1))
 
@@ -95,17 +89,22 @@
 
         # Heathcote, A., Brown, S., & Mewhort, D. J. (2000). The power law repealed:
         # The case for an exponential law of practice. Psychonomic bulletin & review, 7(2), 185–207.
-        # Thurstone, L. L. (1919). The learning curve equation. Psy- chological Monographs, 26(3), i.
+        # Thurstone, L. L. (1919). The learning curve equation.
+        # Psychological Monographs, 26(3), i.
 
         for idx, x in enumerate(X):
             p_initial_exp = x[0]
             trial_exp = x[1]
-            y = p_asymptotic - (p_asymptotic - p_initial_exp) * np.exp(- lr * trial_exp) + rng.random.normal(0, std)
+            y = (
+                p_asymptotic
+                - (p_asymptotic - p_initial_exp) * np.exp(-lr * trial_exp)
+                + rng.normal(0, added_noise)
+            )
             Y[idx] = y
 
         return Y
 
-    ground_truth = partial(experiment_runner, observation_noise=0.0)
+    ground_truth = partial(run, added_noise=0.0)
 
     def domain():
         p_initial_values = variables.independent_variables[0].allowed_values
@@ -149,7 +148,7 @@
         name=name,
         description=exp_learning.__doc__,
         variables=variables,
-        experiment_runner=experiment_runner,
+        run=run,
         ground_truth=ground_truth,
         domain=domain,
         plotter=plotter,
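The same pattern applies here: because the generator is now constructed inside run, two calls with the same random_state draw identical noise, which is the point of moving the seed to the call site. A small sketch under that assumption; the condition values are hypothetical:

    import numpy as np

    from autora.experiment_runner.synthetic.psychology.exp_learning import exp_learning

    collection = exp_learning()
    X = np.array([[0.2, 10.0]])  # initial performance, trial number (hypothetical)
    y1 = collection.run(X, random_state=7)
    y2 = collection.run(X, random_state=7)
    assert np.array_equal(y1, y2)  # same seed, same noise draw
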
diff --git a/src/autora/experiment_runner/synthetic/psychophysics/stevens_power_law.py b/src/autora/experiment_runner/synthetic/psychophysics/stevens_power_law.py
index 243d5534..7ebec869 100644
--- a/src/autora/experiment_runner/synthetic/psychophysics/stevens_power_law.py
+++ b/src/autora/experiment_runner/synthetic/psychophysics/stevens_power_law.py
@@ -14,7 +14,6 @@ def stevens_power_law(
     proportionality_constant=1.0,
     modality_constant=0.8,
     maximum_stimulus_intensity=5.0,
-    random_state: Optional[int] = None,
 ):
     """
     Stevens' Power Law
@@ -25,8 +24,6 @@ def stevens_power_law(
         modality_constant: power constant
         proportionality_constant: constant multiplier
         maximum_stimulus_intensity: maximum value for stimulus
-        random_state: integer used to seed the random number generator
-
     """
 
     params = dict(
@@ -35,16 +32,17 @@ def stevens_power_law(
         proportionality_constant=proportionality_constant,
         modality_constant=modality_constant,
         maximum_stimulus_intensity=maximum_stimulus_intensity,
-        random_state=random_state,
     )
 
     iv1 = IV(
         name="S",
-        allowed_values=np.linspace(1 / resolution, maximum_stimulus_intensity, resolution),
+        allowed_values=np.linspace(
+            1 / resolution, maximum_stimulus_intensity, resolution
+        ),
         value_range=(1 / resolution, maximum_stimulus_intensity),
         units="intensity",
         variable_label="Stimulus Intensity",
-        type=ValueType.REAL
+        type=ValueType.REAL,
     )
 
     dv1 = DV(
@@ -52,7 +50,7 @@
         value_range=(0, maximum_stimulus_intensity),
         units="sensation",
         variable_label="Perceived Intensity",
-        type=ValueType.REAL
+        type=ValueType.REAL,
     )
 
     variables = VariableCollection(
@@ -60,21 +58,23 @@
         dependent_variables=[dv1],
     )
 
-    rng = np.random.default_rng(random_state)
-
-    def experiment_runner(
+    def run(
         conditions: Union[pd.DataFrame, np.ndarray, np.recarray],
-        observation_noise: float = 0.01,
+        added_noise: float = 0.01,
+        random_state: Optional[int] = None,
     ):
+        rng = np.random.default_rng(random_state)
         X = np.array(conditions)
         Y = np.zeros((X.shape[0], 1))
         for idx, x in enumerate(X):
-            y = proportionality_constant * x[0] ** modality_constant + rng.random.normal(0, std)
+            y = proportionality_constant * x[
+                0
+            ] ** modality_constant + rng.normal(0, added_noise)
             Y[idx] = y
 
         return Y
 
-    ground_truth = partial(experiment_runner, observation_noise =0.0)
+    ground_truth = partial(run, added_noise=0.0)
 
     def domain():
         s_values = variables.independent_variables[0].allowed_values
@@ -85,8 +85,8 @@
     def plotter(
         model=None,
     ):
-        import matplotlib.pyplot as plt
         import matplotlib.colors as mcolors
+        import matplotlib.pyplot as plt
 
         colors = mcolors.TABLEAU_COLORS
         col_keys = list(colors.keys())
@@ -95,7 +95,7 @@
         plt.plot(X, y, label="Original", c=colors[col_keys[0]])
         if model is not None:
             y = model.predict(X)
-            plt.plot(X, y, label=f"Recovered", c=colors[col_keys[0]], linestyle="--")
+            plt.plot(X, y, label="Recovered", c=colors[col_keys[0]], linestyle="--")
         x_limit = [0, variables.independent_variables[0].value_range[1]]
         y_limit = [0, 4]
         x_label = "Stimulus Intensity"
@@ -113,7 +113,7 @@
         name=name,
         description=stevens_power_law.__doc__,
         variables=variables,
-        experiment_runner=experiment_runner,
+        run=run,
         ground_truth=ground_truth,
         domain=domain,
         plotter=plotter,
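Because ground_truth binds added_noise=0.0 and a normal draw with scale 0 returns exactly its mean, the noise-free runner should reproduce the power law y = k * S ** a directly. A sanity-check sketch; the parameter and stimulus values are chosen for illustration only:

    import numpy as np

    from autora.experiment_runner.synthetic.psychophysics.stevens_power_law import stevens_power_law

    collection = stevens_power_law(proportionality_constant=1.0, modality_constant=0.8)
    S = np.array([[1.0], [2.0], [4.0]])  # stimulus intensities (hypothetical)
    assert np.allclose(collection.ground_truth(S), 1.0 * S ** 0.8)
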
label=f"Recovered", c=colors[col_keys[0]], linestyle="--") + plt.plot(X, y, label="Recovered", c=colors[col_keys[0]], linestyle="--") x_limit = [0, variables.independent_variables[0].value_range[1]] y_limit = [0, 4] x_label = "Stimulus Intensity" @@ -113,7 +113,7 @@ def plotter( name=name, description=stevens_power_law.__doc__, variables=variables, - experiment_runner=experiment_runner, + run=run, ground_truth=ground_truth, domain=domain, plotter=plotter, diff --git a/tests/test_bundled_models.py b/tests/test_bundled_models.py index f78cf93b..5418c946 100644 --- a/tests/test_bundled_models.py +++ b/tests/test_bundled_models.py @@ -8,20 +8,18 @@ expected_value_theory, ) from autora.experiment_runner.synthetic.economics.prospect_theory import prospect_theory -from autora.experiment_runner.synthetic.psychology.luce_choice_ratio import ( - luce_choice_ratio, -) from autora.experiment_runner.synthetic.neuroscience.task_switching import ( task_switching, ) -from autora.experiment_runner.synthetic.psychophysics.weber_fechner_law import ( - weber_fechner_law, +from autora.experiment_runner.synthetic.psychology.exp_learning import exp_learning +from autora.experiment_runner.synthetic.psychology.luce_choice_ratio import ( + luce_choice_ratio, ) from autora.experiment_runner.synthetic.psychophysics.stevens_power_law import ( stevens_power_law, ) -from autora.experiment_runner.synthetic.psychology.exp_learning import ( - exp_learning, +from autora.experiment_runner.synthetic.psychophysics.weber_fechner_law import ( + weber_fechner_law, ) from autora.experiment_runner.synthetic.utilities import describe, register, retrieve @@ -32,6 +30,8 @@ ("template_experiment", template_experiment), ("weber_fechner_law", weber_fechner_law), ("stevens_power_law", stevens_power_law), + ("task_switching", task_switching), + ("exp_learning", exp_learning), ] all_bundled_model_names = [b[0] for b in all_bundled_models]