diff --git a/ema_workbench/em_framework/parameters.py b/ema_workbench/em_framework/parameters.py index ed3108fdf..a924dd3fc 100644 --- a/ema_workbench/em_framework/parameters.py +++ b/ema_workbench/em_framework/parameters.py @@ -42,6 +42,10 @@ def __set_name__(self, cls, name): self.name = name self.internal_name = "_" + name + @abc.abstractmethod + def get_bound(self, instance): + ... + class UpperBound(Bound): def get_bound(self, instance): @@ -63,7 +67,7 @@ def get_bound(self, owner): return bound -class Constant(NamedObject): +class Constant(Variable): """Constant class, can be used for any parameter that has to be set to a fixed value @@ -78,8 +82,10 @@ def __repr__(self, *args, **kwargs): return f"{self.__class__.__name__}('{self.name}', {self.value})" -class Category(Constant): - pass +class Category(NamedObject): + def __init__(self, name, value): + super().__init__(name) + self.value = value def create_category(cat): diff --git a/ema_workbench/em_framework/util.py b/ema_workbench/em_framework/util.py index d7b493a7a..acb8e1003 100644 --- a/ema_workbench/em_framework/util.py +++ b/ema_workbench/em_framework/util.py @@ -62,6 +62,13 @@ def variable_name(self, name): name = [name] self._variable_name = name + def __init__(self, name): + if not name.isidentifier(): + DeprecationWarning( + f"'{name}' is not a valid Python identifier. Starting from version 3.0 of the EMAworkbench, names must be valid python identifiers" + ) + super().__init__(name) + class NamedObjectMap: def __init__(self, kind): # @ReservedAssignment diff --git a/ema_workbench/examples/data/1000 flu cases no policy.tar.gz b/ema_workbench/examples/data/1000 flu cases no policy.tar.gz index b8bd17b6d..aa5a1cf82 100644 Binary files a/ema_workbench/examples/data/1000 flu cases no policy.tar.gz and b/ema_workbench/examples/data/1000 flu cases no policy.tar.gz differ diff --git a/ema_workbench/examples/data/1000 flu cases with policies.tar.gz b/ema_workbench/examples/data/1000 flu cases with policies.tar.gz index 078441d35..0781f2821 100644 Binary files a/ema_workbench/examples/data/1000 flu cases with policies.tar.gz and b/ema_workbench/examples/data/1000 flu cases with policies.tar.gz differ diff --git a/ema_workbench/examples/example_pysd_teacup.py b/ema_workbench/examples/example_pysd_teacup.py index bc3240836..e39d7bb4f 100644 --- a/ema_workbench/examples/example_pysd_teacup.py +++ b/ema_workbench/examples/example_pysd_teacup.py @@ -18,7 +18,9 @@ model = PysdModel(mdl_file=mdl_file) - model.uncertainties = [RealParameter("Room Temperature", 33, 120)] - model.outcomes = [TimeSeriesOutcome("Teacup Temperature")] + model.uncertainties = [ + RealParameter("room_temperature", 33, 120, variable_name="Room Temperature") + ] + model.outcomes = [TimeSeriesOutcome("teacup_temperature", variable_name="Teacup Temperature")] perform_experiments(model, 100) diff --git a/ema_workbench/examples/example_vensim_advanced_flu.py b/ema_workbench/examples/example_vensim_advanced_flu.py index 63f074309..ab4cfca57 100644 --- a/ema_workbench/examples/example_vensim_advanced_flu.py +++ b/ema_workbench/examples/example_vensim_advanced_flu.py @@ -35,17 +35,20 @@ def time_of_max(infected_fraction, time): # outcomes model.outcomes = [ - TimeSeriesOutcome("deceased population region 1"), - TimeSeriesOutcome("infected fraction R1"), + TimeSeriesOutcome( + "deceased_population_region_1", variable_name="deceased population region 1" + ), + TimeSeriesOutcome("infected_fraction_R1", variable_name="infected fraction R1"), ScalarOutcome( - "max infection 
fraction", variable_name="infected fraction R1", function=np.max + "max_infection_fraction", variable_name="infected fraction R1", function=np.max ), ScalarOutcome( - "time of max", variable_name=["infected fraction R1", "TIME"], function=time_of_max + "time_of_max", variable_name=["infected fraction R1", "TIME"], function=time_of_max ), ] # create uncertainties based on csv + # FIXME csv is missing model.uncertainties = parameters_from_csv("./models/flu/flu_uncertainties.csv") # add policies diff --git a/ema_workbench/examples/example_vensim_energy.py b/ema_workbench/examples/example_vensim_energy.py new file mode 100644 index 000000000..b432d7354 --- /dev/null +++ b/ema_workbench/examples/example_vensim_energy.py @@ -0,0 +1,217 @@ +""" +Created on 27 Jan 2014 + +@author: jhkwakkel +""" +from ema_workbench import ( + RealParameter, + TimeSeriesOutcome, + ema_logging, + MultiprocessingEvaluator, + ScalarOutcome, + perform_experiments, + CategoricalParameter, + save_results, + Policy, +) +from ema_workbench.connectors.vensim import VensimModel +from ema_workbench.em_framework.evaluators import SequentialEvaluator + + +def get_energy_model(): + model = VensimModel( + "energyTransition", + wd="./models", + model_file="RB_V25_ets_1_policy_modified_adaptive_extended_outcomes.vpm", + ) + + model.outcomes = [ + TimeSeriesOutcome( + "cumulative_carbon_emissions", variable_name="cumulative carbon emissions" + ), + TimeSeriesOutcome( + "carbon_emissions_reduction_fraction", + variable_name="carbon emissions reduction fraction", + ), + TimeSeriesOutcome("fraction_renewables", variable_name="fraction renewables"), + TimeSeriesOutcome("average_total_costs", variable_name="average total costs"), + TimeSeriesOutcome("total_costs_of_electricity", variable_name="total costs of electricity"), + ] + + model.uncertainties = [ + RealParameter( + "demand_fuel_price_elasticity_factor", + 0, + 0.5, + variable_name="demand fuel price elasticity factor", + ), + RealParameter( + "economic_lifetime_biomass", 30, 50, variable_name="economic lifetime biomass" + ), + RealParameter("economic_lifetime_coal", 30, 50, variable_name="economic lifetime coal"), + RealParameter("economic_lifetime_gas", 25, 40, variable_name="economic lifetime gas"), + RealParameter("economic_lifetime_igcc", 30, 50, variable_name="economic lifetime igcc"), + RealParameter("economic_lifetime_ngcc", 25, 40, variable_name="economic lifetime ngcc"), + RealParameter( + "economic_lifetime_nuclear", 50, 70, variable_name="economic lifetime nuclear" + ), + RealParameter("economic_lifetime_pv", 20, 30, variable_name="economic lifetime pv"), + RealParameter("economic_lifetime_wind", 20, 30, variable_name="economic lifetime wind"), + RealParameter("economic_lifetime_hydro", 50, 70, variable_name="economic lifetime hydro"), + RealParameter( + "uncertainty_initial_gross_fuel_costs", + 0.5, + 1.5, + variable_name="uncertainty initial gross fuel costs", + ), + RealParameter( + "investment_proportionality_constant", + 0.5, + 4, + variable_name="investment proportionality constant", + ), + RealParameter( + "investors_desired_excess_capacity_investment", + 0.2, + 2, + variable_name="investors desired excess capacity investment", + ), + RealParameter( + "price_demand_elasticity_factor", + -0.07, + -0.001, + variable_name="price demand elasticity factor", + ), + RealParameter( + "price_volatility_global_resource_markets", + 0.1, + 0.2, + variable_name="price volatility global resource markets", + ), + RealParameter("progress_ratio_biomass", 0.85, 1, 
variable_name="progress ratio biomass"), + RealParameter("progress_ratio_coal", 0.9, 1.05, variable_name="progress ratio coal"), + RealParameter("progress_ratio_gas", 0.85, 1, variable_name="progress ratio gas"), + RealParameter("progress_ratio_igcc", 0.9, 1.05, variable_name="progress ratio igcc"), + RealParameter("progress_ratio_ngcc", 0.85, 1, variable_name="progress ratio ngcc"), + RealParameter("progress_ratio_nuclear", 0.9, 1.05, variable_name="progress ratio nuclear"), + RealParameter("progress_ratio_pv", 0.75, 0.9, variable_name="progress ratio pv"), + RealParameter("progress_ratio_wind", 0.85, 1, variable_name="progress ratio wind"), + RealParameter("progress_ratio_hydro", 0.9, 1.05, variable_name="progress ratio hydro"), + RealParameter( + "starting_construction_time", 0.1, 3, variable_name="starting construction time" + ), + RealParameter( + "time_of_nuclear_power_plant_ban", + 2013, + 2100, + variable_name="time of nuclear power plant ban", + ), + RealParameter( + "weight_factor_carbon_abatement", 1, 10, variable_name="weight factor carbon abatement" + ), + RealParameter( + "weight_factor_marginal_investment_costs", + 1, + 10, + variable_name="weight factor marginal investment costs", + ), + RealParameter( + "weight_factor_technological_familiarity", + 1, + 10, + variable_name="weight factor technological familiarity", + ), + RealParameter( + "weight_factor_technological_growth_potential", + 1, + 10, + variable_name="weight factor technological growth potential", + ), + RealParameter( + "maximum_battery_storage_uncertainty_constant", + 0.2, + 3, + variable_name="maximum battery storage uncertainty constant", + ), + RealParameter( + "maximum_no_storage_penetration_rate_wind", + 0.2, + 0.6, + variable_name="maximum no storage penetration rate wind", + ), + RealParameter( + "maximum_no_storage_penetration_rate_pv", + 0.1, + 0.4, + variable_name="maximum no storage penetration rate pv", + ), + CategoricalParameter( + "SWITCH_lookup_curve_TGC", (1, 2, 3, 4), variable_name="SWITCH lookup curve TGC" + ), + CategoricalParameter( + "SWTICH_preference_carbon_curve", (1, 2), variable_name="SWTICH preference carbon curve" + ), + CategoricalParameter( + "SWITCH_economic_growth", (1, 2, 3, 4, 5, 6), variable_name="SWITCH economic growth" + ), + CategoricalParameter( + "SWITCH_electrification_rate", + (1, 2, 3, 4, 5, 6), + variable_name="SWITCH electrification rate", + ), + CategoricalParameter( + "SWITCH_Market_price_determination", + (1, 2), + variable_name="SWITCH Market price determination", + ), + CategoricalParameter( + "SWITCH_physical_limits", (1, 2), variable_name="SWITCH physical limits" + ), + CategoricalParameter( + "SWITCH_low_reserve_margin_price_markup", + (1, 2, 3, 4), + variable_name="SWITCH low reserve margin price markup", + ), + CategoricalParameter( + "SWITCH_interconnection_capacity_expansion", + (1, 2, 3, 4), + variable_name="SWITCH interconnection capacity expansion", + ), + CategoricalParameter( + "SWITCH_storage_for_intermittent_supply", + (1, 2, 3, 4, 5, 6, 7), + variable_name="SWITCH storage for intermittent supply", + ), + CategoricalParameter("SWITCH_carbon_cap", (1, 2, 3), variable_name="SWITCH carbon cap"), + CategoricalParameter( + "SWITCH_TGC_obligation_curve", (1, 2, 3), variable_name="SWITCH TGC obligation curve" + ), + CategoricalParameter( + "SWITCH_carbon_price_determination", + (1, 2, 3), + variable_name="SWITCH carbon price determination", + ), + ] + return model + + +if __name__ == "__main__": + ema_logging.log_to_stderr(ema_logging.INFO) + model = 
get_energy_model() + + policies = [ + Policy("no policy", model_file="RB_V25_ets_1_extended_outcomes.vpm"), + Policy("static policy", model_file="ETSPolicy.vpm"), + Policy( + "adaptive policy", + model_file="RB_V25_ets_1_policy_modified_adaptive_extended_outcomes.vpm", + ), + ] + + n = 100000 + with MultiprocessingEvaluator(model) as evaluator: + experiments, outcomes = evaluator.perform_experiments(n, policies=policies) + # + # outcomes.pop("TIME") + # results = experiments, outcomes + # save_results(results, f"./data/{n}_lhs.tar.gz") diff --git a/ema_workbench/examples/example_vensim_flu.py b/ema_workbench/examples/example_vensim_flu.py index 035fab3c1..a12ed632c 100644 --- a/ema_workbench/examples/example_vensim_flu.py +++ b/ema_workbench/examples/example_vensim_flu.py @@ -16,9 +16,10 @@ ema_logging, ScalarOutcome, perform_experiments, + Policy, + save_results, ) from ema_workbench.connectors.vensim import VensimModel -from ema_workbench.em_framework.parameters import Policy if __name__ == "__main__": ema_logging.log_to_stderr(ema_logging.INFO) @@ -27,34 +28,95 @@ # outcomes model.outcomes = [ - TimeSeriesOutcome("deceased population region 1"), - TimeSeriesOutcome("infected fraction R1"), + TimeSeriesOutcome( + "deceased_population_region_1", variable_name="deceased population region 1" + ), + TimeSeriesOutcome("infected_fraction_R1", variable_name="infected fraction R1"), ScalarOutcome( - "max infection fraction", variable_name="infected fraction R1", function=np.max + "max_infection_fraction", variable_name="infected fraction R1", function=np.max ), ] # Plain Parametric Uncertainties model.uncertainties = [ - RealParameter("additional seasonal immune population fraction R1", 0, 0.5), - RealParameter("additional seasonal immune population fraction R2", 0, 0.5), - RealParameter("fatality ratio region 1", 0.0001, 0.1), - RealParameter("fatality rate region 2", 0.0001, 0.1), - RealParameter("initial immune fraction of the population of region 1", 0, 0.5), - RealParameter("initial immune fraction of the population of region 2", 0, 0.5), - RealParameter("normal interregional contact rate", 0, 0.9), - RealParameter("permanent immune population fraction R1", 0, 0.5), - RealParameter("permanent immune population fraction R2", 0, 0.5), - RealParameter("recovery time region 1", 0.1, 0.75), - RealParameter("recovery time region 2", 0.1, 0.75), - RealParameter("susceptible to immune population delay time region 1", 0.5, 2), - RealParameter("susceptible to immune population delay time region 2", 0.5, 2), - RealParameter("root contact rate region 1", 0.01, 5), - RealParameter("root contact ratio region 2", 0.01, 5), - RealParameter("infection ratio region 1", 0, 0.15), - RealParameter("infection rate region 2", 0, 0.15), - RealParameter("normal contact rate region 1", 10, 100), - RealParameter("normal contact rate region 2", 10, 200), + RealParameter( + "additional_seasonal_immune_population_fraction_R1", + 0, + 0.5, + variable_name="additional seasonal immune population fraction R1", + ), + RealParameter( + "additional_seasonal_immune_population_fraction_R2", + 0, + 0.5, + variable_name="additional seasonal immune population fraction R2", + ), + RealParameter( + "fatality_ratio_region_1", 0.0001, 0.1, variable_name="fatality ratio region 1" + ), + RealParameter( + "fatality_rate_region_2", 0.0001, 0.1, variable_name="fatality rate region 2" + ), + RealParameter( + "initial_immune_fraction_of_the_population_of_region_1", + 0, + 0.5, + variable_name="initial immune fraction of the population of 
region 1", + ), + RealParameter( + "initial_immune_fraction_of_the_population_of_region_2", + 0, + 0.5, + variable_name="initial immune fraction of the population of region 2", + ), + RealParameter( + "normal_interregional_contact_rate", + 0, + 0.9, + variable_name="normal interregional contact rate", + ), + RealParameter( + "permanent_immune_population_fraction_R1", + 0, + 0.5, + variable_name="permanent immune population fraction R1", + ), + RealParameter( + "permanent_immune_population_fraction_R2", + 0, + 0.5, + variable_name="permanent immune population fraction R2", + ), + RealParameter("recovery_time_region_1", 0.1, 0.75, variable_name="recovery time region 1"), + RealParameter("recovery_time_region_2", 0.1, 0.75, variable_name="recovery time region 2"), + RealParameter( + "susceptible_to_immune_population_delay_time_region_1", + 0.5, + 2, + variable_name="susceptible to immune population delay time region 1", + ), + RealParameter( + "susceptible_to_immune_population_delay_time_region_2", + 0.5, + 2, + variable_name="susceptible to immune population delay time region 2", + ), + RealParameter( + "root_contact_rate_region_1", 0.01, 5, variable_name="root contact rate region 1" + ), + RealParameter( + "root_contact_ratio_region_2", 0.01, 5, variable_name="root contact ratio region 2" + ), + RealParameter( + "infection_ratio_region_1", 0, 0.15, variable_name="infection ratio region 1" + ), + RealParameter("infection_rate_region_2", 0, 0.15, variable_name="infection rate region 2"), + RealParameter( + "normal_contact_rate_region_1", 10, 100, variable_name="normal contact rate region 1" + ), + RealParameter( + "normal_contact_rate_region_2", 10, 200, variable_name="normal contact rate region 2" + ), ] # add policies @@ -65,3 +127,4 @@ ] results = perform_experiments(model, 1000, policies=policies) + save_results(results, "./data/1000 flu cases with policies.tar.gz") diff --git a/ema_workbench/examples/example_vensim_lookup.py b/ema_workbench/examples/example_vensim_lookup.py index 741f3a421..c9fafb6e4 100644 --- a/ema_workbench/examples/example_vensim_lookup.py +++ b/ema_workbench/examples/example_vensim_lookup.py @@ -18,10 +18,10 @@ class Burnout(VensimModel): model_file = r"\BURNOUT.vpm" outcomes = [ - TimeSeriesOutcome("Accomplishments to Date"), - TimeSeriesOutcome("Energy Level"), - TimeSeriesOutcome("Hours Worked Per Week"), - TimeSeriesOutcome("accomplishments per hour"), + TimeSeriesOutcome("accomplishments_to_date", variable_name="Accomplishments to Date"), + TimeSeriesOutcome("energy_level", variable_name="Energy Level"), + TimeSeriesOutcome("hours_worked_per_week", variable_name="Hours Worked Per Week"), + TimeSeriesOutcome("accomplishments_per_hour", variable_name="accomplishments per hour"), ] def __init__(self, working_directory, name): diff --git a/ema_workbench/examples/example_vensim_no_policy_flu.py b/ema_workbench/examples/example_vensim_no_policy_flu.py index ab9ad70d3..bf3c32851 100644 --- a/ema_workbench/examples/example_vensim_no_policy_flu.py +++ b/ema_workbench/examples/example_vensim_no_policy_flu.py @@ -14,6 +14,7 @@ ema_logging, perform_experiments, MultiprocessingEvaluator, + save_results, ) from ema_workbench.connectors.vensim import VensimModel @@ -21,37 +22,100 @@ if __name__ == "__main__": ema_logging.log_to_stderr(ema_logging.INFO) - model = VensimModel("fluCase", wd="./models/flu", model_file="FLUvensimV1basecase.vpm") + model = VensimModel("fluCase", wd=r"./models/flu", model_file=r"FLUvensimV1basecase.vpm") # outcomes model.outcomes = [ - 
TimeSeriesOutcome("deceased population region 1"), - TimeSeriesOutcome("infected fraction R1"), + TimeSeriesOutcome( + "deceased_population_region_1", variable_name="deceased population region 1" + ), + TimeSeriesOutcome("infected_fraction_R1", variable_name="infected fraction R1"), ] # Plain Parametric Uncertainties model.uncertainties = [ - RealParameter("additional seasonal immune population fraction R1", 0, 0.5), - RealParameter("additional seasonal immune population fraction R2", 0, 0.5), - RealParameter("fatality ratio region 1", 0.0001, 0.1), - RealParameter("fatality rate region 2", 0.0001, 0.1), - RealParameter("initial immune fraction of the population of region 1", 0, 0.5), - RealParameter("initial immune fraction of the population of region 2", 0, 0.5), - RealParameter("normal interregional contact rate", 0, 0.9), - RealParameter("permanent immune population fraction R1", 0, 0.5), - RealParameter("permanent immune population fraction R2", 0, 0.5), - RealParameter("recovery time region 1", 0.1, 0.75), - RealParameter("recovery time region 2", 0.1, 0.75), - RealParameter("susceptible to immune population delay time region 1", 0.5, 2), - RealParameter("susceptible to immune population delay time region 2", 0.5, 2), - RealParameter("root contact rate region 1", 0.01, 5), - RealParameter("root contact ratio region 2", 0.01, 5), - RealParameter("infection ratio region 1", 0, 0.15), - RealParameter("infection rate region 2", 0, 0.15), - RealParameter("normal contact rate region 1", 10, 100), - RealParameter("normal contact rate region 2", 10, 200), + RealParameter( + "additional_seasonal_immune_population_fraction_R1", + 0, + 0.5, + variable_name="additional seasonal immune population fraction R1", + ), + RealParameter( + "additional_seasonal_immune_population_fraction_R2", + 0, + 0.5, + variable_name="additional seasonal immune population fraction R2", + ), + RealParameter( + "fatality_ratio_region_1", 0.0001, 0.1, variable_name="fatality ratio region 1" + ), + RealParameter( + "fatality_rate_region_2", 0.0001, 0.1, variable_name="fatality rate region 2" + ), + RealParameter( + "initial_immune_fraction_of_the_population_of_region_1", + 0, + 0.5, + variable_name="initial immune fraction of the population of region 1", + ), + RealParameter( + "initial_immune_fraction_of_the_population_of_region_2", + 0, + 0.5, + variable_name="initial immune fraction of the population of region 2", + ), + RealParameter( + "normal_interregional_contact_rate", + 0, + 0.9, + variable_name="normal interregional contact rate", + ), + RealParameter( + "permanent_immune_population_fraction_R1", + 0, + 0.5, + variable_name="permanent immune population fraction R1", + ), + RealParameter( + "permanent_immune_population_fraction_R2", + 0, + 0.5, + variable_name="permanent immune population fraction R2", + ), + RealParameter("recovery_time_region_1", 0.1, 0.75, variable_name="recovery time region 1"), + RealParameter("recovery_time_region_2", 0.1, 0.75, variable_name="recovery time region 2"), + RealParameter( + "susceptible_to_immune_population_delay_time_region_1", + 0.5, + 2, + variable_name="susceptible to immune population delay time region 1", + ), + RealParameter( + "susceptible_to_immune_population_delay_time_region_2", + 0.5, + 2, + variable_name="susceptible to immune population delay time region 2", + ), + RealParameter( + "root_contact_rate_region_1", 0.01, 5, variable_name="root contact rate region 1" + ), + RealParameter( + "root_contact_ratio_region_2", 0.01, 5, variable_name="root contact ratio 
region 2" + ), + RealParameter( + "infection_ratio_region_1", 0, 0.15, variable_name="infection ratio region 1" + ), + RealParameter("infection_rate_region_2", 0, 0.15, variable_name="infection rate region 2"), + RealParameter( + "normal_contact_rate_region_1", 10, 100, variable_name="normal contact rate region 1" + ), + RealParameter( + "normal_contact_rate_region_2", 10, 200, variable_name="normal contact rate region 2" + ), ] - nr_experiments = 10 + nr_experiments = 1000 with MultiprocessingEvaluator(model) as evaluator: results = perform_experiments(model, nr_experiments, evaluator=evaluator) + + save_results(results, "./data/1000 flu cases no policy.tar.gz") diff --git a/ema_workbench/examples/example_vensim_scarcity.py b/ema_workbench/examples/example_vensim_scarcity.py index 2a326dd0f..cefdd1c23 100644 --- a/ema_workbench/examples/example_vensim_scarcity.py +++ b/ema_workbench/examples/example_vensim_scarcity.py @@ -38,25 +38,25 @@ def priceSubstite(self, x, speed, begin, end): def run_model(self, scenario, policy): """Method for running an instantiated model structure""" kwargs = scenario - loc = kwargs.pop("lookup shortage loc") - speed = kwargs.pop("lookup shortage speed") + loc = kwargs.pop("lookup_shortage_loc") + speed = kwargs.pop("lookup_shortage_speed") lookup = [self.f(x / 10, speed, loc) for x in range(0, 100)] kwargs["shortage price effect lookup"] = lookup - speed = kwargs.pop("lookup price substitute speed") - begin = kwargs.pop("lookup price substitute begin") - end = kwargs.pop("lookup price substitute end") + speed = kwargs.pop("lookup_price_substitute_speed") + begin = kwargs.pop("lookup_price_substitute_begin") + end = kwargs.pop("lookup_price_substitute_end") lookup = [self.priceSubstite(x, speed, begin, end) for x in range(0, 100, 10)] kwargs["relative price substitute lookup"] = lookup - scale = kwargs.pop("lookup returns to scale speed") - speed = kwargs.pop("lookup returns to scale scale") + scale = kwargs.pop("lookup_returns_to_scale_speed") + speed = kwargs.pop("lookup_returns_to_scale_scale") lookup = [self.returnsToScale(x, speed, scale) for x in range(0, 101, 10)] kwargs["returns to scale lookup"] = lookup - scale = kwargs.pop("lookup approximated learning speed") - speed = kwargs.pop("lookup approximated learning scale") - start = kwargs.pop("lookup approximated learning start") + scale = kwargs.pop("lookup_approximated_learning_speed") + speed = kwargs.pop("lookup_approximated_learning_scale") + start = kwargs.pop("lookup_approximated_learning_start") lookup = [self.approxLearning(x, speed, scale, start) for x in range(0, 101, 10)] kwargs["approximated learning effect lookup"] = lookup @@ -66,48 +66,143 @@ def run_model(self, scenario, policy): if __name__ == "__main__": ema_logging.log_to_stderr(ema_logging.DEBUG) - model = ScarcityModel("scarcity", wd=r"./models/scarcity", model_file=r"\MetalsEMA.vpm") + model = ScarcityModel("scarcity", wd="./models/scarcity", model_file="MetalsEMA.vpm") model.outcomes = [ - TimeSeriesOutcome("relative market price"), - TimeSeriesOutcome("supply demand ratio"), - TimeSeriesOutcome("real annual demand"), - TimeSeriesOutcome("produced of intrinsically demanded"), - TimeSeriesOutcome("supply"), - TimeSeriesOutcome("Installed Recycling Capacity"), - TimeSeriesOutcome("Installed Extraction Capacity"), + TimeSeriesOutcome("relative_market_price", variable_name="relative market price"), + TimeSeriesOutcome("supply_demand_ratio", variable_name="supply demand ratio"), + TimeSeriesOutcome("real_annual_demand", 
variable_name="real annual demand"), + TimeSeriesOutcome( + "produced_of_intrinsically_demanded", variable_name="produced of intrinsically demanded" + ), + TimeSeriesOutcome("supply", variable_name="supply"), + TimeSeriesOutcome( + "Installed_Recycling_Capacity", variable_name="Installed Recycling Capacity" + ), + TimeSeriesOutcome( + "Installed_Extraction_Capacity", variable_name="Installed Extraction Capacity" + ), ] model.uncertainties = [ - RealParameter("price elasticity of demand", 0, 0.5), - RealParameter("fraction of maximum extraction capacity used", 0.6, 1.2), - RealParameter("initial average recycling cost", 1, 4), - RealParameter("exogenously planned extraction capacity", 0, 15000), - RealParameter("absolute recycling loss fraction", 0.1, 0.5), - RealParameter("normal profit margin", 0, 0.4), - RealParameter("initial annual supply", 100000, 120000), - RealParameter("initial in goods", 1500000, 2500000), - RealParameter("average construction time extraction capacity", 1, 10), - RealParameter("average lifetime extraction capacity", 20, 40), - RealParameter("average lifetime recycling capacity", 20, 40), - RealParameter("initial extraction capacity under construction", 5000, 20000), - RealParameter("initial recycling capacity under construction", 5000, 20000), - RealParameter("initial recycling infrastructure", 5000, 20000), + RealParameter( + "price_elasticity_of_demand", 0, 0.5, variable_name="price elasticity of demand" + ), + RealParameter( + "fraction_of_maximum_extraction_capacity_used", + 0.6, + 1.2, + variable_name="fraction of maximum extraction capacity used", + ), + RealParameter( + "initial_average_recycling_cost", 1, 4, variable_name="initial average recycling cost" + ), + RealParameter( + "exogenously_planned_extraction_capacity", + 0, + 15000, + variable_name="exogenously planned extraction capacity", + ), + RealParameter( + "absolute_recycling_loss_fraction", + 0.1, + 0.5, + variable_name="absolute recycling loss fraction", + ), + RealParameter("normal_profit_margin", 0, 0.4, variable_name="normal profit margin"), + RealParameter( + "initial_annual_supply", 100000, 120000, variable_name="initial annual supply" + ), + RealParameter("initial_in_goods", 1500000, 2500000, variable_name="initial in goods"), + RealParameter( + "average_construction_time_extraction_capacity", + 1, + 10, + variable_name="average construction time extraction capacity", + ), + RealParameter( + "average_lifetime_extraction_capacity", + 20, + 40, + variable_name="average lifetime extraction capacity", + ), + RealParameter( + "average_lifetime_recycling_capacity", + 20, + 40, + variable_name="average lifetime recycling capacity", + ), + RealParameter( + "initial_extraction_capacity_under_construction", + 5000, + 20000, + variable_name="initial extraction capacity under construction", + ), + RealParameter( + "initial_recycling_capacity_under_construction", + 5000, + 20000, + variable_name="initial recycling capacity under construction", + ), + RealParameter( + "initial_recycling_infrastructure", + 5000, + 20000, + variable_name="initial recycling infrastructure", + ), # order of delay - CategoricalParameter("order in goods delay", (1, 4, 10, 1000)), - CategoricalParameter("order recycling capacity delay", (1, 4, 10)), - CategoricalParameter("order extraction capacity delay", (1, 4, 10)), + CategoricalParameter( + "order_in_goods_delay", (1, 4, 10, 1000), variable_name="order in goods delay" + ), + CategoricalParameter( + "order_recycling_capacity_delay", + (1, 4, 10), + variable_name="order 
recycling capacity delay", + ), + CategoricalParameter( + "order_extraction_capacity_delay", + (1, 4, 10), + variable_name="order extraction capacity delay", + ), # uncertainties associated with lookups - RealParameter("lookup shortage loc", 20, 50), - RealParameter("lookup shortage speed", 1, 5), - RealParameter("lookup price substitute speed", 0.1, 0.5), - RealParameter("lookup price substitute begin", 3, 7), - RealParameter("lookup price substitute end", 15, 25), - RealParameter("lookup returns to scale speed", 0.01, 0.2), - RealParameter("lookup returns to scale scale", 0.3, 0.7), - RealParameter("lookup approximated learning speed", 0.01, 0.2), - RealParameter("lookup approximated learning scale", 0.3, 0.6), - RealParameter("lookup approximated learning start", 30, 60), + RealParameter("lookup_shortage_loc", 20, 50, variable_name="lookup shortage loc"), + RealParameter("lookup_shortage_speed", 1, 5, variable_name="lookup shortage speed"), + RealParameter( + "lookup_price_substitute_speed", 0.1, 0.5, variable_name="lookup price substitute speed" + ), + RealParameter( + "lookup_price_substitute_begin", 3, 7, variable_name="lookup price substitute begin" + ), + RealParameter( + "lookup_price_substitute_end", 15, 25, variable_name="lookup price substitute end" + ), + RealParameter( + "lookup_returns_to_scale_speed", + 0.01, + 0.2, + variable_name="lookup returns to scale speed", + ), + RealParameter( + "lookup_returns_to_scale_scale", 0.3, 0.7, variable_name="lookup returns to scale scale" + ), + RealParameter( + "lookup_approximated_learning_speed", + 0.01, + 0.2, + variable_name="lookup approximated learning speed", + ), + RealParameter( + "lookup_approximated_learning_scale", + 0.3, + 0.6, + variable_name="lookup approximated learning scale", + ), + RealParameter( + "lookup_approximated_learning_start", + 30, + 60, + variable_name="lookup approximated learning start", + ), ] - results = perform_experiments(model, 50) + results = perform_experiments(model, 1000) diff --git a/ema_workbench/examples/feature_scoring_flu.py b/ema_workbench/examples/feature_scoring_flu.py index 57777902f..16e442619 100644 --- a/ema_workbench/examples/feature_scoring_flu.py +++ b/ema_workbench/examples/feature_scoring_flu.py @@ -18,8 +18,8 @@ # we have timeseries so we need scalars y = { - "deceased population": outcomes["deceased population region 1"][:, -1], - "max. infected fraction": np.max(outcomes["infected fraction R1"], axis=1), + "deceased population": outcomes["deceased_population_region_1"][:, -1], + "max. 
infected fraction": np.max(outcomes["infected_fraction_R1"], axis=1), } scores = feature_scoring.get_feature_scores_all(x, y) diff --git a/ema_workbench/examples/feature_scoring_flu_confidence.py b/ema_workbench/examples/feature_scoring_flu_confidence.py index ea3a947d8..697472b1a 100644 --- a/ema_workbench/examples/feature_scoring_flu_confidence.py +++ b/ema_workbench/examples/feature_scoring_flu_confidence.py @@ -18,7 +18,7 @@ x, outcomes = load_results(fn) x = x.drop(["model", "policy"], axis=1) -y = np.max(outcomes["infected fraction R1"], axis=1) +y = np.max(outcomes["infected_fraction_R1"], axis=1) all_scores = [] for i in range(100): diff --git a/ema_workbench/examples/feature_scoring_flu_overtime.py b/ema_workbench/examples/feature_scoring_flu_overtime.py index 41fd1e0b9..5879d3ccf 100644 --- a/ema_workbench/examples/feature_scoring_flu_overtime.py +++ b/ema_workbench/examples/feature_scoring_flu_overtime.py @@ -18,7 +18,7 @@ x, outcomes = load_results(fn) x = x.drop(["model", "policy"], axis=1) -y = outcomes["deceased population region 1"] +y = outcomes["deceased_population_region_1"] all_scores = [] diff --git a/ema_workbench/examples/models/flu/Current.vdfx b/ema_workbench/examples/models/flu/Current.vdfx index 58349bce3..7398e772f 100644 Binary files a/ema_workbench/examples/models/flu/Current.vdfx and b/ema_workbench/examples/models/flu/Current.vdfx differ diff --git a/ema_workbench/examples/models/hamarat/ETSPolicy.vpm b/ema_workbench/examples/models/hamarat/ETSPolicy.vpm new file mode 100644 index 000000000..ef6d5f814 Binary files /dev/null and b/ema_workbench/examples/models/hamarat/ETSPolicy.vpm differ diff --git a/ema_workbench/examples/models/hamarat/RB_V25_ets_1_extended_outcomes.vpm b/ema_workbench/examples/models/hamarat/RB_V25_ets_1_extended_outcomes.vpm new file mode 100644 index 000000000..f3b3e09f4 Binary files /dev/null and b/ema_workbench/examples/models/hamarat/RB_V25_ets_1_extended_outcomes.vpm differ diff --git a/ema_workbench/examples/models/hamarat/RB_V25_ets_1_policy_modified_adaptive_extended_outcomes.vpm b/ema_workbench/examples/models/hamarat/RB_V25_ets_1_policy_modified_adaptive_extended_outcomes.vpm new file mode 100644 index 000000000..14bf77ae0 Binary files /dev/null and b/ema_workbench/examples/models/hamarat/RB_V25_ets_1_policy_modified_adaptive_extended_outcomes.vpm differ diff --git a/ema_workbench/examples/models/scarcity/Current.vdfx b/ema_workbench/examples/models/scarcity/Current.vdfx new file mode 100644 index 000000000..c83246d58 Binary files /dev/null and b/ema_workbench/examples/models/scarcity/Current.vdfx differ diff --git a/ema_workbench/examples/plotting_pairsplot_flu.py b/ema_workbench/examples/plotting_pairsplot_flu.py index 9e2ff92a7..4f994f5fe 100644 --- a/ema_workbench/examples/plotting_pairsplot_flu.py +++ b/ema_workbench/examples/plotting_pairsplot_flu.py @@ -24,7 +24,7 @@ time = outcomes.pop("TIME") for key, value in outcomes.items(): - if key == "deceased population region 1": + if key == "deceased_population_region_1": tr[key] = value[:, -1] # we want the end value else: # we want the maximum value of the peak diff --git a/ema_workbench/examples/sd_boostedtrees_flu.py b/ema_workbench/examples/sd_boostedtrees_flu.py index 073624d68..30d2c62e0 100644 --- a/ema_workbench/examples/sd_boostedtrees_flu.py +++ b/ema_workbench/examples/sd_boostedtrees_flu.py @@ -49,7 +49,7 @@ def plot_diag(x1, ax): # transform to numpy array with proper recoding of cateogorical variables x, columns = 
feature_scoring._prepare_experiments(experiments)
-y = outcomes["deceased population region 1"][:, -1] > 1000000
+y = outcomes["deceased_population_region_1"][:, -1] > 1000000
 # establish mean case for factor maps
 # this is questionable in particular in case of categorical dimensions
diff --git a/ema_workbench/examples/sd_cart_flu.py b/ema_workbench/examples/sd_cart_flu.py
index 399a9686d..248f00b5e 100644
--- a/ema_workbench/examples/sd_cart_flu.py
+++ b/ema_workbench/examples/sd_cart_flu.py
@@ -13,7 +13,7 @@
 def classify(data):
     # get the output for deceased population
-    result = data["deceased population region 1"]
+    result = data["deceased_population_region_1"]
     # if deceased population is higher then 1.000.000 people,
     # classify as 1
diff --git a/ema_workbench/examples/sd_dimensional_stacking_flu.py b/ema_workbench/examples/sd_dimensional_stacking_flu.py
index 6ddbdd302..c99883467 100644
--- a/ema_workbench/examples/sd_dimensional_stacking_flu.py
+++ b/ema_workbench/examples/sd_dimensional_stacking_flu.py
@@ -18,7 +18,7 @@
 fn = "./data/1000 flu cases no policy.tar.gz"
 x, outcomes = load_results(fn)
-y = outcomes["deceased population region 1"][:, -1] > 1000000
+y = outcomes["deceased_population_region_1"][:, -1] > 1000000
 fig = dimensional_stacking.create_pivot_plot(x, y, 2, bin_labels=True)
diff --git a/ema_workbench/examples/sd_logit_flu_example.py b/ema_workbench/examples/sd_logit_flu_example.py
index f81d3b4a0..b3e8c999b 100644
--- a/ema_workbench/examples/sd_logit_flu_example.py
+++ b/ema_workbench/examples/sd_logit_flu_example.py
@@ -15,7 +15,7 @@
 experiments, outcomes = load_results("./data/1000 flu cases no policy.tar.gz")
 x = experiments.drop(["model", "policy"], axis=1)
-y = outcomes["deceased population region 1"][:, -1] > 1000000
+y = outcomes["deceased_population_region_1"][:, -1] > 1000000
 logit = logistic_regression.Logit(x, y)
 logit.run()
diff --git a/ema_workbench/examples/sd_prim_PCA_flu.py b/ema_workbench/examples/sd_prim_PCA_flu.py
index 1982500fa..c6d151c15 100644
--- a/ema_workbench/examples/sd_prim_PCA_flu.py
+++ b/ema_workbench/examples/sd_prim_PCA_flu.py
@@ -23,7 +23,7 @@
 x, outcomes = load_results(fn)
 # specify y
-y = outcomes["deceased population region 1"][:, -1] > 1000000
+y = outcomes["deceased_population_region_1"][:, -1] > 1000000
 rotated_experiments, rotation_matrix = prim.pca_preprocess(x, y, exclude=["model", "policy"])
diff --git a/ema_workbench/examples/sd_prim_flu.py b/ema_workbench/examples/sd_prim_flu.py
index 706d1b47e..abb159103 100644
--- a/ema_workbench/examples/sd_prim_flu.py
+++ b/ema_workbench/examples/sd_prim_flu.py
@@ -21,7 +21,7 @@
 def classify(data):
     # get the output for deceased population
-    ooi = data["deceased population region 1"]
+    ooi = data["deceased_population_region_1"]
     return ooi[:, -1] > 1000000
diff --git a/ema_workbench/examples/timeseries_clustering_flu.py b/ema_workbench/examples/timeseries_clustering_flu.py
index 38c892c9b..b1dd8c024 100644
--- a/ema_workbench/examples/timeseries_clustering_flu.py
+++ b/ema_workbench/examples/timeseries_clustering_flu.py
@@ -10,7 +10,7 @@
 from ema_workbench.analysis import clusterer, plotting, Density
 experiments, outcomes = load_results("./data/1000 flu cases no policy.tar.gz")
-data = outcomes["infected fraction R1"]
+data = outcomes["infected_fraction_R1"]
 # calculate distances
 distances = clusterer.calculate_cid(data)
diff --git a/test/data/1000 flu cases no policy.tar.gz b/test/data/1000 flu cases no policy.tar.gz
index c55082d9c..aa5a1cf82 100644
Binary files a/test/data/1000 flu 
cases no policy.tar.gz and b/test/data/1000 flu cases no policy.tar.gz differ diff --git a/test/data/1000 runs scarcity.tar.gz b/test/data/1000 runs scarcity.tar.gz index 905389040..a571a5629 100644 Binary files a/test/data/1000 runs scarcity.tar.gz and b/test/data/1000 runs scarcity.tar.gz differ diff --git a/test/data/eng_trans.tar.gz b/test/data/eng_trans.tar.gz index d86b7b62c..82e77f1c7 100644 Binary files a/test/data/eng_trans.tar.gz and b/test/data/eng_trans.tar.gz differ diff --git a/test/test_analysis/test_cart.py b/test/test_analysis/test_cart.py index 878af1a19..f4006a06b 100644 --- a/test/test_analysis/test_cart.py +++ b/test/test_analysis/test_cart.py @@ -16,7 +16,7 @@ def flu_classify(data): # get the output for deceased population - result = data["deceased population region 1"] + result = data["deceased_population_region_1"] # make an empty array of length equal to number of cases classes = np.zeros(result.shape[0]) @@ -28,7 +28,7 @@ def flu_classify(data): def scarcity_classify(outcomes): - outcome = outcomes["relative market price"] + outcome = outcomes["relative_market_price"] change = np.abs(outcome[:, 1::] - outcome[:, 0:-1]) neg_change = np.min(change, axis=1) @@ -57,7 +57,7 @@ def test_setup_cart(self): y[k] = v[:, -1] temp_results = (x, y) - alg = cart.setup_cart(temp_results, "deceased population region 1", mass_min=0.05) + alg = cart.setup_cart(temp_results, "deceased_population_region_1", mass_min=0.05) self.assertTrue(alg.mode == RuleInductionType.REGRESSION) n_cols = 5 @@ -137,7 +137,7 @@ def test_stats_to_dataframe(self): alg.build_tree() stats = alg.stats_to_dataframe() - y = outcomes["deceased population region 1"][:, -1] + y = outcomes["deceased_population_region_1"][:, -1] alg = cart.CART(x, y, mode=RuleInductionType.REGRESSION) alg.build_tree() stats = alg.stats_to_dataframe() @@ -163,7 +163,7 @@ def test_build_tree(self): y[k] = v temp_results = (x, y) - alg = cart.setup_cart(temp_results, "deceased population region 1", mass_min=0.05) + alg = cart.setup_cart(temp_results, "deceased_population_region_1", mass_min=0.05) alg.build_tree() self.assertTrue(isinstance(alg.clf, cart.tree.DecisionTreeRegressor)) diff --git a/test/test_analysis/test_clusterer.py b/test/test_analysis/test_clusterer.py index 6d5a2acbd..40415045e 100644 --- a/test/test_analysis/test_clusterer.py +++ b/test/test_analysis/test_clusterer.py @@ -10,7 +10,7 @@ class ClusterTestCase(unittest.TestCase): def test_cluster(self): n = 10 experiments, outcomes = utilities.load_flu_data() - data = outcomes["infected fraction R1"][0:n, :] + data = outcomes["infected_fraction_R1"][0:n, :] distances = clusterer.calculate_cid(data) self.assertEqual(distances.shape, (n, n)) diff --git a/test/test_analysis/test_dimensional_stacking.py b/test/test_analysis/test_dimensional_stacking.py index 3c81a7c64..a247a2281 100644 --- a/test/test_analysis/test_dimensional_stacking.py +++ b/test/test_analysis/test_dimensional_stacking.py @@ -35,7 +35,7 @@ def test_discretize(self): def test_create_pivot_plot(self): x, outcomes = utilities.load_flu_data() - y = outcomes["deceased population region 1"][:, -1] > 1000000 + y = outcomes["deceased_population_region_1"][:, -1] > 1000000 dimensional_stacking.create_pivot_plot(x, y, 2) dimensional_stacking.create_pivot_plot(x, y, 2, labels=False, bin_labels=True) diff --git a/test/test_analysis/test_feature_scoring.py b/test/test_analysis/test_feature_scoring.py index e819d0796..f602348d1 100644 --- a/test/test_analysis/test_feature_scoring.py +++ 
b/test/test_analysis/test_feature_scoring.py @@ -64,7 +64,7 @@ def test_prepare_outcomes(self): # string type correct ooi = "nr deaths" - outcomes[ooi] = outcomes["deceased population region 1"][:, -1] + outcomes[ooi] = outcomes["deceased_population_region_1"][:, -1] y, categorical = fs._prepare_outcomes(outcomes, ooi) self.assertFalse(categorical) @@ -76,7 +76,7 @@ def test_prepare_outcomes(self): # classify function correct def classify(data): - result = data["deceased population region 1"] + result = data["deceased_population_region_1"] classes = np.zeros(result.shape[0]) classes[result[:, -1] > 1000000] = 1 return classes @@ -95,7 +95,7 @@ def test_get_univariate_feature_scores(self): def classify(data): # get the output for deceased population - result = data["deceased population region 1"] + result = data["deceased_population_region_1"] # make an empty array of length equal to number of cases classes = np.zeros(result.shape[0]) @@ -116,7 +116,7 @@ def classify(data): self.assertEqual(len(scores), len(x.columns) - 3) # f regression - y = outcomes["deceased population region 1"][:, -1] + y = outcomes["deceased_population_region_1"][:, -1] scores = fs.get_univariate_feature_scores(x, y, score_func=F_REGRESSION) self.assertEqual(len(scores), len(x.columns) - 3) @@ -125,7 +125,7 @@ def test_get_rf_feature_scores(self): def classify(data): # get the output for deceased population - result = data["deceased population region 1"] + result = data["deceased_population_region_1"] # make an empty array of length equal to number of cases classes = np.zeros(result.shape[0]) @@ -147,7 +147,7 @@ def classify(data): self.assertRaises(ValueError, fs.get_rf_feature_scores, x, y, mode="illegal argument") - y = outcomes["deceased population region 1"][:, -1] + y = outcomes["deceased_population_region_1"][:, -1] scores, forest = fs.get_rf_feature_scores( x, y, mode=RuleInductionType.REGRESSION, random_state=10 ) @@ -157,7 +157,7 @@ def classify(data): def test_get_ex_feature_scores(self): x, outcomes = utilities.load_flu_data() - y = outcomes["deceased population region 1"][:, -1] > 1000000 + y = outcomes["deceased_population_region_1"][:, -1] > 1000000 scores, forest = fs.get_ex_feature_scores( x, y, mode=RuleInductionType.CLASSIFICATION, random_state=10 @@ -168,7 +168,7 @@ def test_get_ex_feature_scores(self): self.assertRaises(ValueError, fs.get_ex_feature_scores, x, y, mode="illegal argument") - y = outcomes["deceased population region 1"][:, -1] + y = outcomes["deceased_population_region_1"][:, -1] scores, forest = fs.get_ex_feature_scores( x, y, mode=RuleInductionType.REGRESSION, random_state=10 ) @@ -181,8 +181,8 @@ def test_get_feature_scores_all(self): # we have timeseries so we need scalars y = { - "deceased population": outcomes["deceased population region 1"][:, -1], - "max. infected fraction": np.max(outcomes["infected fraction R1"], axis=1), + "deceased population": outcomes["deceased_population_region_1"][:, -1], + "max. 
infected fraction": np.max(outcomes["infected_fraction_R1"], axis=1), } scores = fs.get_feature_scores_all(x, y) diff --git a/test/test_analysis/test_logistic_regression.py b/test/test_analysis/test_logistic_regression.py index 941a9a926..3578c5604 100644 --- a/test/test_analysis/test_logistic_regression.py +++ b/test/test_analysis/test_logistic_regression.py @@ -15,7 +15,7 @@ def flu_classify(data): # get the output for deceased population - result = data["deceased population region 1"][:, -1] + result = data["deceased_population_region_1"][:, -1] return result > 1000000 diff --git a/test/test_analysis/test_plotting.py b/test/test_analysis/test_plotting.py index e24a91797..f1fba9451 100644 --- a/test/test_analysis/test_plotting.py +++ b/test/test_analysis/test_plotting.py @@ -73,12 +73,12 @@ def test_group_results(self): print(experiments.shape[0], total_data) # test continuous parameter type - array = experiments["average planning and construction period T1"] + array = experiments["average_planning_and_construction_period_T1"] grouping_specifiers = make_continuous_grouping_specifiers(array, nr_of_groups=5) groups = group_results( experiments, outcomes, - group_by="average planning and construction period T1", + group_by="average_planning_and_construction_period_T1", grouping_specifiers=grouping_specifiers, grouping_labels=[str(entry) for entry in grouping_specifiers], ) @@ -88,12 +88,12 @@ def test_group_results(self): print(experiments.shape[0], total_data) # test integer type - array = experiments["seed PR T1"] + array = experiments["seed_PR_T1"] grouping_specifiers = make_continuous_grouping_specifiers(array, nr_of_groups=10) groups = group_results( experiments, outcomes, - group_by="seed PR T1", + group_by="seed_PR_T1", grouping_specifiers=grouping_specifiers, grouping_labels=[str(entry) for entry in grouping_specifiers], ) @@ -122,7 +122,7 @@ def test_lines(self): lines( experiments, outcomes, - outcomes_to_show="total fraction new technologies", + outcomes_to_show="total_fraction_new_technologies", experiments_to_show=np.arange(0, 600, 20), group_by="policy", grouping_specifiers="basic policy", @@ -292,7 +292,7 @@ def test_envelopes(self): envelopes(experiments, outcomes, density=None, titles=None) envelopes(experiments, outcomes, density=None, titles={}) envelopes( - experiments, outcomes, density=None, titles={"total fraction new technologies": "a"} + experiments, outcomes, density=None, titles={"total_fraction_new_technologies": "a"} ) plt.draw() @@ -302,7 +302,7 @@ def test_envelopes(self): envelopes(experiments, outcomes, density=None, ylabels=None) envelopes(experiments, outcomes, density=None, ylabels={}) envelopes( - experiments, outcomes, density=None, ylabels={"total fraction new technologies": "a"} + experiments, outcomes, density=None, ylabels={"total_fraction_new_technologies": "a"} ) plt.draw() @@ -401,7 +401,7 @@ def test_kde_over_time(self): def test_multiple_densities(self): experiments, outcomes = utilities.load_eng_trans_data() - ooi = "total fraction new technologies" + ooi = "total_fraction_new_technologies" multiple_densities(experiments, outcomes, group_by="policy", points_in_time=[2010]) multiple_densities( diff --git a/test/test_analysis/test_prim.py b/test/test_analysis/test_prim.py index bf047491c..4897c77c7 100644 --- a/test/test_analysis/test_prim.py +++ b/test/test_analysis/test_prim.py @@ -19,7 +19,7 @@ def flu_classify(data): # get the output for deceased population - result = data["deceased population region 1"] + result = 
data["deceased_population_region_1"] # make an empty array of length equal to number of cases classes = np.zeros(result.shape[0]) @@ -178,7 +178,7 @@ def test_setup_prim(self): # test initialization, including t_coi calculation in case of searching # for results equal to or higher than the threshold - outcomes["death toll"] = outcomes["deceased population region 1"][:, -1] + outcomes["death toll"] = outcomes["deceased_population_region_1"][:, -1] results = experiments, outcomes threshold = 10000 prim_obj = prim.setup_prim( @@ -228,7 +228,7 @@ def test_prim_init_select(self): # test initialization, including t_coi calculation in case of searching # for results equal to or higher than the threshold - outcomes["death toll"] = outcomes["deceased population region 1"][:, -1] + outcomes["death toll"] = outcomes["deceased_population_region_1"][:, -1] results = experiments, outcomes threshold = 10000 prim_obj = prim.setup_prim( @@ -323,7 +323,7 @@ def test_box_init(self): def test_prim_exceptions(self): results = utilities.load_flu_data() x, outcomes = results - y = outcomes["deceased population region 1"] + y = outcomes["deceased_population_region_1"] self.assertRaises( prim.PrimException, prim.Prim, x, y, threshold=0.8, mode=RuleInductionType.REGRESSION diff --git a/test/test_analysis/test_regional_sa.py b/test/test_analysis/test_regional_sa.py index b95bec5b8..81501b06b 100644 --- a/test/test_analysis/test_regional_sa.py +++ b/test/test_analysis/test_regional_sa.py @@ -14,7 +14,7 @@ class Test(unittest.TestCase): def test_plot_cdfs(self): x, outcomes = utilities.load_flu_data() - y = outcomes["deceased population region 1"][:, -1] > 1000000 + y = outcomes["deceased_population_region_1"][:, -1] > 1000000 regional_sa.plot_cdfs(x, y) regional_sa.plot_cdfs(x, y, ccdf=True) @@ -24,10 +24,10 @@ def test_plot_cdfs(self): def test_plot__individual_cdf(self): x, outcomes = utilities.load_flu_data() - y = outcomes["deceased population region 1"][:, -1] > 1000000 + y = outcomes["deceased_population_region_1"][:, -1] > 1000000 fig, ax = plt.subplots() - unc = "fatality ratio region 1" + unc = "fatality_ratio_region_1" regional_sa.plot_individual_cdf( ax, diff --git a/test/test_em_framework/test_optimization.py b/test/test_em_framework/test_optimization.py index 65714f06d..a028835c4 100644 --- a/test/test_em_framework/test_optimization.py +++ b/test/test_em_framework/test_optimization.py @@ -170,8 +170,8 @@ def test_to_robust_problem(self, mocked_platypus): scenarios = 5 robustness_functions = [ - ScalarOutcome("mean x", variable_name="x", function=mock.Mock(), kind="maximize"), - ScalarOutcome("mean y", variable_name="y", function=mock.Mock(), kind="maximize"), + ScalarOutcome("mean_x", variable_name="x", function=mock.Mock(), kind="maximize"), + ScalarOutcome("mean_y", variable_name="y", function=mock.Mock(), kind="maximize"), ] problem = to_robust_problem(mocked_model, scenarios, robustness_functions) @@ -179,7 +179,7 @@ def test_to_robust_problem(self, mocked_platypus): self.assertEqual("robust", problem.searchover) for entry in problem.parameters: self.assertIn(entry.name, mocked_model.levers.keys()) - self.assertEqual(["mean x", "mean y"], problem.outcome_names) + self.assertEqual(["mean_x", "mean_y"], problem.outcome_names) def test_process_robust(self): pass diff --git a/test/test_em_framework/test_parameter.py b/test/test_em_framework/test_parameter.py index fe8dac540..c15cdbb9f 100644 --- a/test/test_em_framework/test_parameter.py +++ b/test/test_em_framework/test_parameter.py @@ -55,7 +55,7 @@ 
def test_comparison(self): name = "test" par1 = parameters.RealParameter(name, lower_bound, upper_bound, resolution) - name = "what?" + name = "what" par2 = parameters.RealParameter(name, lower_bound, upper_bound, resolution) self.assertFalse(par1 == par2) diff --git a/test/test_em_framework/test_samplers.py b/test/test_em_framework/test_samplers.py index a4d939fae..0d4522b53 100644 --- a/test/test_em_framework/test_samplers.py +++ b/test/test_em_framework/test_samplers.py @@ -23,9 +23,9 @@ class SamplerTestCase(unittest.TestCase): uncertainties = [ - RealParameter("1", 0, 10), - IntegerParameter("2", 0, 10), - CategoricalParameter("3", ["a", "b", "c"]), + RealParameter("a", 0, 10), + IntegerParameter("b", 0, 10), + CategoricalParameter("c", ["a", "b", "c"]), ] def _test_generate_designs(self, sampler): @@ -37,9 +37,9 @@ def _test_generate_designs(self, sampler): for design in designs: actual_nr_designs += 1 - self.assertIn("1", design, msg) - self.assertIn("2", design, msg) - self.assertIn("3", design, msg) + self.assertIn("a", design, msg) + self.assertIn("b", design, msg) + self.assertIn("c", design, msg) self.assertEqual(designs.n, actual_nr_designs, msg) def test_lhs_sampler(self):
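
Note on the pattern applied throughout this diff: every uncertainty, outcome, and result key is renamed to a valid Python identifier, while the original Vensim/PySD variable name is kept through the variable_name argument. A minimal sketch of that pattern, using only names, bounds, and model files that already appear in the examples above (illustrative, not a definitive implementation):

    from ema_workbench import RealParameter, TimeSeriesOutcome
    from ema_workbench.connectors.vensim import VensimModel

    model = VensimModel("fluCase", wd="./models/flu", model_file="FLUvensimV1basecase.vpm")

    # the workbench-facing name is a Python identifier; variable_name keeps the
    # exact spelling used inside the simulation model
    model.uncertainties = [
        RealParameter(
            "fatality_ratio_region_1", 0.0001, 0.1,
            variable_name="fatality ratio region 1",
        ),
    ]
    model.outcomes = [
        TimeSeriesOutcome(
            "deceased_population_region_1",
            variable_name="deceased population region 1",
        ),
    ]

    # downstream analysis then indexes results by the identifier-style name, e.g.
    # y = outcomes["deceased_population_region_1"][:, -1] > 1000000

The new NamedObject.__init__ in em_framework/util.py constructs a DeprecationWarning for non-identifier names but, as written in the hunk above, does not appear to emit it. A sketch of how such a check would typically surface the warning, assuming the standard-library warnings module (the helper name is hypothetical, for illustration only):

    import warnings

    def warn_if_not_identifier(name):
        # emit, rather than merely instantiate, the deprecation warning
        if not name.isidentifier():
            warnings.warn(
                f"'{name}' is not a valid Python identifier. Starting from version "
                "3.0 of the EMAworkbench, names must be valid Python identifiers",
                DeprecationWarning,
                stacklevel=2,
            )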