diff --git a/ema_workbench/analysis/dimensional_stacking.py b/ema_workbench/analysis/dimensional_stacking.py
index 71cc6aadb..870af2261 100644
--- a/ema_workbench/analysis/dimensional_stacking.py
+++ b/ema_workbench/analysis/dimensional_stacking.py
@@ -61,7 +61,7 @@ def discretize(data, nbins=3, with_labels=False):
             n_unique = column_data.unique().shape[0]
             n = n_unique
             column_data = column_data.cat.rename_categories(
-                [x for x in range(1, n + 1)]
+                list(range(1, n + 1))
             )
             indices = column_data

@@ -414,8 +414,8 @@ def create_pivot_plot(
     n = nr_levels * 2

     scores = scores.index.tolist()

-    rows = [entry for entry in scores[0:n:2]]
-    columns = [entry for entry in scores[1:n:2]]
+    rows = list(scores[0:n:2])
+    columns = list(scores[1:n:2])

     discretized_x = discretize(x, nbins=nbins, with_labels=bin_labels)
diff --git a/ema_workbench/analysis/plotting_util.py b/ema_workbench/analysis/plotting_util.py
index 7e8cf391d..622f1c707 100644
--- a/ema_workbench/analysis/plotting_util.py
+++ b/ema_workbench/analysis/plotting_util.py
@@ -222,7 +222,7 @@ def plot_violinplot(ax, values, log, group_labels=None):
     if not group_labels:
         group_labels = [""]

-    data = pd.DataFrame.from_records({k: v for k, v in zip(group_labels, values)})
+    data = pd.DataFrame.from_records(dict(zip(group_labels, values)))
     data = pd.melt(data)

     sns.violinplot(x="variable", y="value", data=data, order=group_labels, ax=ax)
@@ -246,7 +246,7 @@ def plot_boxenplot(ax, values, log, group_labels=None):
     if not group_labels:
         group_labels = [""]

-    data = pd.DataFrame.from_records({k: v for k, v in zip(group_labels, values)})
+    data = pd.DataFrame.from_records(dict(zip(group_labels, values)))
     data = pd.melt(data)

     sns.boxenplot(x="variable", y="value", data=data, order=group_labels, ax=ax)
@@ -771,7 +771,7 @@ def prepare_data(
     if filter_scalar:
         outcomes = filter_scalar_outcomes(outcomes)
     if not outcomes_to_show:
-        outcomes_to_show = [o for o in outcomes.keys()]
+        outcomes_to_show = list(outcomes.keys())

     # group the data if desired
     if group_by:
@@ -783,9 +783,7 @@
                 )
             else:
                 column_to_group_by = experiments[group_by]
-                if (column_to_group_by.dtype == object) or (
-                    column_to_group_by.dtype == "category"
-                ):
+                if column_to_group_by.dtype in (object, "category"):
                     grouping_specifiers = set(column_to_group_by)
                 else:
                     grouping_specifiers = make_continuous_grouping_specifiers(
diff --git a/ema_workbench/analysis/prim_util.py b/ema_workbench/analysis/prim_util.py
index 8631e2935..78f8dbc4d 100644
--- a/ema_workbench/analysis/prim_util.py
+++ b/ema_workbench/analysis/prim_util.py
@@ -224,7 +224,7 @@ def __call__(self, box):
 def is_significant(box, i, alpha=0.05):
     qp = box.qp[i]

-    return not any([value > alpha for values in qp.values() for value in values])
+    return not any(value > alpha for values in qp.values() for value in values)


 def is_pareto_efficient(data):
diff --git a/ema_workbench/analysis/scenario_discovery_util.py b/ema_workbench/analysis/scenario_discovery_util.py
index 7ea15048c..5d9399b2e 100644
--- a/ema_workbench/analysis/scenario_discovery_util.py
+++ b/ema_workbench/analysis/scenario_discovery_util.py
@@ -68,7 +68,7 @@ def _get_sorted_box_lims(boxes, box_init):
     # sort the uncertainties based on the normalized size of the
     # restricted dimensions
     uncs = uncs[np.argsort(box_size)]
-    box_lims = [box for box in boxes]
+    box_lims = list(boxes)

     return box_lims, uncs.tolist()

@@ -454,7 +454,7 @@ def _setup_figure(uncs):
         ax.add_patch(rect)
     ax.set_xlim(left=-0.2, right=1.2)
     ax.set_ylim(top=-0.5, bottom=nr_unc - 0.5)
-    ax.yaxis.set_ticks([y for y in range(nr_unc)])
+    ax.yaxis.set_ticks(list(range(nr_unc)))
     ax.xaxis.set_ticks([0, 0.25, 0.5, 0.75, 1])
     ax.set_yticklabels(uncs[::-1])
     return fig, ax
diff --git a/ema_workbench/connectors/vensim.py b/ema_workbench/connectors/vensim.py
index 8b404b117..91c8d72fb 100644
--- a/ema_workbench/connectors/vensim.py
+++ b/ema_workbench/connectors/vensim.py
@@ -390,17 +390,6 @@ def check_data(result):

         return results

-    def cleanup(self):
-        super().cleanup()
-
-    def reset_model(self):
-        """
-        Method for reseting the model to its initial state before runModel
-        was called
-        """
-
-        super().reset_model()
-
     def _delete_lookup_uncertainties(self):
         """
         deleting lookup uncertainties from the uncertainty list
@@ -552,7 +541,7 @@ def _get_initial_lookup(self, name):
         list2 = []
         number = []
         for c in b:
-            if (c != "(") and (c != ")"):
+            if c not in ("(", ")"):
                 list1.append(c)

         list1.append(",")
diff --git a/ema_workbench/em_framework/evaluators.py b/ema_workbench/em_framework/evaluators.py
index b4513082a..2be9bf350 100644
--- a/ema_workbench/em_framework/evaluators.py
+++ b/ema_workbench/em_framework/evaluators.py
@@ -274,8 +274,6 @@ def robust_optimize(


 class SequentialEvaluator(BaseEvaluator):
-    def __init__(self, models, **kwargs):
-        super().__init__(models, **kwargs)

     def initialize(self):
         pass
diff --git a/ema_workbench/em_framework/optimization.py b/ema_workbench/em_framework/optimization.py
index cd750fdc0..e94d1c5b8 100644
--- a/ema_workbench/em_framework/optimization.py
+++ b/ema_workbench/em_framework/optimization.py
@@ -844,7 +844,7 @@ def _optimize(
             "number of epsilon values does not match number " "of outcomes"
         )

-    if all([isinstance(t, klass) for t in problem.types]):
+    if all(isinstance(t, klass) for t in problem.types):
         variator = None
     else:
         variator = CombinedVariator()
diff --git a/ema_workbench/em_framework/outcomes.py b/ema_workbench/em_framework/outcomes.py
index 67ca25869..4c7e5b46c 100644
--- a/ema_workbench/em_framework/outcomes.py
+++ b/ema_workbench/em_framework/outcomes.py
@@ -197,7 +197,7 @@ def __eq__(self, other):
             all(
                 hasattr(self, key) == hasattr(other, key)
                 and getattr(self, key) == getattr(other, key)
-                for key in self.__dict__.keys()
+                for key in self.__dict__
             ),
             self.__class__ == other.__class__,
         ]
diff --git a/ema_workbench/em_framework/parameters.py b/ema_workbench/em_framework/parameters.py
index 9232647c2..2a6c0674f 100644
--- a/ema_workbench/em_framework/parameters.py
+++ b/ema_workbench/em_framework/parameters.py
@@ -79,8 +79,7 @@ def __repr__(self, *args, **kwargs):


 class Category(Constant):
-    def __init__(self, name, value):
-        super().__init__(name, value)
+    pass


 def create_category(cat):
@@ -384,7 +383,7 @@ def __init__(
         self._categories = NamedObjectMap(Category)

         self.categories = cats
-        self.resolution = [i for i in range(len(self.categories))]
+        self.resolution = list(range(len(self.categories)))
         self.multivalue = multivalue

     def index_for_cat(self, category):
@@ -488,7 +487,7 @@ def parameters_to_csv(parameters, file_name):
         else:
             values = param.lower_bound, param.upper_bound

-        dict_repr = {j: value for j, value in enumerate(values)}
+        dict_repr = dict(enumerate(values))
         dict_repr["name"] = param.name
         params[i] = dict_repr
diff --git a/ema_workbench/em_framework/points.py b/ema_workbench/em_framework/points.py
index 1d7a855c8..fb080a6cd 100644
--- a/ema_workbench/em_framework/points.py
+++ b/ema_workbench/em_framework/points.py
@@ -163,7 +163,7 @@ def combine_cases_sampling(*point_collection):

     # figure out the longest
     def exhaust_cases(cases):
-        return [case for case in cases]
+        return list(cases)

     point_collection = [exhaust_cases(case) for case in point_collection]
     longest_cases = max(point_collection, key=len)
diff --git a/ema_workbench/em_framework/samplers.py b/ema_workbench/em_framework/samplers.py
index 1ba2aba8d..ef00680d2 100644
--- a/ema_workbench/em_framework/samplers.py
+++ b/ema_workbench/em_framework/samplers.py
@@ -49,9 +49,6 @@ class AbstractSampler(metaclass=abc.ABCMeta):

     """

-    def __init__(self):
-        super().__init__()
-
     def sample(self, distribution, size):
         """
         method for sampling a number of samples from a particular distribution.
@@ -132,9 +129,6 @@ class LHSSampler(AbstractSampler):
     generates a Latin Hypercube sample for each of the parameters
     """

-    def __init__(self):
-        super().__init__()
-
     def sample(self, distribution, size):
         """
         generate a Latin Hypercube Sample.
@@ -253,9 +247,6 @@ class MonteCarloSampler(AbstractSampler):

     """

-    def __init__(self):
-        super().__init__()
-
     def sample(self, distribution, size):
         """
         generate a Monte Carlo Sample.
@@ -286,9 +277,6 @@ class FullFactorialSampler(AbstractSampler):

     """

-    def __init__(self):
-        super().__init__()
-
     def generate_samples(self, parameters, size):
         """
         The main method of :class: `~sampler.Sampler` and its
diff --git a/ema_workbench/examples/eijgenraam_example.py b/ema_workbench/examples/eijgenraam_example.py
index 5449e79ed..28950fed9 100644
--- a/ema_workbench/examples/eijgenraam_example.py
+++ b/ema_workbench/examples/eijgenraam_example.py
@@ -112,7 +112,7 @@
     52: (49.2200, 1.6075, 0.0047, 0.036173, 0.304, 0.001716, 4025.6, 0.00171, 1 / 1250),
     53: (69.4565, 1.1625, 0.0028, 0.031651, 0.336, 0.002700, 9819.5, 0.00171, 1 / 1250),
 }
-data = {i: {k: v for k, v in zip(params, raw_data[i])} for i in raw_data.keys()}
+data = {i: dict(zip(params, raw_data[i])) for i in raw_data}

 # Set the ring we are analyzing
 ring = 15
diff --git a/test/models/Sales_Agent_Market_Building_Dynamics.py b/test/models/Sales_Agent_Market_Building_Dynamics.py
index fa5ab4674..6936e9836 100644
--- a/test/models/Sales_Agent_Market_Building_Dynamics.py
+++ b/test/models/Sales_Agent_Market_Building_Dynamics.py
@@ -319,7 +319,7 @@ def income():
         tier_1_income()
         + tier_2_income()
         + if_then_else(
-            time() < startup_subsidy_length(), lambda: startup_subsidy(), lambda: 0
+            time() < startup_subsidy_length(), startup_subsidy, lambda: 0
         )
     )

@@ -1051,16 +1051,16 @@ def time_step():
     return 0.0625


-_integ_total_cumulative_sales = Integ(lambda: accumulating_sales(), lambda: 0)
+_integ_total_cumulative_sales = Integ(accumulating_sales, lambda: 0)


-_integ_tenure = Integ(lambda: accumulating_tenure(), lambda: 0)
+_integ_tenure = Integ(accumulating_tenure, lambda: 0)


-_integ_total_cumulative_income = Integ(lambda: accumulating_income(), lambda: 0)
+_integ_total_cumulative_income = Integ(accumulating_income, lambda: 0)


-_integ_months_of_buffer = Integ(lambda: income() - expenses(), lambda: initial_buffer())
+_integ_months_of_buffer = Integ(lambda: income() - expenses(), initial_buffer)


 _integ_tier_2_clients = Integ(
diff --git a/test/models/Sales_Agent_Motivation_Dynamics.py b/test/models/Sales_Agent_Motivation_Dynamics.py
index 3f410a73d..57fe55c6f 100644
--- a/test/models/Sales_Agent_Motivation_Dynamics.py
+++ b/test/models/Sales_Agent_Motivation_Dynamics.py
@@ -297,7 +297,7 @@ def income():
     Technically in units of months of expenses earned per month
     """
     return months_of_expenses_per_sale() * sales() + if_then_else(
-        time() < startup_subsidy_length(), lambda: startup_subsidy(), lambda: 0
+        time() < startup_subsidy_length(), startup_subsidy, lambda: 0
     )


@@ -481,13 +481,13 @@ def time_step():
     return 0.0625


-_integ_total_cumulative_income = Integ(lambda: accumulating_income(), lambda: 0)
+_integ_total_cumulative_income = Integ(accumulating_income, lambda: 0)


-_integ_total_cumulative_sales = Integ(lambda: accumulating_sales(), lambda: 0)
+_integ_total_cumulative_sales = Integ(accumulating_sales, lambda: 0)


-_integ_tenure = Integ(lambda: accumulating_tenure(), lambda: 0)
+_integ_tenure = Integ(accumulating_tenure, lambda: 0)


-_integ_motivation = Integ(lambda: motivation_adjustment(), lambda: 1)
+_integ_motivation = Integ(motivation_adjustment, lambda: 1)
diff --git a/test/test_em_framework/test_callback.py b/test/test_em_framework/test_callback.py
index 49bd0c00e..03f20cc9a 100644
--- a/test/test_em_framework/test_callback.py
+++ b/test/test_em_framework/test_callback.py
@@ -87,7 +87,7 @@ def test_store_results(self):

         _, out = callback.get_results()

-        self.assertIn(outcomes[0].name, {entry for entry in out.keys()})
+        self.assertIn(outcomes[0].name, set(out.keys()))
         self.assertEqual(out[outcomes[0].name].shape, (3,))

         # case 2 time series shape = (1, nr_time_steps)
diff --git a/test/test_em_framework/test_ema_ipyparallel.py b/test/test_em_framework/test_ema_ipyparallel.py
index 682572c67..b22d89982 100644
--- a/test/test_em_framework/test_ema_ipyparallel.py
+++ b/test/test_em_framework/test_ema_ipyparallel.py
@@ -87,7 +87,7 @@ def add_engines(n=1, profile="iptest", total=False):
         eps.append(ep)
     tic = time.time()
     while len(rc) < base + n:
-        if any([ep.poll() is not None for ep in eps]):
+        if any(ep.poll() is not None for ep in eps):
             raise RuntimeError("A test engine failed to start.")
         elif time.time() - tic > 15:
             raise RuntimeError("Timeout waiting for engines to connect.")
diff --git a/test/test_em_framework/test_model.py b/test/test_em_framework/test_model.py
index 1607b2d66..65c84bef3 100644
--- a/test/test_em_framework/test_model.py
+++ b/test/test_em_framework/test_model.py
@@ -20,8 +20,7 @@


 class FileModelTest(FileModel):
-    def run_model(self, scenario, policy):
-        super().run_model(scenario, policy)
+    pass


 class TestFileModel(unittest.TestCase):
diff --git a/test/test_em_framework/test_points.py b/test/test_em_framework/test_points.py
index 4ad7fd16b..ecd29fd78 100644
--- a/test/test_em_framework/test_points.py
+++ b/test/test_em_framework/test_points.py
@@ -13,7 +13,7 @@ def test_experiment_gemerator(self):
         experiments = points.experiment_generator(
             scenarios, model_structures, policies, combine="factorial"
         )
-        experiments = [e for e in experiments]
+        experiments = list(experiments)
         self.assertEqual(
             len(experiments), 6, ("wrong number of experiments " "for factorial")
         )
@@ -21,7 +21,7 @@ def test_experiment_gemerator(self):
         experiments = points.experiment_generator(
             scenarios, model_structures, policies, combine="sample"
         )
-        experiments = [e for e in experiments]
+        experiments = list(experiments)
         self.assertEqual(
             len(experiments), 3, ("wrong number of experiments " "for zipover")
         )
@@ -30,7 +30,7 @@ def test_experiment_gemerator(self):
             experiments = points.experiment_generator(
                 scenarios, model_structures, policies, combine="adf"
             )
-            _ = [e for e in experiments]
+            _ = list(experiments)

     # def test_experiment_generator(self):
     #     sampler = LHSSampler()
diff --git a/test/test_em_framework/test_util.py b/test/test_em_framework/test_util.py
index 6728b8dde..be0132dc1 100644
--- a/test/test_em_framework/test_util.py
+++ b/test/test_em_framework/test_util.py
@@ -44,7 +44,7 @@ def test_namedict(self):
         self.assertEqual(2, len(nd), "length not correct")

         # test in
-        for entry in kwargs.keys():
+        for entry in kwargs:
             self.assertIn(entry, nd, f"{entry} not in NamedDict")

         # test addition
diff --git a/test/test_util/test_utilities.py b/test/test_util/test_utilities.py
index e8117aab0..edb83b8a7 100644
--- a/test/test_util/test_utilities.py
+++ b/test/test_util/test_utilities.py
@@ -46,8 +46,7 @@ def test_save_results(self):
         )
         outcome_q = np.random.rand(nr_experiments, 1)

-        outcomes = {}
-        outcomes[ScalarOutcome("q").name] = outcome_q
+        outcomes = {ScalarOutcome("q").name: outcome_q}
         results = (experiments, outcomes)

         # test for 2d
@@ -61,8 +60,7 @@ def test_save_results(self):
         )
         outcome_r = np.zeros((nr_experiments, nr_timesteps))

-        outcomes = {}
-        outcomes[ArrayOutcome("r").name] = outcome_r
+        outcomes = {ArrayOutcome("r").name: outcome_r}
         results = (experiments, outcomes)

         save_results(results, fn)
@@ -78,8 +76,7 @@ def test_save_results(self):
         )
         outcome_s = np.zeros((nr_experiments, nr_timesteps, nr_replications))

-        outcomes = {}
-        outcomes[ArrayOutcome("s").name] = outcome_s
+        outcomes = {ArrayOutcome("s").name: outcome_s}
         results = (experiments, outcomes)

         save_results(results, fn)
@@ -104,8 +101,7 @@ def test_load_results(self):

         outcome_a = np.zeros((nr_experiments, 1))

-        outcomes = {}
-        outcomes[ArrayOutcome("a").name] = outcome_a
+        outcomes = {ArrayOutcome("a").name: outcome_a}
         results = (experiments, outcomes)

         save_results(results, "../data/test.tar.gz")
@@ -129,8 +125,7 @@ def test_load_results(self):

         outcome_b = np.zeros((nr_experiments, nr_timesteps, nr_replications))

-        outcomes = {}
-        outcomes[ArrayOutcome("b").name] = outcome_b
+        outcomes = {ArrayOutcome("b").name: outcome_b}
         results = (experiments, outcomes)

         save_results(results, "../data/test.tar.gz")