Skip to content

Commit

Permalink
Formatting: Remove trailing commas to combine lines
Browse files Browse the repository at this point in the history
Remove trailing commas to combine multiple lines into a single line. A few edge cases in which this degraded readability were excluded (and are therefore not visible in this commit). This should make the code more compact while retaining or improving readability.

Command used: black --line-length 100 -C

See https://black.readthedocs.io/en/stable/the_black_code_style/current_style.html#trailing-commas
  • Loading branch information
EwoutH committed Sep 23, 2022
1 parent 9a17feb commit ab3a28e
Show file tree
Hide file tree
Showing 50 changed files with 121 additions and 609 deletions.
44 changes: 7 additions & 37 deletions docs/source/indepth_tutorial/directed-search.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -111,14 +111,7 @@
"ema_logging.log_to_stderr(ema_logging.INFO)\n",
"\n",
"with MultiprocessingEvaluator(model) as evaluator:\n",
" results = evaluator.optimize(\n",
" nfe=250,\n",
" searchover=\"levers\",\n",
" epsilons=[\n",
" 0.1,\n",
" ]\n",
" * len(model.outcomes),\n",
" )"
" results = evaluator.optimize(nfe=250, searchover=\"levers\", epsilons=[0.1] * len(model.outcomes))"
]
},
{
Expand Down Expand Up @@ -314,13 +307,7 @@
"\n",
"with MultiprocessingEvaluator(model) as evaluator:\n",
" results = evaluator.optimize(\n",
" nfe=250,\n",
" searchover=\"levers\",\n",
" epsilons=[\n",
" 0.1,\n",
" ]\n",
" * len(model.outcomes),\n",
" constraints=constraints,\n",
" nfe=250, searchover=\"levers\", epsilons=[0.1] * len(model.outcomes), constraints=constraints\n",
" )"
]
},
Expand Down Expand Up @@ -469,10 +456,7 @@
" results, convergence = evaluator.optimize(\n",
" nfe=10000,\n",
" searchover=\"levers\",\n",
" epsilons=[\n",
" 0.05,\n",
" ]\n",
" * len(model.outcomes),\n",
" epsilons=[0.05] * len(model.outcomes),\n",
" convergence=convergence_metrics,\n",
" constraints=constraints,\n",
" )\n",
Expand Down Expand Up @@ -548,12 +532,7 @@
"\n",
"with MultiprocessingEvaluator(model) as evaluator:\n",
" results = evaluator.optimize(\n",
" nfe=1000,\n",
" searchover=\"uncertainties\",\n",
" epsilons=[\n",
" 0.1,\n",
" ]\n",
" * len(model.outcomes),\n",
" nfe=1000, searchover=\"uncertainties\", epsilons=[0.1] * len(model.outcomes)\n",
" )"
]
},
Expand Down Expand Up @@ -626,10 +605,7 @@
"MINIMIZE = ScalarOutcome.MINIMIZE\n",
"robustnes_functions = [\n",
" ScalarOutcome(\n",
" \"90th percentile max_p\",\n",
" kind=MINIMIZE,\n",
" variable_name=\"max_P\",\n",
" function=percentile90,\n",
" \"90th percentile max_p\", kind=MINIMIZE, variable_name=\"max_P\", function=percentile90\n",
" ),\n",
" ScalarOutcome(\n",
" \"10th percentile reliability\",\n",
Expand All @@ -638,16 +614,10 @@
" function=percentile10,\n",
" ),\n",
" ScalarOutcome(\n",
" \"10th percentile inertia\",\n",
" kind=MAXIMIZE,\n",
" variable_name=\"inertia\",\n",
" function=percentile10,\n",
" \"10th percentile inertia\", kind=MAXIMIZE, variable_name=\"inertia\", function=percentile10\n",
" ),\n",
" ScalarOutcome(\n",
" \"10th percentile utility\",\n",
" kind=MAXIMIZE,\n",
" variable_name=\"utility\",\n",
" function=percentile10,\n",
" \"10th percentile utility\", kind=MAXIMIZE, variable_name=\"utility\", function=percentile10\n",
" ),\n",
"]"
]
Expand Down
4 changes: 1 addition & 3 deletions docs/source/indepth_tutorial/general-introduction.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -123,9 +123,7 @@
" X[0] = 0.0\n",
" decision = 0.1\n",
"\n",
" decisions = np.zeros(\n",
" myears,\n",
" )\n",
" decisions = np.zeros(myears)\n",
" decisions[0] = decision\n",
"\n",
" natural_inflows = np.random.lognormal(\n",
Expand Down
8 changes: 1 addition & 7 deletions ema_workbench/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,13 +21,7 @@
ArrayOutcome,
Samplers,
)
from .util import (
save_results,
load_results,
ema_logging,
EMAError,
process_replications,
)
from .util import save_results, load_results, ema_logging, EMAError, process_replications

# from . import analysis

Expand Down
7 changes: 1 addition & 6 deletions ema_workbench/analysis/dimensional_stacking.py
Original file line number Diff line number Diff line change
Expand Up @@ -276,12 +276,7 @@ def plot_pivot_table(
height_ratios = dim_ratios(figsize=figsize, axis=0)

gs = mpl.gridspec.GridSpec(
3,
3,
wspace=0.01,
hspace=0.01,
width_ratios=width_ratios,
height_ratios=height_ratios,
3, 3, wspace=0.01, hspace=0.01, width_ratios=width_ratios, height_ratios=height_ratios
)

ax_plot = fig.add_subplot(gs[2, 2])
Expand Down
25 changes: 3 additions & 22 deletions ema_workbench/analysis/pairs_plotting.py
Original file line number Diff line number Diff line change
Expand Up @@ -118,13 +118,7 @@ def pairs_lines(

for ax in figure.axes:
gs2 = ax._subplotspec
if all(
(
gs1._gridspec == gs2._gridspec,
gs1.num1 == gs2.num1,
gs1.num2 == gs2.num2,
)
):
if all((gs1._gridspec == gs2._gridspec, gs1.num1 == gs2.num1, gs1.num2 == gs2.num2)):
break

make_legend(grouping_labels, ax, legend_type=LegendEnum.LINE)
Expand Down Expand Up @@ -312,14 +306,7 @@ def determine_extents(outcomes, outcomes_to_show):


def simple_pairs_density(
outcomes,
outcomes_to_show,
log,
colormap,
gridsize,
ylabels,
extents=None,
title=None,
outcomes, outcomes_to_show, log, colormap, gridsize, ylabels, extents=None, title=None
):
"""
Expand Down Expand Up @@ -509,13 +496,7 @@ def pairs_scatter(

for ax in figure.axes:
gs2 = ax._subplotspec
if all(
(
gs1._gridspec == gs2._gridspec,
gs1.num1 == gs2.num1,
gs1.num2 == gs2.num2,
)
):
if all((gs1._gridspec == gs2._gridspec, gs1.num1 == gs2.num1, gs1.num2 == gs2.num2)):
break

make_legend(grouping_labels, ax, legend_type=LegendEnum.SCATTER)
Expand Down
68 changes: 9 additions & 59 deletions ema_workbench/analysis/plotting.py
Original file line number Diff line number Diff line change
Expand Up @@ -145,15 +145,7 @@ def envelopes(

if group_by:
group_by_envelopes(
outcomes,
outcome_to_plot,
time,
density,
ax,
ax_d,
fill,
grouping_labels,
log,
outcomes, outcome_to_plot, time, density, ax, ax_d, fill, grouping_labels, log
)
else:
single_envelope(outcomes, outcome_to_plot, time, density, ax, ax_d, fill, log)
Expand All @@ -171,13 +163,7 @@ def envelopes(

for ax in figure.axes:
gs2 = ax._subplotspec
if all(
(
gs1._gridspec == gs2._gridspec,
gs1.num1 == gs2.num1,
gs1.num2 == gs2.num2,
)
):
if all((gs1._gridspec == gs2._gridspec, gs1.num1 == gs2.num1, gs1.num2 == gs2.num2)):
break
if fill:
make_legend(grouping_labels, ax, alpha=0.3, legend_type=LegendEnum.PATCH)
Expand Down Expand Up @@ -354,12 +340,7 @@ def lines(
)

data = prepare_data(
experiments,
experiments_to_show,
outcomes,
outcomes_to_show,
group_by,
grouping_specifiers,
experiments, experiments_to_show, outcomes, outcomes_to_show, group_by, grouping_specifiers
)
experiments, outcomes, outcomes_to_show, time, grouping_labels = data

Expand Down Expand Up @@ -392,13 +373,7 @@ def lines(

for ax in figure.axes:
gs2 = ax._subplotspec
if all(
(
gs1._gridspec == gs2._gridspec,
gs1.num1 == gs2.num1,
gs1.num2 == gs2.num2,
)
):
if all((gs1._gridspec == gs2._gridspec, gs1.num1 == gs2.num1, gs1.num2 == gs2.num2)):
break

make_legend(grouping_labels, ax)
Expand Down Expand Up @@ -472,12 +447,7 @@ def plot_lines_with_envelopes(
experiments, None, outcomes, outcomes_to_show, group_by, grouping_specifiers
)[1]
data = prepare_data(
experiments,
experiments_to_show,
outcomes,
outcomes_to_show,
group_by,
grouping_specifiers,
experiments, experiments_to_show, outcomes, outcomes_to_show, group_by, grouping_specifiers
)
experiments, outcomes, outcomes_to_show, time, grouping_labels = data

Expand Down Expand Up @@ -510,8 +480,7 @@ def plot_lines_with_envelopes(
group_density(ax_d, density, full_outcomes, outcome_to_plot, grouping_labels, log)

ax_d.get_yaxis().set_view_interval(
ax.get_yaxis().get_view_interval()[0],
ax.get_yaxis().get_view_interval()[1],
ax.get_yaxis().get_view_interval()[0], ax.get_yaxis().get_view_interval()[1]
)

else:
Expand All @@ -533,13 +502,7 @@ def plot_lines_with_envelopes(

for ax in figure.axes:
gs2 = ax._subplotspec
if all(
(
gs1._gridspec == gs2._gridspec,
gs1.num1 == gs2.num1,
gs1.num2 == gs2.num2,
)
):
if all((gs1._gridspec == gs2._gridspec, gs1.num1 == gs2.num1, gs1.num2 == gs2.num2)):
break
make_legend(grouping_labels, ax)

Expand Down Expand Up @@ -823,14 +786,7 @@ def multiple_densities(
ax4 = plt.subplot2grid((2, 6), (1, 3), sharex=ax1, sharey=ax_env)
ax5 = plt.subplot2grid((2, 6), (1, 4), sharex=ax1, sharey=ax_env)
ax6 = plt.subplot2grid((2, 6), (1, 5), sharex=ax1, sharey=ax_env)
kde_axes = [
ax1,
ax2,
ax3,
ax4,
ax5,
ax6,
]
kde_axes = [ax1, ax2, ax3, ax4, ax5, ax6]
else:
raise EMAError("too many points in time provided")

Expand Down Expand Up @@ -875,13 +831,7 @@ def multiple_densities(
# TODO grouping labels, boxplots, and sharex
# create a problem
group_density(
ax,
density,
outcomes,
outcome_to_show,
grouping_labels,
index=index,
log=log,
ax, density, outcomes, outcome_to_show, grouping_labels, index=index, log=log
)

min_y, max_y = ax_env.get_ylim()
Expand Down
11 changes: 2 additions & 9 deletions ema_workbench/analysis/plotting_util.py
Original file line number Diff line number Diff line change
Expand Up @@ -322,8 +322,7 @@ def simple_density(density, value, ax_d, ax, log):
ax.get_yaxis().get_view_interval()[0], ax.get_yaxis().get_view_interval()[1]
)
ax_d.set_ylim(
bottom=ax.get_yaxis().get_view_interval()[0],
top=ax.get_yaxis().get_view_interval()[1],
bottom=ax.get_yaxis().get_view_interval()[0], top=ax.get_yaxis().get_view_interval()[1]
)

ax_d.set_xlabel("")
Expand Down Expand Up @@ -676,13 +675,7 @@ def prepare_pairs_data(
raise EMAError("for pair wise plotting, more than one outcome needs to be provided")

experiments, outcomes, outcomes_to_show, time, grouping_labels = prepare_data(
experiments,
None,
outcomes,
outcomes_to_show,
group_by,
grouping_specifiers,
filter_scalar,
experiments, None, outcomes, outcomes_to_show, group_by, grouping_specifiers, filter_scalar
)

def filter_outcomes(outcomes, point_in_time):
Expand Down
26 changes: 5 additions & 21 deletions ema_workbench/analysis/prim.py
Original file line number Diff line number Diff line change
Expand Up @@ -368,16 +368,7 @@ def __init__(self, prim, box_lims, indices):
self._resampled = []
self.yi_initial = indices[:]

columns = [
"name",
"lower",
"upper",
"minimum",
"maximum",
"qp_lower",
"qp_upper",
"id",
]
columns = ["name", "lower", "upper", "minimum", "maximum", "qp_lower", "qp_upper", "id"]
self.boxes_quantitative = pd.DataFrame(columns=columns)

columns = ["item", "name", "n_items", "x", "id"]
Expand Down Expand Up @@ -559,11 +550,7 @@ def inspect_tradeoff(self):
# unless we can force a selection?
name = f"{dim}, {qp.loc[qp.index[0], dim]: .2g}"
entry = dict(
name=name,
n_items=len(items) + 1,
item=item,
id=int(i),
x=j / len(items),
name=name, n_items=len(items) + 1, item=item, id=int(i), x=j / len(items)
)
nominal_vars.append(entry)

Expand Down Expand Up @@ -988,10 +975,7 @@ def __init__(
mode=sdutil.RuleInductionType.BINARY,
update_function="default",
):
assert mode in {
sdutil.RuleInductionType.BINARY,
sdutil.RuleInductionType.REGRESSION,
}
assert mode in {sdutil.RuleInductionType.BINARY, sdutil.RuleInductionType.REGRESSION}
assert self._assert_mode(y, mode, update_function)
# preprocess x
try:
Expand Down Expand Up @@ -1207,7 +1191,7 @@ def _peel(self, box):
# identify all possible peels
possible_peels = []

for x, columns, dtype, in [
for x, columns, dtype in [
(x_float, self.x_float_colums, "float"),
(x_int, self.x_int_columns, "int"),
(x_nominal, self.x_nominal_columns, "object"),
Expand Down Expand Up @@ -1421,7 +1405,7 @@ def _paste(self, box):

# identify all possible pastes
possible_pastes = []
for columns, dtype, in [
for columns, dtype in [
(self.x_float_colums, "float"),
(self.x_int_columns, "int"),
(self.x_nominal_columns, "object"),
Expand Down
Loading

0 comments on commit ab3a28e

Please sign in to comment.