Formatting: Format with Black, increase max line length to 100, combine multi-line blocks (#178)

* Format all files with Black

Formats all files with Black, using the default line limit of 88 characters.

Many trailing commas were removed manually.

* Increase Black line limit to 100

Format all code with Black using a line limit of 100. This shortens many multi-line code blocks to a single line, making them more compact and readable.

* Formatting: Remove trailing commas to combine lines

Remove trailing commas so that multi-line blocks can be combined into a single line. A few edge cases in which this degraded readability were excluded (and are therefore not visible in this commit). This makes the code more compact while retaining or improving readability.

Command used: black --line-length 100 -C (-C is the short form of --skip-magic-trailing-comma)

See https://black.readthedocs.io/en/stable/the_black_code_style/current_style.html#trailing-commas
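
For illustration, a minimal before/after sketch of the magic trailing comma behavior that -C disables, taken from one of the ScalarOutcome calls reformatted in this commit:

# Before: the trailing comma after function=percentile90 is a "magic trailing
# comma"; by default Black keeps such a call exploded, one argument per line,
# even when it would fit on one line.
ScalarOutcome(
    "90th percentile max_p",
    kind=MINIMIZE,
    variable_name="max_P",
    function=percentile90,
)

# After black --line-length 100 -C: the magic trailing comma is ignored and
# the call collapses onto a single line, since it fits within 100 characters.
ScalarOutcome("90th percentile max_p", kind=MINIMIZE, variable_name="max_P", function=percentile90)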
EwoutH authored Sep 23, 2022
1 parent abfaafb · commit 4f0ae67
Showing 92 changed files with 500 additions and 1,523 deletions.
18 changes: 8 additions & 10 deletions docs/source/conf.py
@@ -130,15 +130,15 @@ def establish_version():
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = dict(
-    github_user='quaquel',
-    github_repo='EMAworkbench',
-    github_version='/tree/master/docs/',
+    github_user="quaquel",
+    github_repo="EMAworkbench",
+    github_version="/tree/master/docs/",
# On master branch and new branch still in
# pre-release status: true; else: false.
-    in_progress='true',
+    in_progress="true",
# On branches previous to "latest": true; else: false.
-    outdated='false',
-)
+    outdated="false",
+)

# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
@@ -257,12 +257,10 @@ def establish_version():

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
-man_pages = [
-    ("index", "emaworkbench", "EMA workbench Documentation", ["J.H. Kwakkel"], 1)
-]
+man_pages = [("index", "emaworkbench", "EMA workbench Documentation", ["J.H. Kwakkel"], 1)]


def setup(app):
# copy changelog into source folder for documentation
dest = osp.join(HERE, "changelog.md")
-    shutil.copy(osp.join(HERE, "..", "..", "CHANGELOG.md"), dest)
+    shutil.copy(osp.join(HERE, "..", "..", "CHANGELOG.md"), dest)
48 changes: 8 additions & 40 deletions docs/source/indepth_tutorial/directed-search.ipynb
@@ -111,14 +111,7 @@
"ema_logging.log_to_stderr(ema_logging.INFO)\n",
"\n",
"with MultiprocessingEvaluator(model) as evaluator:\n",
" results = evaluator.optimize(\n",
" nfe=250,\n",
" searchover=\"levers\",\n",
" epsilons=[\n",
" 0.1,\n",
" ]\n",
" * len(model.outcomes),\n",
" )"
" results = evaluator.optimize(nfe=250, searchover=\"levers\", epsilons=[0.1] * len(model.outcomes))"
]
},
{
@@ -287,9 +280,7 @@
"source": [
"from ema_workbench import Constraint\n",
"\n",
"constraints = [\n",
" Constraint(\"max pollution\", outcome_names=\"max_P\", function=lambda x: max(0, x - 1))\n",
"]"
"constraints = [Constraint(\"max pollution\", outcome_names=\"max_P\", function=lambda x: max(0, x - 1))]"
]
},
{
@@ -316,13 +307,7 @@
"\n",
"with MultiprocessingEvaluator(model) as evaluator:\n",
" results = evaluator.optimize(\n",
" nfe=250,\n",
" searchover=\"levers\",\n",
" epsilons=[\n",
" 0.1,\n",
" ]\n",
" * len(model.outcomes),\n",
" constraints=constraints,\n",
" nfe=250, searchover=\"levers\", epsilons=[0.1] * len(model.outcomes), constraints=constraints\n",
" )"
]
},
@@ -471,10 +456,7 @@
" results, convergence = evaluator.optimize(\n",
" nfe=10000,\n",
" searchover=\"levers\",\n",
" epsilons=[\n",
" 0.05,\n",
" ]\n",
" * len(model.outcomes),\n",
" epsilons=[0.05] * len(model.outcomes),\n",
" convergence=convergence_metrics,\n",
" constraints=constraints,\n",
" )\n",
@@ -550,12 +532,7 @@
"\n",
"with MultiprocessingEvaluator(model) as evaluator:\n",
" results = evaluator.optimize(\n",
" nfe=1000,\n",
" searchover=\"uncertainties\",\n",
" epsilons=[\n",
" 0.1,\n",
" ]\n",
" * len(model.outcomes),\n",
" nfe=1000, searchover=\"uncertainties\", epsilons=[0.1] * len(model.outcomes)\n",
" )"
]
},
@@ -628,10 +605,7 @@
"MINIMIZE = ScalarOutcome.MINIMIZE\n",
"robustnes_functions = [\n",
" ScalarOutcome(\n",
" \"90th percentile max_p\",\n",
" kind=MINIMIZE,\n",
" variable_name=\"max_P\",\n",
" function=percentile90,\n",
" \"90th percentile max_p\", kind=MINIMIZE, variable_name=\"max_P\", function=percentile90\n",
" ),\n",
" ScalarOutcome(\n",
" \"10th percentile reliability\",\n",
@@ -640,16 +614,10 @@
" function=percentile10,\n",
" ),\n",
" ScalarOutcome(\n",
" \"10th percentile inertia\",\n",
" kind=MAXIMIZE,\n",
" variable_name=\"inertia\",\n",
" function=percentile10,\n",
" \"10th percentile inertia\", kind=MAXIMIZE, variable_name=\"inertia\", function=percentile10\n",
" ),\n",
" ScalarOutcome(\n",
" \"10th percentile utility\",\n",
" kind=MAXIMIZE,\n",
" variable_name=\"utility\",\n",
" function=percentile10,\n",
" \"10th percentile utility\", kind=MAXIMIZE, variable_name=\"utility\", function=percentile10\n",
" ),\n",
"]"
]
12 changes: 5 additions & 7 deletions docs/source/indepth_tutorial/dps_lake_model.py
@@ -86,7 +86,7 @@ def lake_model(
"""
np.random.seed(seed)
-    Pcrit = brentq(lambda x: x ** q / (1 + x ** q) - b * x, 0.01, 1.5)
+    Pcrit = brentq(lambda x: x**q / (1 + x**q) - b * x, 0.01, 1.5)

X = np.zeros((myears,))
average_daily_P = np.zeros((myears,))
@@ -98,12 +98,12 @@
X[0] = 0.0
decision = 0.1

-    decisions = np.zeros(myears,)
+    decisions = np.zeros(myears)
decisions[0] = decision

natural_inflows = np.random.lognormal(
-        math.log(mean ** 2 / math.sqrt(stdev ** 2 + mean ** 2)),
-        math.sqrt(math.log(1.0 + stdev ** 2 / mean ** 2)),
+        math.log(mean**2 / math.sqrt(stdev**2 + mean**2)),
+        math.sqrt(math.log(1.0 + stdev**2 / mean**2)),
size=myears,
)

@@ -123,8 +123,6 @@

reliability += np.sum(X < Pcrit) / (nsamples * myears)
inertia += np.sum(np.absolute(np.diff(decisions) < 0.02)) / (nsamples * myears)
-        utility += (
-            np.sum(alpha * decisions * np.power(delta, np.arange(myears))) / nsamples
-        )
+        utility += np.sum(alpha * decisions * np.power(delta, np.arange(myears))) / nsamples
max_P = np.max(average_daily_P)
return max_P, utility, inertia, reliability
8 changes: 2 additions & 6 deletions docs/source/indepth_tutorial/general-introduction.ipynb
@@ -123,9 +123,7 @@
" X[0] = 0.0\n",
" decision = 0.1\n",
"\n",
" decisions = np.zeros(\n",
" myears,\n",
" )\n",
" decisions = np.zeros(myears)\n",
" decisions[0] = decision\n",
"\n",
" natural_inflows = np.random.lognormal(\n",
@@ -150,9 +148,7 @@
"\n",
" reliability += np.sum(X < Pcrit) / (nsamples * myears)\n",
" inertia += np.sum(np.absolute(np.diff(decisions) < 0.02)) / (nsamples * myears)\n",
" utility += (\n",
" np.sum(alpha * decisions * np.power(delta, np.arange(myears))) / nsamples\n",
" )\n",
" utility += np.sum(alpha * decisions * np.power(delta, np.arange(myears))) / nsamples\n",
" max_P = np.max(average_daily_P)\n",
" return max_P, utility, inertia, reliability"
]
16 changes: 4 additions & 12 deletions docs/source/indepth_tutorial/open-exploration.ipynb
@@ -132,9 +132,7 @@
"source": [
"from ema_workbench.analysis import pairs_plotting\n",
"\n",
"fig, axes = pairs_plotting.pairs_scatter(\n",
" experiments, outcomes, group_by=\"policy\", legend=False\n",
")\n",
"fig, axes = pairs_plotting.pairs_scatter(experiments, outcomes, group_by=\"policy\", legend=False)\n",
"fig.set_size_inches(8, 8)\n",
"plt.show()"
]
@@ -458,9 +456,7 @@
"x = experiments\n",
"y = outcomes[\"max_P\"] < 0.8\n",
"\n",
"fs, alg = feature_scoring.get_ex_feature_scores(\n",
" x, y, mode=RuleInductionType.CLASSIFICATION\n",
")\n",
"fs, alg = feature_scoring.get_ex_feature_scores(x, y, mode=RuleInductionType.CLASSIFICATION)\n",
"fs.sort_values(ascending=False, by=1)"
]
},
@@ -599,16 +595,12 @@
"from ema_workbench.em_framework.salib_samplers import get_SALib_problem\n",
"\n",
"with MultiprocessingEvaluator(model) as evaluator:\n",
" sa_results = evaluator.perform_experiments(\n",
" scenarios=1000, uncertainty_sampling=Samplers.SOBOL\n",
" )\n",
" sa_results = evaluator.perform_experiments(scenarios=1000, uncertainty_sampling=Samplers.SOBOL)\n",
"\n",
"experiments, outcomes = sa_results\n",
"\n",
"problem = get_SALib_problem(model.uncertainties)\n",
"Si = sobol.analyze(\n",
" problem, outcomes[\"max_P\"], calc_second_order=True, print_to_console=False\n",
")"
"Si = sobol.analyze(problem, outcomes[\"max_P\"], calc_second_order=True, print_to_console=False)"
]
},
{
4 changes: 1 addition & 3 deletions docs/source/pyplots/basicEnvelope2.py
@@ -9,7 +9,5 @@
from analysis.plotting import envelopes

data = load_results(r"../../../src/analysis/1000 flu cases.cPickle", zipped=False)
-fig = envelopes(
-    data, group_by="policy", grouping_specifiers=["static policy", "adaptive policy"]
-)
+fig = envelopes(data, group_by="policy", grouping_specifiers=["static policy", "adaptive policy"])
plt.show()
4 changes: 1 addition & 3 deletions docs/source/pyplots/basicMultiplotDensity.py
@@ -5,9 +5,7 @@
from expWorkbench.util import load_results

# load the data
-experiments, results = load_results(
-    r"../../../src/analysis/1000 flu cases.cPickle", zipped=False
-)
+experiments, results = load_results(r"../../../src/analysis/1000 flu cases.cPickle", zipped=False)

# transform the results to the required format
newResults = {}
4 changes: 1 addition & 3 deletions docs/source/pyplots/primExample.py
@@ -37,9 +37,7 @@ def classify(data):
results = (newExperiments, newResults)

# perform prim on modified results tuple
-prims, uncertainties, x = prim.perform_prim(
-    results, classify, threshold=0.8, threshold_type=1
-)
+prims, uncertainties, x = prim.perform_prim(results, classify, threshold=0.8, threshold_type=1)

# visualize

8 changes: 1 addition & 7 deletions ema_workbench/__init__.py
@@ -21,13 +21,7 @@
ArrayOutcome,
Samplers,
)
-from .util import (
-    save_results,
-    load_results,
-    ema_logging,
-    EMAError,
-    process_replications,
-)
+from .util import save_results, load_results, ema_logging, EMAError, process_replications

# from . import analysis

8 changes: 2 additions & 6 deletions ema_workbench/analysis/b_and_w_plotting.py
@@ -194,9 +194,7 @@ def _set_ax_polycollection_to_bw(collection, ax, style, colormap):
collection.update({"alpha": 1})

for path in collection.get_paths():
-        p1 = mpl.patches.PathPatch(
-            path, fc="none", hatch=colormap[orig_color]["hatch"]
-        )
+        p1 = mpl.patches.PathPatch(path, fc="none", hatch=colormap[orig_color]["hatch"])
ax.add_patch(p1)
p1.set_zorder(collection.get_zorder() - 0.1)

@@ -335,9 +333,7 @@ def set_fig_to_bw(fig, style=HATCHING, line_style="continuous"):

if len(all_colors) > len(bw_mapping):
mapping_cycle = itertools.cycle(bw_mapping)
-        _logger.warning(
-            "more colors used than provided in B&W mapping, cycling over mapping"
-        )
+        _logger.warning("more colors used than provided in B&W mapping, cycling over mapping")
else:
mapping_cycle = bw_mapping
colormap = dict(zip(all_colors, mapping_cycle))
6 changes: 2 additions & 4 deletions ema_workbench/analysis/cart.py
@@ -304,7 +304,7 @@ def build_tree(self):

def show_tree(self, mplfig=True, format="png"):
"""return a png (defaults) or svg of the tree
On Windows, graphviz needs to be installed with conda.
Parameters
@@ -320,9 +320,7 @@ def show_tree(self, mplfig=True, format="png"):
import pydot # dirty hack for read the docs

dot_data = StringIO()
-        tree.export_graphviz(
-            self.clf, out_file=dot_data, feature_names=self.feature_names
-        )
+        tree.export_graphviz(self.clf, out_file=dot_data, feature_names=self.feature_names)
dot_data = dot_data.getvalue() # .encode('ascii') # @UndefinedVariable
graphs = pydot.graph_from_dot_data(dot_data)
