blacken the codebase #57

Merged: 10 commits, merged Jan 14, 2019
10 changes: 2 additions & 8 deletions .travis.yml
@@ -30,16 +30,10 @@ before_install:
- export PATH="$HOME/.pyenv/bin:$PATH"
- eval "$(pyenv init -)"
- pyenv install -s 2.7.15
- pyenv install -s 3.4.8
- pyenv install -s 3.5.5
- pyenv install -s 3.6.5
- pyenv install -s 3.7.0
- pyenv local 2.7.15 3.4.8 3.5.5 3.6.5 3.7.0
- |
if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then
brew install python
brew link --overwrite python
fi
- pyenv install -s 3.7.1
- pyenv local 2.7.15 3.5.5 3.6.5 3.7.1
- pip install tox tox-pyenv codecov

# Command to run tests, e.g. python setup.py test
14 changes: 9 additions & 5 deletions CONTRIBUTING.rst
@@ -91,15 +91,17 @@ Ready to contribute? Here's how to set up ``unyt`` for local development.

Now you can make your changes locally.

5. When you're done making changes, check that your changes pass flake8 and the
tests, including testing other Python versions with tox::
5. When you're done making changes, check that your changes pass flake8, format
the code with black, and run the tests, including testing several Python
versions with tox::

$ flake8 unyt
$ black ./
$ pytest --doctest-modules --doctest-rst --doctest-plus
$ tox

To get flake8, pytest, pytest-doctestplus, and tox, just pip install them
into your virtualenv.
To get ``flake8``, ``black``, ``pytest``, ``pytest-doctestplus``, and
``tox``, just pip install them into your virtualenv.
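
For example, one way to install all of these tools at once (assuming the
standard PyPI package names)::

$ pip install flake8 black pytest pytest-doctestplus tox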

6. Commit your changes and push your branch to GitHub::

Expand All @@ -118,7 +120,9 @@ the ``unyt`` repository, simply run ``pytest`` in the root of the repository::
$ cd unyt/
$ py.test --doctest-modules --doctest-rst --doctest-plus

You will need to install ``pytest`` and ``pytest-doctestplus`` from ``pip`` to run this command. Some tests depend on ``h5py``, ``Pint``, ``astropy``, and ``flake8`` being installed.
You will need to install ``pytest`` and ``pytest-doctestplus`` from ``pip`` to
run this command. Some tests depend on ``h5py``, ``Pint``, ``astropy``,
``black``, and ``flake8`` being installed.
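
For example, these optional test dependencies can also be installed with pip
(again assuming the standard PyPI package names)::

$ pip install h5py pint astropy black flake8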

If you would like to run the tests on multiple python versions, first ensure that you have multiple python versions visible on your ``$PATH``, then simply execute ``tox`` in the root of the ``unyt`` repository::
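
For example, a minimal sketch of the invocation described in the sentence
above::

$ cd unyt/
$ tox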

231 changes: 123 additions & 108 deletions benchmarks/bench.py
@@ -1,5 +1,6 @@
import matplotlib
matplotlib.use('agg')

matplotlib.use("agg")
from collections import OrderedDict
import contextlib
import io
@@ -10,6 +11,7 @@
import subprocess
import sys


@contextlib.contextmanager
def stdoutIO(stdout=None):
old = sys.stdout
@@ -23,10 +25,9 @@ def stdoutIO(stdout=None):
def run_perf(args, json_name):
if os.path.exists(json_name):
return
args = args + ['-o', json_name]
args = args + ["-o", json_name]
print(args)
p = subprocess.Popen(
args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
print(out.decode())
print(err.decode())
@@ -36,132 +37,139 @@ def make_plot(extension):
ratios = OrderedDict()
stddevs = OrderedDict()
benchmarks = OrderedDict()
np_bench = perf.Benchmark.load(
open('{}_{}'.format('numpy', extension), 'r'))
np_bench = perf.Benchmark.load(open("{}_{}".format("numpy", extension), "r"))
np_mean = np_bench.mean()
np_stddev = np_bench.stdev()
for package in setup:
if package == 'numpy':
if package == "numpy":
continue
benchmarks[package] = perf.Benchmark.load(
open('{}_{}'.format(package, extension), 'r'))
open("{}_{}".format(package, extension), "r")
)
mean = benchmarks[package].mean()
stddev = benchmarks[package].stdev()
ratios[package] = mean/np_mean
stddevs[package] = ratios[package]*np.sqrt(
(np_stddev/np_mean)**2 + (stddev/mean)**2)
ratios[package] = mean / np_mean
stddevs[package] = ratios[package] * np.sqrt(
(np_stddev / np_mean) ** 2 + (stddev / mean) ** 2
)
fig, ax = plt.subplots()
packages = list(ratios.keys())
ax.bar(packages, ratios.values(), yerr=stddevs.values())
fig.suptitle(extension.replace('.json', '').replace('_', ' ').title())
ax.set_ylabel('numpy overhead (x time for numpy); lower is better')
plt.savefig(extension.replace('.json', '.png'))
fig.suptitle(extension.replace(".json", "").replace("_", " ").title())
ax.set_ylabel("numpy overhead (x time for numpy); lower is better")
plt.savefig(extension.replace(".json", ".png"))
plt.close(fig)

if ratios['unyt'] != min(ratios.values()):
if ratios["unyt"] != min(ratios.values()):
rvalues = list(ratios.values())
svalues = list(stddevs.values())
unyt_index = packages.index('unyt')
unyt_index = packages.index("unyt")
min_index = rvalues.index(min(rvalues))
if ratios['unyt'] > 3*svalues[min_index] + rvalues[min_index]:
if ratios["unyt"] > 3 * svalues[min_index] + rvalues[min_index]:
for package in ratios:
script = get_script(benchmarks, package)
with stdoutIO() as s:
exec(script)
res = s.getvalue().replace('\n', '')
print("{}: {} +- {} ({})".format(
package, ratios[package], stddevs[package], res))
print(get_script(benchmarks, 'unyt'))
res = s.getvalue().replace("\n", "")
print(
"{}: {} +- {} ({})".format(
package, ratios[package], stddevs[package], res
)
)
print(get_script(benchmarks, "unyt"))


def get_script(benchmarks, package):
meta = benchmarks[package].get_metadata()
setup_s = meta['timeit_setup'][1:-1]
bench_s = 'print(' + meta['timeit_stmt'][1:-1] + ')'
script = setup_s + '; ' + bench_s
script = script.replace('; ', '\n')
setup_s = meta["timeit_setup"][1:-1]
bench_s = "print(" + meta["timeit_stmt"][1:-1] + ")"
script = setup_s + "; " + bench_s
script = script.replace("; ", "\n")
return script


setup = OrderedDict([
('numpy', 'import numpy as np'),
('pint', 'from pint import UnitRegistry; u = UnitRegistry()'),
('astropy', 'import astropy.units as u'),
('unyt', 'import unyt as u'),
])

base_args = ['python3.6', '-m', 'perf', 'timeit']

shared_setup = 'import numpy as np; import operator'

base_setups = OrderedDict([
('small_list', 'data = [1., 2., 3.]'),
('small_tuple', 'data = (1., 2., 3.)'),
('small_array', 'data = np.array([1., 2., 3.])'),
('big_list', 'data = (np.arange(1e6)+1).tolist()'),
('big_array', 'data = (np.arange(1e6)+1)'),
])

op_ufuncs = OrderedDict([
('operator.add', 'np.add'),
('operator.sub', 'np.subtract'),
('operator.mul', 'np.multiply'),
('operator.truediv', 'np.true_divide'),
('operator.eq', 'np.equal'),
])
setup = OrderedDict(
[
("numpy", "import numpy as np"),
("pint", "from pint import UnitRegistry; u = UnitRegistry()"),
("astropy", "import astropy.units as u"),
("unyt", "import unyt as u"),
]
)

base_args = ["python3.6", "-m", "perf", "timeit"]

shared_setup = "import numpy as np; import operator"

base_setups = OrderedDict(
[
("small_list", "data = [1., 2., 3.]"),
("small_tuple", "data = (1., 2., 3.)"),
("small_array", "data = np.array([1., 2., 3.])"),
("big_list", "data = (np.arange(1e6)+1).tolist()"),
("big_array", "data = (np.arange(1e6)+1)"),
]
)

op_ufuncs = OrderedDict(
[
("operator.add", "np.add"),
("operator.sub", "np.subtract"),
("operator.mul", "np.multiply"),
("operator.truediv", "np.true_divide"),
("operator.eq", "np.equal"),
]
)

for bs in base_setups:
for package in sorted(setup):
print(package)
setup_s = '; '.join([shared_setup, setup[package], base_setups[bs]])
args = base_args + ['-s', setup_s + ' ']
if package == 'numpy':
args.append('np.array(data)')
setup_s = "; ".join([shared_setup, setup[package], base_setups[bs]])
args = base_args + ["-s", setup_s + " "]
if package == "numpy":
args.append("np.array(data)")
else:
args.append('data*u.g')
json_name = '{}_{}_create.json'.format(package, bs)
args.append("data*u.g")
json_name = "{}_{}_create.json".format(package, bs)
run_perf(args, json_name)

if 'list' in bs or 'tuple' in bs:
if "list" in bs or "tuple" in bs:
continue

args = base_args + ['-s', setup_s +
'; data=np.asarray(data); out=data.copy()']
if package == 'numpy':
args[-1] += '; '
args = base_args + ["-s", setup_s + "; data=np.asarray(data); out=data.copy()"]
if package == "numpy":
args[-1] += "; "
else:
if package != 'pint':
args[-1] += '*u.g'
args[-1] += '; data = data*u.g '
if package != "pint":
args[-1] += "*u.g"
args[-1] += "; data = data*u.g "

args.append('data**2')
json_name = '{}_{}_square.json'.format(package, bs)
args.append("data**2")
json_name = "{}_{}_square.json".format(package, bs)
run_perf(args, json_name)

args[-1] = 'np.power(data, 2)'
json_name = '{}_{}_npsquare.json'.format(package, bs)
args[-1] = "np.power(data, 2)"
json_name = "{}_{}_npsquare.json".format(package, bs)
run_perf(args, json_name)

args[-1] = 'np.power(data, 2, out=out)'
json_name = '{}_{}_npsquareout.json'.format(package, bs)
args[-1] = "np.power(data, 2, out=out)"
json_name = "{}_{}_npsquareout.json".format(package, bs)
run_perf(args, json_name)

args[-1] = 'data**0.5'
json_name = '{}_{}_sqrt.json'.format(package, bs)
args[-1] = "data**0.5"
json_name = "{}_{}_sqrt.json".format(package, bs)
run_perf(args, json_name)

args[-1] = 'np.sqrt(data)'
json_name = '{}_{}_npsqrt.json'.format(package, bs)
args[-1] = "np.sqrt(data)"
json_name = "{}_{}_npsqrt.json".format(package, bs)
run_perf(args, json_name)

args[-1] = 'np.sqrt(data, out=out)'
json_name = '{}_{}_npsqrtout.json'.format(package, bs)
args[-1] = "np.sqrt(data, out=out)"
json_name = "{}_{}_npsqrtout.json".format(package, bs)
run_perf(args, json_name)



make_plot("{}_create.json".format(bs))
if 'list' not in bs and 'tuple' not in bs:
if "list" not in bs and "tuple" not in bs:
make_plot("{}_square.json".format(bs))
make_plot("{}_npsquare.json".format(bs))
make_plot("{}_npsquareout.json".format(bs))
@@ -171,41 +179,48 @@ def get_script(benchmarks, package):


for bs in base_setups:
if 'list' in bs or 'tuple' in bs:
if "list" in bs or "tuple" in bs:
continue
for op, ufunc in op_ufuncs.items():
for bench, bench_name in [
(op + r'(data1, data2)', op + '12.json'),
(op + r'(data2, data1)', op + '21.json'),
(ufunc + r'(data1, data2)', ufunc + '12.json'),
(ufunc + r'(data2, data1)', ufunc + '21.json'),
(ufunc + r'(data1, data2, out=out)', ufunc + '12out.json'),
(ufunc + r'(data2, data1, out=out)', ufunc + '21out.json'),
(op + r"(data1, data2)", op + "12.json"),
(op + r"(data2, data1)", op + "21.json"),
(ufunc + r"(data1, data2)", ufunc + "12.json"),
(ufunc + r"(data2, data1)", ufunc + "21.json"),
(ufunc + r"(data1, data2, out=out)", ufunc + "12out.json"),
(ufunc + r"(data2, data1, out=out)", ufunc + "21out.json"),
]:
for unit_choice in [('g', 'g'), ('kg', 'g')]:
for unit_choice in [("g", "g"), ("kg", "g")]:
for package in sorted(setup):
print(package)
setup_s = '; '.join(
[shared_setup, setup[package], base_setups[bs]]) + '; '
if 'out' in bench:
if (package not in ('pint', 'numpy') and
'equal' not in bench):
setup_s += 'out=data*u.{}; '.format(unit_choice[0])
setup_s = (
"; ".join([shared_setup, setup[package], base_setups[bs]])
+ "; "
)
if "out" in bench:
if package not in ("pint", "numpy") and "equal" not in bench:
setup_s += "out=data*u.{}; ".format(unit_choice[0])
else:
setup_s += 'out=np.array(data); '
if package == 'numpy':
setup_s += '; '.join([r'data1 = np.array(data)',
r'data2 = np.array(data)'])
setup_s += "out=np.array(data); "
if package == "numpy":
setup_s += "; ".join(
[r"data1 = np.array(data)", r"data2 = np.array(data)"]
)
if unit_choice[0] != unit_choice[1]:
_bench = bench.replace('data1', '.001*data1')
_bench = bench.replace("data1", ".001*data1")
else:
setup_s += '; '.join(
['data1 = data*u.{}'.format(unit_choice[0]),
'data2 = data*u.{}'.format(unit_choice[1])])
setup_s += "; ".join(
[
"data1 = data*u.{}".format(unit_choice[0]),
"data2 = data*u.{}".format(unit_choice[1]),
]
)
_bench = bench
args = base_args + ['-s', setup_s + ' ']
json_name = '{}_{}_{}{}'.format(
package, bs, unit_choice[0], unit_choice[1])
run_perf(args + [_bench], json_name + '_' + bench_name)
make_plot("{}_{}{}_{}".format(
bs, unit_choice[0], unit_choice[1], bench_name))
args = base_args + ["-s", setup_s + " "]
json_name = "{}_{}_{}{}".format(
package, bs, unit_choice[0], unit_choice[1]
)
run_perf(args + [_bench], json_name + "_" + bench_name)
make_plot(
"{}_{}{}_{}".format(bs, unit_choice[0], unit_choice[1], bench_name)
)