Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

TST: Add pytest-flake8 to travis tests #38

Merged
merged 15 commits into from
Apr 21, 2020
3 changes: 2 additions & 1 deletion .travis.yml
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@ addons:

before_install:
- pip install pytest-cov
- pip install pytest-flake8
- pip install coveralls
- pip install future
- pip install numpy
Expand Down Expand Up @@ -41,7 +42,7 @@ install:
- python setup.py install

script:
- pytest -vs --cov=pysatModels/
- pytest -vs --cov=pysatModels/ --flake8

after_success:
- coveralls
9 changes: 5 additions & 4 deletions CONTRIBUTING.md
Original file line number Diff line number Diff line change
Expand Up @@ -57,21 +57,22 @@ To set up `pysatModels` for local development:
``test_``.

4. When you're done making changes, run all the checks to ensure that nothing
is broken on your local system::
is broken on your local system. You may need to install pytest and
pytest-flake8 first. ::
aburrell marked this conversation as resolved.
Show resolved Hide resolved


pytest -vs
pytest -vs --flake8

5. Update/add documentation (in ``docs``), if relevant

5. Commit your changes and push your branch to GitHub::
6. Commit your changes and push your branch to GitHub::


git add .
git commit -m "Brief description of your changes"
git push origin name-of-your-bugfix-or-feature

6. Submit a pull request through the GitHub website. Pull requests should be
7. Submit a pull request through the GitHub website. Pull requests should be
made to the ``develop`` branch.

Pull Request Guidelines
Expand Down
7 changes: 4 additions & 3 deletions pysatModels/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
# -----------------------------------------------------------------------------
"""
pysatModels
===============
===========

Model utilities designed to facilitate studies that integrate observational
and modelled data sets.
Expand All @@ -18,8 +18,9 @@
import logging
import os

from pysatModels import (utils)
from pysatModels import (models)
# Import key modules and skip F401 testing in flake8
from pysatModels import (utils) # noqa: F401
from pysatModels import (models) # noqa: F401

# set the version
here = os.path.abspath(os.path.dirname(__file__))
Expand Down
3 changes: 2 additions & 1 deletion pysatModels/models/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,4 +14,5 @@
from __future__ import absolute_import
from __future__ import unicode_literals

from pysatModels.models import ucar_tiegcm
# Import key modules and skip F401 testing in flake8
from pysatModels.models import ucar_tiegcm # noqa: F401
3 changes: 2 additions & 1 deletion pysatModels/tests/test_utils_extract.py
Original file line number Diff line number Diff line change
Expand Up @@ -87,7 +87,8 @@ def test_bad_arg_input(self, bad_index, bad_input, err_msg):
"interpn only understands the methods"),
("model_label", 1, "Unknown format code "),
("time_method", "fun", "unknown time method"),
("pair_method", "fun", "unknown pairing method")])
("pair_method", "fun",
"unknown pairing method")])
def test_bad_kwarg_input(self, bad_key, bad_val, err_msg):
""" Test for expected failure with bad kwarg input """
kwargs = {bad_key: bad_val}
Expand Down
13 changes: 8 additions & 5 deletions pysatModels/tests/test_utils_match.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,6 @@

import pysat

import pysatModels as ps_mod
import pysatModels.utils.match as match


Expand Down Expand Up @@ -77,12 +76,16 @@ def teardown(self):
del self.input_args, self.required_kwargs, self.inst, self.model

@pytest.mark.parametrize("del_key,err_msg",
[("inst_lon_name", "Need longitude name for inst"),
("mod_lon_name", "Need longitude name for model"),
("inst_name", "Must provide instrument location"),
[("inst_lon_name",
"Need longitude name for inst"),
("mod_lon_name",
"Need longitude name for model"),
("inst_name",
"Must provide instrument location"),
("mod_name", "Must provide the same number"),
("mod_units", "Must provide units for each "),
("mod_datetime_name", "Need datetime coordinate"),
("mod_datetime_name",
"Need datetime coordinate"),
("mod_time_name", "Need time coordinate"),
("inst_clean_rout", "Need routine to clean")])
def test_input_failure(self, del_key, err_msg):
Expand Down
11 changes: 6 additions & 5 deletions pysatModels/utils/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,14 +5,15 @@
# -----------------------------------------------------------------------------
"""
pysatModels.utils
=====================
=================

Utilities designed to extract, match, and compare modelled and observed data

"""

from __future__ import absolute_import, unicode_literals
from __future__ import absolute_import, unicode_literals

from pysatModels.utils import extract
from pysatModels.utils import match
from pysatModels.utils import compare
# Import key modules and skip F401 testing in flake8
from pysatModels.utils import extract # noqa: F401
from pysatModels.utils import match # noqa: F401
from pysatModels.utils import compare # noqa: F401
12 changes: 6 additions & 6 deletions pysatModels/utils/compare.py
Original file line number Diff line number Diff line change
Expand Up @@ -116,8 +116,8 @@ def compare_model_and_inst(pairs=None, inst_name=[], mod_name=[],
raise ValueError('must provide Dataset of paired observations')

if len(inst_name) != len(mod_name):
raise ValueError('must provide equal number of instrument and model ' +
'data names for comparison')
raise ValueError(''.join(['must provide equal number of instrument ',
'and model data names for comparison']))

if not np.all([iname in pairs.data_vars.keys() for iname in inst_name]):
raise ValueError('unknown instrument data value supplied')
Expand All @@ -130,9 +130,9 @@ def compare_model_and_inst(pairs=None, inst_name=[], mod_name=[],
known_methods.extend(list(grouped_methods.keys()))
unknown_methods = [mm for mm in methods
if mm not in list(method_rout.keys())]
raise ValueError('unknown statistical method(s) requested:\n' +
'{:}\nuse only:\n{:}'.format(unknown_methods,
known_methods))
raise ValueError(''.join(['unknown statistical method(s) requested:\n',
'{:}\nuse only:\n'.format(unknown_methods),
'{:}'.format(known_methods)]))

# Initialize the output
stat_dict = {iname: dict() for iname in inst_name}
Expand Down Expand Up @@ -170,6 +170,6 @@ def compare_model_and_inst(pairs=None, inst_name=[], mod_name=[],
# instead of stopping processing. Only valid statistics will
# be included in output
ps_mod.logger.info("{:s} can't use {:s}: {:}".format(iname,
mm, err))
mm, err))

return stat_dict, data_units
71 changes: 35 additions & 36 deletions pysatModels/utils/extract.py
Original file line number Diff line number Diff line change
Expand Up @@ -99,8 +99,8 @@ def instrument_altitude_to_model_pressure(inst, model, inst_name, mod_name,
raise ValueError(estr)

if len(mod_name) != len(mod_units):
raise ValueError('Must provide units for each model location ' +
'attribute')
raise ValueError(''.join(['Must provide units for each model location',
' attribute']))

if mod_time_name not in model.coords:
raise ValueError("Unknown model time coordinate key name")
Expand All @@ -122,21 +122,22 @@ def instrument_altitude_to_model_pressure(inst, model, inst_name, mod_name,
inst.units_label])

# create initial fake regular grid index in inst
inst_model_coord = inst[inst_name[0]]*0
inst_model_coord = inst[inst_name[0]] * 0

# we need to create altitude index from model
# collect relevant inputs
# First, model locations for interpolation
# we use the dimensions associated with model altitude
# in the order provided
points = [model[dim].values/temp_scale for dim, temp_scale in zip(mod_name,
inst_scale)]
points = [model[dim].values / temp_scale
for dim, temp_scale in zip(mod_name, inst_scale)]
# time first
points.insert(0, model[mod_datetime_name].values.astype(int))

# create interpolator
interp = interpolate.RegularGridInterpolator(points,
np.log(model[mod_alt].values/alt_scale),
np.log(model[mod_alt].values
/ alt_scale),
bounds_error=False,
fill_value=None)
# use this interpolator to figure out what altitudes we are at
Expand All @@ -149,7 +150,7 @@ def instrument_altitude_to_model_pressure(inst, model, inst_name, mod_name,
# log of instrument altitude
log_ialt = np.log(inst[inst_alt])
# initial difference signal
diff = log_ialt*0 + 2.*tol
diff = log_ialt * 0 + 2.0 * tol
while np.any(np.abs(diff) > tol):
# create input array using satellite time/position
# replace the altitude coord with the fake tiegcm one
Expand All @@ -160,7 +161,7 @@ def instrument_altitude_to_model_pressure(inst, model, inst_name, mod_name,
coords.append(inst_model_coord)
else:
# scale other dimensions to the model
coords.append(inst[coord]*iscale)
coords.append(inst[coord] * iscale)

coords.insert(0, inst.index.values.astype(int))
# to peform the interpolation we need points
Expand All @@ -179,7 +180,7 @@ def instrument_altitude_to_model_pressure(inst, model, inst_name, mod_name,
# shift index in inst for model pressure level
# in the opposite direction to diff
# reduced value by scale, the 'scale height'
inst_model_coord -= diff/scale
inst_model_coord -= diff / scale

# achieved model altitude
inst[inst_out_alt] = np.e**orbit_alt
Expand Down Expand Up @@ -287,8 +288,8 @@ def instrument_view_through_model(inst, model, inst_name, mod_name,
raise ValueError(estr)

if len(mod_name) != len(mod_units):
raise ValueError('Must provide units for each model location ' +
'attribute')
raise ValueError(''.join(['Must provide units for each model location',
' attribute']))

if mod_time_name not in model.coords:
raise ValueError("Unknown model time coordinate key name")
Expand Down Expand Up @@ -324,14 +325,12 @@ def instrument_view_through_model(inst, model, inst_name, mod_name,
points.append(model[mod_datetime_name].values.astype(int))
# now spatial
for iscale, var in zip(inst_scale, mod_name):
points.append(model[var].values/iscale)
points.append(model[var].values / iscale)

# create the interpolator
interp[label] = interpolate.RegularGridInterpolator(points,
model[label].values,
bounds_error=False,
fill_value=None,
method=method)
interp[label] = interpolate.RegularGridInterpolator(
points, model[label].values, bounds_error=False, fill_value=None,
method=method)
# apply it at observed locations and store result
output_names.append('_'.join((model_label, label)))
inst[output_names[-1]] = interp[label](inst_pts)
Expand Down Expand Up @@ -416,8 +415,8 @@ def instrument_view_irregular_model(inst, model, inst_name, mod_name,
raise ValueError(estr)

if len(mod_name) != len(mod_units):
raise ValueError('Must provide units for each model location ' +
'attribute')
raise ValueError(''.join(['Must provide units for each model location',
' attribute']))

# ensure coordinate dimensions match
for var in sel_name:
Expand Down Expand Up @@ -447,8 +446,8 @@ def instrument_view_irregular_model(inst, model, inst_name, mod_name,
inst.meta[iname, inst.units_label])

# First, model locations for interpolation (regulargrid)
coords = [model[dim].values/temp_scale for dim, temp_scale in zip(mod_name,
inst_scale)]
coords = [model[dim].values / temp_scale
for dim, temp_scale in zip(mod_name, inst_scale)]
# time first
coords.insert(0, model[mod_datetime_name].values.astype(int))

Expand Down Expand Up @@ -489,11 +488,11 @@ def instrument_view_irregular_model(inst, model, inst_name, mod_name,
else:
max_sel_val = max_pts_alt
# perform downselection
idx, = np.where((points[:, update_dim] >= min_sel_val) &
(points[:, update_dim] <= max_sel_val))
idx, = np.where((points[:, update_dim] >= min_sel_val)
& (points[:, update_dim] <= max_sel_val))
points = points[idx, :]
ps_mod.logger.debug('Remaining points after downselection '
+ str(len(idx)))
ps_mod.logger.debug('Remaining points after downselection {:d}'.format(
len(idx)))

# create input array using inst time/position
coords = [inst[coord] for coord in inst_name]
Expand Down Expand Up @@ -599,8 +598,8 @@ def extract_modelled_observations(inst, model, inst_name, mod_name,
raise ValueError(estr)

if len(mod_name) != len(mod_units):
raise ValueError('Must provide units for each model location ' +
'attribute')
raise ValueError(''.join(['Must provide units for each model location',
' attribute']))

if mod_time_name not in model.coords:
raise ValueError("Unknown model time coordinate key name")
Expand All @@ -613,7 +612,7 @@ def extract_modelled_observations(inst, model, inst_name, mod_name,

# Ensure mod_name is a list
mod_name = list(mod_name)

# Remove any model coordinates from the modelled data to interpolate
sel_name = sel_name[[mdat not in mod_name for mdat in sel_name]]

Expand Down Expand Up @@ -663,14 +662,15 @@ def extract_modelled_observations(inst, model, inst_name, mod_name,
# resolution of a model run
mind = list()
iind = list()
del_sec = abs(mod_datetime-inst.index[:, np.newaxis]).astype(float) * 1.0e-9
del_sec = abs(mod_datetime
- inst.index[:, np.newaxis]).astype(float) * 1.0e-9
for inst_ind, mod_ind in enumerate(del_sec.argmin(axis=1)):
if del_sec[inst_ind, mod_ind] <= min_del:
if mod_ind in mind and pair_method == 'closest':
# Test to see if this model observation has multiple pairings
old_ind = mind.index(mod_ind)
if(del_sec[inst_ind, mod_ind] <
del_sec[iind[old_ind], mind[old_ind]]):
if(del_sec[inst_ind, mod_ind]
< del_sec[iind[old_ind], mind[old_ind]]):
# If this one is closer, keep it
iind[old_ind] = inst_ind
mind[old_ind] = mod_ind
Expand Down Expand Up @@ -709,7 +709,6 @@ def extract_modelled_observations(inst, model, inst_name, mod_name,

# Determine the dimension values
dims = list(model.data_vars[mdat].dims)
ndim = model.data_vars[mdat].data.shape
indices = {mod_time_name: mind[i]}

# Construct the data needed for interpolation, ensuring that
Expand Down Expand Up @@ -753,7 +752,7 @@ def extract_modelled_observations(inst, model, inst_name, mod_name,
if icycles < ncycles or icycles == 0:
ss = [ii if k == 0 else 0 for k in range(idims)]
se = [ii + 1 if k == 0 else
len(inst.data.coords[idim_names[k-1]])
len(inst.data.coords[idim_names[k - 1]])
for k in range(idims)]
xout = [cinds[ind_dims.index(k)] if k in ind_dims
else slice(ss[k], se[k]) for k in range(idims)]
Expand All @@ -779,11 +778,11 @@ def extract_modelled_observations(inst, model, inst_name, mod_name,
k = 0
cinds[k] += 1

while cinds[k] > \
inst.data.coords.dims[inst_name[imod_dims[k]]]:
while cinds[k] > inst.data.coords.dims[
inst_name[imod_dims[k]]]:
k += 1
if k < len(cinds):
cinds[k-1] = 0
cinds[k - 1] = 0
cinds[k] += 1
else:
break
Expand Down
Loading