Revisit # noqa annotations #3359

Merged 2 commits on Oct 1, 2019
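For context: a bare "# noqa" silences every flake8 check on its line, while "# noqa: F401" silences only pyflakes' "imported but unused" warning, which is the one these capability-probe imports actually trigger. A minimal runnable sketch of the pattern this PR standardizes, with json standing in for an optional dependency:

def requires_optional_dependency():
    try:
        # The import is only an availability probe, so pyflakes would flag
        # it as F401 ("imported but unused"). Scoping the suppression to
        # F401 keeps every other flake8 check active on this line.
        import json  # noqa: F401
    except ImportError:
        raise NotImplementedError("optional dependency is not installed")

The '"""  # noqa' removals below drop suppressions that evidently no longer guard anything flake8 reports; the remaining long docstring lines appear to be handled by project-level configuration rather than per-line comments.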
2 changes: 1 addition & 1 deletion asv_bench/benchmarks/__init__.py
@@ -16,7 +16,7 @@ def decorator(func):

 def requires_dask():
     try:
-        import dask  # noqa
+        import dask  # noqa: F401
     except ImportError:
         raise NotImplementedError

2 changes: 1 addition & 1 deletion asv_bench/benchmarks/dataarray_missing.py
@@ -5,7 +5,7 @@
 from . import randn, requires_dask
 
 try:
-    import dask  # noqa
+    import dask  # noqa: F401
 except ImportError:
     pass

2 changes: 1 addition & 1 deletion doc/examples/_code/weather_data_setup.py
@@ -1,6 +1,6 @@
 import numpy as np
 import pandas as pd
-import seaborn as sns  # noqa, pandas aware plotting library
+import seaborn as sns
 
 import xarray as xr

2 changes: 1 addition & 1 deletion doc/gallery/plot_cartopy_facetgrid.py
@@ -12,7 +12,7 @@
 For more details see `this discussion`_ on github.
 
 .. _this discussion: https://github.com/pydata/xarray/issues/1397#issuecomment-299190567
-"""  # noqa
+"""
 
 
 import cartopy.crs as ccrs
2 changes: 1 addition & 1 deletion setup.py
@@ -86,7 +86,7 @@
 - Issue tracker: http://github.com/pydata/xarray/issues
 - Source code: http://github.com/pydata/xarray
 - SciPy2015 talk: https://www.youtube.com/watch?v=X0pAhJgySxk
-"""  # noqa
+"""
 
 
 setup(
73 changes: 35 additions & 38 deletions xarray/backends/api.py
@@ -42,12 +42,12 @@

 def _get_default_engine_remote_uri():
     try:
-        import netCDF4  # noqa
+        import netCDF4  # noqa: F401
 
         engine = "netcdf4"
     except ImportError:  # pragma: no cover
         try:
-            import pydap  # noqa
+            import pydap  # noqa: F401
 
             engine = "pydap"
         except ImportError:
@@ -61,13 +61,13 @@ def _get_default_engine_remote_uri():
 def _get_default_engine_grib():
     msgs = []
     try:
-        import Nio  # noqa
+        import Nio  # noqa: F401
 
         msgs += ["set engine='pynio' to access GRIB files with PyNIO"]
     except ImportError:  # pragma: no cover
         pass
     try:
-        import cfgrib  # noqa
+        import cfgrib  # noqa: F401
 
         msgs += ["set engine='cfgrib' to access GRIB files with cfgrib"]
     except ImportError:  # pragma: no cover
@@ -80,7 +80,7 @@ def _get_default_engine_grib():

 def _get_default_engine_gz():
     try:
-        import scipy  # noqa
+        import scipy  # noqa: F401
 
         engine = "scipy"
     except ImportError:  # pragma: no cover
@@ -90,12 +90,12 @@

 def _get_default_engine_netcdf():
     try:
-        import netCDF4  # noqa
+        import netCDF4  # noqa: F401
 
         engine = "netcdf4"
     except ImportError:  # pragma: no cover
         try:
-            import scipy.io.netcdf  # noqa
+            import scipy.io.netcdf  # noqa: F401
 
             engine = "scipy"
         except ImportError:
@@ -722,44 +722,41 @@ def open_mfdataset(
 ):
     """Open multiple files as a single dataset.
 
-    If combine='by_coords' then the function ``combine_by_coords`` is used to
-    combine the datasets into one before returning the result, and if
-    combine='nested' then ``combine_nested`` is used. The filepaths must be
-    structured according to which combining function is used, the details of
-    which are given in the documentation for ``combine_by_coords`` and
-    ``combine_nested``. By default the old (now deprecated) ``auto_combine``
-    will be used, please specify either ``combine='by_coords'`` or
-    ``combine='nested'`` in future. Requires dask to be installed. See
-    documentation for details on dask [1]. Attributes from the first dataset
-    file are used for the combined dataset.
+    If combine='by_coords' then the function ``combine_by_coords`` is used to combine
+    the datasets into one before returning the result, and if combine='nested' then
+    ``combine_nested`` is used. The filepaths must be structured according to which
+    combining function is used, the details of which are given in the documentation for
+    ``combine_by_coords`` and ``combine_nested``. By default the old (now deprecated)
+    ``auto_combine`` will be used, please specify either ``combine='by_coords'`` or
+    ``combine='nested'`` in future. Requires dask to be installed. See documentation for
+    details on dask [1]. Attributes from the first dataset file are used for the
+    combined dataset.
 
     Parameters
     ----------
     paths : str or sequence
-        Either a string glob in the form "path/to/my/files/*.nc" or an explicit
-        list of files to open. Paths can be given as strings or as pathlib
-        Paths. If concatenation along more than one dimension is desired, then
-        ``paths`` must be a nested list-of-lists (see ``manual_combine`` for
-        details). (A string glob will be expanded to a 1-dimensional list.)
+        Either a string glob in the form "path/to/my/files/*.nc" or an explicit list of
+        files to open. Paths can be given as strings or as pathlib Paths. If
+        concatenation along more than one dimension is desired, then ``paths`` must be a
+        nested list-of-lists (see ``manual_combine`` for details). (A string glob will
+        be expanded to a 1-dimensional list.)
     chunks : int or dict, optional
-        Dictionary with keys given by dimension names and values given by chunk
-        sizes. In general, these should divide the dimensions of each dataset.
-        If int, chunk each dimension by ``chunks``.
-        By default, chunks will be chosen to load entire input files into
-        memory at once. This has a major impact on performance: please see the
-        full documentation for more details [2].
+        Dictionary with keys given by dimension names and values given by chunk sizes.
+        In general, these should divide the dimensions of each dataset. If int, chunk
+        each dimension by ``chunks``. By default, chunks will be chosen to load entire
+        input files into memory at once. This has a major impact on performance: please
+        see the full documentation for more details [2].
     concat_dim : str, or list of str, DataArray, Index or None, optional
-        Dimensions to concatenate files along. You only
-        need to provide this argument if any of the dimensions along which you
-        want to concatenate is not a dimension in the original datasets, e.g.,
-        if you want to stack a collection of 2D arrays along a third dimension.
-        Set ``concat_dim=[..., None, ...]`` explicitly to
+        Dimensions to concatenate files along. You only need to provide this argument
+        if any of the dimensions along which you want to concatenate is not a dimension
+        in the original datasets, e.g., if you want to stack a collection of 2D arrays
+        along a third dimension. Set ``concat_dim=[..., None, ...]`` explicitly to
         disable concatenation along a particular dimension.
     combine : {'by_coords', 'nested'}, optional
-        Whether ``xarray.combine_by_coords`` or ``xarray.combine_nested`` is
-        used to combine all the data. If this argument is not provided,
-        `xarray.auto_combine` is used, but in the future this behavior will
-        switch to use `xarray.combine_by_coords` by default.
+        Whether ``xarray.combine_by_coords`` or ``xarray.combine_nested`` is used to
+        combine all the data. If this argument is not provided, `xarray.auto_combine` is
+        used, but in the future this behavior will switch to use
+        `xarray.combine_by_coords` by default.
     compat : {'identical', 'equals', 'broadcast_equals',
               'no_conflicts', 'override'}, optional
         String indicating how to compare variables of the same name for
@@ -854,7 +851,7 @@ def open_mfdataset(

     .. [1] http://xarray.pydata.org/en/stable/dask.html
     .. [2] http://xarray.pydata.org/en/stable/dask.html#chunking-and-performance
-    """  # noqa
+    """
     if isinstance(paths, str):
         if is_remote_uri(paths):
             raise ValueError(
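As the docstring above says, combine='by_coords' or combine='nested' should now be passed explicitly while auto_combine is deprecated. A hedged usage sketch (the file glob and chunk size are hypothetical):

import xarray as xr

# Open a collection of NetCDF files as one dataset. combine_by_coords
# orders the pieces by their coordinate values, and `chunks` makes dask
# load each file lazily instead of reading everything into memory.
ds = xr.open_mfdataset(
    "data/temperature_*.nc",  # hypothetical path glob
    combine="by_coords",
    chunks={"time": 365},
)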
4 changes: 1 addition & 3 deletions xarray/backends/locks.py
@@ -21,9 +21,7 @@
NETCDFC_LOCK = SerializableLock()


-_FILE_LOCKS = (
-    weakref.WeakValueDictionary()
-)  # type: MutableMapping[Any, threading.Lock]  # noqa
+_FILE_LOCKS = weakref.WeakValueDictionary()  # type: MutableMapping[Any, threading.Lock]


def _get_threaded_lock(key):
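The locks.py change collapses the assignment onto one line so the PEP 484 type comment applies without a trailing blanket "# noqa". For comparison, a short sketch of the two annotation styles (variable names hypothetical):

import threading
import weakref
from typing import Any, MutableMapping

# PEP 484 comment-style annotation, as in the diff above:
file_locks = weakref.WeakValueDictionary()  # type: MutableMapping[Any, threading.Lock]

# Equivalent PEP 526 variable annotation, available on Python 3.6+:
typed_locks: MutableMapping[Any, threading.Lock] = weakref.WeakValueDictionary()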
2 changes: 1 addition & 1 deletion xarray/conventions.py
@@ -753,7 +753,7 @@ def cf_encoder(variables, attributes):
     for var in new_vars.values():
         bounds = var.attrs["bounds"] if "bounds" in var.attrs else None
         if bounds and bounds in new_vars:
-            # see http://cfconventions.org/cf-conventions/cf-conventions.html#cell-boundaries  # noqa
+            # see http://cfconventions.org/cf-conventions/cf-conventions.html#cell-boundaries
             for attr in [
                 "units",
                 "standard_name",
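The loop above (truncated by the diff view) handles CF cell boundaries: per the linked convention, a bounds variable inherits attributes such as units from its parent, so duplicates on the bounds variable itself can be dropped at encoding time. A hedged sketch of the idea, not the exact xarray implementation (helper name and attribute list hypothetical):

def strip_duplicate_bounds_attrs(new_vars, attrs=("units", "standard_name")):
    # new_vars maps names to objects with an .attrs dict. For each variable
    # that declares a bounds variable, drop the listed attributes from the
    # bounds variable, since CF treats them as inherited from the parent.
    for var in new_vars.values():
        bounds = var.attrs.get("bounds")
        if bounds and bounds in new_vars:
            for attr in attrs:
                new_vars[bounds].attrs.pop(attr, None)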
16 changes: 8 additions & 8 deletions xarray/core/alignment.py
@@ -13,8 +13,8 @@
 from .variable import IndexVariable, Variable
 
 if TYPE_CHECKING:
-    from .dataarray import DataArray  # noqa: F401
-    from .dataset import Dataset  # noqa: F401
+    from .dataarray import DataArray
+    from .dataset import Dataset


def _get_joiner(join):
@@ -350,8 +350,8 @@ def deep_align(

     This function is not public API.
     """
-    from .dataarray import DataArray  # noqa: F811
-    from .dataset import Dataset  # noqa: F811
+    from .dataarray import DataArray
+    from .dataset import Dataset

if indexes is None:
indexes = {}
@@ -411,7 +411,7 @@ def is_alignable(obj):


 def reindex_like_indexers(
-    target: Union["DataArray", "Dataset"], other: Union["DataArray", "Dataset"]
+    target: "Union[DataArray, Dataset]", other: "Union[DataArray, Dataset]"
Inline review thread on this line:

Contributor Author: Work around PyCQA/pyflakes#453

Collaborator: We could add as a comment to the code

Contributor Author: Meh, if you try changing it flake8 will bomb out. Also it's temporary until either the upstream issue is fixed or we drop Python 3.6 (whatever happens first).
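A self-contained sketch of the workaround under discussion (the public xarray names stand in for the relative imports used in alignment.py):

from typing import TYPE_CHECKING, Union

if TYPE_CHECKING:
    # Type-checking-only imports; pyflakes sees their use inside the quoted
    # annotation below, so no "# noqa: F401" is needed.
    from xarray import DataArray, Dataset


def reindex_like_indexers_sketch(
    target: "Union[DataArray, Dataset]", other: "Union[DataArray, Dataset]"
) -> None:
    # Quoting the whole annotation, rather than the names inside it as in
    # Union["DataArray", "Dataset"], works around PyCQA/pyflakes#453.
    ...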

) -> Dict[Hashable, pd.Index]:
"""Extract indexers to align target with other.

@@ -503,7 +503,7 @@ def reindex_variables(
     new_indexes : OrderedDict
         Dict of indexes associated with the reindexed variables.
     """
-    from .dataarray import DataArray  # noqa: F811
+    from .dataarray import DataArray
 
     # create variables for the new dataset
     reindexed = OrderedDict()  # type: OrderedDict[Any, Variable]
@@ -600,8 +600,8 @@ def _get_broadcast_dims_map_common_coords(args, exclude):

 def _broadcast_helper(arg, exclude, dims_map, common_coords):
 
-    from .dataarray import DataArray  # noqa: F811
-    from .dataset import Dataset  # noqa: F811
+    from .dataarray import DataArray
+    from .dataset import Dataset
 
     def _set_dims(var):
         # Add excluded dims to a copy of dims_map
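The repeated function-local imports in alignment.py exist because importing DataArray and Dataset at module level would be circular: the TYPE_CHECKING block supplies the names for annotations, and each function re-imports them at runtime. The dropped "# noqa: F811" comments had been suppressing pyflakes' redefinition warning for exactly that pattern. A compact sketch:

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Annotation-only import; never executed at runtime.
    from xarray import DataArray


def broadcast_sketch(arg: "DataArray") -> "DataArray":
    # Runtime import, deferred to avoid a circular module dependency.
    # Older pyflakes releases flagged this re-import as F811.
    from xarray import DataArray

    assert isinstance(arg, DataArray)
    return arg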
10 changes: 5 additions & 5 deletions xarray/core/common.py
@@ -293,7 +293,7 @@ def _ipython_key_completions_(self) -> List[str]:
"""Provide method for the key-autocompletions in IPython.
See http://ipython.readthedocs.io/en/stable/config/integrating.html#tab-completion
For the details.
""" # noqa
"""
item_lists = [
item
for sublist in self._item_sources
@@ -669,7 +669,7 @@ def groupby(self, group, squeeze: bool = True, restore_coord_dims: bool = None):
         --------
         core.groupby.DataArrayGroupBy
         core.groupby.DatasetGroupBy
-        """  # noqa
+        """
         return self._groupby_cls(
             self, group, squeeze=squeeze, restore_coord_dims=restore_coord_dims
         )
@@ -732,7 +732,7 @@ def groupby_bins(
         References
         ----------
         .. [1] http://pandas.pydata.org/pandas-docs/stable/generated/pandas.cut.html
-        """  # noqa
+        """
         return self._groupby_cls(
             self,
             group,
@@ -808,7 +808,7 @@ def rolling(
         --------
         core.rolling.DataArrayRolling
         core.rolling.DatasetRolling
-        """  # noqa
+        """
         dim = either_dict_or_kwargs(dim, window_kwargs, "rolling")
         return self._rolling_cls(self, dim, min_periods=min_periods, center=center)

@@ -1005,7 +1005,7 @@ def resample(
         ----------
 
         .. [1] http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases
-        """  # noqa
+        """
         # TODO support non-string indexer after removing the old API.
 
         from .dataarray import DataArray
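The resample docstring above points at pandas' offset aliases (reference [1]). A hedged usage sketch with hypothetical data:

import numpy as np
import pandas as pd
import xarray as xr

da = xr.DataArray(
    np.arange(12.0),
    dims="time",
    coords={"time": pd.date_range("2019-01-01", periods=12, freq="MS")},
)

# "QS" is a pandas offset alias for quarter start; any alias from the
# linked pandas table works here.
quarterly_mean = da.resample(time="QS").mean()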
2 changes: 1 addition & 1 deletion xarray/core/dataarray.py
@@ -3054,7 +3054,7 @@ def integrate(
         return self._from_temp_dataset(ds)
 
     # this needs to be at the end, or mypy will confuse with `str`
-    # https://mypy.readthedocs.io/en/latest/common_issues.html#dealing-with-conflicting-names  # noqa
+    # https://mypy.readthedocs.io/en/latest/common_issues.html#dealing-with-conflicting-names
     str = property(StringAccessor)


8 changes: 4 additions & 4 deletions xarray/core/dataset.py
@@ -1063,7 +1063,7 @@ def copy(self, deep: bool = False, data: Mapping = None) -> "Dataset":
         See Also
         --------
         pandas.DataFrame.copy
-        """  # noqa
+        """
         if data is None:
             variables = OrderedDict(
                 (k, v.copy(deep=deep)) for k, v in self._variables.items()
@@ -1714,7 +1714,7 @@ def chunk(
             from dask.base import tokenize
         except ImportError:
             # raise the usual error if dask is entirely missing
-            import dask  # noqa
+            import dask  # noqa: F401
 
             raise ImportError("xarray requires dask version 0.9 or newer")
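The bare import in the except branch is what distinguishes "dask is missing" from "dask is too old": if dask is absent the import re-raises the ordinary ImportError for the user, otherwise execution falls through to the version hint. The idiom in isolation (version string as in the diff):

try:
    from dask.base import tokenize
except ImportError:
    # Dask missing entirely: this import raises the usual ImportError.
    # Dask present but too old to provide dask.base.tokenize: fall
    # through and raise the version hint instead.
    import dask  # noqa: F401

    raise ImportError("xarray requires dask version 0.9 or newer")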

@@ -4178,7 +4178,7 @@ def apply(
         Data variables:
             foo (dim_0, dim_1) float64 0.3751 1.951 1.945 0.2948 0.711 0.3948
             bar (x) float64 1.0 2.0
-        """  # noqa
+        """
         variables = OrderedDict(
             (k, maybe_wrap_array(v, func(v, *args, **kwargs)))
             for k, v in self.data_vars.items()
@@ -5381,7 +5381,7 @@ def filter_by_attrs(self, **kwargs):
             temperature (x, y, time) float64 25.86 20.82 6.954 23.13 10.25 11.68 ...
             precipitation (x, y, time) float64 5.702 0.9422 2.075 1.178 3.284 ...
 
-        """  # noqa
+        """
         selection = []
         for var_name, variable in self.variables.items():
             has_value_flag = False
4 changes: 2 additions & 2 deletions xarray/core/indexing.py
@@ -331,7 +331,7 @@ class ExplicitIndexer:
     __slots__ = ("_key",)
 
     def __init__(self, key):
-        if type(self) is ExplicitIndexer:  # noqa
+        if type(self) is ExplicitIndexer:
             raise TypeError("cannot instantiate base ExplicitIndexer objects")
         self._key = tuple(key)
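The type(self) check above is a lightweight way to forbid instantiating the base class directly while letting subclasses through, without pulling in abc. A self-contained sketch with a hypothetical class name:

class BaseIndexer:
    def __init__(self, key):
        # True only when BaseIndexer itself is instantiated; for any
        # subclass, type(self) is the subclass and the guard passes.
        if type(self) is BaseIndexer:
            raise TypeError("cannot instantiate base BaseIndexer objects")
        self._key = tuple(key)


class OuterIndexer(BaseIndexer):
    pass


OuterIndexer((0, 1))   # fine
# BaseIndexer((0, 1))  # would raise TypeError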

@@ -1261,7 +1261,7 @@ def _indexing_array_and_key(self, key):
             array = self.array
             # We want 0d slices rather than scalars. This is achieved by
             # appending an ellipsis (see
-            # https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#detailed-notes).  # noqa
+            # https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#detailed-notes).
             key = key.tuple + (Ellipsis,)
         else:
             raise TypeError("unexpected key type: {}".format(type(key)))