From fe98274184ec20a458afc7e11feae4bed559768e Mon Sep 17 00:00:00 2001
From: Marco Gorelli
Date: Fri, 28 Aug 2020 10:27:16 +0100
Subject: [PATCH] CLN remove unnecessary trailing commas to get ready for new
 version of black: generic -> blocks (#35950)

* pandas/core/groupby/generic.py
* pandas/core/groupby/groupby.py
* pandas/core/groupby/ops.py
* pandas/core/indexes/datetimelike.py
* pandas/core/indexes/interval.py
* pandas/core/indexes/numeric.py
* pandas/core/indexes/range.py
* pandas/core/internals/blocks.py
---
 pandas/core/groupby/generic.py      |  8 ++------
 pandas/core/groupby/groupby.py      |  8 +++-----
 pandas/core/groupby/ops.py          | 14 ++++----------
 pandas/core/indexes/datetimelike.py |  4 +---
 pandas/core/indexes/interval.py     |  4 ++--
 pandas/core/indexes/numeric.py      |  2 +-
 pandas/core/indexes/range.py        |  2 +-
 pandas/core/internals/blocks.py     | 30 ++++++++++++++---------------
 8 files changed, 29 insertions(+), 43 deletions(-)
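The "new version of black" in the subject adds the magic trailing comma rule: a trailing comma inside brackets tells black to keep a call exploded one argument per line, so pandas removes the now-unnecessary trailing commas first and lets the new black collapse these calls back onto a single line. The sketch below is only an illustration of that behaviour; the function `describe`, the variable `summary`, and the literal arguments are hypothetical and not taken from pandas:

    # Sketch only: hypothetical code illustrating black's magic trailing comma.
    def describe(name, values):
        return f"{name}: {values}"

    # Before this clean-up: the trailing comma pins the one-argument-per-line layout.
    summary = describe(
        "lengths",
        [1, 2, 3],
    )

    # With the trailing comma removed, black collapses the call onto a single
    # line (when it fits the line length), which is what the hunks below do.
    summary = describe("lengths", [1, 2, 3])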
- """ + """Helper function for last item that isn't NA.""" x = x.array[notna(x.array)] if len(x) == 0: return np.nan diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index c6171a55359fe..290680f380f5f 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -583,7 +583,7 @@ def transform(self, values, how: str, axis: int = 0, **kwargs): return self._cython_operation("transform", values, how, axis, **kwargs) def _aggregate( - self, result, counts, values, comp_ids, agg_func, min_count: int = -1, + self, result, counts, values, comp_ids, agg_func, min_count: int = -1 ): if agg_func is libgroupby.group_nth: # different signature from the others @@ -603,9 +603,7 @@ def _transform( return result - def agg_series( - self, obj: Series, func: F, *args, **kwargs, - ): + def agg_series(self, obj: Series, func: F, *args, **kwargs): # Caller is responsible for checking ngroups != 0 assert self.ngroups != 0 @@ -653,9 +651,7 @@ def _aggregate_series_fast(self, obj: Series, func: F): result, counts = grouper.get_result() return result, counts - def _aggregate_series_pure_python( - self, obj: Series, func: F, *args, **kwargs, - ): + def _aggregate_series_pure_python(self, obj: Series, func: F, *args, **kwargs): group_index, _, ngroups = self.group_info counts = np.zeros(ngroups, dtype=int) @@ -841,9 +837,7 @@ def groupings(self) -> "List[grouper.Grouping]": for lvl, name in zip(self.levels, self.names) ] - def agg_series( - self, obj: Series, func: F, *args, **kwargs, - ): + def agg_series(self, obj: Series, func: F, *args, **kwargs): # Caller is responsible for checking ngroups != 0 assert self.ngroups != 0 assert len(self.bins) > 0 # otherwise we'd get IndexError in get_result diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 0e8d7c1b866b8..efe1a853a9a76 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -81,9 +81,7 @@ def wrapper(left, right): DatetimeLikeArrayMixin, cache=True, ) -@inherit_names( - ["mean", "asi8", "freq", "freqstr", "_box_func"], DatetimeLikeArrayMixin, -) +@inherit_names(["mean", "asi8", "freq", "freqstr", "_box_func"], DatetimeLikeArrayMixin) class DatetimeIndexOpsMixin(ExtensionIndex): """ Common ops mixin to support a unified interface datetimelike Index. 
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index 9281f8017761d..5d309ef7cd515 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -182,10 +182,10 @@ def func(intvidx_self, other, sort=False):
 )
 @inherit_names(["set_closed", "to_tuples"], IntervalArray, wrap=True)
 @inherit_names(
-    ["__array__", "overlaps", "contains", "left", "right", "length"], IntervalArray,
+    ["__array__", "overlaps", "contains", "left", "right", "length"], IntervalArray
 )
 @inherit_names(
-    ["is_non_overlapping_monotonic", "mid", "closed"], IntervalArray, cache=True,
+    ["is_non_overlapping_monotonic", "mid", "closed"], IntervalArray, cache=True
 )
 class IntervalIndex(IntervalMixin, ExtensionIndex):
     _typ = "intervalindex"
diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py
index 731907993d08f..80bb9f10fadd9 100644
--- a/pandas/core/indexes/numeric.py
+++ b/pandas/core/indexes/numeric.py
@@ -436,7 +436,7 @@ def isin(self, values, level=None):
     def _is_compatible_with_other(self, other) -> bool:
         return super()._is_compatible_with_other(other) or all(
             isinstance(
-                obj, (ABCInt64Index, ABCFloat64Index, ABCUInt64Index, ABCRangeIndex),
+                obj, (ABCInt64Index, ABCFloat64Index, ABCUInt64Index, ABCRangeIndex)
             )
             for obj in [self, other]
         )
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py
index b85e2d3947cb1..f1457a9aac62b 100644
--- a/pandas/core/indexes/range.py
+++ b/pandas/core/indexes/range.py
@@ -82,7 +82,7 @@ class RangeIndex(Int64Index):
     # Constructors
 
     def __new__(
-        cls, start=None, stop=None, step=None, dtype=None, copy=False, name=None,
+        cls, start=None, stop=None, step=None, dtype=None, copy=False, name=None
     ):
 
         cls._validate_dtype(dtype)
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index c62be4f767f00..a38b47a4c2a25 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -724,7 +724,7 @@ def replace(
             # _can_hold_element checks have reduced this back to the
             # scalar case and we can avoid a costly object cast
             return self.replace(
-                to_replace[0], value, inplace=inplace, regex=regex, convert=convert,
+                to_replace[0], value, inplace=inplace, regex=regex, convert=convert
             )
 
         # GH 22083, TypeError or ValueError occurred within error handling
@@ -905,7 +905,7 @@ def setitem(self, indexer, value):
         return block
 
     def putmask(
-        self, mask, new, inplace: bool = False, axis: int = 0, transpose: bool = False,
+        self, mask, new, inplace: bool = False, axis: int = 0, transpose: bool = False
     ) -> List["Block"]:
         """
         putmask the data to the block; it is possible that we may create a
@@ -1292,7 +1292,7 @@ def shift(self, periods: int, axis: int = 0, fill_value=None):
         return [self.make_block(new_values)]
 
     def where(
-        self, other, cond, errors="raise", try_cast: bool = False, axis: int = 0,
+        self, other, cond, errors="raise", try_cast: bool = False, axis: int = 0
     ) -> List["Block"]:
         """
         evaluate the block; return result block(s) from the result
@@ -1366,7 +1366,7 @@ def where_func(cond, values, other):
                 # we are explicitly ignoring errors
                 block = self.coerce_to_target_dtype(other)
                 blocks = block.where(
-                    orig_other, cond, errors=errors, try_cast=try_cast, axis=axis,
+                    orig_other, cond, errors=errors, try_cast=try_cast, axis=axis
                 )
                 return self._maybe_downcast(blocks, "infer")
 
@@ -1605,7 +1605,7 @@ def set(self, locs, values):
         self.values = values
 
     def putmask(
-        self, mask, new, inplace: bool = False, axis: int = 0, transpose: bool = False,
+        self, mask, new, inplace: bool = False, axis: int = 0, transpose: bool = False
     ) -> List["Block"]:
         """
         See Block.putmask.__doc__
@@ -1816,7 +1816,7 @@ def diff(self, n: int, axis: int = 1) -> List["Block"]:
         return super().diff(n, axis)
 
     def shift(
-        self, periods: int, axis: int = 0, fill_value: Any = None,
+        self, periods: int, axis: int = 0, fill_value: Any = None
     ) -> List["ExtensionBlock"]:
         """
         Shift the block by `periods`.
@@ -1833,7 +1833,7 @@ def shift(
         ]
 
     def where(
-        self, other, cond, errors="raise", try_cast: bool = False, axis: int = 0,
+        self, other, cond, errors="raise", try_cast: bool = False, axis: int = 0
     ) -> List["Block"]:
         cond = _extract_bool_array(cond)
 
@@ -1945,7 +1945,7 @@ def _can_hold_element(self, element: Any) -> bool:
         )
 
     def to_native_types(
-        self, na_rep="", float_format=None, decimal=".", quoting=None, **kwargs,
+        self, na_rep="", float_format=None, decimal=".", quoting=None, **kwargs
     ):
         """ convert to our native types format """
         values = self.values
@@ -2369,7 +2369,7 @@ def replace(self, to_replace, value, inplace=False, regex=False, convert=True):
         if not np.can_cast(to_replace_values, bool):
             return self
         return super().replace(
-            to_replace, value, inplace=inplace, regex=regex, convert=convert,
+            to_replace, value, inplace=inplace, regex=regex, convert=convert
         )
 
 
@@ -2453,18 +2453,18 @@ def replace(self, to_replace, value, inplace=False, regex=False, convert=True):
 
         if not either_list and is_re(to_replace):
             return self._replace_single(
-                to_replace, value, inplace=inplace, regex=True, convert=convert,
+                to_replace, value, inplace=inplace, regex=True, convert=convert
             )
         elif not (either_list or regex):
             return super().replace(
-                to_replace, value, inplace=inplace, regex=regex, convert=convert,
+                to_replace, value, inplace=inplace, regex=regex, convert=convert
             )
         elif both_lists:
             for to_rep, v in zip(to_replace, value):
                 result_blocks = []
                 for b in blocks:
                     result = b._replace_single(
-                        to_rep, v, inplace=inplace, regex=regex, convert=convert,
+                        to_rep, v, inplace=inplace, regex=regex, convert=convert
                     )
                     result_blocks = _extend_blocks(result, result_blocks)
                 blocks = result_blocks
@@ -2475,18 +2475,18 @@ def replace(self, to_replace, value, inplace=False, regex=False, convert=True):
                 result_blocks = []
                 for b in blocks:
                     result = b._replace_single(
-                        to_rep, value, inplace=inplace, regex=regex, convert=convert,
+                        to_rep, value, inplace=inplace, regex=regex, convert=convert
                     )
                     result_blocks = _extend_blocks(result, result_blocks)
                 blocks = result_blocks
             return result_blocks
 
         return self._replace_single(
-            to_replace, value, inplace=inplace, convert=convert, regex=regex,
+            to_replace, value, inplace=inplace, convert=convert, regex=regex
         )
 
     def _replace_single(
-        self, to_replace, value, inplace=False, regex=False, convert=True, mask=None,
+        self, to_replace, value, inplace=False, regex=False, convert=True, mask=None
     ):
         """
         Replace elements by the given value.