diff --git a/distributed/dashboard/components/scheduler.py b/distributed/dashboard/components/scheduler.py
index 1dc529b19f8..2c3b80904c1 100644
--- a/distributed/dashboard/components/scheduler.py
+++ b/distributed/dashboard/components/scheduler.py
@@ -2662,7 +2662,7 @@ def _get_timeseries(self, restrict_to_existing=False):
             back = None
         # Remove any periods of zero compute at the front or back of the timeseries
         if len(self.plugin.compute):
-            agg = sum([np.array(v[front:]) for v in self.plugin.compute.values()])
+            agg = sum(np.array(v[front:]) for v in self.plugin.compute.values())
             front2 = len(agg) - len(np.trim_zeros(agg, trim="f"))
             front += front2
             back = len(np.trim_zeros(agg, trim="b")) - len(agg) or None
diff --git a/distributed/scheduler.py b/distributed/scheduler.py
index 4bd84851f7c..b6720ddab94 100644
--- a/distributed/scheduler.py
+++ b/distributed/scheduler.py
@@ -1019,7 +1019,7 @@ def __repr__(self):
     @property
     def nbytes_total(self):
         tg: TaskGroup
-        return sum([tg._nbytes_total for tg in self._groups])
+        return sum(tg._nbytes_total for tg in self._groups)
 
     def __len__(self):
         return sum(map(len, self._groups))
@@ -1027,7 +1027,7 @@ def __len__(self):
     @property
     def duration(self):
         tg: TaskGroup
-        return sum([tg._duration for tg in self._groups])
+        return sum(tg._duration for tg in self._groups)
 
     @property
     def types(self):
@@ -6816,9 +6816,9 @@ def workers_to_close(
         groups = groupby(key, parent._workers.values())
 
         limit_bytes = {
-            k: sum([ws._memory_limit for ws in v]) for k, v in groups.items()
+            k: sum(ws._memory_limit for ws in v) for k, v in groups.items()
         }
-        group_bytes = {k: sum([ws._nbytes for ws in v]) for k, v in groups.items()}
+        group_bytes = {k: sum(ws._nbytes for ws in v) for k, v in groups.items()}
 
         limit = sum(limit_bytes.values())
         total = sum(group_bytes.values())
@@ -7780,9 +7780,9 @@ def profile_to_figure(state):
             tasks_timings=tasks_timings,
             address=self.address,
             nworkers=len(parent._workers_dv),
-            threads=sum([ws._nthreads for ws in parent._workers_dv.values()]),
+            threads=sum(ws._nthreads for ws in parent._workers_dv.values()),
             memory=format_bytes(
-                sum([ws._memory_limit for ws in parent._workers_dv.values()])
+                sum(ws._memory_limit for ws in parent._workers_dv.values())
             ),
             code=code,
             dask_version=dask.__version__,
@@ -8027,8 +8027,8 @@ def adaptive_target(self, target_duration=None):
         cpu = max(1, cpu)
 
         # add more workers if more than 60% of memory is used
-        limit = sum([ws._memory_limit for ws in parent._workers_dv.values()])
-        used = sum([ws._nbytes for ws in parent._workers_dv.values()])
+        limit = sum(ws._memory_limit for ws in parent._workers_dv.values())
+        used = sum(ws._nbytes for ws in parent._workers_dv.values())
         memory = 0
         if used > 0.6 * limit and limit > 0:
            memory = 2 * len(parent._workers_dv)
@@ -8481,7 +8481,7 @@ def validate_task_state(ts: TaskState):
 
     if ts._actor:
         if ts._state == "memory":
-            assert sum([ts in ws._actors for ws in ts._who_has]) == 1
+            assert sum(ts in ws._actors for ws in ts._who_has) == 1
         if ts._state == "processing":
            assert ts in ts._processing_on.actors
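
Note: every hunk above makes the same change, passing a generator expression to sum() rather than a list comprehension, so no intermediate list is built before summing. A minimal standalone sketch of the pattern (illustrative only, not part of the patch; the names are made up):

    nbytes = [100, 200, 300]
    total_list = sum([n * 2 for n in nbytes])  # materializes a temporary list, then sums it
    total_gen = sum(n * 2 for n in nbytes)     # feeds values to sum() lazily, one at a time
    assert total_list == total_gen == 1200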