diff --git a/pyperf/__main__.py b/pyperf/__main__.py
index aee19d18..7d3b671d 100644
--- a/pyperf/__main__.py
+++ b/pyperf/__main__.py
@@ -349,11 +349,7 @@ def group_by_name(self):
     def group_by_name_ignored(self):
         names = set(self._group_by_name_names())
         for suite in self.suites:
-            ignored = []
-            for bench in suite:
-                if bench.get_name() not in names:
-                    ignored.append(bench)
-            if ignored:
+            if ignored := [bench for bench in suite if bench.get_name() not in names]:
                 yield (suite, ignored)
 
 
diff --git a/pyperf/_bench.py b/pyperf/_bench.py
index fae94bbf..b55031b5 100644
--- a/pyperf/_bench.py
+++ b/pyperf/_bench.py
@@ -557,10 +557,7 @@ def _filter_runs(self, include, only_runs):
         if include:
             old_runs = self._runs
             max_index = len(old_runs) - 1
-            runs = []
-            for index in only_runs:
-                if index <= max_index:
-                    runs.append(old_runs[index])
+            runs = [old_runs[index] for index in only_runs if index <= max_index]
         else:
             runs = self._runs
             max_index = len(runs) - 1
diff --git a/pyperf/_compare.py b/pyperf/_compare.py
index dc2afdc9..e63986e7 100644
--- a/pyperf/_compare.py
+++ b/pyperf/_compare.py
@@ -163,10 +163,7 @@ def __init__(self, headers, rows):
                 self.widths[column] = max(self.widths[column], len(cell))
 
     def _render_line(self, char='-'):
-        parts = ['']
-        for width in self.widths:
-            parts.append(char * (width + 2))
-        parts.append('')
+        parts = [''] + [char * (width + 2) for width in self.widths] + ['']
         return '+'.join(parts)
 
     def _render_row(self, row):
@@ -250,7 +247,7 @@ def __init__(self, benchmarks, args):
         for results in self.all_results:
             for result in results:
                 self.tags.update(get_tags_for_result(result))
-        self.tags = sorted(list(self.tags))
+        self.tags = sorted(self.tags)
 
     def compare_benchmarks(self, name, benchmarks):
         min_speed = self.min_speed
@@ -280,9 +277,9 @@ def sort_key(results):
 
         self.all_results.sort(key=sort_key)
 
-        headers = ['Benchmark', self.all_results[0][0].ref.name]
-        for item in self.all_results[0]:
-            headers.append(item.changed.name)
+        headers = ['Benchmark', self.all_results[0][0].ref.name] + [
+            item.changed.name for item in self.all_results[0]
+        ]
 
         all_norm_means = [[] for _ in range(len(headers[2:]))]
 
@@ -427,9 +424,7 @@ def list_ignored(self):
     def compare_geometric_mean(self, all_results):
         # use a list since two filenames can be identical,
         # even if results are different
-        all_norm_means = []
-        for item in all_results[0]:
-            all_norm_means.append((item.changed.name, []))
+        all_norm_means = [(item.changed.name, []) for item in all_results[0]]
 
         for results in all_results:
             for index, result in enumerate(results):
diff --git a/pyperf/_cpu_utils.py b/pyperf/_cpu_utils.py
index f810df25..315e6cec 100644
--- a/pyperf/_cpu_utils.py
+++ b/pyperf/_cpu_utils.py
@@ -90,8 +90,7 @@ def parse_cpu_list(cpu_list):
             parts = part.split('-', 1)
             first = int(parts[0])
             last = int(parts[1])
-            for cpu in range(first, last + 1):
-                cpus.append(cpu)
+            cpus.extend(range(first, last + 1))
         else:
             cpus.append(int(part))
     cpus.sort()
diff --git a/pyperf/_utils.py b/pyperf/_utils.py
index 87bb1a7f..3984bce6 100644
--- a/pyperf/_utils.py
+++ b/pyperf/_utils.py
@@ -129,8 +129,7 @@ def parse_run_list(run_list):
                 parts = part.split('-', 1)
                 first = int(parts[0])
                 last = int(parts[1])
-                for run in range(first, last + 1):
-                    runs.append(run)
+                runs.extend(range(first, last + 1))
             else:
                 runs.append(int(part))
     except ValueError:
diff --git a/pyperf/tests/test_bench.py b/pyperf/tests/test_bench.py
index 8fd4e2a1..3cae3031 100644
--- a/pyperf/tests/test_bench.py
+++ b/pyperf/tests/test_bench.py
@@ -346,11 +346,9 @@ def test_remove_all_metadata(self):
                          {'name': 'bench', 'unit': 'byte'})
 
     def test_update_metadata(self):
-        runs = []
-        for value in (1.0, 2.0, 3.0):
-            runs.append(pyperf.Run((value,),
-                                   metadata={'name': 'bench'},
-                                   collect_metadata=False))
+        runs = [pyperf.Run((value,),
+                           metadata={'name': 'bench'},
+                           collect_metadata=False) for value in (1.0, 2.0, 3.0)]
         bench = pyperf.Benchmark(runs)
         self.assertEqual(bench.get_metadata(),
                          {'name': 'bench'})