
Minor optimizations by using comprehensions
cclauss committed Apr 28, 2024
1 parent 8f3b470 commit de63bdf
Showing 6 changed files with 12 additions and 27 deletions.
6 changes: 1 addition & 5 deletions pyperf/__main__.py
@@ -349,11 +349,7 @@ def group_by_name(self):
     def group_by_name_ignored(self):
         names = set(self._group_by_name_names())
         for suite in self.suites:
-            ignored = []
-            for bench in suite:
-                if bench.get_name() not in names:
-                    ignored.append(bench)
-            if ignored:
+            if ignored := [bench for bench in suite if bench.get_name() not in names]:
                 yield (suite, ignored)


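The new line above uses an assignment expression (the := operator, Python 3.8+) so the filtered list is built and tested for emptiness in a single step. A minimal, self-contained sketch of that pattern, using made-up suite and benchmark names rather than pyperf's real objects:

    def ignored_benchmarks(suites, names):
        # suites maps a suite label to its benchmark names (illustrative data only).
        for suite, benches in suites.items():
            # Build the "not selected" list and test it for emptiness in one expression.
            if ignored := [bench for bench in benches if bench not in names]:
                yield suite, ignored

    suites = {"suite-a": ["bench1", "bench2"], "suite-b": ["bench3"]}
    print(list(ignored_benchmarks(suites, {"bench1", "bench3"})))  # [('suite-a', ['bench2'])]
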
4 changes: 1 addition & 3 deletions pyperf/_bench.py
@@ -558,9 +558,7 @@ def _filter_runs(self, include, only_runs):
             old_runs = self._runs
             max_index = len(old_runs) - 1
             runs = []
-            for index in only_runs:
-                if index <= max_index:
-                    runs.append(old_runs[index])
+            runs += [old_runs[index] for index in only_runs if index <= max_index]
         else:
             runs = self._runs
             max_index = len(runs) - 1
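The comprehension above keeps only the runs whose index actually exists, and the result has to land in runs (the list the surrounding code goes on to use), which the augmented assignment does while runs = [] stays as context. A standalone sketch of the same filtering pattern with illustrative data:

    old_runs = ["run-a", "run-b", "run-c"]
    only_runs = [0, 2, 5]                  # index 5 is out of range and must be skipped
    max_index = len(old_runs) - 1

    # Select the requested runs, silently ignoring indices past the end of the list.
    runs = [old_runs[index] for index in only_runs if index <= max_index]
    print(runs)                            # ['run-a', 'run-c']
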
17 changes: 6 additions & 11 deletions pyperf/_compare.py
@@ -163,10 +163,7 @@ def __init__(self, headers, rows):
                 self.widths[column] = max(self.widths[column], len(cell))
 
     def _render_line(self, char='-'):
-        parts = ['']
-        for width in self.widths:
-            parts.append(char * (width + 2))
-        parts.append('')
+        parts = [''] + [char * (width + 2) for width in self.widths] + ['']
         return '+'.join(parts)
 
     def _render_row(self, row):
@@ -250,7 +247,7 @@ def __init__(self, benchmarks, args):
         for results in self.all_results:
             for result in results:
                 self.tags.update(get_tags_for_result(result))
-        self.tags = sorted(list(self.tags))
+        self.tags = sorted(self.tags)
 
     def compare_benchmarks(self, name, benchmarks):
         min_speed = self.min_speed
@@ -280,9 +277,9 @@ def sort_key(results):
 
         self.all_results.sort(key=sort_key)
 
-        headers = ['Benchmark', self.all_results[0][0].ref.name]
-        for item in self.all_results[0]:
-            headers.append(item.changed.name)
+        headers = ['Benchmark', self.all_results[0][0].ref.name] + [
+            item.changed.name for item in self.all_results[0]
+        ]
 
         all_norm_means = [[] for _ in range(len(headers[2:]))]
 
@@ -427,9 +424,7 @@ def list_ignored(self):
     def compare_geometric_mean(self, all_results):
         # use a list since two filenames can be identical,
         # even if results are different
-        all_norm_means = []
-        for item in all_results[0]:
-            all_norm_means.append((item.changed.name, []))
+        all_norm_means = [(item.changed.name, []) for item in all_results[0]]
 
         for results in all_results:
             for index, result in enumerate(results):
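Of the four hunks above, the _render_line change is the easiest to picture: one dashed segment per column, padded by two for the cell's surrounding spaces, with empty strings at both ends so that '+'.join() also produces the leading and trailing '+'. A small standalone sketch with made-up column widths:

    def render_line(widths, char="-"):
        # Empty strings at both ends make join() emit a '+' at each edge.
        parts = [""] + [char * (width + 2) for width in widths] + [""]
        return "+".join(parts)

    print(render_line([9, 5]))   # +-----------+-------+
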
3 changes: 1 addition & 2 deletions pyperf/_cpu_utils.py
@@ -90,8 +90,7 @@ def parse_cpu_list(cpu_list):
             parts = part.split('-', 1)
             first = int(parts[0])
             last = int(parts[1])
-            for cpu in range(first, last + 1):
-                cpus.append(cpu)
+            cpus.extend(range(first, last + 1))
         else:
             cpus.append(int(part))
     cpus.sort()
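parse_cpu_list above and parse_run_list below apply the same change: the inner append loop becomes a single extend() over a range. A simplified, self-contained sketch of that parsing logic (not pyperf's exact function, and without its validation and error handling):

    def parse_int_list(text):
        # Accepts single values and inclusive ranges, e.g. "0-3,7" -> [0, 1, 2, 3, 7].
        items = []
        for part in text.split(','):
            if '-' in part:
                first, last = (int(x) for x in part.split('-', 1))
                # extend() consumes the whole range instead of appending one item at a time.
                items.extend(range(first, last + 1))
            else:
                items.append(int(part))
        items.sort()
        return items

    print(parse_int_list("0-3,7"))   # [0, 1, 2, 3, 7]
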
3 changes: 1 addition & 2 deletions pyperf/_utils.py
@@ -129,8 +129,7 @@ def parse_run_list(run_list):
                 parts = part.split('-', 1)
                 first = int(parts[0])
                 last = int(parts[1])
-                for run in range(first, last + 1):
-                    runs.append(run)
+                runs.extend(range(first, last + 1))
             else:
                 runs.append(int(part))
         except ValueError:
6 changes: 2 additions & 4 deletions pyperf/tests/test_bench.py
@@ -346,11 +346,9 @@ def test_remove_all_metadata(self):
                          {'name': 'bench', 'unit': 'byte'})
 
     def test_update_metadata(self):
-        runs = []
-        for value in (1.0, 2.0, 3.0):
-            runs.append(pyperf.Run((value,),
+        runs = [pyperf.Run((value,),
                                    metadata={'name': 'bench'},
-                                   collect_metadata=False))
+                                   collect_metadata=False) for value in (1.0, 2.0, 3.0)]
         bench = pyperf.Benchmark(runs)
         self.assertEqual(bench.get_metadata(),
                          {'name': 'bench'})
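The same comprehension can be exercised outside the test suite; assuming pyperf is installed, this standalone snippet mirrors the updated test and prints the merged metadata:

    import pyperf

    # One Run per timing value, as in the rewritten test_update_metadata.
    runs = [pyperf.Run((value,),
                       metadata={'name': 'bench'},
                       collect_metadata=False)
            for value in (1.0, 2.0, 3.0)]
    bench = pyperf.Benchmark(runs)
    print(bench.get_metadata())   # {'name': 'bench'}
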
