Skip to content

Commit

Permalink
refactor out Graph-500 specific functions
Browse files Browse the repository at this point in the history
  • Loading branch information
jnke2016 committed Aug 3, 2021
1 parent e07a65d commit aa90509
Showing 1 changed file with 0 additions and 96 deletions.
96 changes: 0 additions & 96 deletions benchmarks/python_e2e/reporting.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,38 +56,6 @@ def generate_console_report(benchmark_result_list):

return retstring


# FIXME: refactor this out since it is Graph500-specific
def generate_graph500_console_report(benchmark_result_list):
    """
    Return a string suitable for printing to the console containing the
    benchmark run results.
    """
    def _format_line(result):
        # "name(params)" label left-justified in a 30-char column, followed
        # by the runtime shown with 6 significant digits.
        label = f"{result.name}({__namify_dict(result.params)})"
        return f"{label:<30}{result.runtime:.6}\n"

    # Results are assumed to be in run order, which for Graph500 is a single
    # graph_create run, then one run per BFS search key, then one per SSSP
    # search key.
    lines = [_format_line(benchmark_result_list[0])]

    rest = benchmark_result_list[1:]
    midpoint = len(rest) // 2
    # First half of the remaining results is BFS, second half is SSSP; each
    # group is preceded by a separator rule.
    for algo_results in (rest[:midpoint], rest[midpoint:]):
        lines.append(f"{'-' * 60}\n")
        lines.extend(_format_line(result) for result in algo_results)

    return "".join(lines)


def update_csv_report(csv_results_file, benchmark_result_list, ngpus):
"""
Update (or create if DNE) csv_results_file as a CSV file containing the
Expand Down Expand Up @@ -135,67 +103,3 @@ def update_csv_report(csv_results_file, benchmark_result_list, ngpus):
writer.writeheader()
for row in rows:
writer.writerow(row)


# FIXME: refactor this out since it is Graph500-specific
def update_graph500_csv_report(csv_results_file, benchmark_result_list, ngpus):
    """
    Update (or create if DNE) csv_results_file as a CSV file containing the
    benchmark results. Rows are the overall min/max/average for each of the
    timed kernels, columns are number of GPUs.

    Parameters
    ----------
    csv_results_file : str
        Path of the CSV file to create or update in place.
    benchmark_result_list : list
        Result objects, each with a ``.name`` (str) and ``.runtime`` (number)
        attribute.
    ngpus : int
        Number of GPUs used for this run; becomes the ``ngpus_<n>`` column.
    """
    times = {}       # "<name>_min"/"<name>_max"/"<name>_mean" -> runtime
    all_times = {}   # name -> list of every runtime observed for that name
    all_names = set()

    ngpus_key = f"ngpus_{ngpus}"

    # Accumulate per-name min/max while collecting all runtimes for the mean.
    for r in benchmark_result_list:
        name = r.name
        all_names.add(name)
        min_time_name = f"{name}_min"
        max_time_name = f"{name}_max"

        min_time = times.get(min_time_name)

        if r.runtime > times.get(max_time_name, 0):
            times[max_time_name] = r.runtime
        if min_time is None or r.runtime < min_time:
            times[min_time_name] = r.runtime
        all_times.setdefault(name, []).append(r.runtime)

    for name in all_times:
        times[f"{name}_mean"] = sum(all_times[name]) / len(all_times[name])

    rows = []

    if path.exists(csv_results_file):
        # newline="" is required by the csv module for correct newline
        # handling (without it, extra blank rows appear on Windows).
        with open(csv_results_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                # NOTE(review): assumes every "name" in the existing file also
                # appears in this run's results; a mismatch raises KeyError.
                # Verify callers always rerun the same set of benchmarks.
                row[ngpus_key] = times[row["name"]]
                rows.append(row)

    # Build fresh rows when the file did not exist, or when it existed but
    # contained no data rows (the latter previously crashed with IndexError
    # on rows[0] below).
    if not rows:
        for name in sorted(all_names):
            for stat in ("min", "max", "mean"):
                stat_name = f"{name}_{stat}"
                rows.append({"name": stat_name,
                             ngpus_key: times[stat_name],
                             })

    if not rows:
        # Nothing to write (no results and no pre-existing data); previously
        # this crashed on rows[0] — treat it as a no-op instead.
        return

    with open(csv_results_file, "w", newline="") as csv_file:
        field_names = sorted(rows[0].keys())
        writer = csv.DictWriter(csv_file, fieldnames=field_names)
        writer.writeheader()
        for row in rows:
            writer.writerow(row)

0 comments on commit aa90509

Please sign in to comment.