Add command line option to run experiments with perf
Use 'env' as a separate key in the YAML file that holds the environment variables
Add Hotness rows to the source-code HTML files
Remove the Hotness column
Only show the Hotness column if we run with perf
Fix a formatting bug in the hotness %; it now shows up to 3 decimal places
Moustafa Kahla committed Jun 12, 2021
1 parent adb400d commit 4c2a489
Showing 2 changed files with 94 additions and 55 deletions.
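For context, here is a minimal sketch of what a program entry in the harness input YAML and the resulting launch command might look like after this change; the program name, paths, environment string, and build modes below are hypothetical, not taken from the repository.

import yaml

# Hypothetical config entry: run() now reads an 'env' key unconditionally and
# prepends a perf invocation when the new -pc/--perf option is passed.
config = yaml.safe_load('''
myprog:
  env: "OMP_NUM_THREADS=4"
  run: "./myprog"
  input: "input.txt"
  bin: "myprog"
  build: [gcc, clang]
''')

with_perf = True  # set by args.perf in main()
perf_command = 'perf record --freq=100000 -o perf.data' if with_perf else ''
exe = config['myprog']['env'] + ' ' + perf_command + ' ' \
      + config['myprog']['run'] + ' ' + config['myprog']['input']
print(exe)  # OMP_NUM_THREADS=4 perf record --freq=100000 -o perf.data ./myprog input.txt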
46 changes: 26 additions & 20 deletions harness.py
@@ -72,9 +72,10 @@ def invoke_optdiff(yaml_file_1, yaml_file_2, filter_only, out_yaml):
out_yaml) # output yaml


def run(config, program, reps, dry):
def run(config, program, reps, dry,with_perf):
print('Launching program', program, 'with modes', config[program]['build'])
exe = config[program]['run'] + ' ' + config[program]['input']
perf_command='perf record --freq=100000 -o perf.data' if with_perf else ''
exe = config[program]['env']+' '+perf_command+ ' '+ config[program]['run'] + ' ' + config[program]['input']
os.makedirs('./results', exist_ok=True)
results = {program: {}}
try:
@@ -114,7 +115,7 @@ def run(config, program, reps, dry):
# out=str(p.stdout)
# err=str(p.stderr)
output = out + err
print(err)
print(output)
# print('Out', p.stdout.decode('utf-8') )
# print('Err', p.stderr.decode('utf-8') )
with open('%s/stdout-%d.txt' % (bin_dir, i), 'w') as f:
@@ -145,12 +146,14 @@ def run(config, program, reps, dry):

with open('./results/results-%s.yaml' % (program), 'w') as f:
yaml.dump(results, f)
hotlines = get_hot_lines_percentage(config[program]['bin'], bin_dir)
reports_dir = './reports/' + program
lines_hotness_path = os.path.join(reports_dir, '{}.lines_hotness.yaml'.format(mode))
print('WRITING HOTNESS OF SRC CODE LINES TO:', lines_hotness_path)
with open(lines_hotness_path, 'w') as f:
yaml.dump(hotlines, f)
# if we run with perf, we generate the report
if with_perf:
hotlines = get_hot_lines_percentage(config[program]['bin'], bin_dir)
reports_dir = './reports/' + program
lines_hotness_path = os.path.join(reports_dir, '{}.lines_hotness.yaml'.format(mode))
print('WRITING HOTNESS OF SRC CODE LINES TO:', lines_hotness_path)
with open(lines_hotness_path, 'w') as f:
yaml.dump(hotlines, f)


def show_stats(config, program):
@@ -255,9 +258,10 @@ def compile_and_install(config, program, repo_dir, mode):
shutil.copy(build_dir + '/' + copy, bin_dir)


def generate_diff_reports(report_dir, builds, mode):
def generate_diff_reports(report_dir, builds, mode, with_perf):
out_yaml = report_dir + '%s-%s-%s.opt.yaml' % (builds[0], builds[1], mode)
output_html_dir = report_dir + 'html-%s-%s-%s' % (builds[0], builds[1], mode)
build_for_hotness = builds if with_perf else []

def generate_diff_yaml():
print('Creating diff remark YAML files...')
@@ -286,7 +290,7 @@ def generate_diff_html():
output_html_dir,
1,
True,
builds)
build_for_hotness)
print('Done generating compilation report for builds %s|%s mode %s' % (builds[0], builds[1], mode))
except:
print('Failed generating compilation report for builds %s|%s mode %s' % (builds[0], builds[1], mode))
@@ -309,18 +313,19 @@ def generate_diff_html():
generate_diff_html()


def generate_remark_reports(config, program):
def generate_remark_reports(config, program, with_perf):
report_dir = './reports/' + program + '/'

def generate_html():
print('Creating HTML report output for build %s ...' % (build))
build_for_hotness= [build] if with_perf else []
try:
invoke_optviewer(
[in_yaml],
output_html_dir,
1,
True,
[build])
build_for_hotness)
print('Done generating compilation reports!')
except:
print('Failed generating compilation reports (expects build was ' \
@@ -340,10 +345,10 @@ def generate_html():
# Create repors for 2-combinations of build options.
combos = itertools.combinations(config[program]['build'], 2)
for builds in combos:
generate_diff_reports(report_dir, builds, 'all')
generate_diff_reports(report_dir, builds, 'analysis')
generate_diff_reports(report_dir, builds, 'missed')
generate_diff_reports(report_dir, builds, 'passed')
generate_diff_reports(report_dir, builds, 'all',with_perf)
generate_diff_reports(report_dir, builds, 'analysis',with_perf)
generate_diff_reports(report_dir, builds, 'missed',with_perf)
generate_diff_reports(report_dir, builds, 'passed',with_perf)


def fetch(config, program):
@@ -383,7 +388,7 @@ def main():
parser.add_argument('-s', '--stats', dest='stats', action='store_true', help='show run statistics')
parser.add_argument('-d', '--dry-run', dest='dry', action='store_true', help='enable dry run')
parser.add_argument('-v', '--verbose', dest='verbose', action='store_true', help='verbose printing')
# parser.add_argument('-pp', '--perf', dest='perf', action='store_true', help='use perf')
parser.add_argument('-pc', '--perf', dest='perf', action='store_true', help='use perf')
args = parser.parse_args()

with open(args.input, 'r') as f:
@@ -397,6 +402,7 @@ def main():
print('args.build', args.build)
print('args.run', args.run)
print('args.generate', args.generate)
print('args.perf', args.perf)

programs = []
if args.programs:
@@ -413,9 +419,9 @@ def main():
if args.build:
build(config, p)
if args.run:
run(config, p, args.run, args.dry)
run(config, p, args.run, args.dry,args.perf)
if args.generate:
generate_remark_reports(config, p)
generate_remark_reports(config, p,args.perf)
if args.stats:
show_stats(config, p)

103 changes: 68 additions & 35 deletions opt-viewer/optviewer.py
@@ -27,6 +27,7 @@
import optrecord

import yaml
from yaml import CLoader

desc = '''Generate HTML output to visualize optimization records from the YAML files
generated with -fsave-optimization-record and -fdiagnostics-show-hotness.
@@ -51,9 +52,26 @@ def suppress(remark):
return remark.getArgDict()['Callee'][0].startswith(('\"Swift.', '\"specialized Swift.'))
return False

def get_hotness_lines(output_dir,builds):
perf_hotness = {}
#print('available builds renderer', builds)
for build in builds:
perf_hotness_path = os.path.join(output_dir, '..', "{}.lines_hotness.yaml".format(build))
#print('perf hotness path',perf_hotness_path)
f = open(perf_hotness_path)
#print(f)
try:
hotness_dict = yaml.load(f,Loader=CLoader)
except Exception as e:
print(e)
#print('file loaded:\n\n\n',hotness_dict)
perf_hotness[build] = hotness_dict
#print('build hotnes:\n\n\n',perf_hotness[build])
f.close()
return perf_hotness

class SourceFileRenderer:
def __init__(self, source_dir, output_dir, filename, no_highlight):
def __init__(self, source_dir, output_dir, filename, no_highlight,builds=[]):
self.filename = filename
existing_filename = None
# print('filename', filename) #ggout
@@ -79,6 +97,10 @@ def __init__(self, source_dir, output_dir, filename, no_highlight):

self.html_formatter = HtmlFormatter(encoding='utf-8')
self.cpp_lexer = CppLexer(stripnl=False)
self.builds = builds
# We assume that we comparison is between each pair of builds
self.perf_hotness = get_hotness_lines(output_dir,builds)
#input("builds at srcfile: {}".format(builds))

def render_source_lines(self, stream, line_remarks):
file_text = stream.read()
@@ -107,13 +129,25 @@ def render_source_lines(self, stream, line_remarks):
html_highlighted = html_highlighted.replace('</pre></div>', '')

for (linenum, html_line) in enumerate(html_highlighted.split('\n'), start=1):
print(u'''
html_src_line=u'''
<tr>
<td><a name=\"L{linenum}\">{linenum}</a></td>
<td></td>'''.format(**locals())
# add place holder for every hotness
for _ in range(len(self.builds)):
html_src_line+=u'''
<td></td>'''
html_src_line += u'''
<td><div class="highlight"><pre>{html_line}</pre></div></td>
</tr>'''.format(**locals())
print(html_src_line, file=self.stream)
"""print(u'''
<tr>
<td><a name=\"L{linenum}\">{linenum}</a></td>
<td></td>
<td></td>
<td><div class="highlight"><pre>{html_line}</pre></div></td>
</tr>'''.format(**locals()), file=self.stream)
</tr>'''.format(**locals()), file=self.stream)"""

for remark in line_remarks.get(linenum, []):
if not suppress(remark):
@@ -132,20 +166,28 @@ def render_inline_remarks(self, r, line):
indent = line[:max(r.Column, 1) - 1]
indent = re.sub('\S', ' ', indent)

print(u'''
entery=u'''
<tr>
<td></td>
<td>{r.RelativeHotness}</td>
<td></td>'''
for build in self.perf_hotness:
file_name, line_num, column = r.DebugLocString.split(':')
file_and_line = file_name + ':' + line_num
entery_hotness = 0 if file_and_line not in self.perf_hotness[build] else self.perf_hotness[build][
file_and_line]
entery_hotness = "{:.3f}%".format(entery_hotness)
entery += u'''
<td>{entery_hotness}</td>'''.format(**locals())
entery+=u'''
<td class=\"column-entry-{r.color}\">{r.PassWithDiffPrefix}</td>
<td><pre style="display:inline">{indent}</pre><span class=\"column-entry-yellow\"> {r.message}&nbsp;</span></td>
<td class=\"column-entry-yellow\">{inlining_context}</td>
</tr>'''.format(**locals()), file=self.stream)
</tr>'''.format(**locals())
print(entery, file=self.stream)

def render(self, line_remarks):
if not self.source_stream:
return

print('''
header1=u'''
<html>
<title>{}</title>
<meta charset="utf-8" />
@@ -157,14 +199,18 @@ def render(self, line_remarks):
<table class="source">
<thead>
<tr>
<th>Line</td>
<th>Hotness</td>
<th>Line</td>'''.format(os.path.basename(self.filename))
for build in self.perf_hotness:
header1 += u'''
<th>{} Perf Hotness</td>'''.format(build)
header1+=u'''
<th>Optimization</td>
<th>Source</td>
<th>Inline Context</td>
</tr>
</thead>
<tbody>'''.format(os.path.basename(self.filename)), file=self.stream)
<tbody>'''
print(header1, file=self.stream)
self.render_source_lines(self.source_stream, line_remarks)

print('''
@@ -184,21 +230,8 @@ def __init__(self, output_dir, should_display_hotness, max_hottest_remarks_on_in
# self.perf_hotness_seq = perf_hotness_seq
self.builds = builds
# We assume that we comparison is between each pair of builds
self.perf_hotness = {}
print('available builds in index renderer', builds)
for build in self.builds:
perf_hotness_path = os.path.join(output_dir, '..', "{}.lines_hotness.yaml".format(build))
# print('perf hotness path',perf_hotness_path)
f = open(perf_hotness_path)
# print(f)
try:
hotness_dict = yaml.load(f)
except Exception as e:
print(e)
# print('file loaded:\n\n\n',hotness_dict)
self.perf_hotness[build] = hotness_dict
# print('build hotnes:\n\n\n',self.perf_hotness[build])
f.close()
self.perf_hotness = get_hotness_lines(output_dir,builds)


def render_entry(self, r, odd):
escaped_name = html.escape(r.DemangledFunctionName)
@@ -212,15 +245,15 @@ def render_entry(self, r, odd):
# perf_hotness = self.perf_hotness_omp
entery = u'''
<tr>
<td class=\"column-entry-{odd}\"><a href={r.Link}>{r.DebugLocString}</a></td>
<td class=\"column-entry-{odd}\">{r.RelativeHotness}</td>'''.format(**locals())
<td class=\"column-entry-{odd}\"><a href={r.Link}>{r.DebugLocString}</a></td>'''.format(**locals())

# add perf hotness for each build
for build in self.perf_hotness:
file_name, line_num, column = r.DebugLocString.split(':')
file_and_line = file_name + ':' + line_num
entery_hotness = 0 if file_and_line not in self.perf_hotness[build] else self.perf_hotness[build][
file_and_line]
entery_hotness ="{:.3f}%".format(entery_hotness)
entery += u'''
<td class=\"column-entry-{odd}\">{entery_hotness}</td>'''.format(**locals())

@@ -243,12 +276,11 @@ def render(self, all_remarks):
<div class="centered">
<table>
<tr>
<td>Source Location</td>
<td>Hotness</td>'''
<td>Source Location</td>'''
# print('header is now: ',header)
for build in self.perf_hotness:
header += u'''
<td>{} PERF Hotness</td>'''.format(build)
<td>{} perf Hotness</td>'''.format(build)
# print('header is now: ',header)
header += u'''<td>Function</td>
<td>Pass</td>
@@ -269,11 +301,12 @@ def render(self, all_remarks):
</html>''', file=self.stream)


def _render_file(source_dir, output_dir, ctx, no_highlight, entry):
def _render_file(source_dir, output_dir, ctx, no_highlight,builds, entry):
#input('builds inside srcfile render {}'.format(builds))
global context
context = ctx
filename, remarks = entry
SourceFileRenderer(source_dir, output_dir, filename, no_highlight).render(remarks)
SourceFileRenderer(source_dir, output_dir, filename, no_highlight,builds).render(remarks)


def map_remarks(all_remarks):
@@ -322,7 +355,7 @@ def generate_report(all_remarks,
shutil.copy(os.path.join(os.path.dirname(os.path.realpath(__file__)),
"style.css"), output_dir)

_render_file_bound = functools.partial(_render_file, source_dir, output_dir, context, no_highlight)
_render_file_bound = functools.partial(_render_file, source_dir, output_dir, context, no_highlight, builds)
if should_print_progress:
print('Rendering HTML files...')
optpmap.pmap(_render_file_bound,
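
As a rough illustration of the per-build hotness data these optviewer changes consume (the file name and percentages below are made up): get_hotness_lines() loads one <build>.lines_hotness.yaml per build, and the renderers look each remark up by a 'file:line' key and format the value to three decimal places.

import yaml

# Hypothetical contents of reports/<program>/<build>.lines_hotness.yaml,
# assumed to map 'file:line' keys to hotness percentages.
hotness = yaml.safe_load('''
"main.cpp:42": 12.431
"main.cpp:87": 3.0
''')

file_and_line = 'main.cpp:42'
entry_hotness = 0 if file_and_line not in hotness else hotness[file_and_line]
print("{:.3f}%".format(entry_hotness))  # prints 12.431%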
