From 4c2a4892232c87ee18da5c8a6cc52789b4d0bfed Mon Sep 17 00:00:00 2001
From: Moustafa Kahla
Date: Fri, 11 Jun 2021 12:52:05 -0400
Subject: [PATCH] Add command line option to run experiments with perf

Use 'env' as a separate key in the config YAML file; it holds the
environment variables prepended to the run command.
Add per-build hotness columns to the source-code HTML files.
Remove the aggregate Hotness column; show the per-build hotness columns
only when running with perf.
Fix the formatting of the hotness percentage; it now shows up to 3
decimal places.
---
 harness.py              |  46 ++++++++++--------
 opt-viewer/optviewer.py | 103 ++++++++++++++++++++++++++--------------
 2 files changed, 94 insertions(+), 55 deletions(-)

diff --git a/harness.py b/harness.py
index 2156b49..cb7a27f 100755
--- a/harness.py
+++ b/harness.py
@@ -72,9 +72,10 @@ def invoke_optdiff(yaml_file_1, yaml_file_2, filter_only, out_yaml):
                     out_yaml)  # output yaml
 
 
-def run(config, program, reps, dry):
+def run(config, program, reps, dry, with_perf):
     print('Launching program', program, 'with modes', config[program]['build'])
-    exe = config[program]['run'] + ' ' + config[program]['input']
+    perf_command = 'perf record --freq=100000 -o perf.data' if with_perf else ''
+    exe = config[program]['env'] + ' ' + perf_command + ' ' + config[program]['run'] + ' ' + config[program]['input']
     os.makedirs('./results', exist_ok=True)
     results = {program: {}}
     try:
@@ -114,7 +115,7 @@
             # out=str(p.stdout)
             # err=str(p.stderr)
             output = out + err
-            print(err)
+            print(output)
             # print('Out', p.stdout.decode('utf-8') )
             # print('Err', p.stderr.decode('utf-8') )
             with open('%s/stdout-%d.txt' % (bin_dir, i), 'w') as f:
@@ -145,12 +146,14 @@ def run(config, program, reps, dry):
     with open('./results/results-%s.yaml' % (program), 'w') as f:
         yaml.dump(results, f)
 
-    hotlines = get_hot_lines_percentage(config[program]['bin'], bin_dir)
-    reports_dir = './reports/' + program
-    lines_hotness_path = os.path.join(reports_dir, '{}.lines_hotness.yaml'.format(mode))
-    print('WRITING HOTNESS OF SRC CODE LINES TO:', lines_hotness_path)
-    with open(lines_hotness_path, 'w') as f:
-        yaml.dump(hotlines, f)
+    # If we ran with perf, generate the line-hotness report.
+    if with_perf:
+        hotlines = get_hot_lines_percentage(config[program]['bin'], bin_dir)
+        reports_dir = './reports/' + program
+        lines_hotness_path = os.path.join(reports_dir, '{}.lines_hotness.yaml'.format(mode))
+        print('WRITING HOTNESS OF SRC CODE LINES TO:', lines_hotness_path)
+        with open(lines_hotness_path, 'w') as f:
+            yaml.dump(hotlines, f)
 
 
 def show_stats(config, program):
@@ -255,9 +258,10 @@ def compile_and_install(config, program, repo_dir, mode):
         shutil.copy(build_dir + '/' + copy, bin_dir)
 
 
-def generate_diff_reports(report_dir, builds, mode):
+def generate_diff_reports(report_dir, builds, mode, with_perf):
     out_yaml = report_dir + '%s-%s-%s.opt.yaml' % (builds[0], builds[1], mode)
     output_html_dir = report_dir + 'html-%s-%s-%s' % (builds[0], builds[1], mode)
+    build_for_hotness = builds if with_perf else []
 
     def generate_diff_yaml():
         print('Creating diff remark YAML files...')
@@ -286,7 +290,7 @@ def generate_diff_html():
                 output_html_dir,
                 1,
                 True,
-                builds)
+                build_for_hotness)
             print('Done generating compilation report for builds %s|%s mode %s' % (builds[0], builds[1], mode))
         except:
             print('Failed generating compilation report for builds %s|%s mode %s' % (builds[0], builds[1], mode))
@@ -309,18 +313,19 @@ def generate_diff_html():
     generate_diff_html()
 
 
-def generate_remark_reports(config, program):
+def generate_remark_reports(config, program, with_perf):
     report_dir = './reports/' + program + '/'
 
     def generate_html():
         print('Creating HTML report output for build %s ...' % (build))
+        build_for_hotness = [build] if with_perf else []
         try:
             invoke_optviewer(
                 [in_yaml],
                 output_html_dir,
                 1,
                 True,
-                [build])
+                build_for_hotness)
             print('Done generating compilation reports!')
         except:
             print('Failed generating compilation reports (expects build was ' \
@@ -340,10 +345,10 @@ def generate_html():
     # Create reports for 2-combinations of build options.
     combos = itertools.combinations(config[program]['build'], 2)
     for builds in combos:
-        generate_diff_reports(report_dir, builds, 'all')
-        generate_diff_reports(report_dir, builds, 'analysis')
-        generate_diff_reports(report_dir, builds, 'missed')
-        generate_diff_reports(report_dir, builds, 'passed')
+        generate_diff_reports(report_dir, builds, 'all', with_perf)
+        generate_diff_reports(report_dir, builds, 'analysis', with_perf)
+        generate_diff_reports(report_dir, builds, 'missed', with_perf)
+        generate_diff_reports(report_dir, builds, 'passed', with_perf)
 
 
 def fetch(config, program):
@@ -383,7 +388,7 @@ def main():
     parser.add_argument('-s', '--stats', dest='stats', action='store_true', help='show run statistics')
     parser.add_argument('-d', '--dry-run', dest='dry', action='store_true', help='enable dry run')
    parser.add_argument('-v', '--verbose', dest='verbose', action='store_true', help='verbose printing')
-    # parser.add_argument('-pp', '--perf', dest='perf', action='store_true', help='use perf')
+    parser.add_argument('-pc', '--perf', dest='perf', action='store_true', help='use perf')
     args = parser.parse_args()
 
     with open(args.input, 'r') as f:
@@ -397,6 +402,7 @@
         print('args.build', args.build)
         print('args.run', args.run)
         print('args.generate', args.generate)
+        print('args.perf', args.perf)
 
     programs = []
     if args.programs:
@@ -413,9 +419,9 @@
         if args.build:
             build(config, p)
         if args.run:
-            run(config, p, args.run, args.dry)
+            run(config, p, args.run, args.dry, args.perf)
         if args.generate:
-            generate_remark_reports(config, p)
+            generate_remark_reports(config, p, args.perf)
         if args.stats:
             show_stats(config, p)
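Note on the new 'env' key: run() now builds the launch command as
"<env> [perf record ...] <run> <input>", so every program entry in the
config YAML needs an 'env' string (the environment-variable prefix for
the command line; use an empty string if none). A minimal sketch of an
entry follows; the program name and values are hypothetical, only the
keys ('build', 'bin', 'run', 'input', 'env') are the ones harness.py
actually reads:

    myprogram:
      build: [baseline, omp]
      bin: myprogram.exe
      run: ./myprogram.exe
      input: input.dat
      env: OMP_NUM_THREADS=4

A perf-enabled run is then requested with the new flag, for example
"./harness.py --input config.yaml --run 3 --generate --perf" (the long
option names for the pre-existing flags are assumed here from their
dest names; only -s, -d, -v, and -pc are visible in this patch).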
diff --git a/opt-viewer/optviewer.py b/opt-viewer/optviewer.py
index fc35ab5..f183fad 100755
--- a/opt-viewer/optviewer.py
+++ b/opt-viewer/optviewer.py
@@ -27,6 +27,7 @@
 import optrecord
 
 import yaml
+from yaml import CLoader
 
 desc = '''Generate HTML output to visualize optimization records from the YAML files
 generated with -fsave-optimization-record and -fdiagnostics-show-hotness.
@@ -51,9 +52,26 @@ def suppress(remark):
         return remark.getArgDict()['Callee'][0].startswith(('\"Swift.', '\"specialized Swift.'))
     return False
 
+
+def get_hotness_lines(output_dir, builds):
+    # Load each build's '<build>.lines_hotness.yaml' file, written by the
+    # harness one directory above the HTML output dir; it maps a
+    # 'file:line' string to a hotness percentage.
+    perf_hotness = {}
+    for build in builds:
+        perf_hotness_path = os.path.join(output_dir, '..', "{}.lines_hotness.yaml".format(build))
+        hotness_dict = {}
+        with open(perf_hotness_path) as f:
+            try:
+                hotness_dict = yaml.load(f, Loader=CLoader) or {}
+            except Exception as e:
+                print(e)
+        perf_hotness[build] = hotness_dict
+    return perf_hotness
+
+
 class SourceFileRenderer:
-    def __init__(self, source_dir, output_dir, filename, no_highlight):
+    def __init__(self, source_dir, output_dir, filename, no_highlight, builds=[]):
         self.filename = filename
         existing_filename = None
         # print('filename', filename) #ggout
@@ -79,6 +97,10 @@ def __init__(self, source_dir, output_dir, filename, no_highlight):
         self.html_formatter = HtmlFormatter(encoding='utf-8')
         self.cpp_lexer = CppLexer(stripnl=False)
 
+        self.builds = builds
+        # We assume the comparison is between each pair of builds.
+        self.perf_hotness = get_hotness_lines(output_dir, builds)
+
 
     def render_source_lines(self, stream, line_remarks):
         file_text = stream.read()
@@ -107,13 +129,25 @@
         html_highlighted = html_highlighted.replace('</pre></div>', '')
 
         for (linenum, html_line) in enumerate(html_highlighted.split('\n'), start=1):
-            print(u'''<tr>
-<td><a name=\"L{linenum}\">{linenum}</a></td>
-<td></td>
-<td></td>
-<td><div class=\"highlight\"><pre>{html_line}</pre></div></td>
-</tr>'''.format(**locals()), file=self.stream)
+            html_src_line = u'''<tr>
+<td><a name=\"L{linenum}\">{linenum}</a></td>
+'''.format(**locals())
+            # Add a placeholder hotness cell for every build.
+            for _ in range(len(self.builds)):
+                html_src_line += u'''<td></td>
+'''
+            html_src_line += u'''<td></td>
+<td><div class=\"highlight\"><pre>{html_line}</pre></div></td>
+</tr>'''.format(**locals())
+            print(html_src_line, file=self.stream)
 
             for remark in line_remarks.get(linenum, []):
                 if not suppress(remark):
@@ -132,20 +166,28 @@ def render_inline_remarks(self, r, line):
         indent = line[:max(r.Column, 1) - 1]
         indent = re.sub('\S', ' ', indent)
 
-        print(u'''<tr>
+        entry = u'''<tr>
 <td></td>
-<td>{r.RelativeHotness}</td>
-<td class=\"column-entry-{r.color}\">{r.PassWithDiffPrefix}</td>
+'''
+        for build in self.perf_hotness:
+            file_name, line_num, column = r.DebugLocString.split(':')
+            file_and_line = file_name + ':' + line_num
+            entry_hotness = self.perf_hotness[build].get(file_and_line, 0)
+            entry_hotness = "{:.3f}%".format(entry_hotness)
+            entry += u'''<td>{entry_hotness}</td>
+'''.format(**locals())
+        entry += u'''<td class=\"column-entry-{r.color}\">{r.PassWithDiffPrefix}</td>
 <td><pre style=\"display:inline\">{indent}</pre><span class=\"column-entry-yellow\"> {r.message}&nbsp;</span></td>
 <td class=\"column-entry-yellow\">{inlining_context}</td>
-</tr>'''.format(**locals()), file=self.stream)
+</tr>'''.format(**locals())
+        print(entry, file=self.stream)
 
     def render(self, line_remarks):
         if not self.source_stream:
             return
 
-        print('''<html>
+        header1 = u'''<html>
 <title>{}</title>
 <meta charset=\"utf-8\" />
 <head>
@@ -157,14 +199,18 @@
 </head>
 <body>
 <div class=\"centered\">
 <table class=\"source\">
 <thead>
 <tr>
-<th style=\"width: 2%\">Line</td>
-<th style=\"width: 4%\">Hotness</td>
+<th style=\"width: 2%\">Line</td>'''.format(os.path.basename(self.filename))
+        for build in self.perf_hotness:
+            header1 += u'''
+<th style=\"width: 4%\">{} Perf Hotness</td>'''.format(build)
+        header1 += u'''
 <th style=\"width: 10%\">Optimization</td>
 <th style=\"width: 70%\">Source</td>
 <th style=\"width: 14%\">Inline Context</td>
 </tr>
-</thead>
-<tbody>'''.format(os.path.basename(self.filename)), file=self.stream)
+</thead>
+<tbody>'''
+        print(header1, file=self.stream)
         self.render_source_lines(self.source_stream, line_remarks)
 
         print('''
@@ -184,21 +230,8 @@ def __init__(self, output_dir, should_display_hotness, max_hottest_remarks_on_index, builds):
         # self.perf_hotness_seq = perf_hotness_seq
         self.builds = builds
         # We assume the comparison is between each pair of builds.
-        self.perf_hotness = {}
-        print('available builds in index renderer', builds)
-        for build in self.builds:
-            perf_hotness_path = os.path.join(output_dir, '..', "{}.lines_hotness.yaml".format(build))
-            # print('perf hotness path',perf_hotness_path)
-            f = open(perf_hotness_path)
-            # print(f)
-            try:
-                hotness_dict = yaml.load(f)
-            except Exception as e:
-                print(e)
-            # print('file loaded:\n\n\n',hotness_dict)
-            self.perf_hotness[build] = hotness_dict
-            # print('build hotnes:\n\n\n',self.perf_hotness[build])
-            f.close()
+        self.perf_hotness = get_hotness_lines(output_dir, builds)
+
 
     def render_entry(self, r, odd):
         escaped_name = html.escape(r.DemangledFunctionName)
@@ -212,8 +245,7 @@ def render_entry(self, r, odd):
         # perf_hotness = self.perf_hotness_omp
         entery = u'''<tr>
-<td class=\"column-entry-{odd}\"><a href={r.Link}>{r.DebugLocString}</a></td>
-<td class=\"column-entry-{odd}\">{r.RelativeHotness}</td>'''.format(**locals())
+<td class=\"column-entry-{odd}\"><a href={r.Link}>{r.DebugLocString}</a></td>'''.format(**locals())
 
         # add perf hotness for each build
         for build in self.perf_hotness:
@@ -221,6 +253,7 @@ def render_entry(self, r, odd):
             file_and_line = file_name + ':' + line_num
             entery_hotness = 0 if file_and_line not in self.perf_hotness[build] else self.perf_hotness[build][
                 file_and_line]
+            entery_hotness = "{:.3f}%".format(entery_hotness)
             entery += u'''
 <td class=\"column-entry-{odd}\">{entery_hotness}</td>'''.format(**locals())
 
@@ -243,12 +276,11 @@ def render(self, all_remarks):
 <tr>
-<td>Source Location</td>
-<td>Hotness</td>'''
+<td>Source Location</td>'''
         # print('header is now: ',header)
         for build in self.perf_hotness:
             header += u'''
-<td>{} PERF Hotness</td>'''.format(build)
+<td>{} perf Hotness</td>'''.format(build)
         # print('header is now: ',header)
         header += u'''
 <td>Function</td>
 <td>Pass</td>
 </tr>'''
@@ -269,11 +301,12 @@
 ''', file=self.stream)
 
 
-def _render_file(source_dir, output_dir, ctx, no_highlight, entry):
+def _render_file(source_dir, output_dir, ctx, no_highlight, builds, entry):
     global context
     context = ctx
     filename, remarks = entry
-    SourceFileRenderer(source_dir, output_dir, filename, no_highlight).render(remarks)
+    SourceFileRenderer(source_dir, output_dir, filename, no_highlight, builds).render(remarks)
 
 
 def map_remarks(all_remarks):
@@ -322,7 +355,7 @@ def generate_report(all_remarks,
         shutil.copy(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                 "style.css"), output_dir)
 
-    _render_file_bound = functools.partial(_render_file, source_dir, output_dir, context, no_highlight)
+    _render_file_bound = functools.partial(_render_file, source_dir, output_dir, context, no_highlight, builds)
     if should_print_progress:
         print('Rendering HTML files...')
     optpmap.pmap(_render_file_bound,
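For reference, the '<build>.lines_hotness.yaml' files consumed by
get_hotness_lines() are produced by get_hot_lines_percentage() in
harness.py. Judging from the lookup keys ('file:line') and the
'{:.3f}%' formatting, each file is a flat YAML mapping from a source
location to a percentage of perf samples; an illustrative sketch
(paths and values are hypothetical):

    '/path/to/src/kernel.c:42': 12.5
    '/path/to/src/kernel.c:57': 0.031

optviewer.py truncates a remark's DebugLocString ('file:line:column')
to 'file:line' for the lookup and falls back to 0 when the location
never appears in the profile.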