# make_profiles.py -- builds performance/data profile figures for the solver benchmarks.
import multiprocessing
import os
from argparse import ArgumentParser
from enum import Enum

import numpy as np

from utils import *
from definitions import *
class Kind(Enum):
    """Which family of profiles to generate; the value doubles as the CLI token."""

    DFO = 'dfo'
    BATCHSIZE = 'batchsize'
    EPSILON = 'epsilon'

    def __str__(self):
        # argparse renders choices via str(), so show the raw CLI token.
        return self.value
# Command-line interface: how many workers to run and which profile family to build.
parser = ArgumentParser()
parser.add_argument('--threads', type=int, default=1,
                    help='number of processes to run concurrently')
parser.add_argument('--type', type=Kind, default='batchsize', choices=list(Kind))
parser.add_argument('--tau_max', type=int, default=100,
                    help='maximum x axis value for a performance profile')
args = parser.parse_args()
prefix = './figures/'

# Select the solver set, the problem-list files, and the objective for the
# requested profile family.  Exactly one branch matches a valid --type.
if args.type is Kind.DFO:
    _solvers = [aeb, bab, tbab, nm, bfgs_cheap]
    _lists = ['problems/dfo.txt']
    objective = Objective.DFO
elif args.type is Kind.BATCHSIZE:
    _solvers = [ae, ba, tba, wolfe_cheap, co, inv]
    _lists = ['problems/logistic_sgd_batchsize_None.txt',
              'problems/logistic_sgd_batchsize_1600.txt',
              'problems/logistic_sgd_batchsize_400.txt',
              'problems/logistic_sgd_batchsize_100.txt',
              'problems/logistic_sgd_batchsize_25.txt',
              'problems/logistic_sgd_batchsize_6.txt',
              'problems/logistic_sgd_batchsize_1.txt']
    objective = Objective.LOGISTIC
elif args.type is Kind.EPSILON:
    _solvers = [ae, ba, tba, wolfe_cheap, co, inv]
    _lists = ['problems/logistic_sgd_epsilon_pt001.txt',
              'problems/logistic_sgd_epsilon_pt0001.txt',
              'problems/logistic_sgd_epsilon_pt00001.txt',
              'problems/logistic_sgd_epsilon_pt000001.txt']
    objective = Objective.LOGISTIC

algs = np.asarray(_solvers)
probs_files = np.asarray(_lists)
del _solvers, _lists
def perf_func(functions, algs, filename, fullsize, legend):
    """Worker wrapper: render one performance profile (tau_max taken from the CLI args)."""
    opts = {'filename': filename, 'fullsize': fullsize,
            'legend': legend, 'tau_max': args.tau_max}
    perf_prof(functions, algs, **opts)
def data_func(functions, algs, filename, fullsize, legend):
    """Worker wrapper: render one data profile figure."""
    opts = {'filename': filename, 'fullsize': fullsize, 'legend': legend}
    data_prof(functions, algs, **opts)
pool_size = multiprocessing.cpu_count()
# Widen this process's CPU affinity to every core so the forked workers inherit it.
# CPU indices run 0 .. cpu_count()-1: the previous upper bound of `pool_size`
# named a nonexistent CPU, which makes `taskset` reject the entire range.
os.system('taskset -cp 0-%d %s' % (pool_size - 1, os.getpid()))
pool = multiprocessing.Pool(args.threads)
# Keep every AsyncResult: apply_async swallows worker exceptions until .get()
# is called, so without this list a failed plot would go unnoticed.
async_results = []
for probs_file in probs_files:
    functions = get_functions(objective, probs_file)
    out = prefix + probs_file[0:-4] + '.pdf'
    if probs_file == probs_files[0] and args.type is not Kind.EPSILON:
        # First problem set: full-size figure with a legend.
        async_results.append(pool.apply_async(
            perf_func, args=(functions, algs, out, True, True)))
        if args.type is Kind.DFO:
            async_results.append(pool.apply_async(
                data_func, args=(functions, algs,
                                 prefix + probs_file[0:-4] + '_data.pdf', True, True)))
    elif probs_file == probs_files[-1]:
        # Last problem set: small figure but keep the legend.
        async_results.append(pool.apply_async(
            perf_func, args=(functions, algs, out, False, True)))
    else:
        # Middle problem sets: small figure, no legend.
        async_results.append(pool.apply_async(
            perf_func, args=(functions, algs, out, False, False)))
pool.close()
pool.join()
for res in async_results:
    res.get()  # re-raise any exception that occurred inside a worker