This repository has been archived by the owner on Jul 22, 2024. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 163
/
utils.py
159 lines (134 loc) · 6.64 KB
/
utils.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
import argparse
import yaml
import torch
import numpy as np
import time
import random
import math
def pad_with_last_col(matrix, cols):
    """Right-pad a 2-D tensor to `cols` columns by repeating its last column."""
    missing = cols - matrix.size(1)
    last_col = matrix[:, [-1]]  # keep 2-D shape (rows, 1) for concatenation
    return torch.cat([matrix] + [last_col] * missing, dim=1)
def pad_with_last_val(vect, k):
    """Pad a 1-D long tensor to length `k` by repeating its final value.

    The padding is created on the same device as `vect`.
    """
    target_device = 'cuda' if vect.is_cuda else 'cpu'
    n_pad = k - vect.size(0)
    tail = vect[-1] * torch.ones(n_pad, dtype=torch.long, device=target_device)
    return torch.cat([vect, tail])
def sparse_prepare_tensor(tensor, torch_size, ignore_batch_dim=True):
    """Turn a sparse-dict ({'idx', 'vals'}) into a float torch sparse tensor.

    When ignore_batch_dim is True, a leading batch dimension on both 'idx'
    and 'vals' is stripped first.
    """
    if ignore_batch_dim:
        tensor = sp_ignore_batch_dim(tensor)
    return make_sparse_tensor(tensor,
                              tensor_type='float',
                              torch_size=torch_size)
def sp_ignore_batch_dim(tensor_dict):
    """Strip the leading batch dimension from 'idx' and 'vals' in place.

    Mutates and returns the same dict.
    """
    for key in ('idx', 'vals'):
        tensor_dict[key] = tensor_dict[key][0]
    return tensor_dict
def aggregate_by_time(time_vector, time_win_aggr):
    """Bucket timestamps into integer windows of width `time_win_aggr`.

    Times are first shifted so the earliest timestamp lands in window 0.
    """
    shifted = time_vector - time_vector.min()
    return shifted // time_win_aggr
def sort_by_time(data, time_col):
    """Return the rows of `data` ordered ascending by column `time_col`."""
    order = torch.sort(data[:, time_col])[1]  # [1] = permutation indices
    return data[order]
def print_sp_tensor(sp_tensor, size):
    """Debug helper: densify a {'idx', 'vals'} sparse dict as a (size, size)
    matrix and print it."""
    dense = torch.sparse.FloatTensor(sp_tensor['idx'].t(),
                                     sp_tensor['vals'],
                                     torch.Size([size, size])).to_dense()
    print(dense)
def reset_param(t):
    """Re-initialize `t` in place, uniform in [-b, b] with b = 2/sqrt(dim0)."""
    bound = 2. / math.sqrt(t.size(0))
    t.data.uniform_(-bound, bound)
def make_sparse_tensor(adj, tensor_type, torch_size):
    """Build a torch sparse tensor from a sparse dict.

    adj: dict with 'idx' (an (nnz, ndim) index tensor, transposed internally)
         and 'vals' (an (nnz,) value tensor, cast to the requested dtype).
    tensor_type: 'float' or 'long'.
    torch_size: two explicit dims, or one dim replicated into a square shape.

    Raises ValueError for a bad torch_size length and NotImplementedError
    for an unknown tensor_type.
    """
    if len(torch_size) == 2:
        tensor_size = torch.Size(torch_size)
    elif len(torch_size) == 1:
        # square matrix: duplicate the single dimension
        tensor_size = torch.Size(torch_size * 2)
    else:
        # BUG FIX: the original fell through here leaving tensor_size
        # unbound, which surfaced later as a confusing NameError
        raise ValueError('torch_size must have 1 or 2 entries')
    if tensor_type == 'float':
        # BUG FIX: dropped the dead `test = torch.sparse.FloatTensor(...)`
        # the original constructed and immediately discarded
        return torch.sparse.FloatTensor(adj['idx'].t(),
                                        adj['vals'].type(torch.float),
                                        tensor_size)
    elif tensor_type == 'long':
        return torch.sparse.LongTensor(adj['idx'].t(),
                                       adj['vals'].type(torch.long),
                                       tensor_size)
    else:
        raise NotImplementedError('only make floats or long sparse tensors')
def sp_to_dict(sp_tensor):
    """Decompose a torch sparse tensor into the {'idx', 'vals'} dict form
    used throughout this module ('idx' is (nnz, ndim))."""
    indices = sp_tensor._indices().t()
    values = sp_tensor._values()
    return {'idx': indices, 'vals': values}
class Namespace(object):
    '''
    helps referencing object in a dictionary as dict.key instead of dict['key']
    '''
    def __init__(self, adict):
        # expose each dictionary entry as an instance attribute
        for key, value in adict.items():
            setattr(self, key, value)
def set_seeds(rank):
    """Seed every RNG in play (numpy, random, torch CPU and CUDA).

    The seed mixes wall-clock time with `rank` so distributed workers get
    distinct streams; note this makes runs non-reproducible by design.
    """
    seed = rank + int(time.time())
    for seed_fn in (np.random.seed,
                    random.seed,
                    torch.manual_seed,
                    torch.cuda.manual_seed,
                    torch.cuda.manual_seed_all):
        seed_fn(seed)
def random_param_value(param, param_min, param_max, type='int'):
    """Return `param` unchanged, unless it is None/'None' — then draw a
    random value in [param_min, param_max].

    type: 'int' (inclusive randrange), 'logscale' (choice over a log-spaced
    grid of 100 points), anything else -> uniform float.
    """
    # BUG FIX: the original tested `str(param) is None`, which is always
    # False (str() never returns None), so a random value was never drawn
    # and None was returned verbatim.
    if param is None or str(param).lower() == 'none':
        if type == 'int':
            return random.randrange(param_min, param_max + 1)
        elif type == 'logscale':
            interval = np.logspace(np.log10(param_min), np.log10(param_max), num=100)
            return np.random.choice(interval, 1)[0]
        else:
            return random.uniform(param_min, param_max)
    else:
        return param
def load_data(file):
    """Load a comma-separated text file into a float tensor.

    The first line is treated as a header and skipped.

    FIX: the original shadowed the `file` parameter with the open handle
    and then the line list, and leaked the handle if reading raised; a
    context manager with distinct names handles both.
    """
    with open(file) as handle:
        rows = handle.read().splitlines()
    # rows[0] is the header line and is dropped
    data = torch.tensor([[float(field) for field in row.split(',')]
                         for row in rows[1:]])
    return data
def load_data_from_tar(file, tar_archive, replace_unknow=False, starting_line=1, sep=',', type_fn = float, tensor_const = torch.DoubleTensor):
    """Read a delimited text member of an open tarfile into a tensor.

    file: member name inside `tar_archive` (an open tarfile.TarFile).
    replace_unknow: substitute the literal token 'unknow' with '-1' before
        parsing (and collapse the resulting '-1n' back to '-1').
    starting_line: number of leading lines to skip (default: 1 header line).
    type_fn / tensor_const: per-cell parser and tensor constructor.
    """
    raw = tar_archive.extractfile(file).read().decode('utf-8')
    if replace_unknow:
        raw = raw.replace('unknow', '-1').replace('-1n', '-1')
    rows = raw.splitlines()[starting_line:]
    parsed = [[type_fn(cell) for cell in row.split(sep)] for row in rows]
    return tensor_const(parsed)
def create_parser():
    """Build the command-line parser; the single option points at a YAML
    file whose contents override command-line parameters."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument(
        '--config_file',
        default='experiments/parameters_example.yaml',
        type=argparse.FileType(mode='r'),
        help='optional, yaml file containing parameters to be used, overrides command line parameters')
    return parser
def parse_args(parser):
    """Parse CLI args, overlay values from the optional YAML config file,
    then resolve each tunable hyperparameter via random_param_value (a
    random value is drawn when the configured value is None/'None').

    Returns the populated argparse Namespace. Assumes the merged config
    provides learning_rate*/num_hist_steps* keys and a gcn_parameters dict.
    """
    args = parser.parse_args()
    if args.config_file:
        # BUG FIX: yaml.load without an explicit Loader is a TypeError on
        # PyYAML >= 6 and can construct arbitrary objects; the config is
        # plain scalars/maps, so safe_load is both correct and sufficient.
        data = yaml.safe_load(args.config_file)
        delattr(args, 'config_file')
        arg_dict = args.__dict__
        for key, value in data.items():
            arg_dict[key] = value
    args.learning_rate = random_param_value(args.learning_rate, args.learning_rate_min, args.learning_rate_max, type='logscale')
    args.num_hist_steps = random_param_value(args.num_hist_steps, args.num_hist_steps_min, args.num_hist_steps_max, type='int')
    gcn = args.gcn_parameters
    gcn['feats_per_node'] = random_param_value(gcn['feats_per_node'], gcn['feats_per_node_min'], gcn['feats_per_node_max'], type='int')
    gcn['layer_1_feats'] = random_param_value(gcn['layer_1_feats'], gcn['layer_1_feats_min'], gcn['layer_1_feats_max'], type='int')
    # BUG FIX: the *_same_as_l1 flags may arrive as YAML bools or as the
    # strings 'True'/'False'; the original `flag or flag.lower()=='true'`
    # raised AttributeError on bool False and treated the non-empty string
    # 'False' as truthy. Normalizing through str() handles both forms.
    if str(gcn['layer_2_feats_same_as_l1']).lower() == 'true':
        gcn['layer_2_feats'] = gcn['layer_1_feats']
    else:
        gcn['layer_2_feats'] = random_param_value(gcn['layer_2_feats'], gcn['layer_1_feats_min'], gcn['layer_1_feats_max'], type='int')
    gcn['lstm_l1_feats'] = random_param_value(gcn['lstm_l1_feats'], gcn['lstm_l1_feats_min'], gcn['lstm_l1_feats_max'], type='int')
    if str(gcn['lstm_l2_feats_same_as_l1']).lower() == 'true':
        gcn['lstm_l2_feats'] = gcn['lstm_l1_feats']
    else:
        gcn['lstm_l2_feats'] = random_param_value(gcn['lstm_l2_feats'], gcn['lstm_l1_feats_min'], gcn['lstm_l1_feats_max'], type='int')
    gcn['cls_feats'] = random_param_value(gcn['cls_feats'], gcn['cls_feats_min'], gcn['cls_feats_max'], type='int')
    return args