data_processing.py
"""
Make train, val, test datasets based on train_test_split.txt, and by sampling val_ratio of the official train data to make a validation set
Each dataset is a list of metadata, each includes official image id, full image path, class label, attribute labels, attribute certainty scores, and attribute labels calibrated for uncertainty
"""
import argparse
import os
import pickle
import random
from collections import defaultdict as ddict
from os import listdir
from os.path import isfile, isdir, join


def extract_data(data_dir):
    cwd = os.getcwd()
    data_path = join(cwd, data_dir)
    data_path = join(data_path, "images")
    val_ratio = 0.2

    path_to_id_map = dict()  # map from full image path to image id
    with open(data_path.replace('images', 'images.txt'), 'r') as f:
        for line in f:
            items = line.strip().split()
            sub_items = items[1].split("/")
            path_to_id_map[join(data_path, sub_items[0], sub_items[1])] = int(items[0])
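    # Each images.txt line is assumed to look like "<image_id> <class_folder>/<image_name>.jpg"
    # (folder names such as "001.Black_footed_Albatross"), which is what the split("/") above
    # relies on to rebuild the full image path.
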
    attribute_labels_all = ddict(list)  # map from image id to a list of attribute labels
    attribute_certainties_all = ddict(list)  # map from image id to a list of attribute certainties
    attribute_uncertain_labels_all = ddict(list)  # map from image id to a list of attribute labels calibrated for uncertainty
    # 1 = not visible, 2 = guessing, 3 = probably, 4 = definitely
    uncertainty_map = {1: {1: 0, 2: 0.5, 3: 0.75, 4: 1},  # calibrate main label based on uncertainty label
                       0: {1: 0, 2: 0.5, 3: 0.25, 4: 0}}
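    # Reading of the map above (illustrative, not from the original code): a positive label
    # annotated as "guessing" (certainty 2) becomes 0.5, "probably" (3) becomes 0.75, while a
    # negative label marked "definitely" (certainty 4) stays 0.
    # Each image_attribute_labels.txt line is assumed to start with
    # "<image_id> <attribute_id> <is_present> <certainty>", hence only the first four fields
    # are kept below.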
    with open(join(cwd, data_dir, 'attributes', "image_attribute_labels.txt"), 'r') as f:
        for line in f:
            file_idx, attribute_idx, attribute_label, attribute_certainty = line.strip().split()[:4]
            attribute_label = int(attribute_label)
            attribute_certainty = int(attribute_certainty)
            uncertain_label = uncertainty_map[attribute_label][attribute_certainty]
            attribute_labels_all[int(file_idx)].append(attribute_label)
            attribute_uncertain_labels_all[int(file_idx)].append(uncertain_label)
            attribute_certainties_all[int(file_idx)].append(attribute_certainty)

    is_train_test = dict()  # map from image id to 0 / 1 (1 = train)
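    # Each train_test_split.txt line is assumed to be "<image_id> <is_training_image>",
    # with the flag 1 for the official train split and 0 for test.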
    with open(join(cwd, data_dir, "train_test_split.txt"), 'r') as f:
        for line in f:
            idx, is_train = line.strip().split()
            is_train_test[int(idx)] = int(is_train)
    print("Number of train images from official train test split:", sum(list(is_train_test.values())))

    train_val_data, test_data = [], []
    train_data, val_data = [], []
    folder_list = [f for f in listdir(data_path) if isdir(join(data_path, f))]
    folder_list.sort()  # sort by class index
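    # CUB class folders are assumed to be named like "001.Black_footed_Albatross", so a plain
    # lexicographic sort orders them by class index and enumerate() below yields 0-based labels.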
    for i, folder in enumerate(folder_list):
        folder_path = join(data_path, folder)
        classfile_list = [cf for cf in listdir(folder_path) if (isfile(join(folder_path, cf)) and cf[0] != '.')]
        # classfile_list.sort()
        for cf in classfile_list:
            img_id = path_to_id_map[join(folder_path, cf)]
            img_path = join(folder_path, cf)
            metadata = {'id': img_id, 'img_path': img_path, 'class_label': i,
                        'attribute_label': attribute_labels_all[img_id],
                        'attribute_certainty': attribute_certainties_all[img_id],
                        'uncertain_attribute_label': attribute_uncertain_labels_all[img_id]}
            if is_train_test[img_id]:
                train_val_data.append(metadata)
            else:
                test_data.append(metadata)

    random.shuffle(train_val_data)
    split = int(val_ratio * len(train_val_data))
    train_data = train_val_data[split:]
    val_data = train_val_data[:split]
    print('Size of train set:', len(train_data))
    return train_data, val_data, test_data


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Dataset preparation')
    parser.add_argument('-save_dir', '-d', default="data/", help='Where to save the new datasets')
    parser.add_argument('-data_dir', default=os.path.join("data", "CUB_200_2011"), help='Where to load the datasets')
    args = parser.parse_args()
    train_data, val_data, test_data = extract_data(args.data_dir)

    for dataset in ['train', 'val', 'test']:
        print("Processing %s set" % dataset)
        with open(args.save_dir + dataset + '.pkl', 'wb') as f:
            if 'train' in dataset:
                pickle.dump(train_data, f)
            elif 'val' in dataset:
                pickle.dump(val_data, f)
            else:
                pickle.dump(test_data, f)
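

# A minimal sketch (not part of the original script) of how the saved pickles might be loaded
# downstream; "data/train.pkl" assumes the default -save_dir above and is only illustrative.
#
#     import pickle
#     with open("data/train.pkl", "rb") as f:
#         train_data = pickle.load(f)
#     sample = train_data[0]
#     # 'attribute_label' has one entry per attribute (312 in CUB-200-2011), aligned with
#     # 'attribute_certainty' and 'uncertain_attribute_label'.
#     print(sample['img_path'], sample['class_label'], len(sample['attribute_label']))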