import torch
from torch import nn
import numpy as np
from collections import Counter
from nltk.tokenize import PunktSentenceTokenizer, TreebankWordTokenizer
from tqdm import tqdm
import pandas as pd
import itertools
import os
import json
import gensim
import logging

classes = ['Society & Culture',
           'Science & Mathematics',
           'Health',
           'Education & Reference',
           'Computers & Internet',
           'Sports',
           'Business & Finance',
           'Entertainment & Music',
           'Family & Relationships',
           'Politics & Government']
label_map = {k: v for v, k in enumerate(classes)}
rev_label_map = {v: k for k, v in label_map.items()}

# Tokenizers
sent_tokenizer = PunktSentenceTokenizer()
word_tokenizer = TreebankWordTokenizer()


def preprocess(text):
    """
    Pre-process text for use in the model. This includes lower-casing, standardizing newlines, and removing junk.

    :param text: a string
    :return: cleaner string
    """
    # Empty CSV cells are read by pandas as NaN, a float
    if isinstance(text, float):
        return ''

    # Standardize HTML line breaks, escaped newlines, and the carriage-return entity to '\n'
    return text.lower().replace('<br />', '\n').replace('<br>', '\n').replace('\\n', '\n').replace('&#xd;', '\n')
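
# Illustrative example of the cleanup (the input string is made up):
#     preprocess('Some text<br>on two lines')  ->  'some text\non two lines'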


def read_csv(csv_folder, split, sentence_limit, word_limit):
    """
    Read CSVs containing raw training data, clean documents and labels, and do a word-count.

    :param csv_folder: folder containing the CSV
    :param split: train or test CSV?
    :param sentence_limit: truncate long documents to this many sentences
    :param word_limit: truncate long sentences to this many words
    :return: documents, labels, a word-count
    """
    assert split in {'train', 'test'}

    docs = []
    labels = []
    word_counter = Counter()
    data = pd.read_csv(os.path.join(csv_folder, split + '.csv'), header=None)
    for i in tqdm(range(data.shape[0])):
        row = list(data.loc[i, :])

        sentences = list()
        for text in row[1:]:
            for paragraph in preprocess(text).splitlines():
                sentences.extend(sent_tokenizer.tokenize(paragraph))

        words = list()
        for s in sentences[:sentence_limit]:
            w = word_tokenizer.tokenize(s)[:word_limit]
            # If sentence is empty (due to removing punctuation, digits, etc.)
            if len(w) == 0:
                continue
            words.append(w)
            word_counter.update(w)

        # If all sentences were empty
        if len(words) == 0:
            continue

        labels.append(int(row[0]) - 1)  # since labels are 1-indexed in the CSV
        docs.append(words)

    return docs, labels, word_counter
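
# Example usage (illustrative; './yahoo_answers_csv' is an assumed folder containing train.csv and test.csv,
# and the limits are example values):
#     train_docs, train_labels, word_counter = read_csv('./yahoo_answers_csv', 'train',
#                                                       sentence_limit=15, word_limit=20)
#     # train_docs[i] is a list of sentences, each a list of word tokens;
#     # train_labels[i] is a 0-indexed class label (0-9 for the ten classes above)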


def create_input_files(csv_folder, output_folder, sentence_limit, word_limit, min_word_count=5,
                       save_word2vec_data=True):
    """
    Create data files to be used for training the model.

    :param csv_folder: folder where the CSVs with the raw data are located
    :param output_folder: folder where files must be created
    :param sentence_limit: truncate long documents to this many sentences
    :param word_limit: truncate long sentences to this many words
    :param min_word_count: discard rare words which occur fewer times than this number
    :param save_word2vec_data: whether to save the data required for training word2vec embeddings
    """
    # Read training data
    print('\nReading and preprocessing training data...\n')
    train_docs, train_labels, word_counter = read_csv(csv_folder, 'train', sentence_limit, word_limit)

    # Save text data for word2vec
    if save_word2vec_data:
        torch.save(train_docs, os.path.join(output_folder, 'word2vec_data.pth.tar'))
        print('\nText data for word2vec saved to %s.\n' % os.path.abspath(output_folder))

    # Create word map
    word_map = dict()
    word_map['<pad>'] = 0
    for word, count in word_counter.items():
        if count >= min_word_count:
            word_map[word] = len(word_map)
    word_map['<unk>'] = len(word_map)
    print('\nDiscarding words with counts less than %d, the size of the vocabulary is %d.\n' % (
        min_word_count, len(word_map)))

    with open(os.path.join(output_folder, 'word_map.json'), 'w') as j:
        json.dump(word_map, j)
    print('Word map saved to %s.\n' % os.path.abspath(output_folder))

    # Encode and pad
    print('Encoding and padding training data...\n')
    # Each word is mapped to its index (or <unk>), each sentence is padded to word_limit with the <pad>
    # index (0), and each document is padded to sentence_limit with all-<pad> sentences
    encoded_train_docs = list(map(lambda doc: list(
        map(lambda s: list(map(lambda w: word_map.get(w, word_map['<unk>']), s)) + [0] * (word_limit - len(s)),
            doc)) + [[0] * word_limit] * (sentence_limit - len(doc)), train_docs))
    sentences_per_train_document = list(map(lambda doc: len(doc), train_docs))
    words_per_train_sentence = list(
        map(lambda doc: list(map(lambda s: len(s), doc)) + [0] * (sentence_limit - len(doc)), train_docs))

    # Save
    print('Saving...\n')
    assert len(encoded_train_docs) == len(train_labels) == len(sentences_per_train_document) == len(
        words_per_train_sentence)
    # Because the data is large, saving it as JSON can be very slow
    torch.save({'docs': encoded_train_docs,
                'labels': train_labels,
                'sentences_per_document': sentences_per_train_document,
                'words_per_sentence': words_per_train_sentence},
               os.path.join(output_folder, 'TRAIN_data.pth.tar'))
    print('Encoded, padded training data saved to %s.\n' % os.path.abspath(output_folder))

    # Free some memory
    del train_docs, encoded_train_docs, train_labels, sentences_per_train_document, words_per_train_sentence

    # Read test data
    print('Reading and preprocessing test data...\n')
    test_docs, test_labels, _ = read_csv(csv_folder, 'test', sentence_limit, word_limit)

    # Encode and pad
    print('\nEncoding and padding test data...\n')
    encoded_test_docs = list(map(lambda doc: list(
        map(lambda s: list(map(lambda w: word_map.get(w, word_map['<unk>']), s)) + [0] * (word_limit - len(s)),
            doc)) + [[0] * word_limit] * (sentence_limit - len(doc)), test_docs))
    sentences_per_test_document = list(map(lambda doc: len(doc), test_docs))
    words_per_test_sentence = list(
        map(lambda doc: list(map(lambda s: len(s), doc)) + [0] * (sentence_limit - len(doc)), test_docs))

    # Save
    print('Saving...\n')
    assert len(encoded_test_docs) == len(test_labels) == len(sentences_per_test_document) == len(
        words_per_test_sentence)
    torch.save({'docs': encoded_test_docs,
                'labels': test_labels,
                'sentences_per_document': sentences_per_test_document,
                'words_per_sentence': words_per_test_sentence},
               os.path.join(output_folder, 'TEST_data.pth.tar'))
    print('Encoded, padded test data saved to %s.\n' % os.path.abspath(output_folder))

    print('All done!\n')
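
# Example usage (a sketch; the folder paths and the sentence/word limits are illustrative values,
# not fixed by this module):
#     create_input_files(csv_folder='./yahoo_answers_csv', output_folder='./han_data',
#                        sentence_limit=15, word_limit=20, min_word_count=5)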


def train_word2vec_model(data_folder, algorithm='skipgram'):
    """
    Train a word2vec model for word embeddings.

    See the paper by Mikolov et al. for details - https://arxiv.org/pdf/1310.4546.pdf

    :param data_folder: folder with the word2vec training data
    :param algorithm: use the Skip-gram or Continuous Bag Of Words (CBOW) algorithm?
    """
    assert algorithm in ['skipgram', 'cbow']
    sg = 1 if algorithm == 'skipgram' else 0

    # Read data
    sentences = torch.load(os.path.join(data_folder, 'word2vec_data.pth.tar'))
    sentences = list(itertools.chain.from_iterable(sentences))

    # Activate logging for verbose training
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)

    # Initialize and train the model (this will take some time)
    # Note: this is the gensim 3.x API; in gensim 4.x, 'size' is renamed 'vector_size' and init_sims() is deprecated
    model = gensim.models.word2vec.Word2Vec(sentences=sentences, size=200, workers=8, window=10, min_count=5,
                                            sg=sg)

    # Normalize vectors and save model
    model.init_sims(True)
    model.wv.save(os.path.join(data_folder, 'word2vec_model'))
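
# Example usage (a sketch; assumes create_input_files() was run with save_word2vec_data=True so that
# 'word2vec_data.pth.tar' exists in the same folder):
#     train_word2vec_model(data_folder='./han_data', algorithm='skipgram')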


def init_embedding(input_embedding):
    """
    Initialize embedding tensor with values from the uniform distribution.

    :param input_embedding: embedding tensor
    """
    bias = np.sqrt(3.0 / input_embedding.size(1))
    nn.init.uniform_(input_embedding, -bias, bias)


def load_word2vec_embeddings(word2vec_file, word_map):
    """
    Load pre-trained embeddings for words in the word map.

    :param word2vec_file: location of the trained word2vec model
    :param word_map: word map
    :return: embeddings for words in the word map, embedding size
    """
    # Load word2vec model into memory
    w2v = gensim.models.KeyedVectors.load(word2vec_file, mmap='r')

    print("\nEmbedding length is %d.\n" % w2v.vector_size)

    # Create tensor to hold embeddings for words that are in-corpus (out-of-vocabulary words keep the random init)
    embeddings = torch.FloatTensor(len(word_map), w2v.vector_size)
    init_embedding(embeddings)

    # Read embedding file
    print("Loading embeddings...")
    for word in word_map:
        if word in w2v.vocab:  # gensim < 4.0 attribute; gensim 4.x uses w2v.key_to_index
            embeddings[word_map[word]] = torch.FloatTensor(w2v[word])

    print("Done.\nEmbedding vocabulary: %d.\n" % len(word_map))

    return embeddings, w2v.vector_size
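
# Example usage (a minimal sketch; the file paths are assumptions, and loading the word map from
# 'word_map.json' mirrors how create_input_files() saves it):
#     with open('./han_data/word_map.json', 'r') as j:
#         word_map = json.load(j)
#     embeddings, emb_size = load_word2vec_embeddings('./han_data/word2vec_model', word_map)
#     embedding_layer = nn.Embedding.from_pretrained(embeddings, freeze=False, padding_idx=0)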


def clip_gradient(optimizer, grad_clip):
    """
    Clip gradients computed during backpropagation to prevent gradient explosion.

    :param optimizer: optimizer with the gradients to be clipped
    :param grad_clip: gradient clip value
    """
    for group in optimizer.param_groups:
        for param in group['params']:
            if param.grad is not None:
                param.grad.data.clamp_(-grad_clip, grad_clip)
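
# Example of where clip_gradient() fits in a training step (a sketch; 'loss' and 'optimizer' are assumed
# to come from the training script, and grad_clip=5.0 is an illustrative value):
#     optimizer.zero_grad()
#     loss.backward()
#     clip_gradient(optimizer, grad_clip=5.0)
#     optimizer.step()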


def save_checkpoint(epoch, model, optimizer, word_map):
    """
    Save model checkpoint.

    :param epoch: epoch number
    :param model: model
    :param optimizer: optimizer
    :param word_map: word map
    """
    state = {'epoch': epoch,
             'model': model,
             'optimizer': optimizer,
             'word_map': word_map}
    filename = 'checkpoint_han.pth.tar'
    torch.save(state, filename)


class AverageMeter(object):
    """
    Keeps track of most recent, average, sum, and count of a metric.
    """

    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
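
# Example usage (illustrative values):
#     losses = AverageMeter()
#     losses.update(0.9, n=64)  # a batch loss of 0.9 averaged over 64 samples
#     losses.update(0.7, n=64)
#     print(losses.val, losses.avg)  # most recent value (0.7) and running average (0.8)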


def adjust_learning_rate(optimizer, scale_factor):
    """
    Shrinks learning rate by a specified factor.

    :param optimizer: optimizer whose learning rates must be decayed
    :param scale_factor: factor to scale by
    """
    print("\nDECAYING learning rate.")
    for param_group in optimizer.param_groups:
        param_group['lr'] = param_group['lr'] * scale_factor
    print("The new learning rate is %f\n" % (optimizer.param_groups[0]['lr'],))
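
# Example usage (a sketch; 'model' is assumed to be defined in the training script):
#     optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
#     adjust_learning_rate(optimizer, scale_factor=0.5)  # every learning rate becomes 5e-4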