test_keras_VGG.py
# Testing workflow on a sample
import numpy as np
import tensorflow as tf
import cv2
import matplotlib.pyplot as plt
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, Flatten
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from sklearn.model_selection import train_test_split
from tqdm import tqdm
import pickle
from sklearn.metrics import f1_score
from keras.layers.normalization import BatchNormalization
from keras.preprocessing.image import ImageDataGenerator
# Load the training set
with open('/home/allen/hackerEarth/DL2/train_ip.dat', 'rb') as f:
    X_train = pickle.load(f)
# X_train = X_train / 255 is skipped here: it allocates a full floating-point
# copy of a very large array and runs out of memory.
# Conv2D expects a 4D tensor (samples, height, width, channels), so add a
# channel axis.
X_train = X_train[..., np.newaxis]
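# A lower-memory alternative (a sketch, not part of the original run) would be
# to cast once to float32 and scale in place instead of creating a float64 copy:
#   X_train = X_train.astype('float32')
#   X_train /= 255.0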
with open('/home/allen/hackerEarth/DL2/train_op.dat', 'rb') as f:
    Y_train = pickle.load(f)
# The labels are one-hot class indicators, so they must not be divided by 255.
# Split off held-out test and validation sets
X_train, X_test, Y_train, Y_test = train_test_split(X_train, Y_train,
                                                     test_size=0.15, shuffle=True)
X_train, X_valid, Y_train, Y_valid = train_test_split(X_train, Y_train,
                                                       test_size=0.15, shuffle=True)
# Data generator
datagen = ImageDataGenerator()
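# The generator above applies no scaling or augmentation. A configured one could
# handle per-batch rescaling and light augmentation instead (illustrative values,
# not from the original run):
#   datagen = ImageDataGenerator(rescale=1. / 255, rotation_range=10,
#                                width_shift_range=0.1, height_shift_range=0.1,
#                                horizontal_flip=True)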
# VGG-style network
model = Sequential()
# NOTE: this BatchNormalization layer is constructed but never added to the model
# (there is no model.add call for it), so it currently has no effect.
BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True,
                   scale=True, beta_initializer='zeros', gamma_initializer='ones',
                   moving_mean_initializer='zeros', moving_variance_initializer='ones',
                   beta_regularizer=None, gamma_regularizer=None,
                   beta_constraint=None, gamma_constraint=None)
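# To actually apply batch normalization, each instance would need to be attached
# to the model, e.g. model.add(BatchNormalization()) after a convolution
# (a sketch, not part of the original run).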
model.add(Conv2D(64, (3, 3), activation='relu', input_shape=(256, 256, 1)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(256, (3, 3), activation='relu'))
model.add(Conv2D(256, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(512, (3, 3), activation='relu'))
model.add(Conv2D(512, (3, 3), activation='relu'))
model.add(Conv2D(512, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(512, (3, 3), activation='relu'))
model.add(Conv2D(512, (3, 3), activation='relu'))
model.add(Conv2D(512, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(4096, activation='relu'))
model.add(Dense(4096, activation='relu'))
model.add(Dense(1000, activation='relu'))
#model.add(Dropout(0.5))
# Output layer: one unit per class (Y_train.shape[1], 14 classes for this dataset)
model.add(Dense(Y_train.shape[1], activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam',metrics=['acc'])
model.summary()
with tf.device('/gpu:0'):
    # model.fit_generator(datagen.flow(X_train, Y_train, batch_size=32),
    #                     validation_data=(X_valid, Y_valid), epochs=10, verbose=1)
    model.fit(X_train, Y_train, validation_data=(X_valid, Y_valid),
              batch_size=32, epochs=10, verbose=1)
# Comparing results on the held-out test set
score = model.evaluate(X_test, Y_test)
result = model.predict(X_test)
# f1_score needs class labels, so convert the one-hot targets and the predicted
# probabilities to class indices first.
f1 = f1_score(Y_test.argmax(axis=1), result.argmax(axis=1), average='weighted')
print('Test loss and accuracy:', score)
print('Weighted F1 score:', f1)
model.save('xrayModel.h5')
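# A later comparison step could reload the saved model, e.g. (a sketch; the
# follow-up step is assumed, not part of the original script):
#   from keras.models import load_model
#   model = load_model('xrayModel.h5')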