uncomment partial keras ut test (intel-analytics#4716)
Le-Zheng authored Sep 13, 2021
1 parent 31b12ed commit 78f9238
Showing 3 changed files with 114 additions and 117 deletions.
python/dllib/src/test/bigdl/keras/test_layer.py (19 changes: 10 additions & 9 deletions)
@@ -203,15 +203,16 @@ def test_reshape(self):
         m = ZModel(i1, s)
         # predict should not generate exception
         y = m.predict(a, distributed=False)
-    #uncomment when migrate nn.keras to keras
-    # def test_regularizer(self):
-    #     model = ZSequential()
-    #     model.add(ZLayer.Dense(16, W_regularizer=regularizers.l2(0.001),
-    #                            activation='relu', input_shape=(10000,)))
-    #     model.summary()
-    #     model.compile(optimizer='rmsprop',
-    #                   loss='binary_crossentropy',
-    #                   metrics=['acc'])
+
+    def test_regularizer(self):
+        model = ZSequential()
+        model.add(ZLayer.Dense(16, W_regularizer=regularizers.l2(0.001),
+                               activation='relu', input_shape=(10000,)))
+        model.summary()
+        model.compile(optimizer='rmsprop',
+                      loss='binary_crossentropy',
+                      metrics=['acc'])
+
     # uncomment when Math310 and TimeDistributed fixed
     # def test_transformer_forward_backward(self):
     #     layer = ZLayer.TransformerLayer.init(
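For context, the re-enabled test_regularizer builds a Dense layer with an L2 weight regularizer and compiles the model. A minimal standalone sketch of the same steps, assuming the bigdl.dllib.keras paths added in test_net.py below; the location of the `regularizers` module is an assumption, since this diff does not show where the test imports it from, and an initialized SparkContext/BigDL engine is assumed, as the ZooTestCase fixture provides for the suite:

    # Hedged standalone sketch of the re-enabled test_regularizer body.
    # ZLayer/ZSequential paths match the added import lines in test_net.py
    # below; the regularizers import location is an assumption.
    import bigdl.dllib.keras.layers as ZLayer
    from bigdl.dllib.keras.models import Sequential as ZSequential
    from bigdl.dllib.keras import regularizers  # assumed location

    model = ZSequential()
    model.add(ZLayer.Dense(16, W_regularizer=regularizers.l2(0.001),
                           activation='relu', input_shape=(10000,)))
    model.summary()  # prints the layer/parameter table
    model.compile(optimizer='rmsprop',
                  loss='binary_crossentropy',
                  metrics=['acc'])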
python/dllib/src/test/bigdl/keras/test_net.py (9 changes: 4 additions & 5 deletions)
@@ -18,12 +18,11 @@
 #
 # import keras.layers as KLayer
 # from keras.models import Sequential as KSequential
-# from test.zoo.pipeline.utils.test_utils import ZooTestCase
-# import zoo.pipeline.api.keras.layers as ZLayer
-# from zoo.pipeline.api.keras.models import Model as ZModel
-# from zoo.pipeline.api.keras.models import Sequential as ZSequential
+# from test.bigdl.test_zoo_utils import ZooTestCase
+# import bigdl.dllib.keras.layers as ZLayer
+# from bigdl.dllib.keras.models import Model as ZModel, Sequential as ZSequential
 # from zoo.pipeline.api.net import Net
-# from bigdl.nn.layer import Linear, Sigmoid, SoftMax, Model as BModel
+# from bigdl.dllib.nn.layer import Linear, Sigmoid, SoftMax, Model as BModel
 # from bigdl.util.common import *
 # from bigdl.nn.layer import Sequential
 #
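The rewritten import block above captures the Analytics Zoo to BigDL dllib package migration that motivates this file's change. As a quick reference, a sketch of the renamed modules side by side; the paths are taken verbatim from the deleted and added lines, and the test.* import only resolves inside the repository's test tree:

    # Analytics Zoo (old, deleted lines)       ->  BigDL dllib (new, added lines)
    # test.zoo.pipeline.utils.test_utils       ->  test.bigdl.test_zoo_utils
    # zoo.pipeline.api.keras.layers            ->  bigdl.dllib.keras.layers
    # zoo.pipeline.api.keras.models            ->  bigdl.dllib.keras.models
    # bigdl.nn.layer                           ->  bigdl.dllib.nn.layer
    from test.bigdl.test_zoo_utils import ZooTestCase
    import bigdl.dllib.keras.layers as ZLayer
    from bigdl.dllib.keras.models import Model as ZModel, Sequential as ZSequential
    from bigdl.dllib.nn.layer import Linear, Sigmoid, SoftMax, Model as BModel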
python/dllib/src/test/bigdl/keras/test_simple_integration.py (203 changes: 100 additions & 103 deletions)
@@ -54,91 +54,89 @@ def test_graph(self):
         np.testing.assert_allclose((10, ), output_shapes[1][1:])
         shutil.rmtree(tmp_log_dir)

-    # todo when predict is uncomment
-    # def test_training_with_tensorboard_checkpoint_gradientclipping(self):
-    #     model = Sequential()
-    #     model.add(Dense(8, input_shape=(32, 32, )))
-    #     model.add(Flatten())
-    #     model.add(Dense(4, activation="softmax"))
-    #     X_train = np.random.random([200, 32, 32])
-    #     y_train = np.random.randint(4, size=(200, ))
-    #     X_test = np.random.random([40, 32, 32])
-    #     y_test = np.random.randint(4, size=(40, ))
-    #     model.compile(optimizer="adam",
-    #                   loss="sparse_categorical_crossentropy",
-    #                   metrics=['accuracy'])
-    #     tmp_log_dir = create_tmp_path()
-    #     tmp_checkpoint_path = create_tmp_path()
-    #     os.mkdir(tmp_checkpoint_path)
-    #     model.set_tensorboard(tmp_log_dir, "training_test")
-    #     model.set_checkpoint(tmp_checkpoint_path)
-    #     model.set_constant_gradient_clipping(0.01, 0.03)
-    #     model.fit(X_train, y_train, batch_size=112, nb_epoch=2, validation_data=(X_test, y_test))
-    #     model.clear_gradient_clipping()
-    #     model.fit(X_train, y_train, batch_size=112, nb_epoch=2, validation_data=(X_test, y_test))
-    #     model.set_gradient_clipping_by_l2_norm(0.2)
-    #     model.fit(X_train, y_train, batch_size=112, nb_epoch=2, validation_data=(X_test, y_test))
-    #     train_loss = model.get_train_summary("Loss")
-    #     val_loss = model.get_validation_summary("Loss")
-    #     np.array(train_loss)
-    #     np.array(val_loss)
-    #     eval = model.evaluate(X_test, y_test, batch_size=112)
-    #     result = model.predict(X_test).collect()
-    #     for res in result:
-    #         assert isinstance(res, np.ndarray)
-    #     result2 = model.predict(X_test, distributed=False)
-    #     result_classes = model.predict_classes(X_test)
-    #     shutil.rmtree(tmp_log_dir)
-    #     shutil.rmtree(tmp_checkpoint_path)

-    # todo when predict is ok
-    # def test_multiple_outputs_predict(self):
-    #     input = Input(shape=(32, ))
-    #     dense1 = Dense(10)(input)
-    #     dense2 = Dense(12)(input)
-    #     model = Model(input, [dense1, dense2])
-    #     data = np.random.random([10, 32])
-    #     result = model.predict(data).collect()
-    #     for res in result:
-    #         assert isinstance(res, list) and len(res) == 2
-    #     result2 = model.predict(data, distributed=False)
-    #     for res in result2:
-    #         assert isinstance(res, list) and len(res) == 2

-    # def test_training_without_validation(self):
-    #     model = Sequential()
-    #     model.add(Dense(4, activation="relu", input_shape=(10, )))
-    #     x = np.random.random([300, 10])
-    #     y = np.random.random([300, ])
-    #     model.compile(optimizer="sgd", loss="mae")
-    #     model.fit(x, y, batch_size=112, nb_epoch=2)
-    #     model.predict(x)
-    #
-    # def test_training_imageset(self):
-    #     images = []
-    #     labels = []
-    #     for i in range(0, 32):
-    #         features = np.random.uniform(0, 1, (200, 200, 3))
-    #         label = np.array([2])
-    #         images.append(features)
-    #         labels.append(label)
-    #     image_set = DistributedImageSet(self.sc.parallelize(images),
-    #                                     self.sc.parallelize(labels))
-    #
-    #     transformer = ChainedPreprocessing(
-    #         [ImageBytesToMat(), ImageResize(256, 256), ImageCenterCrop(224, 224),
-    #          ImageChannelNormalize(0.485, 0.456, 0.406, 0.229, 0.224, 0.225),
-    #          ImageMatToTensor(), ImageSetToSample(target_keys=['label'])])
-    #     data = image_set.transform(transformer)
-    #
-    #     model = Sequential()
-    #     model.add(Convolution2D(1, 5, 5, input_shape=(3, 224, 224)))
-    #     model.add(Reshape((1*220*220, )))
-    #     model.add(Dense(20, activation="softmax"))
-    #     model.compile(optimizer="sgd", loss="sparse_categorical_crossentropy", metrics=["accuracy"])
-    #     model.fit(data, batch_size=8, nb_epoch=2, validation_data=data)
-    #     result = model.predict(data, batch_per_thread=8)
-    #     accuracy = model.evaluate(data, batch_size=8)
+    def test_training_with_tensorboard_checkpoint_gradientclipping(self):
+        model = Sequential()
+        model.add(Dense(8, input_shape=(32, 32, )))
+        model.add(Flatten())
+        model.add(Dense(4, activation="softmax"))
+        X_train = np.random.random([200, 32, 32])
+        y_train = np.random.randint(4, size=(200, ))
+        X_test = np.random.random([40, 32, 32])
+        y_test = np.random.randint(4, size=(40, ))
+        model.compile(optimizer="adam",
+                      loss="sparse_categorical_crossentropy",
+                      metrics=['accuracy'])
+        tmp_log_dir = create_tmp_path()
+        tmp_checkpoint_path = create_tmp_path()
+        os.mkdir(tmp_checkpoint_path)
+        model.set_tensorboard(tmp_log_dir, "training_test")
+        model.set_checkpoint(tmp_checkpoint_path)
+        model.set_constant_gradient_clipping(0.01, 0.03)
+        model.fit(X_train, y_train, batch_size=112, nb_epoch=2, validation_data=(X_test, y_test))
+        model.clear_gradient_clipping()
+        model.fit(X_train, y_train, batch_size=112, nb_epoch=2, validation_data=(X_test, y_test))
+        model.set_gradient_clipping_by_l2_norm(0.2)
+        model.fit(X_train, y_train, batch_size=112, nb_epoch=2, validation_data=(X_test, y_test))
+        train_loss = model.get_train_summary("Loss")
+        val_loss = model.get_validation_summary("Loss")
+        np.array(train_loss)
+        np.array(val_loss)
+        eval = model.evaluate(X_test, y_test, batch_size=112)
+        result = model.predict(X_test).collect()
+        for res in result:
+            assert isinstance(res, np.ndarray)
+        result2 = model.predict(X_test, distributed=False)
+        result_classes = model.predict_classes(X_test)
+        shutil.rmtree(tmp_log_dir)
+        shutil.rmtree(tmp_checkpoint_path)

+    def test_multiple_outputs_predict(self):
+        input = Input(shape=(32, ))
+        dense1 = Dense(10)(input)
+        dense2 = Dense(12)(input)
+        model = Model(input, [dense1, dense2])
+        data = np.random.random([10, 32])
+        result = model.predict(data).collect()
+        for res in result:
+            assert isinstance(res, list) and len(res) == 2
+        result2 = model.predict(data, distributed=False)
+        for res in result2:
+            assert isinstance(res, list) and len(res) == 2

+    def test_training_without_validation(self):
+        model = Sequential()
+        model.add(Dense(4, activation="relu", input_shape=(10, )))
+        x = np.random.random([300, 10])
+        y = np.random.random([300, ])
+        model.compile(optimizer="sgd", loss="mae")
+        model.fit(x, y, batch_size=112, nb_epoch=2)
+        model.predict(x)

+    def test_training_imageset(self):
+        images = []
+        labels = []
+        for i in range(0, 32):
+            features = np.random.uniform(0, 1, (200, 200, 3))
+            label = np.array([2])
+            images.append(features)
+            labels.append(label)
+        image_set = DistributedImageSet(self.sc.parallelize(images),
+                                        self.sc.parallelize(labels))

+        transformer = ChainedPreprocessing(
+            [ImageBytesToMat(), ImageResize(256, 256), ImageCenterCrop(224, 224),
+             ImageChannelNormalize(0.485, 0.456, 0.406, 0.229, 0.224, 0.225),
+             ImageMatToTensor(), ImageSetToSample(target_keys=['label'])])
+        data = image_set.transform(transformer)

+        model = Sequential()
+        model.add(Convolution2D(1, 5, 5, input_shape=(3, 224, 224)))
+        model.add(Reshape((1*220*220, )))
+        model.add(Dense(20, activation="softmax"))
+        model.compile(optimizer="sgd", loss="sparse_categorical_crossentropy", metrics=["accuracy"])
+        model.fit(data, batch_size=8, nb_epoch=2, validation_data=data)
+        result = model.predict(data, batch_per_thread=8)
+        accuracy = model.evaluate(data, batch_size=8)

     def test_remove_batch(self):
         from bigdl.dllib.utils.utils import remove_batch
@@ -185,24 +183,23 @@ def test_keras_get_layer(self):
     #     ImageConfigure(
     #         pre_processor=ChainedPreprocessing([ImageResize(224, 224), ImageResize(224, 224)]))

-    # uncomment when migrating nn.keras to keras
-    # def test_model_summary_sequential(self):
-    #     model = Sequential()
-    #     model.add(LSTM(input_shape=(16, 32), output_dim=8, return_sequences=True))
-    #     model.add(Dropout(0.2))
-    #     model.add(LSTM(32, return_sequences=True))
-    #     model.add(Dropout(0.2))
-    #     model.add(LSTM(15, return_sequences=False))
-    #     model.add(Dropout(0.2))
-    #     model.add(Dense(output_dim=1))
-    #     model.summary()
-    #
-    # def test_model_summary_graph(self):
-    #     x = Input(shape=(8, ))
-    #     y = Dense(10)(x)
-    #     z = Dense(12)(y)
-    #     model = Model(x, z)
-    #     model.summary()
+    def test_model_summary_sequential(self):
+        model = Sequential()
+        model.add(LSTM(input_shape=(16, 32), output_dim=8, return_sequences=True))
+        model.add(Dropout(0.2))
+        model.add(LSTM(32, return_sequences=True))
+        model.add(Dropout(0.2))
+        model.add(LSTM(15, return_sequences=False))
+        model.add(Dropout(0.2))
+        model.add(Dense(output_dim=1))
+        model.summary()

+    def test_model_summary_graph(self):
+        x = Input(shape=(8, ))
+        y = Dense(10)(x)
+        z = Dense(12)(y)
+        model = Model(x, z)
+        model.summary()

     def test_word_embedding_without_word_index(self):
         model = Sequential()
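The largest block re-enabled above exercises BigDL's gradient-clipping switches between fit calls. A condensed, hedged sketch of that flow, using only calls that appear verbatim in the uncommented test; the Dense/Flatten import path is assumed to follow the bigdl.dllib.keras layout shown in test_net.py, and an initialized SparkContext/BigDL engine is assumed, as ZooTestCase provides in the suite:

    # Hedged sketch of the clipping workflow from
    # test_training_with_tensorboard_checkpoint_gradientclipping above.
    # Assumptions: Dense/Flatten live in bigdl.dllib.keras.layers and the
    # BigDL engine is already initialized.
    import numpy as np
    from bigdl.dllib.keras.models import Sequential
    from bigdl.dllib.keras.layers import Dense, Flatten

    model = Sequential()
    model.add(Dense(8, input_shape=(32, 32)))
    model.add(Flatten())
    model.add(Dense(4, activation="softmax"))
    model.compile(optimizer="adam",
                  loss="sparse_categorical_crossentropy",
                  metrics=["accuracy"])

    X_train = np.random.random([200, 32, 32])
    y_train = np.random.randint(4, size=(200,))

    # Clamp every gradient value into [0.01, 0.03] during training.
    model.set_constant_gradient_clipping(0.01, 0.03)
    model.fit(X_train, y_train, batch_size=112, nb_epoch=2)

    # Turn clipping off, then switch to global L2-norm clipping instead.
    model.clear_gradient_clipping()
    model.set_gradient_clipping_by_l2_norm(0.2)
    model.fit(X_train, y_train, batch_size=112, nb_epoch=2)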
