diff --git a/examples/mnist_cnn.py b/examples/mnist_cnn.py
index 25aa07b6a58..22a95048364 100644
--- a/examples/mnist_cnn.py
+++ b/examples/mnist_cnn.py
@@ -6,12 +6,12 @@
 '''
 
 from __future__ import print_function
-import keras
-from keras.datasets import mnist
-from keras.models import Sequential
-from keras.layers import Dense, Dropout, Flatten
-from keras.layers import Conv2D, MaxPooling2D
-from keras import backend as K
+import tensorflow.keras
+from tensorflow.keras.datasets import mnist
+from tensorflow.keras.models import Sequential
+from tensorflow.keras.layers import Dense, Dropout, Flatten
+from tensorflow.keras.layers import Conv2D, MaxPooling2D
+from tensorflow.keras import backend as K
 
 batch_size = 128
 num_classes = 10
@@ -41,8 +41,8 @@
 print(x_test.shape[0], 'test samples')
 
 # convert class vectors to binary class matrices
-y_train = keras.utils.to_categorical(y_train, num_classes)
-y_test = keras.utils.to_categorical(y_test, num_classes)
+y_train = tensorflow.keras.utils.to_categorical(y_train, num_classes)
+y_test = tensorflow.keras.utils.to_categorical(y_test, num_classes)
 
 model = Sequential()
 model.add(Conv2D(32, kernel_size=(3, 3),
@@ -56,8 +56,8 @@
 model.add(Dropout(0.5))
 model.add(Dense(num_classes, activation='softmax'))
 
-model.compile(loss=keras.losses.categorical_crossentropy,
-              optimizer=keras.optimizers.Adadelta(),
+model.compile(loss=tensorflow.keras.losses.categorical_crossentropy,
+              optimizer=tensorflow.keras.optimizers.Adadelta(),
               metrics=['accuracy'])
 
 model.fit(x_train, y_train,