support tensorflow optimizer in keras model (#1205)
* support tensorflow optimizer in keras model

* fix style
yangw1234 authored Mar 11, 2019
1 parent b5fcaeb commit 1fcaaaf
Showing 4 changed files with 106 additions and 39 deletions.
17 changes: 17 additions & 0 deletions pyzoo/test/zoo/tfpark/test_tfpark_model.py
@@ -322,6 +322,23 @@ def variable_creator(**kwargs):
        optimizer = TFOptimizer.from_keras(model, dataset)
        optimizer.optimize()

    def test_tensorflow_optimizer(self):
        data = tf.keras.layers.Input(shape=[10])

        x = tf.keras.layers.Flatten()(data)
        x = tf.keras.layers.Dense(10, activation='relu')(x)
        predictions = tf.keras.layers.Dense(2, activation='softmax')(x)

        model = tf.keras.models.Model(inputs=data, outputs=predictions)
        model.compile(optimizer=tf.train.AdamOptimizer(),
                      loss='sparse_categorical_crossentropy',
                      metrics=['accuracy'])

        keras_model = KerasModel(model)

        x, y = self.create_training_data()

        keras_model.fit(x, y, batch_size=4, distributed=True)

if __name__ == "__main__":
pytest.main([__file__])
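Note: the create_training_data() helper is defined elsewhere in this test class and is not part of the diff. A minimal stand-in consistent with the model above (10-dimensional float inputs, integer labels for the two-class softmax trained with sparse_categorical_crossentropy) could look like the following; the name, sample count, and dtypes are illustrative assumptions, not the repository's implementation:

import numpy as np

def create_training_data(num_samples=32):
    # Hypothetical stand-in for the test helper: random 10-dim features and
    # integer labels in {0, 1}, matching Input(shape=[10]) and the 2-unit
    # softmax head trained with sparse_categorical_crossentropy.
    x = np.random.rand(num_samples, 10).astype(np.float32)
    y = np.random.randint(0, 2, size=(num_samples,)).astype(np.int32)
    return x, y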
128 changes: 89 additions & 39 deletions pyzoo/zoo/pipeline/api/net.py
@@ -590,47 +590,97 @@ def to_bigdl_optim_method(koptim_method):

The hunk nests the existing tf.keras.optimizers handling under an isinstance(koptim_method, koptimizers.Optimizer) check and adds a second branch that converts native tf.train optimizers. After the change, the body of to_bigdl_optim_method reads:

import tensorflow.keras.backend as K
import tensorflow.keras.optimizers as koptimizers
import bigdl.optim.optimizer as boptimizer
import tensorflow.train as tftrain
import tensorflow as tf
from tensorflow.python.keras.optimizers import TFOptimizer

if isinstance(koptim_method, TFOptimizer):
    koptim_method = koptim_method.optimizer

if isinstance(koptim_method, koptimizers.Optimizer):
    lr = float(K.eval(koptim_method.lr))
    decay = float(K.eval(koptim_method.decay))
    if isinstance(koptim_method, koptimizers.Adagrad):
        warnings.warn("For Adagrad, we don't support epsilon for now")
        return boptimizer.Adagrad(learningrate=lr,
                                  learningrate_decay=decay)
    elif isinstance(koptim_method, koptimizers.SGD):
        momentum = float(K.eval(koptim_method.momentum))
        return boptimizer.SGD(learningrate=lr,
                              learningrate_decay=decay,
                              momentum=momentum,
                              nesterov=koptim_method.nesterov)
    elif isinstance(koptim_method, koptimizers.Adam):
        beta1 = float(K.eval(koptim_method.beta_1))
        beta2 = float(K.eval(koptim_method.beta_2))
        return boptimizer.Adam(learningrate=lr,
                               learningrate_decay=decay,
                               beta1=beta1,
                               beta2=beta2,
                               epsilon=koptim_method.epsilon)
    elif isinstance(koptim_method, koptimizers.RMSprop):
        rho = float(K.eval(koptim_method.rho))
        return boptimizer.RMSprop(learningrate=lr,
                                  learningrate_decay=decay,
                                  decayrate=rho,
                                  epsilon=koptim_method.epsilon)
    elif isinstance(koptim_method, koptimizers.Adadelta):
        warnings.warn(
            "For Adadelta, we don't support learning rate and learning rate decay for now")
        return boptimizer.Adadelta(decayrate=koptim_method.rho,
                                   epsilon=koptim_method.epsilon)
    elif isinstance(koptim_method, koptimizers.Adamax):
        beta1 = float(K.eval(koptim_method.beta_1))
        beta2 = float(K.eval(koptim_method.beta_2))
        warnings.warn("For Adamax, we don't support learning rate decay for now")
        return boptimizer.Adamax(learningrate=lr,
                                 beta1=beta1,
                                 beta2=beta2,
                                 epsilon=koptim_method.epsilon)
elif isinstance(koptim_method, tftrain.Optimizer):
    def get_value(v):
        if isinstance(v, (tf.Tensor, tf.SparseTensor, tf.Variable)):
            return float(K.eval(v))
        else:
            return float(v)

    if isinstance(koptim_method, tftrain.GradientDescentOptimizer):
        lr = get_value(koptim_method._learning_rate)
        return boptimizer.SGD(learningrate=lr)
    elif isinstance(koptim_method, tftrain.MomentumOptimizer):
        lr = get_value(koptim_method._learning_rate)
        momentum = get_value(koptim_method._momentum)
        use_nesterov = koptim_method._use_nesterov
        return boptimizer.SGD(learningrate=lr, momentum=momentum, nesterov=use_nesterov)
    elif isinstance(koptim_method, tftrain.AdagradOptimizer):
        lr = get_value(koptim_method._learning_rate)
        return boptimizer.Adagrad(learningrate=lr)
    elif isinstance(koptim_method, tftrain.AdamOptimizer):
        lr = get_value(koptim_method._lr)
        beta1 = get_value(koptim_method._beta1)
        beta2 = get_value(koptim_method._beta2)
        epsilon = get_value(koptim_method._epsilon)
        return boptimizer.Adam(learningrate=lr, beta1=beta1, beta2=beta2, epsilon=epsilon)
    elif isinstance(koptim_method, tftrain.RMSPropOptimizer):
        lr = get_value(koptim_method._learning_rate)
        decay = get_value(koptim_method._decay)
        momentum = get_value(koptim_method._momentum)
        epsilon = get_value(koptim_method._epsilon)
        centered = get_value(koptim_method._centered)
        if momentum != 0.0 or centered:
            warnings.warn(
                "For RMSPropOptimizer, we don't support momentum and centered for now")
        return boptimizer.RMSprop(learningrate=lr,
                                  learningrate_decay=decay,
                                  epsilon=epsilon)
    elif isinstance(koptim_method, tftrain.AdadeltaOptimizer):
        lr = get_value(koptim_method._lr)
        rho = get_value(koptim_method._rho)
        epsilon = get_value(koptim_method._epsilon)
        warnings.warn(
            "For Adadelta, we don't support learning rate for now")
        return boptimizer.Adadelta(decayrate=rho, epsilon=epsilon)

raise ValueError("We don't support %s for now" % koptim_method)

def refresh_weights(self):
from zoo.util.tf import export_tf
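For a concrete sense of the new tf.train branch, a minimal sketch of the conversion it performs is shown below. It assumes to_bigdl_optim_method is reachable as a static helper on TFOptimizer in zoo.pipeline.api.net; the enclosing class is not visible in the hunk, so treat the call path as an assumption.

import tensorflow as tf
from zoo.pipeline.api.net import TFOptimizer

# A tf.train optimizer with plain-number hyperparameters; the MomentumOptimizer
# case maps onto BigDL's SGD with momentum and nesterov carried over.
tf_optim = tf.train.MomentumOptimizer(learning_rate=0.01, momentum=0.9, use_nesterov=True)
bigdl_optim = TFOptimizer.to_bigdl_optim_method(tf_optim)  # call path assumed, see note above
# Expected result: bigdl.optim.optimizer.SGD(learningrate=0.01, momentum=0.9, nesterov=True)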
