
Commit

add callZooFunc and change all callBigDlFunc to callZooFunc (intel-an…
qiuxin2012 authored Nov 26, 2019
1 parent fd7cdcd commit f93ddb3
Showing 19 changed files with 389 additions and 356 deletions.
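Only three of the nineteen changed files are rendered below, and the new callZooFunc helper itself (imported everywhere as zoo.common.utils.callZooFunc) is not among them. The following is therefore a hypothetical sketch, inferred only from the call sites in this diff, of what a drop-in replacement for callBigDlFunc could look like; the wrapper body and its error handling are assumptions, not the committed implementation.

# Hypothetical sketch -- NOT the committed zoo/common/utils.py.
# The call sites below only require that callZooFunc accept the same
# (bigdl_type, name, *args) signature as bigdl.util.common.callBigDlFunc.
from py4j.protocol import Py4JJavaError
from bigdl.util.common import callBigDlFunc


def callZooFunc(bigdl_type, name, *args):
    """Dispatch `name` with `args` to the JVM backend, mirroring callBigDlFunc."""
    # Owning the dispatcher lets Analytics Zoo call sites drop their direct
    # dependency on BigDL's helper and attach Zoo-specific handling in one place.
    try:
        return callBigDlFunc(bigdl_type, name, *args)
    except Py4JJavaError as e:
        # Surface the JVM-side cause in a plain Python exception.
        raise Exception("Error calling %s: %s" % (name, str(e)))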
32 changes: 17 additions & 15 deletions pyspark/bigdl/dllib/estimator/estimator.py
@@ -14,7 +14,8 @@
# limitations under the License.
#

from bigdl.util.common import JavaValue, callBigDlFunc
from bigdl.util.common import JavaValue
from zoo.common.utils import callZooFunc


class Estimator(JavaValue):
@@ -24,9 +25,10 @@ class Estimator(JavaValue):
Estimator wraps a model, and provides a uniform training, evaluation or prediction operation on
both local host and distributed Spark environments.
"""

def __init__(self, model, optim_methods=None, model_dir=None, jvalue=None, bigdl_type="float"):
self.bigdl_type = bigdl_type
self.value = jvalue if jvalue else callBigDlFunc(
self.value = jvalue if jvalue else callZooFunc(
bigdl_type, self.jvm_class_constructor(), model, optim_methods, model_dir)

def clear_gradient_clipping(self):
@@ -35,7 +37,7 @@ def clear_gradient_clipping(self):
In order to take effect, it needs to be called before fit.
:return:
"""
callBigDlFunc(self.bigdl_type, "clearGradientClipping")
callZooFunc(self.bigdl_type, "clearGradientClipping")

def set_constant_gradient_clipping(self, min, max):
"""
@@ -45,7 +47,7 @@
:param max: The maximum value to clip by.
:return:
"""
callBigDlFunc(self.bigdl_type, "setConstantGradientClipping", self.value, min, max)
callZooFunc(self.bigdl_type, "setConstantGradientClipping", self.value, min, max)

def set_l2_norm_gradient_clipping(self, clip_norm):
"""
@@ -54,7 +56,7 @@
:param clip_norm: Gradient L2-Norm threshold.
:return:
"""
callBigDlFunc(self.bigdl_type, "setGradientClippingByL2Norm", self.value, clip_norm)
callZooFunc(self.bigdl_type, "setGradientClippingByL2Norm", self.value, clip_norm)

def train(self, train_set, criterion, end_trigger=None, checkpoint_trigger=None,
validation_set=None, validation_method=None, batch_size=32):
@@ -73,9 +75,9 @@ def train(self, train_set, criterion, end_trigger=None, checkpoint_trigger=None,
:param batch_size:
:return: Estimator
"""
callBigDlFunc(self.bigdl_type, "estimatorTrain", self.value, train_set,
criterion, end_trigger, checkpoint_trigger, validation_set,
validation_method, batch_size)
callZooFunc(self.bigdl_type, "estimatorTrain", self.value, train_set,
criterion, end_trigger, checkpoint_trigger, validation_set,
validation_method, batch_size)

def train_imagefeature(self, train_set, criterion, end_trigger=None, checkpoint_trigger=None,
validation_set=None, validation_method=None, batch_size=32):
@@ -94,9 +96,9 @@ def train_imagefeature(self, train_set, criterion, end_trigger=None, checkpoint_
:param batch_size: Batch size
:return:
"""
callBigDlFunc(self.bigdl_type, "estimatorTrainImageFeature", self.value, train_set,
criterion, end_trigger, checkpoint_trigger, validation_set,
validation_method, batch_size)
callZooFunc(self.bigdl_type, "estimatorTrainImageFeature", self.value, train_set,
criterion, end_trigger, checkpoint_trigger, validation_set,
validation_method, batch_size)

def evaluate(self, validation_set, validation_method, batch_size=32):
"""
@@ -106,8 +108,8 @@ def evaluate(self, validation_set, validation_method, batch_size=32):
:param batch_size: batch size
:return: validation results
"""
callBigDlFunc(self.bigdl_type, "estimatorEvaluate", self.value,
validation_set, validation_method, batch_size)
callZooFunc(self.bigdl_type, "estimatorEvaluate", self.value,
validation_set, validation_method, batch_size)

def evaluate_imagefeature(self, validation_set, validation_method, batch_size=32):
"""
@@ -117,5 +119,5 @@ def evaluate_imagefeature(self, validation_set, validation_method, batch_size=32):
:param batch_size: batch size
:return: validation results
"""
callBigDlFunc(self.bigdl_type, "estimatorEvaluateImageFeature", self.value,
validation_set, validation_method, batch_size)
callZooFunc(self.bigdl_type, "estimatorEvaluateImageFeature", self.value,
validation_set, validation_method, batch_size)
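
For orientation, a hypothetical usage sketch of the Estimator API changed above; the model, criterion, optimizer, FeatureSets and the import path are placeholders or assumptions (building the FeatureSets requires a running SparkContext), and only the method names and signatures come from this file.

# Hypothetical usage of the Estimator wrapper above; placeholder names are
# assumptions, not part of this commit.
from bigdl.nn.criterion import ClassNLLCriterion
from bigdl.optim.optimizer import SGD, MaxEpoch, EveryEpoch, Top1Accuracy
from zoo.pipeline.estimator import Estimator  # assumed import path

# `model` is any BigDL model; `train_set` / `val_set` are FeatureSets built
# elsewhere in the application (placeholders here).
est = Estimator(model, optim_methods=SGD(learningrate=0.01), model_dir="/tmp/ckpts")
est.set_constant_gradient_clipping(-5.0, 5.0)   # now dispatched via callZooFunc
est.train(train_set, ClassNLLCriterion(),
          end_trigger=MaxEpoch(10),
          checkpoint_trigger=EveryEpoch(),
          validation_set=val_set,
          validation_method=[Top1Accuracy()],
          batch_size=128)
metrics = est.evaluate(val_set, [Top1Accuracy()], batch_size=128)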
108 changes: 55 additions & 53 deletions pyspark/bigdl/dllib/inference/inference_model.py
@@ -14,7 +14,8 @@
# limitations under the License.
#

from bigdl.util.common import JavaValue, callBigDlFunc
from bigdl.util.common import JavaValue
from zoo.common.utils import callZooFunc
from bigdl.nn.layer import Layer
from zoo.pipeline.api.keras.engine import KerasNet

@@ -28,6 +29,7 @@ class InferenceModel(JavaValue):
# Arguments
supported_concurrent_num: Int. How many concurrent threads to invoke. Default is 1.
"""

def __init__(self, supported_concurrent_num=1, bigdl_type="float"):
super(InferenceModel, self).__init__(None, bigdl_type, supported_concurrent_num)

@@ -38,8 +40,8 @@ def load(self, model_path, weight_path=None):
:param model_path: String. The file path to the model.
:param weight_path: String. The file path to the weights if any. Default is None.
"""
callBigDlFunc(self.bigdl_type, "inferenceModelLoad",
self.value, model_path, weight_path)
callZooFunc(self.bigdl_type, "inferenceModelLoad",
self.value, model_path, weight_path)

def load_caffe(self, model_path, weight_path):
"""
@@ -48,8 +50,8 @@ def load_caffe(self, model_path, weight_path):
:param model_path: String. The file path to the prototxt file.
:param weight_path: String. The file path to the Caffe model.
"""
callBigDlFunc(self.bigdl_type, "inferenceModelLoadCaffe",
self.value, model_path, weight_path)
callZooFunc(self.bigdl_type, "inferenceModelLoadCaffe",
self.value, model_path, weight_path)

def load_openvino(self, model_path, weight_path, batch_size=0):
"""
@@ -59,8 +61,8 @@ def load_openvino(self, model_path, weight_path, batch_size=0):
:param weight_path: String. The file path to the OpenVINO IR bin file.
:param batch_size: Int. Set batch Size, default is 0 (use default batch size).
"""
callBigDlFunc(self.bigdl_type, "inferenceModelLoadOpenVINO",
self.value, model_path, weight_path, batch_size)
callZooFunc(self.bigdl_type, "inferenceModelLoadOpenVINO",
self.value, model_path, weight_path, batch_size)

def load_tf(self, model_path, backend="tensorflow",
intra_op_parallelism_threads=1, inter_op_parallelism_threads=1,
@@ -94,24 +96,24 @@ def load_tf(self, model_path, backend="tensorflow",
"""
backend = backend.lower()
if backend == "tensorflow" or backend == "tf":
callBigDlFunc(self.bigdl_type, "inferenceModelTensorFlowLoadTF",
self.value, model_path, intra_op_parallelism_threads,
inter_op_parallelism_threads, use_per_session_threads)
callZooFunc(self.bigdl_type, "inferenceModelTensorFlowLoadTF",
self.value, model_path, intra_op_parallelism_threads,
inter_op_parallelism_threads, use_per_session_threads)
elif backend == "openvino" or backend == "ov":
if model_type:
if ov_pipeline_config_path:
callBigDlFunc(self.bigdl_type, "inferenceModelOpenVINOLoadTF",
self.value, model_path, model_type, ov_pipeline_config_path, None)
callZooFunc(self.bigdl_type, "inferenceModelOpenVINOLoadTF",
self.value, model_path, model_type, ov_pipeline_config_path, None)
else:
callBigDlFunc(self.bigdl_type, "inferenceModelOpenVINOLoadTF",
self.value, model_path, model_type)
callZooFunc(self.bigdl_type, "inferenceModelOpenVINOLoadTF",
self.value, model_path, model_type)
else:
if ov_pipeline_config_path is None and ov_extensions_config_path is None:
raise Exception("For openvino backend, you must provide either model_type or "
"both pipeline_config_path and extensions_config_path")
callBigDlFunc(self.bigdl_type, "inferenceModelOpenVINOLoadTF",
self.value, model_path, ov_pipeline_config_path,
ov_extensions_config_path)
callZooFunc(self.bigdl_type, "inferenceModelOpenVINOLoadTF",
self.value, model_path, ov_pipeline_config_path,
ov_extensions_config_path)
else:
raise ValueError("Currently only tensorflow and openvino are supported as backend")

@@ -129,13 +131,13 @@ def load_tf_object_detection_as_openvino(self,
:param extensions_config_path: String, the path of the extensions configure file
:return:
"""
callBigDlFunc(self.bigdl_type,
"inferenceModelOpenVINOLoadTF",
self.value,
model_path,
object_detection_model_type,
pipeline_config_path,
extensions_config_path)
callZooFunc(self.bigdl_type,
"inferenceModelOpenVINOLoadTF",
self.value,
model_path,
object_detection_model_type,
pipeline_config_path,
extensions_config_path)

def load_tf_image_classification_as_openvino(self,
model_path,
@@ -159,16 +161,16 @@ def load_tf_image_classification_as_openvino(self,
:param scale: Float, the scale value, to be used for the input image per channel.
:return:
"""
callBigDlFunc(self.bigdl_type,
"inferenceModelOpenVINOLoadTF",
self.value,
model_path,
image_classification_model_type,
checkpoint_path,
input_shape,
if_reverse_input_channels,
[float(value) for value in mean_values],
float(scale))
callZooFunc(self.bigdl_type,
"inferenceModelOpenVINOLoadTF",
self.value,
model_path,
image_classification_model_type,
checkpoint_path,
input_shape,
if_reverse_input_channels,
[float(value) for value in mean_values],
float(scale))

def load_tf_as_calibrated_openvino(self,
model_path,
@@ -207,20 +209,20 @@ def load_tf_as_calibrated_openvino(self,
please also refer to https://github.com/opencv/opencv.
:return:
"""
callBigDlFunc(self.bigdl_type,
"inferenceModelOpenVINOLoadTFAsCalibratedOpenVINO",
self.value,
model_path,
model_type,
checkpoint_path,
input_shape,
if_reverse_input_channels,
[float(value) for value in mean_values],
float(scale),
network_type,
validation_file_path,
subset,
opencv_lib_path)
callZooFunc(self.bigdl_type,
"inferenceModelOpenVINOLoadTFAsCalibratedOpenVINO",
self.value,
model_path,
model_type,
checkpoint_path,
input_shape,
if_reverse_input_channels,
[float(value) for value in mean_values],
float(scale),
network_type,
validation_file_path,
subset,
opencv_lib_path)

def predict(self, inputs):
"""
@@ -229,9 +231,9 @@ def predict(self, inputs):
:param inputs: A numpy array or a list of numpy arrays or JTensor or a list of JTensors.
"""
jinputs, input_is_table = Layer.check_input(inputs)
output = callBigDlFunc(self.bigdl_type,
"inferenceModelPredict",
self.value,
jinputs,
input_is_table)
output = callZooFunc(self.bigdl_type,
"inferenceModelPredict",
self.value,
jinputs,
input_is_table)
return KerasNet.convert_output(output)
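
A hypothetical usage sketch of the InferenceModel API changed above; the model path and input shape are placeholders and the import path is assumed rather than taken from this diff.

# Hypothetical usage of InferenceModel above; path and shape are placeholders.
import numpy as np
from zoo.pipeline.inference import InferenceModel  # assumed import path

model = InferenceModel(supported_concurrent_num=4)
model.load("/path/to/model.bigdl")                 # every load_* call now goes through callZooFunc
batch = np.random.rand(4, 3, 224, 224).astype("float32")
predictions = model.predict(batch)                 # "inferenceModelPredict" via callZooFunc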
37 changes: 19 additions & 18 deletions pyspark/bigdl/dllib/inference/net/graph_net.py
@@ -22,6 +22,7 @@
from zoo.pipeline.api.keras.base import ZooKerasLayer
from zoo.pipeline.api.keras.utils import *
from bigdl.nn.layer import Layer
from zoo.common.utils import callZooFunc

if sys.version >= '3':
long = int
@@ -50,10 +51,10 @@ def predict(self, x, batch_per_thread=4, distributed=True):
Default is True. In local mode, x must be a Numpy array.
"""
if isinstance(x, ImageSet) or isinstance(x, TextSet):
results = callBigDlFunc(self.bigdl_type, "zooPredict",
self.value,
x,
batch_per_thread)
results = callZooFunc(self.bigdl_type, "zooPredict",
self.value,
x,
batch_per_thread)
return ImageSet(results) if isinstance(x, ImageSet) else TextSet(results)
if distributed:
if isinstance(x, np.ndarray):
@@ -62,29 +63,29 @@ def predict(self, x, batch_per_thread=4, distributed=True):
data_rdd = x
else:
raise TypeError("Unsupported prediction data type: %s" % type(x))
results = callBigDlFunc(self.bigdl_type, "zooPredict",
self.value,
data_rdd,
batch_per_thread)
results = callZooFunc(self.bigdl_type, "zooPredict",
self.value,
data_rdd,
batch_per_thread)
return results.map(lambda result: Layer.convert_output(result))
else:
if isinstance(x, np.ndarray) or isinstance(x, list):
results = callBigDlFunc(self.bigdl_type, "zooPredict",
self.value,
self._to_jtensors(x),
batch_per_thread)
results = callZooFunc(self.bigdl_type, "zooPredict",
self.value,
self._to_jtensors(x),
batch_per_thread)
return [Layer.convert_output(result) for result in results]
else:
raise TypeError("Unsupported prediction data type: %s" % type(x))

def flattened_layers(self, include_container=False):
jlayers = callBigDlFunc(self.bigdl_type, "getFlattenSubModules", self, include_container)
jlayers = callZooFunc(self.bigdl_type, "getFlattenSubModules", self, include_container)
layers = [Layer.of(jlayer) for jlayer in jlayers]
return layers

@property
def layers(self):
jlayers = callBigDlFunc(self.bigdl_type, "getSubModules", self)
jlayers = callZooFunc(self.bigdl_type, "getSubModules", self)
layers = [Layer.of(jlayer) for jlayer in jlayers]
return layers

@@ -107,7 +108,7 @@ def new_graph(self, outputs):
:param outputs: A list of nodes specified
:return: A graph model
"""
value = callBigDlFunc(self.bigdl_type, "newGraph", self.value, outputs)
value = callZooFunc(self.bigdl_type, "newGraph", self.value, outputs)
return self.from_jvalue(value, self.bigdl_type)

def freeze_up_to(self, names):
Expand All @@ -118,7 +119,7 @@ def freeze_up_to(self, names):
:param names: A list of module names to be frozen
:return: current graph model
"""
callBigDlFunc(self.bigdl_type, "freezeUpTo", self.value, names)
callZooFunc(self.bigdl_type, "freezeUpTo", self.value, names)

def unfreeze(self, names=None):
"""
@@ -129,8 +130,8 @@ def unfreeze(self, names=None):
:param names: list of module names to be unfrozen. Default is None.
:return: current graph model
"""
callBigDlFunc(self.bigdl_type, "unFreeze", self.value, names)
callZooFunc(self.bigdl_type, "unFreeze", self.value, names)

def to_keras(self):
value = callBigDlFunc(self.bigdl_type, "netToKeras", self.value)
value = callZooFunc(self.bigdl_type, "netToKeras", self.value)
return ZooKerasLayer.of(value, self.bigdl_type)
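
A hypothetical fine-tuning sketch for the GraphNet methods changed above; the loader, node and layer names, and the input ImageSet are assumptions, not taken from this diff.

# Hypothetical fine-tuning flow for GraphNet above; loader, node names and
# `image_set` are assumptions, not part of this commit.
from zoo.pipeline.api.net import Net  # assumed loader that returns a GraphNet

full_net = Net.load("/path/to/model.bigdl")      # placeholder path
backbone = full_net.new_graph(["pool5"])         # cut the graph at node "pool5" ("newGraph")
backbone.freeze_up_to(["pool4"])                 # freeze layers up to "pool4" ("freezeUpTo")
features = backbone.predict(image_set, batch_per_thread=8, distributed=True)
backbone.unfreeze()                              # later, make all layers trainable again ("unFreeze")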