diff --git a/docs/articles_en/documentation/openvino-ir-format/operation-sets/operation-specs/movement/scatter-nd-update-15.rst b/docs/articles_en/documentation/openvino-ir-format/operation-sets/operation-specs/movement/scatter-nd-update-15.rst
index 283779d6368caa..794fe707695ff8 100644
--- a/docs/articles_en/documentation/openvino-ir-format/operation-sets/operation-specs/movement/scatter-nd-update-15.rst
+++ b/docs/articles_en/documentation/openvino-ir-format/operation-sets/operation-specs/movement/scatter-nd-update-15.rst
@@ -16,7 +16,7 @@ ScatterNDUpdate
**Detailed description**: The operation produces a copy of ``data`` tensor and updates its value using logic from ``reduction`` attribute, using values specified
by ``updates`` at specific index positions specified by ``indices``. The output shape is the same as the shape of ``data``.
-Input ``indices`` can contain duplicated index values, however, in case when *reduction* is set to ``none``, only last update for given duplicated index is used.
+If multiple indices point to the same output location then the order of updating the values is undefined.
The last dimension of ``indices`` corresponds to indices into elements if ``indices.shape[-1]`` = ``data.shape.rank`` or slices
if ``indices.shape[-1]`` < ``data.shape.rank``.
@@ -41,8 +41,9 @@ Operator ScatterNDUpdate-15 is an equivalent to following NumPy snippet:
elif reduction == "min":
func = min
out = np.copy(data)
+ # Order of loop iteration is undefined.
for ndidx in np.ndindex(indices.shape[:-1]):
- out[indices[ndidx]] = func(out[indices[ndidx]], updates[ndidx])
+ out[tuple(indices[ndidx])] = func(out[tuple(indices[ndidx])], updates[ndidx])
return out
Example 1 that shows simple case of update with *reduction* set to ``none``.:
@@ -52,7 +53,7 @@ Example 1 that shows simple case of update with *reduction* set to ``none``.:
data = [1, 2, 3, 4, 5, 6, 7, 8]
indices = [[4], [3], [1], [7], [-2], [-4]]
updates = [9, 10, 11, 12, 13, 14]
- output = [1, 11, 3, 10, 14, 6, 13, 12]
+ output = [1, 11, 3, 10, 9, 6, 13, 12]  # output[4] may be 9 or 14 — index 4 is duplicated and the update order is undefined
Example that shows update of two slices of ``4x4`` shape in ``data``, with *reduction* set to ``none``:
@@ -87,7 +88,7 @@ Example that shows update of two slices of ``4x4`` shape in ``data``, with *redu
* **1**: ``data`` tensor of arbitrary rank ``r`` >= 1 and of type *T*. **Required.**
-* **2**: ``indices`` tensor with indices of arbitrary rank ``q`` >= 1 and of type *T_IND*. All index values ``i_j`` in index entry ``(i_0, i_1, ...,i_k)`` (where ``k = indices.shape[-1]``) must be within bounds ``[-s_j, s_j - 1]`` where ``s_j = data.shape[j]``. ``k`` must be at most ``r``. If multiple indices point to the same output location then values will be updated in order of their occurrence. Negative value of index means reverse indexing and will be normalized to value ``len(data.shape[j] + index)``. If an index points to non-existing element then exception is raised. **Required.**
+* **2**: ``indices`` tensor with indices of arbitrary rank ``q`` >= 1 and of type *T_IND*. All index values ``i_j`` in index entry ``(i_0, i_1, ...,i_k)`` (where ``k = indices.shape[-1]``) must be within bounds ``[-s_j, s_j - 1]`` where ``s_j = data.shape[j]``. ``k`` must be at most ``r``. If multiple indices point to the same output location then the order of updating the values is undefined. Negative value of index means reverse indexing and will be normalized to value ``data.shape[j] + index``. If an index points to non-existing element then exception is raised. **Required.**
* **3**: ``updates`` tensor of rank ``r - indices.shape[-1] + q - 1`` of type *T*. If expected ``updates`` rank is 0D it can be a tensor with single element. **Required.**
@@ -121,7 +122,7 @@ Example that shows update of two slices of ``4x4`` shape in ``data``, with *redu
diff --git a/src/bindings/js/node/include/core_wrap.hpp b/src/bindings/js/node/include/core_wrap.hpp
index 169812234f901a..29149aaf157ea8 100644
--- a/src/bindings/js/node/include/core_wrap.hpp
+++ b/src/bindings/js/node/include/core_wrap.hpp
@@ -91,6 +91,9 @@ class CoreWrap : public Napi::ObjectWrap {
/** @brief Imports a compiled model from the previously exported one. */
Napi::Value import_model(const Napi::CallbackInfo& info);
+ /** @brief Implements Core.importModel() defined in ../lib/addon.ts. */
+ Napi::Value import_model_async(const Napi::CallbackInfo& info);
+
/** @brief Returns devices available for inference. */
Napi::Value get_available_devices(const Napi::CallbackInfo& info);
@@ -99,6 +102,7 @@ class CoreWrap : public Napi::ObjectWrap {
private:
ov::Core _core;
+ std::mutex _mutex;
};
struct TsfnContextModel {
@@ -127,6 +131,20 @@ struct TsfnContextPath {
std::map _config = {};
};
+struct ImportModelContext {
+ ImportModelContext(Napi::Env env, ov::Core& core) : deferred(Napi::Promise::Deferred::New(env)), _core{core} {};
+ std::thread nativeThread;
+
+ Napi::Promise::Deferred deferred;
+ Napi::ThreadSafeFunction tsfn;
+
+ std::stringstream _stream;
+ std::string _device;
+ std::map _config = {};
+ ov::Core& _core;
+ ov::CompiledModel _compiled_model;
+};
+
void FinalizerCallbackModel(Napi::Env env, void* finalizeData, TsfnContextModel* context);
void FinalizerCallbackPath(Napi::Env env, void* finalizeData, TsfnContextPath* context);
void compileModelThreadModel(TsfnContextModel* context);
diff --git a/src/bindings/js/node/include/helper.hpp b/src/bindings/js/node/include/helper.hpp
index 15132cbe5455f4..201dd3703ad3f0 100644
--- a/src/bindings/js/node/include/helper.hpp
+++ b/src/bindings/js/node/include/helper.hpp
@@ -105,9 +105,17 @@ Napi::Array cpp_to_js(const Napi::CallbackInfo& i
template <>
Napi::Array cpp_to_js(const Napi::CallbackInfo& info, const ov::Dimension dim);
+/**
+ * @brief Creates JavaScript Model and wraps ov::Model inside of it.
+ * @return Javascript Model as Napi::Object. (Not ModelWrap object)
+ */
+Napi::Object cpp_to_js(const Napi::Env& env, std::shared_ptr model);
+
template <>
Napi::Boolean cpp_to_js(const Napi::CallbackInfo& info, const bool value);
+Napi::Object cpp_to_js(const Napi::Env& env, const ov::CompiledModel& compiled_model);
+
/** @brief Takes Napi::Value and parse Napi::Array or Napi::Object to ov::TensorVector. */
ov::TensorVector parse_input_data(const Napi::Value& input);
diff --git a/src/bindings/js/node/include/model_wrap.hpp b/src/bindings/js/node/include/model_wrap.hpp
index 38fe9835d9378d..cda9ff8b6ee65a 100644
--- a/src/bindings/js/node/include/model_wrap.hpp
+++ b/src/bindings/js/node/include/model_wrap.hpp
@@ -23,13 +23,6 @@ class ModelWrap : public Napi::ObjectWrap {
static Napi::Function get_class(Napi::Env env);
void set_model(const std::shared_ptr& model);
- /**
- * @brief Creates JavaScript Model object and wraps inside of it ov::Model object.
- * @param env The environment in which to construct a JavaScript object.
- * @param model a pointer to ov::Model to wrap.
- * @return Javascript Model as Napi::Object. (Not ModelWrap object)
- */
- static Napi::Object wrap(Napi::Env env, std::shared_ptr model);
/** @return Napi::String containing a model name. */
Napi::Value get_name(const Napi::CallbackInfo& info);
@@ -91,7 +84,7 @@ class ModelWrap : public Napi::ObjectWrap {
* @return number indicating the quantity of outputs for the model
*/
Napi::Value get_output_size(const Napi::CallbackInfo& info);
-
+
/**
* @brief Sets a friendly name for a model.
* @param info Contains information about the environment and passed arguments
diff --git a/src/bindings/js/node/include/read_model_args.hpp b/src/bindings/js/node/include/read_model_args.hpp
index fd6dc35d7334a4..4eb7e39885904b 100644
--- a/src/bindings/js/node/include/read_model_args.hpp
+++ b/src/bindings/js/node/include/read_model_args.hpp
@@ -2,6 +2,8 @@
#include
+#include "node/include/helper.hpp"
+#include "node/include/type_validation.hpp"
#include "openvino/runtime/core.hpp"
/**
@@ -15,41 +17,29 @@ struct ReadModelArgs {
ReadModelArgs() {}
ReadModelArgs(const Napi::CallbackInfo& info) {
- if (!is_valid_read_model_input(info))
- throw std::runtime_error("Invalid arguments of read model function");
-
- const size_t argsLength = info.Length();
- std::shared_ptr model;
-
- if (info[0].IsBuffer()) {
- Napi::Buffer model_data = info[0].As>();
- model_str = std::string(reinterpret_cast(model_data.Data()), model_data.Length());
-
- if (argsLength == 2) {
- Napi::Buffer weights = info[1].As>();
- const uint8_t* bin = reinterpret_cast(weights.Data());
-
- size_t bin_size = weights.Length();
- weight_tensor = ov::Tensor(ov::element::Type_t::u8, {bin_size});
- std::memcpy(weight_tensor.data(), bin, bin_size);
- }
- else {
- weight_tensor = ov::Tensor(ov::element::Type_t::u8, {0});
- }
+ std::vector allowed_signatures;
+
+ if (ov::js::validate(info, allowed_signatures)) {
+ model_path = info[0].ToString();
+ } else if (ov::js::validate(info, allowed_signatures)) {
+ model_path = info[0].ToString();
+ bin_path = info[1].ToString();
+ } else if (ov::js::validate>(info, allowed_signatures)) {
+ model_str = buffer_to_string(info[0]);
+ weight_tensor = ov::Tensor(ov::element::Type_t::u8, {0});
+ } else if (ov::js::validate, Napi::Buffer>(info, allowed_signatures)) {
+ model_str = buffer_to_string(info[0]);
+ Napi::Buffer weights = info[1].As>();
+ const uint8_t* bin = reinterpret_cast(weights.Data());
+
+ size_t bin_size = weights.Length();
+ weight_tensor = ov::Tensor(ov::element::Type_t::u8, {bin_size});
+ std::memcpy(weight_tensor.data(), bin, bin_size);
+ } else if (ov::js::validate(info, allowed_signatures)) {
+ model_str = info[0].ToString();
+ weight_tensor = cast_to_tensor(info, 1);
} else {
- model_path = std::string(info[0].ToString());
-
- if (argsLength == 2) bin_path = info[1].ToString();
+ OPENVINO_THROW("'readModel'", ov::js::get_parameters_error_msg(info, allowed_signatures));
}
}
-
- bool is_valid_read_model_input(const Napi::CallbackInfo& info) {
- const size_t argsLength = info.Length();
- const size_t is_buffers_input = info[0].IsBuffer()
- && (argsLength == 1 || info[1].IsBuffer());
-
- if (is_buffers_input) return true;
-
- return info[0].IsString() && (argsLength == 1 || info[1].IsString());
- }
};
diff --git a/src/bindings/js/node/lib/addon.ts b/src/bindings/js/node/lib/addon.ts
index 5a48ce963b0e45..b5909ea9f3ae03 100644
--- a/src/bindings/js/node/lib/addon.ts
+++ b/src/bindings/js/node/lib/addon.ts
@@ -123,7 +123,7 @@ interface Core {
},
};
/**
- * It imports a previously exported compiled model.
+ * Asynchronously imports a previously exported compiled model.
* @param modelStream The input stream that contains a model,
* previously exported with the {@link CompiledModel.exportModelSync} method.
* @param device The name of a device, for which you import a compiled model.
@@ -132,6 +132,15 @@ interface Core {
* @param config An object with the key-value pairs
* (property name, property value): relevant only for this load operation.
*/
+ importModel(
+ modelStream: Buffer,
+ device: string,
+ config?: { [key: string]: string | number | boolean }
+ ): Promise;
+ /**
+ * A synchronous version of {@link Core.importModel}.
+ * It imports a previously exported compiled model.
+ */
importModelSync(
modelStream: Buffer,
device: string,
@@ -151,7 +160,14 @@ interface Core {
* For the TFLite format (*.tflite), the weights parameter is not used.
*/
readModel(modelPath: string, weightsPath?: string): Promise;
-
+ /**
+ * It reads models from IR / ONNX / PDPD / TF and TFLite formats.
+ * @param model A string with model in IR / ONNX / PDPD / TF
+ * and TFLite format.
+ * @param weights Tensor with weights. Reading ONNX / PDPD / TF
+ * and TFLite models doesn’t support loading weights from weights tensors.
+ */
+ readModel(model: string, weights: Tensor): Promise;
/**
* It reads models from the IR / ONNX / PDPD / TF and TFLite formats.
* @param modelBuffer Binary data with a model
@@ -165,6 +181,11 @@ interface Core {
* It reads models from the IR / ONNX / PDPD / TF and TFLite formats.
*/
readModelSync(modelPath: string, weightsPath?: string): Model;
+ /**
+ * A synchronous version of {@link Core.readModel}.
+ * It reads models from the IR / ONNX / PDPD / TF and TFLite formats.
+ */
+ readModelSync(model: string, weights: Tensor): Model;
/**
* A synchronous version of {@link Core.readModel}.
* It reads models from the IR / ONNX / PDPD / TF and TFLite formats.
diff --git a/src/bindings/js/node/src/async_reader.cpp b/src/bindings/js/node/src/async_reader.cpp
index 8faaf47ab0a105..bf8ca8105b4c0f 100644
--- a/src/bindings/js/node/src/async_reader.cpp
+++ b/src/bindings/js/node/src/async_reader.cpp
@@ -13,14 +13,11 @@ void ReaderWorker::Execute() {
}
void ReaderWorker::OnOK() {
- Napi::HandleScope scope(Env());
- Napi::Object mw = ModelWrap::get_class(Env()).New({});
- ModelWrap* m = Napi::ObjectWrap::Unwrap(mw);
- m->set_model(_model);
+ auto model = cpp_to_js(Env(), _model);
delete _args;
- _deferred.Resolve(mw);
+ _deferred.Resolve(model);
}
void ReaderWorker::OnError(Napi::Error const& error) {
diff --git a/src/bindings/js/node/src/core_wrap.cpp b/src/bindings/js/node/src/core_wrap.cpp
index 33350056fc443c..20422b7d683d3d 100644
--- a/src/bindings/js/node/src/core_wrap.cpp
+++ b/src/bindings/js/node/src/core_wrap.cpp
@@ -51,6 +51,7 @@ Napi::Function CoreWrap::get_class(Napi::Env env) {
InstanceMethod("compileModelSync", &CoreWrap::compile_model_sync_dispatch),
InstanceMethod("compileModel", &CoreWrap::compile_model_async),
InstanceMethod("getAvailableDevices", &CoreWrap::get_available_devices),
+ InstanceMethod("importModel", &CoreWrap::import_model_async),
InstanceMethod("importModelSync", &CoreWrap::import_model),
InstanceMethod("getAvailableDevices", &CoreWrap::get_available_devices),
InstanceMethod("getVersions", &CoreWrap::get_versions),
@@ -85,11 +86,13 @@ Napi::Value CoreWrap::read_model_sync(const Napi::CallbackInfo& info) {
model = _core.read_model(model_str, weight_tensor);
} else if (ov::js::validate(info, allowed_signatures)) {
model = _core.read_model(info[0].ToString());
+ } else if (ov::js::validate(info, allowed_signatures)) {
+ model = _core.read_model(info[0].ToString(), cast_to_tensor(info, 1));
} else {
OPENVINO_THROW("'readModelSync'", ov::js::get_parameters_error_msg(info, allowed_signatures));
}
- return ModelWrap::wrap(info.Env(), model);
+ return cpp_to_js(info.Env(), model);
} catch (std::runtime_error& err) {
reportError(info.Env(), err.what());
@@ -350,6 +353,66 @@ Napi::Value CoreWrap::import_model(const Napi::CallbackInfo& info) {
}
}
+void ImportModelFinalizer(Napi::Env env, void* finalizeData, ImportModelContext* context) {
+ context->nativeThread.join();
+ delete context;
+};
+
+void importModelThread(ImportModelContext* context, std::mutex& mutex) {
+ // Imports model without blocking the main thread.
+ {
+ const std::lock_guard lock(mutex);
+ context->_compiled_model = context->_core.import_model(context->_stream, context->_device, context->_config);
+ }
+
+ // Callback to return to JS the results of core.import_model()
+ auto callback = [](Napi::Env env, Napi::Function, ImportModelContext* context) {
+ context->deferred.Resolve(cpp_to_js(env, context->_compiled_model));
+ };
+
+ // Addon's main thread will safely invoke the JS callback function on the behalf of the additional thread.
+ context->tsfn.BlockingCall(context, callback);
+ context->tsfn.Release();
+}
+
+Napi::Value CoreWrap::import_model_async(const Napi::CallbackInfo& info) {
+ const auto& env = info.Env();
+ std::vector allowed_signatures;
+
+ try {
+ if (ov::js::validate, Napi::String>(info, allowed_signatures) ||
+ ov::js::validate, Napi::String, Napi::Object>(info, allowed_signatures)) {
+ // Prepare validated data that will be transferred to the new thread.
+ auto context_data = new ImportModelContext(env, _core);
+
+ const auto& model_data = info[0].As>();
+ const auto model_stream = std::string(reinterpret_cast(model_data.Data()), model_data.Length());
+ context_data->_stream << model_stream;
+ context_data->_device = info[1].ToString();
+ context_data->_config = info.Length() == 3 ? to_anyMap(env, info[2]) : ov::AnyMap();
+
+ context_data->tsfn = Napi::ThreadSafeFunction::New(env,
+ Napi::Function(),
+ "TSFN",
+ 0,
+ 1,
+ context_data,
+ ImportModelFinalizer,
+ (void*)nullptr);
+
+ context_data->nativeThread = std::thread(importModelThread, context_data, std::ref(_mutex));
+ // Returns a Promise to JS. Method import_model() is performed on additional thread.
+ return context_data->deferred.Promise();
+ } else {
+ OPENVINO_THROW("'importModel'", ov::js::get_parameters_error_msg(info, allowed_signatures));
+ }
+
+ } catch (std::exception& e) {
+ reportError(info.Env(), e.what());
+ return info.Env().Undefined();
+ }
+}
+
Napi::Value CoreWrap::set_property(const Napi::CallbackInfo& info) {
try {
auto args = try_get_set_property_parameters(info);
diff --git a/src/bindings/js/node/src/helper.cpp b/src/bindings/js/node/src/helper.cpp
index 01474a49c4ae3c..09161deb2bc30e 100644
--- a/src/bindings/js/node/src/helper.cpp
+++ b/src/bindings/js/node/src/helper.cpp
@@ -3,6 +3,7 @@
#include "node/include/helper.hpp"
+#include "node/include/compiled_model.hpp"
#include "node/include/tensor.hpp"
#include "node/include/type_validation.hpp"
@@ -251,11 +252,33 @@ Napi::Array cpp_to_js(const Napi::CallbackInfo& info
return interval;
}
+Napi::Object cpp_to_js(const Napi::Env& env, std::shared_ptr model) {
+ const auto& prototype = env.GetInstanceData()->model;
+ if (!prototype) {
+ OPENVINO_THROW("Invalid pointer to Model prototype.");
+ }
+ const auto& model_js = prototype.New({});
+ const auto mw = Napi::ObjectWrap::Unwrap(model_js);
+ mw->set_model(model);
+ return model_js;
+}
+
template <>
Napi::Boolean cpp_to_js(const Napi::CallbackInfo& info, const bool value) {
return Napi::Boolean::New(info.Env(), value);
}
+Napi::Object cpp_to_js(const Napi::Env& env, const ov::CompiledModel& compiled_model) {
+ const auto& prototype = env.GetInstanceData()->compiled_model;
+ if (!prototype) {
+ OPENVINO_THROW("Invalid pointer to CompiledModel prototype.");
+ }
+ auto obj = prototype.New({});
+ const auto cm = Napi::ObjectWrap::Unwrap(obj);
+ cm->set_compiled_model(compiled_model);
+ return obj;
+}
+
ov::TensorVector parse_input_data(const Napi::Value& input) {
ov::TensorVector parsed_input;
if (input.IsArray()) {
diff --git a/src/bindings/js/node/src/model_wrap.cpp b/src/bindings/js/node/src/model_wrap.cpp
index a10bc3dd6861a6..e8359b83ff6da3 100644
--- a/src/bindings/js/node/src/model_wrap.cpp
+++ b/src/bindings/js/node/src/model_wrap.cpp
@@ -33,18 +33,6 @@ void ModelWrap::set_model(const std::shared_ptr& model) {
_model = model;
}
-Napi::Object ModelWrap::wrap(Napi::Env env, std::shared_ptr model) {
- Napi::HandleScope scope(env);
- const auto& prototype = env.GetInstanceData()->model;
- if (!prototype) {
- OPENVINO_THROW("Invalid pointer to model prototype.");
- }
- const auto& model_js = prototype.New({});
- const auto mw = Napi::ObjectWrap::Unwrap(model_js);
- mw->set_model(model);
- return model_js;
-}
-
Napi::Value ModelWrap::get_name(const Napi::CallbackInfo& info) {
if (_model->get_name() != "")
return Napi::String::New(info.Env(), _model->get_name());
diff --git a/src/bindings/js/node/tests/basic.test.js b/src/bindings/js/node/tests/basic.test.js
index ba3e585e40ce7d..8c8e0900a127f7 100644
--- a/src/bindings/js/node/tests/basic.test.js
+++ b/src/bindings/js/node/tests/basic.test.js
@@ -236,7 +236,7 @@ describe('Test exportModel()/importModel()', () => {
const inferRequest = compiledModel.createInferRequest();
const res1 = inferRequest.infer([tensor]);
- it('Test importModel(stream, device)', () => {
+ it('Test importModelSync(stream, device)', () => {
const newCompiled = core.importModelSync(userStream, 'CPU');
const newInferRequest = newCompiled.createInferRequest();
const res2 = newInferRequest.infer([tensor]);
@@ -244,7 +244,7 @@ describe('Test exportModel()/importModel()', () => {
assert.deepStrictEqual(res1['fc_out'].data[0], res2['fc_out'].data[0]);
});
- it('Test importModel(stream, device, config)', () => {
+ it('Test importModelSync(stream, device, config)', () => {
const newCompiled = core.importModelSync(userStream, 'CPU', { 'NUM_STREAMS': 1 });
const newInferRequest = newCompiled.createInferRequest();
const res2 = newInferRequest.infer([tensor]);
@@ -252,27 +252,27 @@ describe('Test exportModel()/importModel()', () => {
assert.deepStrictEqual(res1['fc_out'].data[0], res2['fc_out'].data[0]);
});
- it('Test importModel(stream, device) throws', () => {
+ it('Test importModelSync(stream, device) throws', () => {
assert.throws(
() => core.importModelSync(epsilon, 'CPU'),
/The first argument must be of type Buffer./
);
});
- it('Test importModel(stream, device) throws', () => {
+ it('Test importModelSync(stream, device) throws', () => {
assert.throws(
() => core.importModelSync(userStream, tensor),
/The second argument must be of type String./
);
});
- it('Test importModel(stream, device, config: tensor) throws', () => {
+ it('Test importModelSync(stream, device, config: tensor) throws', () => {
assert.throws(
() => core.importModelSync(userStream, 'CPU', tensor),
/NotFound: Unsupported property 0 by CPU plugin./
);
});
- it('Test importModel(stream, device, config: string) throws', () => {
+ it('Test importModelSync(stream, device, config: string) throws', () => {
const testString = 'test';
assert.throws(
() => core.importModelSync(userStream, 'CPU', testString),
@@ -280,11 +280,53 @@ describe('Test exportModel()/importModel()', () => {
);
});
- it('Test importModel(stream, device, config: unsupported property) throws', () => {
+ it('Test importModelSync(stream, device, config: unsupported property) \
+ throws', () => {
const tmpDir = '/tmp';
assert.throws(
() => core.importModelSync(userStream, 'CPU', { 'CACHE_DIR': tmpDir }),
/Unsupported property CACHE_DIR by CPU plugin./
);
});
+
+ it('Test importModel(stream, device)', () => {
+ core.importModel(userStream, 'CPU').then(newCompiled => {
+ const newInferRequest = newCompiled.createInferRequest();
+ const res2 = newInferRequest.infer([tensor]);
+ assert.deepStrictEqual(res1['fc_out'].data[0], res2['fc_out'].data[0]);
+ });
+ });
+
+ it('Test importModel(stream, device, config)', () => {
+ core.importModel(userStream, 'CPU', { 'NUM_STREAMS': 1 }).then(
+ newCompiled => {
+ const newInferRequest = newCompiled.createInferRequest();
+ const res2 = newInferRequest.infer([tensor]);
+
+ assert.deepStrictEqual(res1['fc_out'].data[0], res2['fc_out'].data[0]);
+ });
+ });
+
+ it('Test importModel(stream, device) throws', () => {
+ assert.throws(
+ () => core.importModel(epsilon, 'CPU').then(),
+ /'importModel' method called with incorrect parameters./
+ );
+ });
+
+ it('Test importModel(stream, device) throws', () => {
+ assert.throws(
+ () => core.importModel(userStream, tensor).then(),
+ /'importModel' method called with incorrect parameters./
+ );
+ });
+
+ it('Test importModel(stream, device, config: string) throws', () => {
+ const testString = 'test';
+ assert.throws(
+ () => core.importModel(userStream, 'CPU', testString).then(),
+ /'importModel' method called with incorrect parameters./
+ );
+ });
+
});
diff --git a/src/bindings/js/node/tests/read_model.test.js b/src/bindings/js/node/tests/read_model.test.js
index 731001fbb861b7..83c5c216f3c759 100644
--- a/src/bindings/js/node/tests/read_model.test.js
+++ b/src/bindings/js/node/tests/read_model.test.js
@@ -10,7 +10,9 @@ const { getModelPath } = require('./utils.js');
const { xml: modelPath, bin: weightsPath } = getModelPath();
const modelFile = fs.readFileSync(modelPath);
+const modelStr = fs.readFileSync(modelPath, 'utf8');
const weightsFile = fs.readFileSync(weightsPath);
+const weightsTensor = new ov.Tensor(ov.element.u8, [weightsFile.buffer.byteLength], new Uint8Array(weightsFile.buffer));
const core = new ov.Core();
@@ -34,7 +36,16 @@ describe('Core.readModeSync', () => {
)
});
- it('readModeSync(modelUint8ArrayBuffer, weightsUint8ArrayBuffer) ', () => {
+ it('readModelSync(modelString, weightsTensor) ', () => {
+ const model = core.readModelSync(
+ modelStr,
+ weightsTensor,
+ );
+ assert.ok(model instanceof ov.Model);
+ assert.equal(model.inputs.length, 1);
+ });
+
+ it('readModelSync(modelUint8ArrayBuffer, weightsUint8ArrayBuffer) ', () => {
const model = core.readModelSync(
new Uint8Array(modelFile.buffer),
new Uint8Array(weightsFile.buffer),
@@ -55,7 +66,16 @@ describe('Core.readModel', () => {
assert.equal(model.inputs.length, 1);
});
- it('readMode(modelUint8ArrayBuffer, weightsUint8ArrayBuffer) ', async () => {
+ it('readModel(modelString, weightsTensor) ', async () => {
+ const model = await core.readModel(
+ modelStr,
+ weightsTensor,
+ );
+ assert.ok(model instanceof ov.Model);
+ assert.equal(model.inputs.length, 1);
+ });
+
+ it('readModel(modelUint8ArrayBuffer, weightsUint8ArrayBuffer) ', async () => {
const model = await core.readModel(
new Uint8Array(modelFile.buffer),
new Uint8Array(weightsFile.buffer),
diff --git a/src/common/transformations/src/transformations/convert_precision.cpp b/src/common/transformations/src/transformations/convert_precision.cpp
index b2e80048e66cf7..f6cd1ab20012f7 100644
--- a/src/common/transformations/src/transformations/convert_precision.cpp
+++ b/src/common/transformations/src/transformations/convert_precision.cpp
@@ -441,6 +441,7 @@ bool ov::pass::ConvertPrecision::run_on_model(const std::shared_ptr&
{ov::op::v3::TopK::get_type_info_static(), fuse_type_to_topk},
{ov::op::v11::TopK::get_type_info_static(), fuse_type_to_topk},
{ov::op::v8::MaxPool::get_type_info_static(), fuse_type_to_maxpool},
+ {ov::op::v14::MaxPool::get_type_info_static(), fuse_type_to_maxpool},
{ov::op::v3::NonZero::get_type_info_static(), fuse_type_to_nonzero},
{ov::op::v3::Bucketize::get_type_info_static(), fuse_type_to_bucketize},
{ov::op::v1::Equal::get_type_info_static(), fuse_type_to_binary_comparision},
@@ -924,9 +925,15 @@ bool fuse_type_to_topk(const std::shared_ptr& node, const precisions_m
}
bool fuse_type_to_maxpool(const std::shared_ptr& node, const precisions_map& precisions) {
- if (auto maxpool = ov::as_type_ptr(node)) {
+ auto maxpool_v8 = ov::as_type_ptr(node);
+ auto maxpool_v14 = ov::as_type_ptr(node);
+ if (maxpool_v14) {
return update_type(1, node, precisions, [&](const element::Type& to) {
- maxpool->set_index_element_type(to);
+ maxpool_v14->set_index_element_type(to);
+ });
+ } else if (maxpool_v8) {
+ return update_type(1, node, precisions, [&](const element::Type& to) {
+ maxpool_v8->set_index_element_type(to);
});
}
return false;
diff --git a/src/common/transformations/src/transformations/op_conversions/convert_avgpool_downgrade.cpp b/src/common/transformations/src/transformations/op_conversions/convert_avgpool_downgrade.cpp
index 3333c1d6885f08..24d3ecca334c73 100644
--- a/src/common/transformations/src/transformations/op_conversions/convert_avgpool_downgrade.cpp
+++ b/src/common/transformations/src/transformations/op_conversions/convert_avgpool_downgrade.cpp
@@ -24,8 +24,11 @@ ov::pass::ConvertAvgPool14ToAvgPool1::ConvertAvgPool14ToAvgPool1() {
const auto avg_pool_v14_pattern = pattern::wrap_type();
- const matcher_pass_callback callback = [](pattern::Matcher& m) {
+ const matcher_pass_callback callback = [OV_CAPTURE_CPY_AND_THIS](pattern::Matcher& m) {
const auto avg_pool_v14 = std::dynamic_pointer_cast(m.get_match_root());
+ if (!avg_pool_v14 || transformation_callback(avg_pool_v14)) {
+ return false;
+ }
const auto rounding_type_v14 = avg_pool_v14->get_rounding_type();
const auto rounding_type_v1 =
rounding_type_v14 == ov::op::RoundingType::CEIL_TORCH ? ov::op::RoundingType::CEIL : rounding_type_v14;
diff --git a/src/common/transformations/src/transformations/op_conversions/convert_maxpool_downgrade.cpp b/src/common/transformations/src/transformations/op_conversions/convert_maxpool_downgrade.cpp
index 0edff11b8429ed..9c3f1178125851 100644
--- a/src/common/transformations/src/transformations/op_conversions/convert_maxpool_downgrade.cpp
+++ b/src/common/transformations/src/transformations/op_conversions/convert_maxpool_downgrade.cpp
@@ -140,7 +140,7 @@ ov::pass::ConvertMaxPool14ToMaxPool8::ConvertMaxPool14ToMaxPool8() {
const auto selected_pads = node_registry.make