
Commit 224d17a

Merge pull request openvinotoolkit#62 from almilosz/almilosz/infer-async
Rename inferAsync() to infer()
almilosz authored Nov 6, 2023
2 parents 20cb0f7 + 582736e commit 224d17a
Showing 11 changed files with 70 additions and 70 deletions.
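
In short, the samples switch from inferAsync() to infer() for asynchronous inference and from infer() to inferSync() for synchronous inference. A minimal sketch of the renamed calls as they appear in the diffs below (inputTensor, outputName, and outputLayer are stand-ins for values the samples define elsewhere):

const inferRequest = compiledModel.createInferRequest();

// Asynchronous inference: infer() (formerly inferAsync()) returns a Promise.
inferRequest.infer([inputTensor])
  .then(result => console.log(result[outputName]));

// Synchronous inference: inferSync() (formerly infer()) blocks until the result is ready.
inferRequest.setInputTensor(inputTensor);
inferRequest.inferSync();
const output = inferRequest.getTensor(outputLayer);
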
@@ -88,7 +88,7 @@ async function main(modelPath, images, deviceName) {
const inferRequest = compiledModel.createInferRequest();

const promises = preprocessedImages.map((tensorData, i) => {
-const inferPromise = inferRequest.inferAsync([new ov.Tensor(ov.element.u8, tensorShape, tensorData)]);
+const inferPromise = inferRequest.infer([new ov.Tensor(ov.element.u8, tensorShape, tensorData)]);

inferPromise.then(result =>
completionCallback(result[outputName], images[i]));
@@ -59,7 +59,7 @@ async function main(modelPath, imagePath, deviceName) {
console.log('Starting inference in synchronous mode');
const inferRequest = compiledModel.createInferRequest();
inferRequest.setInputTensor(inputTensor);
-inferRequest.infer();
+inferRequest.inferSync();

//----------------- Step 7. Process output -----------------------------------
const outputLayer = compiledModel.outputs[0];
2 changes: 1 addition & 1 deletion samples/js/node/hello_reshape_ssd/hello_reshape_ssd.js
@@ -68,7 +68,7 @@ async function main(modelPath, imagePath, deviceName) {
console.log('Starting inference in synchronous mode');
const inferRequest = compiledModel.createInferRequest();
inferRequest.setInputTensor(inputTensor);
-inferRequest.infer();
+inferRequest.inferSync();

//----------------- Step 7. Process output -----------------------------------
const outputLayer = compiledModel.outputs[0];
16 changes: 8 additions & 8 deletions samples/js/node/notebooks/001-hello-world.nnb
@@ -17,7 +17,7 @@
{
"language": "javascript",
"source": [
"const { cv } = require('opencv-wasm');\nconst { display } = require('node-kernel');\nconst { getImageData, displayImage, downloadFile } = require('../helpers.js');\n\nconst { addon: ov } = require('openvinojs-node');"
"const { cv } = require('opencv-wasm');\nconst { display } = require('node-kernel');\nconst { getImageData, displayImage, downloadFile } = require('../helpers.js');\n\nconst { addon: ov } = require('openvinojs-node');\n"
],
"outputs": []
},
@@ -31,7 +31,7 @@
{
"language": "javascript",
"source": [
"const baseArtifactsDir = '../../assets/models';\n\nconst modelName = 'v3-small_224_1.0_float';\nconst modelXMLName = `${modelName}.xml`;\nconst modelBINName = `${modelName}.bin`;\n\nconst modelXMLPath = baseArtifactsDir + '/' + modelXMLName;\n\nconst baseURL = 'https://storage.openvinotoolkit.org/repositories/openvino_notebooks/models/mobelinet-v3-tf/FP32/';\n\nawait downloadFile(baseURL + modelXMLName, modelXMLName, baseArtifactsDir);\nawait downloadFile(baseURL + modelBINName, modelBINName, baseArtifactsDir);"
"const baseArtifactsDir = '../../assets/models';\n\nconst modelName = 'v3-small_224_1.0_float';\nconst modelXMLName = `${modelName}.xml`;\nconst modelBINName = `${modelName}.bin`;\n\nconst modelXMLPath = baseArtifactsDir + '/' + modelXMLName;\n\nconst baseURL = 'https://storage.openvinotoolkit.org/repositories/openvino_notebooks/models/mobelinet-v3-tf/FP32/';\n\nawait downloadFile(baseURL + modelXMLName, modelXMLName, baseArtifactsDir);\nawait downloadFile(baseURL + modelBINName, modelBINName, baseArtifactsDir);\n"
],
"outputs": [
{
@@ -59,7 +59,7 @@
{
"language": "javascript",
"source": [
"const imgUrl = 'https://storage.openvinotoolkit.org/repositories/openvino_notebooks/data/data/image/coco.jpg';\nconst classesUrl = 'https://storage.openvinotoolkit.org/repositories/openvino_notebooks/data/data/datasets/imagenet/imagenet_class_index.json';\n\nawait downloadFile(imgUrl, 'coco.jpg', '../../assets/images');\nawait downloadFile(classesUrl, 'imagenet_class_index.json', '../../assets/datasets');"
"const imgUrl = 'https://storage.openvinotoolkit.org/repositories/openvino_notebooks/data/data/image/coco.jpg';\nconst classesUrl = 'https://storage.openvinotoolkit.org/repositories/openvino_notebooks/data/data/datasets/imagenet/imagenet_class_index.json';\n\nawait downloadFile(imgUrl, 'coco.jpg', '../../assets/images');\nawait downloadFile(classesUrl, 'imagenet_class_index.json', '../../assets/datasets');\n"
],
"outputs": [
{
@@ -87,7 +87,7 @@
{
"language": "javascript",
"source": [
"const core = new ov.Core();\nconst model = await core.readModel(modelXMLPath);\nconst compiledModel = await core.compileModel(model, 'CPU');\n\nconst outputLayer = compiledModel.outputs[0];"
"const core = new ov.Core();\nconst model = await core.readModel(modelXMLPath);\nconst compiledModel = await core.compileModel(model, 'CPU');\n\nconst outputLayer = compiledModel.outputs[0];\n"
],
"outputs": []
},
@@ -101,7 +101,7 @@
{
"language": "javascript",
"source": [
"const imgData = await getImageData('../../assets/images/coco.jpg');\n\n// Use opencv-wasm to preprocess image.\nconst originalImage = cv.matFromImageData(imgData);\nconst image = new cv.Mat();\n// The MobileNet model expects images in RGB format.\ncv.cvtColor(originalImage, image, cv.COLOR_RGBA2RGB);\n// Resize to MobileNet image shape.\ncv.resize(image, image, new cv.Size(224, 224));\n\ndisplayImage(imgData, display);"
"const imgData = await getImageData('../../assets/images/coco.jpg');\n\n// Use opencv-wasm to preprocess image.\nconst originalImage = cv.matFromImageData(imgData);\nconst image = new cv.Mat();\n// The MobileNet model expects images in RGB format.\ncv.cvtColor(originalImage, image, cv.COLOR_RGBA2RGB);\n// Resize to MobileNet image shape.\ncv.resize(image, image, new cv.Size(224, 224));\n\ndisplayImage(imgData, display);\n"
],
"outputs": [
{
@@ -124,7 +124,7 @@
{
"language": "javascript",
"source": [
"const tensorData = new Float32Array(image.data);\nconst tensor = new ov.Tensor(ov.element.f32, Int32Array.from([1, 224, 224, 3]), tensorData);"
"const tensorData = new Float32Array(image.data);\nconst tensor = new ov.Tensor(ov.element.f32, Int32Array.from([1, 224, 224, 3]), tensorData);\n"
],
"outputs": []
},
@@ -138,14 +138,14 @@
{
"language": "javascript",
"source": [
"const inferRequest = compiledModel.createInferRequest();\ninferRequest.setInputTensor(tensor);\ninferRequest.infer();\n\nconst resultInfer = inferRequest.getTensor(outputLayer);\nconst resultIndex = resultInfer.data.indexOf(Math.max(...resultInfer.data));"
"const inferRequest = compiledModel.createInferRequest();\ninferRequest.setInputTensor(tensor);\ninferRequest.inferSync();\n\nconst resultInfer = inferRequest.getTensor(outputLayer);\nconst resultIndex = resultInfer.data.indexOf(Math.max(...resultInfer.data));\n"
],
"outputs": []
},
{
"language": "javascript",
"source": [
"const imagenetClassesMap = require('../../assets/datasets/imagenet_class_index.json');\nconst imagenetClasses = ['background', ...Object.values(imagenetClassesMap)];\n\nconsole.log(`Result: ${imagenetClasses[resultIndex][1]}`);"
"const imagenetClassesMap = require('../../assets/datasets/imagenet_class_index.json');\nconst imagenetClasses = ['background', ...Object.values(imagenetClassesMap)];\n\nconsole.log(`Result: ${imagenetClasses[resultIndex][1]}`);\n"
],
"outputs": [
{
14 changes: 7 additions & 7 deletions samples/js/node/notebooks/003-hello-segmentation.nnb
@@ -17,7 +17,7 @@
{
"language": "typescript",
"source": [
"const {\n getImageData, \n displayArrayAsImage, \n arrayToImageData,\n transform,\n downloadFile,\n} = require('../helpers');\n\nconst { cv } = require('opencv-wasm');\nconst { display } = require('node-kernel');\n\nconst { addon: ov } = require('openvinojs-node'); "
"const {\n getImageData, \n displayArrayAsImage, \n arrayToImageData,\n transform,\n downloadFile,\n} = require('../helpers');\n\nconst { cv } = require('opencv-wasm');\nconst { display } = require('node-kernel');\n\nconst { addon: ov } = require('openvinojs-node'); \n"
],
"outputs": []
},
@@ -31,7 +31,7 @@
{
"language": "typescript",
"source": [
"const baseArtifactsDir = '../../assets/models';\n\nconst modelName = 'road-segmentation-adas-0001';\nconst modelXMLName = `${modelName}.xml`;\nconst modelBINName = `${modelName}.bin`;\n\nconst modelXMLPath = baseArtifactsDir + '/' + modelXMLName;\n\nconst baseURL = 'https://storage.openvinotoolkit.org/repositories/open_model_zoo/2022.3/models_bin/1/road-segmentation-adas-0001/FP32/';\n\n\nawait downloadFile(baseURL + modelXMLName, modelXMLName, baseArtifactsDir);\nawait downloadFile(baseURL + modelBINName, modelBINName, baseArtifactsDir);"
"const baseArtifactsDir = '../../assets/models';\n\nconst modelName = 'road-segmentation-adas-0001';\nconst modelXMLName = `${modelName}.xml`;\nconst modelBINName = `${modelName}.bin`;\n\nconst modelXMLPath = baseArtifactsDir + '/' + modelXMLName;\n\nconst baseURL = 'https://storage.openvinotoolkit.org/repositories/open_model_zoo/2022.3/models_bin/1/road-segmentation-adas-0001/FP32/';\n\n\nawait downloadFile(baseURL + modelXMLName, modelXMLName, baseArtifactsDir);\nawait downloadFile(baseURL + modelBINName, modelBINName, baseArtifactsDir);\n"
],
"outputs": [
{
@@ -59,7 +59,7 @@
{
"language": "typescript",
"source": [
"const baseImagesDir = '../../assets/images';\nconst imgUrl = 'https://storage.openvinotoolkit.org/repositories/openvino_notebooks/data/data/image/empty_road_mapillary.jpg';\n\nawait downloadFile(imgUrl, 'empty_road_mapillary.jpg', baseImagesDir);"
"const baseImagesDir = '../../assets/images';\nconst imgUrl = 'https://storage.openvinotoolkit.org/repositories/openvino_notebooks/data/data/image/empty_road_mapillary.jpg';\n\nawait downloadFile(imgUrl, 'empty_road_mapillary.jpg', baseImagesDir);\n"
],
"outputs": [
{
@@ -86,7 +86,7 @@
{
"language": "typescript",
"source": [
"const core = new ov.Core();\nconst model = await core.readModel(modelXMLPath);\nconst compiledModel = await core.compileModel(model, 'CPU');\n\nconst inputLayer = compiledModel.input(0);\nconst outputLayer = compiledModel.output(0);"
"const core = new ov.Core();\nconst model = await core.readModel(modelXMLPath);\nconst compiledModel = await core.compileModel(model, 'CPU');\n\nconst inputLayer = compiledModel.input(0);\nconst outputLayer = compiledModel.output(0);\n"
],
"outputs": []
},
@@ -100,7 +100,7 @@
{
"language": "typescript",
"source": [
"const imgData = await getImageData('../../assets/images/empty_road_mapillary.jpg');\n\nconst originalImage = cv.matFromImageData(imgData);\nconst { cols: originalWidth, rows: originalHeight } = originalImage;\n\nconst image = new cv.Mat();\ncv.cvtColor(originalImage, image, cv.COLOR_RGBA2RGB);\ncv.cvtColor(image, image, cv.COLOR_BGR2RGB); \n\nconst [B, C, H, W] = inputLayer.shape;\n\ncv.resize(image, image, new cv.Size(W, H));\n\nconst inputImage = transform(image.data, { width: W, height: H }, [0, 1, 2]); // NHWC to NCHW\n\ndisplayArrayAsImage(originalImage.data, originalWidth, originalHeight, display);"
"const imgData = await getImageData('../../assets/images/empty_road_mapillary.jpg');\n\nconst originalImage = cv.matFromImageData(imgData);\nconst { cols: originalWidth, rows: originalHeight } = originalImage;\n\nconst image = new cv.Mat();\ncv.cvtColor(originalImage, image, cv.COLOR_RGBA2RGB);\ncv.cvtColor(image, image, cv.COLOR_BGR2RGB); \n\nconst [B, C, H, W] = inputLayer.shape;\n\ncv.resize(image, image, new cv.Size(W, H));\n\nconst inputImage = transform(image.data, { width: W, height: H }, [0, 1, 2]); // NHWC to NCHW\n\ndisplayArrayAsImage(originalImage.data, originalWidth, originalHeight, display);\n"
],
"outputs": [
{
@@ -123,7 +123,7 @@
{
"language": "typescript",
"source": [
"const tensor_data = new Float32Array(inputImage);\nconst tensor = new ov.Tensor(ov.element.f32, inputLayer.shape, tensor_data);\n\nconst inferRequest = compiledModel.createInferRequest();\ninferRequest.setInputTensor(tensor);\ninferRequest.infer();\n\nconst output = inferRequest.getTensor(outputLayer);\n\nconst { data: outputData } = output;\nconst layers = { bg: [], c: [], h: [], w: [] };\nconst resultLayer = [];\nconst colormap = [[68, 1, 84, 255], [48, 103, 141, 255], [53, 183, 120, 255], [199, 216, 52, 255]];\n\nconst size = outputData.length/4;\n\nfor (let i = 0; i < size; i++) {\n const valueAt = (i, number) => outputData[i + number*size];\n\n const currentValues = { \n bg: valueAt(i, 0),\n c: valueAt(i, 1),\n h: valueAt(i, 2),\n w: valueAt(i, 3),\n };\n const values = Object.values(currentValues);\n const maxIndex = values.indexOf(Math.max(...values));\n\n resultLayer.push(maxIndex);\n}\n\nconst pixels = [];\nresultLayer.forEach(i => pixels.push(...colormap[i]));\n\ndisplayArrayAsImage(pixels, W, H, display);\n"
"const tensor_data = new Float32Array(inputImage);\nconst tensor = new ov.Tensor(ov.element.f32, inputLayer.shape, tensor_data);\n\nconst inferRequest = compiledModel.createInferRequest();\ninferRequest.setInputTensor(tensor);\ninferRequest.inferSync();\n\nconst output = inferRequest.getTensor(outputLayer);\n\nconst { data: outputData } = output;\nconst layers = { bg: [], c: [], h: [], w: [] };\nconst resultLayer = [];\nconst colormap = [[68, 1, 84, 255], [48, 103, 141, 255], [53, 183, 120, 255], [199, 216, 52, 255]];\n\nconst size = outputData.length/4;\n\nfor (let i = 0; i < size; i++) {\n const valueAt = (i, number) => outputData[i + number*size];\n\n const currentValues = { \n bg: valueAt(i, 0),\n c: valueAt(i, 1),\n h: valueAt(i, 2),\n w: valueAt(i, 3),\n };\n const values = Object.values(currentValues);\n const maxIndex = values.indexOf(Math.max(...values));\n\n resultLayer.push(maxIndex);\n}\n\nconst pixels = [];\nresultLayer.forEach(i => pixels.push(...colormap[i]));\n\ndisplayArrayAsImage(pixels, W, H, display);\n"
],
"outputs": [
{
@@ -146,7 +146,7 @@
{
"language": "typescript",
"source": [
"const alpha = 0.3;\n\nconst pixelsAsImageData = arrayToImageData(pixels, W, H);\nconst mask = cv.matFromImageData(pixelsAsImageData);\n\ncv.resize(mask, mask, new cv.Size(originalWidth, originalHeight));\n\ncv.addWeighted(mask, alpha, originalImage, 1 - alpha, 0, mask);\n\ndisplayArrayAsImage(mask.data, originalWidth, originalHeight, display);"
"const alpha = 0.3;\n\nconst pixelsAsImageData = arrayToImageData(pixels, W, H);\nconst mask = cv.matFromImageData(pixelsAsImageData);\n\ncv.resize(mask, mask, new cv.Size(originalWidth, originalHeight));\n\ncv.addWeighted(mask, alpha, originalImage, 1 - alpha, 0, mask);\n\ndisplayArrayAsImage(mask.data, originalWidth, originalHeight, display);\n"
],
"outputs": [
{
10 changes: 5 additions & 5 deletions samples/js/node/notebooks/004-hello-detection.nnb
@@ -17,7 +17,7 @@
{
"language": "typescript",
"source": [
"const { cv } = require('opencv-wasm');\nconst { display } = require('node-kernel');\nconst { transform, getImageData, displayArrayAsImage, downloadFile } = require('../helpers.js');\n\nconst { addon: ov } = require('openvinojs-node'); "
"const { cv } = require('opencv-wasm');\nconst { display } = require('node-kernel');\nconst { transform, getImageData, displayArrayAsImage, downloadFile } = require('../helpers.js');\n\nconst { addon: ov } = require('openvinojs-node'); \n"
],
"outputs": []
},
@@ -31,7 +31,7 @@
{
"language": "typescript",
"source": [
"const baseArtifactsDir = '../../assets/models';\n\nconst modelName = 'horizontal-text-detection-0001';\nconst modelXMLName = `${modelName}.xml`;\nconst modelBINName = `${modelName}.bin`;\n\nconst modelXMLPath = baseArtifactsDir + '/' + modelXMLName;\n\nconst baseURL = 'https://storage.openvinotoolkit.org/repositories/open_model_zoo/2022.3/models_bin/1/horizontal-text-detection-0001/FP32/';\n\nawait downloadFile(baseURL + modelXMLName, modelXMLName, baseArtifactsDir);\nawait downloadFile(baseURL + modelBINName, modelBINName, baseArtifactsDir);"
"const baseArtifactsDir = '../../assets/models';\n\nconst modelName = 'horizontal-text-detection-0001';\nconst modelXMLName = `${modelName}.xml`;\nconst modelBINName = `${modelName}.bin`;\n\nconst modelXMLPath = baseArtifactsDir + '/' + modelXMLName;\n\nconst baseURL = 'https://storage.openvinotoolkit.org/repositories/open_model_zoo/2022.3/models_bin/1/horizontal-text-detection-0001/FP32/';\n\nawait downloadFile(baseURL + modelXMLName, modelXMLName, baseArtifactsDir);\nawait downloadFile(baseURL + modelBINName, modelBINName, baseArtifactsDir);\n"
],
"outputs": [
{
@@ -59,7 +59,7 @@
{
"language": "typescript",
"source": [
"const baseImagesDir = '../../assets/images';\nconst imgUrl = 'https://storage.openvinotoolkit.org/repositories/openvino_notebooks/data/data/image/intel_rnb.jpg';\n\nawait downloadFile(imgUrl, 'intel_rnb.jpg', baseImagesDir);"
"const baseImagesDir = '../../assets/images';\nconst imgUrl = 'https://storage.openvinotoolkit.org/repositories/openvino_notebooks/data/data/image/intel_rnb.jpg';\n\nawait downloadFile(imgUrl, 'intel_rnb.jpg', baseImagesDir);\n"
],
"outputs": [
{
@@ -86,7 +86,7 @@
{
"language": "typescript",
"source": [
"const core = new ov.Core();\nconst model = await core.readModel(modelXMLPath);\nconst compiledModel = await core.compileModel(model, 'CPU');\n\nconst inputLayer = compiledModel.input(0);\nconst outputLayer = compiledModel.output('boxes');"
"const core = new ov.Core();\nconst model = await core.readModel(modelXMLPath);\nconst compiledModel = await core.compileModel(model, 'CPU');\n\nconst inputLayer = compiledModel.input(0);\nconst outputLayer = compiledModel.output('boxes');\n"
],
"outputs": []
},
@@ -123,7 +123,7 @@
{
"language": "typescript",
"source": [
"const tensorData = new Float32Array(inputImage);\nconst tensor = new ov.Tensor(ov.element.f32, Int32Array.from(inputLayer.shape), tensorData);\n\nconst inferRequest = compiledModel.createInferRequest();\ninferRequest.setInputTensor(tensor);\ninferRequest.infer();\n\nconst output = inferRequest.getTensor(outputLayer);\nconst { data: boxes } = output;\n"
"const tensorData = new Float32Array(inputImage);\nconst tensor = new ov.Tensor(ov.element.f32, Int32Array.from(inputLayer.shape), tensorData);\n\nconst inferRequest = compiledModel.createInferRequest();\ninferRequest.setInputTensor(tensor);\ninferRequest.inferSync();\n\nconst output = inferRequest.getTensor(outputLayer);\nconst { data: boxes } = output;\n"
],
"outputs": []
},