From e76a3e8f333055335b0d1928f8f9e85b1c07023b Mon Sep 17 00:00:00 2001 From: David Li Date: Sat, 4 Nov 2017 01:56:05 +0800 Subject: [PATCH] Fix formatting for all jsons (#1965) --- .../data-plane/Common/BasicRegions.json | 56 +- .../data-plane/Common/ExtendedRegions.json | 70 +- .../data-plane/Common/Parameters.json | 74 +- .../ComputerVision/v1.0/ComputerVision.json | 2676 ++++----- .../examples/SuccessfulAnalyzeWithUrl.json | 228 +- .../SuccessfulDomainModelWithUrl.json | 62 +- .../examples/SuccessfulGenerateThumbnail.json | 31 +- .../SuccessfulGetTextOperationResult.json | 530 +- .../examples/SuccessfulListDomainModels.json | 48 +- .../v1.0/examples/SuccessfulOcrWithUrl.json | 142 +- .../SuccessfulRecognizeTextWithUrl.json | 28 +- .../EntitySearch/v1.0/EntitySearch.json | 12 +- .../data-plane/Face/v1.0/Face.json | 4804 ++++++++--------- .../TextAnalytics/v2.0/TextAnalytics.json | 850 +-- .../examples/SuccessfulKeyPhrasesRequest.json | 118 +- .../examples/SuccessfulLanguagesRequest.json | 142 +- .../examples/SuccessfulSentimentRequest.json | 102 +- 17 files changed, 4988 insertions(+), 4985 deletions(-) diff --git a/specification/cognitiveservices/data-plane/Common/BasicRegions.json b/specification/cognitiveservices/data-plane/Common/BasicRegions.json index dd40f021d2d5..a02c385efae1 100644 --- a/specification/cognitiveservices/data-plane/Common/BasicRegions.json +++ b/specification/cognitiveservices/data-plane/Common/BasicRegions.json @@ -1,31 +1,31 @@ { - "swagger": "2.0", - "info": { - "version": "2017-08-30", - "title": "Common Referenced Parameters File", - "description": "File containing commonly referenced parameters." - }, - "paths": {}, - "parameters": { - "AzureRegion": { - "name": "AzureRegion", - "description": "Supported Azure regions for Cognitive Services endpoints", - "x-ms-parameter-location": "client", - "required": true, - "type": "string", - "in": "path", - "x-ms-skip-url-encoding": true, - "x-ms-enum": { - "name": "AzureRegions", - "modelAsString": false - }, - "enum": [ - "westus", - "westeurope", - "southeastasia", - "eastus2", - "westcentralus" - ] - } + "swagger": "2.0", + "info": { + "version": "2017-08-30", + "title": "Common Referenced Parameters File", + "description": "File containing commonly referenced parameters." + }, + "paths": {}, + "parameters": { + "AzureRegion": { + "name": "AzureRegion", + "description": "Supported Azure regions for Cognitive Services endpoints", + "x-ms-parameter-location": "client", + "required": true, + "type": "string", + "in": "path", + "x-ms-skip-url-encoding": true, + "x-ms-enum": { + "name": "AzureRegions", + "modelAsString": false + }, + "enum": [ + "westus", + "westeurope", + "southeastasia", + "eastus2", + "westcentralus" + ] } + } } \ No newline at end of file diff --git a/specification/cognitiveservices/data-plane/Common/ExtendedRegions.json b/specification/cognitiveservices/data-plane/Common/ExtendedRegions.json index 33249508e45d..f84af7057dd3 100644 --- a/specification/cognitiveservices/data-plane/Common/ExtendedRegions.json +++ b/specification/cognitiveservices/data-plane/Common/ExtendedRegions.json @@ -1,38 +1,38 @@ { - "swagger": "2.0", - "info": { - "version": "2017-08-30", - "title": "Common Referenced Parameters File", - "description": "File containing commonly referenced parameters." 
- }, - "paths": {}, - "parameters": { - "AzureRegion": { - "name": "AzureRegion", - "description": "Supported Azure regions for Cognitive Services endpoints", - "x-ms-parameter-location": "client", - "required": true, - "type": "string", - "in": "path", - "x-ms-skip-url-encoding": true, - "x-ms-enum": { - "name": "AzureRegions", - "modelAsString": false - }, - "enum": [ - "westus", - "westeurope", - "southeastasia", - "eastus2", - "westcentralus", - "westus2", - "eastus", - "southcentralus", - "northeurope", - "eastasia", - "australiaeast", - "brazilsouth" - ] - } + "swagger": "2.0", + "info": { + "version": "2017-08-30", + "title": "Common Referenced Parameters File", + "description": "File containing commonly referenced parameters." + }, + "paths": {}, + "parameters": { + "AzureRegion": { + "name": "AzureRegion", + "description": "Supported Azure regions for Cognitive Services endpoints", + "x-ms-parameter-location": "client", + "required": true, + "type": "string", + "in": "path", + "x-ms-skip-url-encoding": true, + "x-ms-enum": { + "name": "AzureRegions", + "modelAsString": false + }, + "enum": [ + "westus", + "westeurope", + "southeastasia", + "eastus2", + "westcentralus", + "westus2", + "eastus", + "southcentralus", + "northeurope", + "eastasia", + "australiaeast", + "brazilsouth" + ] } + } } \ No newline at end of file diff --git a/specification/cognitiveservices/data-plane/Common/Parameters.json b/specification/cognitiveservices/data-plane/Common/Parameters.json index efc1d4288d58..05038c300269 100644 --- a/specification/cognitiveservices/data-plane/Common/Parameters.json +++ b/specification/cognitiveservices/data-plane/Common/Parameters.json @@ -1,46 +1,46 @@ { - "swagger": "2.0", - "info": { - "version": "2017-08-30", - "title": "Common Referenced Parameters File", - "description": "File containing commonly referenced parameters." - }, - "paths": {}, - "parameters": { - "ImageStream": { - "name": "Image", - "in": "body", - "required": true, - "x-ms-parameter-location": "method", - "description": "An image stream.", - "schema": { - "type": "object", - "format": "file" - } - }, - "ImageUrl": { - "name": "ImageUrl", - "in": "body", - "required": true, - "x-ms-parameter-location": "method", - "x-ms-client-flatten": true, - "description": "A JSON document with a URL pointing to the image that is to be analyzed.", - "schema": { - "$ref": "#/definitions/ImageUrl" - } - } + "swagger": "2.0", + "info": { + "version": "2017-08-30", + "title": "Common Referenced Parameters File", + "description": "File containing commonly referenced parameters." 
+ }, + "paths": {}, + "parameters": { + "ImageStream": { + "name": "Image", + "in": "body", + "required": true, + "x-ms-parameter-location": "method", + "description": "An image stream.", + "schema": { + "type": "object", + "format": "file" + } }, - "definitions": { - "ImageUrl": { + "ImageUrl": { + "name": "ImageUrl", + "in": "body", + "required": true, + "x-ms-parameter-location": "method", + "x-ms-client-flatten": true, + "description": "A JSON document with a URL pointing to the image that is to be analyzed.", + "schema": { + "$ref": "#/definitions/ImageUrl" + } + } + }, + "definitions": { + "ImageUrl": { "type": "object", "required": [ - "url" + "url" ], "properties": { - "url": { - "type": "string" - } + "url": { + "type": "string" + } } - } } + } } \ No newline at end of file diff --git a/specification/cognitiveservices/data-plane/ComputerVision/v1.0/ComputerVision.json b/specification/cognitiveservices/data-plane/ComputerVision/v1.0/ComputerVision.json index c3e3b74c5ac1..864bc1fde0e9 100644 --- a/specification/cognitiveservices/data-plane/ComputerVision/v1.0/ComputerVision.json +++ b/specification/cognitiveservices/data-plane/ComputerVision/v1.0/ComputerVision.json @@ -1,1408 +1,1408 @@ { - "swagger": "2.0", - "info": { - "version": "1.0", - "title": "Computer Vision API", - "description": "The Computer Vision API provides state-of-the-art algorithms to process images and return information. For example, it can be used to determine if an image contains mature content, or it can be used to find all the faces in an image. It also has other features like estimating dominant and accent colors, categorizing the content of images, and describing an image with complete English sentences. Additionally, it can also intelligently generate images thumbnails for displaying large images effectively." - }, - "securityDefinitions": { - "apim_key": { - "type": "apiKey", - "name": "Ocp-Apim-Subscription-Key", - "in": "header" + "swagger": "2.0", + "info": { + "version": "1.0", + "title": "Computer Vision API", + "description": "The Computer Vision API provides state-of-the-art algorithms to process images and return information. For example, it can be used to determine if an image contains mature content, or it can be used to find all the faces in an image. It also has other features like estimating dominant and accent colors, categorizing the content of images, and describing an image with complete English sentences. Additionally, it can also intelligently generate images thumbnails for displaying large images effectively." + }, + "securityDefinitions": { + "apim_key": { + "type": "apiKey", + "name": "Ocp-Apim-Subscription-Key", + "in": "header" + } + }, + "security": [ + { + "apim_key": [] + } + ], + "x-ms-parameterized-host": { + "hostTemplate": "{AzureRegion}.api.cognitive.microsoft.com", + "parameters": [ + { + "$ref": "../../Common/ExtendedRegions.json#/parameters/AzureRegion" + } + ] + }, + "basePath": "/vision/v1.0", + "schemes": [ + "https" + ], + "paths": { + "/models": { + "get": { + "description": "This operation returns the list of domain-specific models that are supported by the Computer Vision API. Currently, the API only supports one domain-specific model: a celebrity recognizer. A successful response will be returned in JSON. 
If the request failed, the response will contain an error code and a message to help understand what went wrong.",
        "operationId": "ListModels",
        "produces": [
          "application/json"
        ],
        "responses": {
          "200": {
            "description": "List of available domain models.",
            "schema": {
              "$ref": "#/definitions/ListModelsResult"
            }
          },
          "default": {
            "description": "Error response.",
            "schema": {
              "$ref": "#/definitions/ComputerVisionError"
            }
          }
        },
        "x-ms-examples": {
          "Successful List Domains request": {
            "$ref": "./examples/SuccessfulListDomainModels.json"
          }
        }
      }
    },
    "/analyze?overload=url": {
      "post": {
        "description": "This operation extracts a rich set of visual features based on the image content. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. Within your request, there is an optional parameter to allow you to choose which features to return. By default, image categories are returned in the response.",
        "operationId": "AnalyzeImage",
        "consumes": [
          "application/json"
        ],
        "produces": [
          "application/json"
        ],
        "parameters": [
          {
            "$ref": "#/parameters/VisualFeatures"
          },
          {
            "name": "details",
            "in": "query",
            "description": "A string indicating which domain-specific details to return. Multiple values should be comma-separated. Valid visual feature types include: Celebrities - identifies celebrities if detected in the image.",
            "type": "array",
            "required": false,
            "collectionFormat": "csv",
            "items": {
              "type": "string",
              "x-nullable": false,
              "x-ms-enum": {
                "name": "Details",
                "modelAsString": false
              },
              "enum": [
                "Celebrities",
                "Landmarks"
              ]
            }
          },
          {
            "name": "language",
            "in": "query",
            "description": "A string indicating which language to return. The service will return recognition results in the specified language. If this parameter is not specified, the default value is \"en\". Supported languages: en - English (default), zh - Simplified Chinese.",
            "type": "string",
            "required": false,
            "default": "en",
            "x-nullable": false,
            "x-ms-enum": {
              "name": "Language",
              "modelAsString": false
            },
            "enum": [
              "en",
              "zh"
            ]
          },
          {
            "$ref": "../../Common/Parameters.json#/parameters/ImageUrl"
          }
        ],
        "responses": {
          "200": {
            "description": "The response includes the extracted features in JSON format. Here are the definitions for the enumeration types. ClipartType: non-clipart = 0, ambiguous = 1, normal-clipart = 2, good-clipart = 3. LineDrawingType: non-line-drawing = 0, line-drawing = 1.",
            "schema": {
              "$ref": "#/definitions/ImageAnalysis"
            }
          },
          "default": {
            "description": "Error response.",
            "schema": {
              "$ref": "#/definitions/ComputerVisionError"
            }
          }
        },
        "x-ms-examples": {
          "Successful Analyze with Url request": {
            "$ref": "./examples/SuccessfulAnalyzeWithUrl.json"
          }
        }
      }
    },
    "/generateThumbnail?overload=url": {
      "post": {
        "description": "This operation generates a thumbnail image with the user-specified width and height. By default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates based on the ROI. 
Smart cropping helps when you specify an aspect ratio that differs from that of the input image. A successful response contains the thumbnail image binary. If the request failed, the response contains an error code and a message to help determine what went wrong.", + "operationId": "GenerateThumbnail", + "consumes": [ + "application/json" + ], + "produces": [ + "application/octet-stream" + ], + "parameters": [ + { + "name": "width", + "type": "integer", + "in": "query", + "required": true, + "minimum": 1, + "maximum": 1023, + "description": "Width of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50." + }, + { + "name": "height", + "type": "integer", + "in": "query", + "required": true, + "minimum": 1, + "maximum": 1023, + "description": "Height of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50." + }, + { + "$ref": "../../Common/Parameters.json#/parameters/ImageUrl" + }, + { + "name": "smartCropping", + "type": "boolean", + "in": "query", + "required": false, + "default": false, + "description": "Boolean flag for enabling smart cropping." + } + ], + "responses": { + "200": { + "description": "The generated thumbnail in binary format.", + "schema": { + "type": "file" + } + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/ComputerVisionError" } + } }, - "/generateThumbnail?overload=url": { - "post": { - "description": "This operation generates a thumbnail image with the user-specified width and height. By default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates based on the ROI. Smart cropping helps when you specify an aspect ratio that differs from that of the input image. A successful response contains the thumbnail image binary. If the request failed, the response contains an error code and a message to help determine what went wrong.", - "operationId": "GenerateThumbnail", - "consumes": [ - "application/json" - ], - "produces": [ - "application/octet-stream" - ], - "parameters": [ - { - "name": "width", - "type": "integer", - "in": "query", - "required": true, - "minimum": 1, - "maximum": 1023, - "description": "Width of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50." - }, - { - "name": "height", - "type": "integer", - "in": "query", - "required": true, - "minimum": 1, - "maximum": 1023, - "description": "Height of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50." - }, - { - "$ref": "../../Common/Parameters.json#/parameters/ImageUrl" - }, - { - "name": "smartCropping", - "type": "boolean", - "in": "query", - "required": false, - "default": false, - "description": "Boolean flag for enabling smart cropping." - } - ], - "responses": { - "200": { - "description": "The generated thumbnail in binary format.", - "schema": { - "type": "file" - } - }, - "default": { - "description": "Error response.", - "schema": { - "$ref": "#/definitions/ComputerVisionError" - } - } - }, - "x-ms-examples": { - "Successful Generate Thumbnail request": { - "$ref": "./examples/SuccessfulGenerateThumbnail.json" - } - } + "x-ms-examples": { + "Successful Generate Thumbnail request": { + "$ref": "./examples/SuccessfulGenerateThumbnail.json" + } + } + } + }, + "/ocr?overload=url": { + "post": { + "description": "Optical Character Recognition (OCR) detects printed text in an image and extracts the recognized characters into a machine-usable character stream. Upon success, the OCR results will be returned. 
Upon failure, the error code together with an error message will be returned. The error code can be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or InternalServerError.",
        "operationId": "RecognizePrintedText",
        "consumes": [
          "application/json"
        ],
        "produces": [
          "application/json"
        ],
        "parameters": [
          {
            "$ref": "#/parameters/DetectOrientation"
          },
          {
            "$ref": "../../Common/Parameters.json#/parameters/ImageUrl"
          },
          {
            "$ref": "#/parameters/OcrLanguage"
          }
        ],
        "responses": {
          "200": {
            "description": "The OCR results in the hierarchy of region/line/word. The results include text, bounding box for regions, lines and words. textAngle is the angle, in degrees, of the detected text with respect to the closest horizontal or vertical direction. After rotating the input image clockwise by this angle, the recognized text lines become horizontal or vertical. In combination with the orientation property it can be used to overlay recognition results correctly on the original image, by rotating either the original image or recognition results by a suitable angle around the center of the original image. If the angle cannot be confidently detected, this property is not present. 
If the image contains text at different angles, only part of the text will be recognized correctly.", - "schema": { - "$ref": "#/definitions/OcrResult" - } - }, - "default": { - "description": "Error response.", - "schema": { - "$ref": "#/definitions/ComputerVisionError" - } - } - }, - "x-ms-examples": { - "Successful Ocr request": { - "$ref": "./examples/SuccessfulOcrWithUrl.json" - } - } + "x-ms-examples": { + "Successful Ocr request": { + "$ref": "./examples/SuccessfulOcrWithUrl.json" + } + } + } + }, + "/describe?overload=url": { + "post": { + "description": "This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong.", + "operationId": "DescribeImage", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "parameters": [ + { + "name": "maxCandidates", + "in": "query", + "description": "Maximum number of candidate descriptions to be returned. The default is 1.", + "type": "string", + "required": false, + "default": "1" + }, + { + "$ref": "../../Common/Parameters.json#/parameters/ImageUrl" + } + ], + "responses": { + "200": { + "description": "Image description object.", + "schema": { + "$ref": "#/definitions/ImageDescription" } + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/ComputerVisionError" + } + } }, - "/describe?overload=url": { - "post": { - "description": "This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong.", - "operationId": "DescribeImage", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "parameters": [ - { - "name": "maxCandidates", - "in": "query", - "description": "Maximum number of candidate descriptions to be returned. 
The default is 1.", - "type": "string", - "required": false, - "default": "1" - }, - { - "$ref": "../../Common/Parameters.json#/parameters/ImageUrl" - } - ], - "responses": { - "200": { - "description": "Image description object.", - "schema": { - "$ref": "#/definitions/ImageDescription" - } - }, - "default": { - "description": "Error response.", - "schema": { - "$ref": "#/definitions/ComputerVisionError" - } - } - }, - "x-ms-examples": { - "Successful Describe request": { - "$ref": "./examples/SuccessfulDescribeWithUrl.json" - } - } + "x-ms-examples": { + "Successful Describe request": { + "$ref": "./examples/SuccessfulDescribeWithUrl.json" + } + } + } + }, + "/tag?overload=url": { + "post": { + "description": "This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag “cello” may be accompanied by the hint “musical instrument”. All tags are in English.", + "operationId": "TagImage", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "parameters": [ + { + "$ref": "../../Common/Parameters.json#/parameters/ImageUrl" + } + ], + "responses": { + "200": { + "description": "Image tags object.", + "schema": { + "$ref": "#/definitions/TagResult" } + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/ComputerVisionError" + } + } }, - "/tag?overload=url": { - "post": { - "description": "This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag “cello” may be accompanied by the hint “musical instrument”. All tags are in English.", - "operationId": "TagImage", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "parameters": [ - { - "$ref": "../../Common/Parameters.json#/parameters/ImageUrl" - } - ], - "responses": { - "200": { - "description": "Image tags object.", - "schema": { - "$ref": "#/definitions/TagResult" - } - }, - "default": { - "description": "Error response.", - "schema": { - "$ref": "#/definitions/ComputerVisionError" - } - } - }, - "x-ms-examples": { - "Successful Tag request": { - "$ref": "./examples/SuccessfulTagWithUrl.json" - } - } + "x-ms-examples": { + "Successful Tag request": { + "$ref": "./examples/SuccessfulTagWithUrl.json" + } + } + } + }, + "/models/{model}/analyze?overload=url": { + "post": { + "description": "This operation recognizes content within an image by applying a domain-specific model. The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET request. Currently, the API only provides a single domain-specific model: celebrities. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. 
If the request failed, the response will contain an error code and a message to help understand what went wrong.", + "operationId": "AnalyzeImageByDomain", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "parameters": [ + { + "name": "model", + "in": "path", + "description": "The domain-specific content to recognize.", + "required": true, + "type": "string", + "x-nullable": false, + "x-ms-enum": { + "name": "DomainModels", + "modelAsString": false + }, + "enum": [ + "Celebrities", + "Landmarks" + ] + }, + { + "$ref": "../../Common/Parameters.json#/parameters/ImageUrl" + } + ], + "responses": { + "200": { + "description": "Analysis result based on the domain model", + "schema": { + "$ref": "#/definitions/DomainModelResults" + } + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/ComputerVisionError" } + } }, - "/models/{model}/analyze?overload=url": { - "post": { - "description": "This operation recognizes content within an image by applying a domain-specific model. The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET request. Currently, the API only provides a single domain-specific model: celebrities. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong.", - "operationId": "AnalyzeImageByDomain", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "parameters": [ - { - "name": "model", - "in": "path", - "description": "The domain-specific content to recognize.", - "required": true, - "type": "string", - "x-nullable": false, - "x-ms-enum": { - "name": "DomainModels", - "modelAsString": false - }, - "enum": [ - "Celebrities", - "Landmarks" - ] - }, - { - "$ref": "../../Common/Parameters.json#/parameters/ImageUrl" - } - ], - "responses": { - "200": { - "description": "Analysis result based on the domain model", - "schema": { - "$ref": "#/definitions/DomainModelResults" - } - }, - "default": { - "description": "Error response.", - "schema": { - "$ref": "#/definitions/ComputerVisionError" - } - } - }, - "x-ms-examples": { - "Successful Domain Model analysis request": { - "$ref": "./examples/SuccessfulDomainModelWithUrl.json" - } - } + "x-ms-examples": { + "Successful Domain Model analysis request": { + "$ref": "./examples/SuccessfulDomainModelWithUrl.json" + } + } + } + }, + "/recognizeText?overload=url": { + "post": { + "description": "Recognize Text operation. When you use the Recognize Text interface, the response contains a field called “Operation-Location”. The “Operation-Location” field contains the URL that you must use for your Get Handwritten Text Operation Result operation.", + "operationId": "RecognizeText", + "parameters": [ + { + "$ref": "../../Common/Parameters.json#/parameters/ImageUrl" + }, + { + "$ref": "#/parameters/HandwritingBoolean" + } + ], + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "responses": { + "202": { + "description": "The service has accepted the request and will start processing later. It will return Accepted immediately and include an Operation-Location header. Client side should further query the operation status using the URL specified in this header. 
The operation ID will expire in 48 hours.", + "headers": { + "Operation-Location": { + "description": "URL to query for status of the operation. The operation ID will expire in 48 hours. ", + "type": "string" + } } + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/ComputerVisionError" + } + } }, - "/recognizeText?overload=url": { - "post": { - "description": "Recognize Text operation. When you use the Recognize Text interface, the response contains a field called “Operation-Location”. The “Operation-Location” field contains the URL that you must use for your Get Handwritten Text Operation Result operation.", - "operationId": "RecognizeText", - "parameters": [ - { - "$ref": "../../Common/Parameters.json#/parameters/ImageUrl" - }, - { - "$ref": "#/parameters/HandwritingBoolean" - } - ], - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "responses": { - "202": { - "description": "The service has accepted the request and will start processing later. It will return Accepted immediately and include an Operation-Location header. Client side should further query the operation status using the URL specified in this header. The operation ID will expire in 48 hours.", - "headers": { - "Operation-Location": { - "description": "URL to query for status of the operation. The operation ID will expire in 48 hours. ", - "type": "string" - } - } - }, - "default": { - "description": "Error response.", - "schema": { - "$ref": "#/definitions/ComputerVisionError" - } - } - }, - "x-ms-examples": { - "Successful Domain Model analysis request": { - "$ref": "./examples/SuccessfulRecognizeTextWithUrl.json" - } - } + "x-ms-examples": { + "Successful Domain Model analysis request": { + "$ref": "./examples/SuccessfulRecognizeTextWithUrl.json" + } + } + } + }, + "/textOperations/{operationId}": { + "get": { + "description": "This interface is used for getting text operation result. The URL to this interface should be retrieved from 'Operation-Location' field returned from Recognize Text interface.", + "operationId": "GetTextOperationResult", + "parameters": [ + { + "name": "operationId", + "in": "path", + "description": "Id of the text operation returned in the response of the 'Recognize Handwritten Text'", + "required": true, + "type": "string" + } + ], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "Returns the operation status.", + "schema": { + "$ref": "#/definitions/TextOperationResult" + } + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/ComputerVisionError" } + } }, - "/textOperations/{operationId}": { - "get": { - "description": "This interface is used for getting text operation result. 
The URL to this interface should be retrieved from 'Operation-Location' field returned from Recognize Text interface.", - "operationId": "GetTextOperationResult", - "parameters": [ - { - "name": "operationId", - "in": "path", - "description": "Id of the text operation returned in the response of the 'Recognize Handwritten Text'", - "required": true, - "type": "string" - } - ], - "produces": [ - "application/json" - ], - "responses": { - "200": { - "description": "Returns the operation status.", - "schema": { - "$ref": "#/definitions/TextOperationResult" - } - }, - "default": { - "description": "Error response.", - "schema": { - "$ref": "#/definitions/ComputerVisionError" - } - } - }, - "x-ms-examples": { - "Successful Domain Model analysis request": { - "$ref": "./examples/SuccessfulGetTextOperationResult.json" - } - } + "x-ms-examples": { + "Successful Domain Model analysis request": { + "$ref": "./examples/SuccessfulGetTextOperationResult.json" + } + } + } + } + }, + "x-ms-paths": { + "/analyze?overload=stream": { + "post": { + "description": "This operation extracts a rich set of visual features based on the image content.", + "operationId": "AnalyzeImageInStream", + "consumes": [ + "application/octet-stream" + ], + "produces": [ + "application/json" + ], + "parameters": [ + { + "$ref": "#/parameters/VisualFeatures" + }, + { + "name": "details", + "in": "query", + "description": "A string indicating which domain-specific details to return. Multiple values should be comma-separated. Valid visual feature types include:Celebrities - identifies celebrities if detected in the image.", + "type": "string", + "required": false, + "enum": [ + "Celebrities", + "Landmarks" + ] + }, + { + "name": "language", + "in": "query", + "description": "A string indicating which language to return. The service will return recognition results in specified language. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default.zh - Simplified Chinese.", + "type": "string", + "required": false, + "default": "en", + "enum": [ + "en", + "zh" + ] + }, + { + "$ref": "../../Common/Parameters.json#/parameters/ImageStream" + } + ], + "responses": { + "200": { + "description": "The response include the extracted features in JSON format. Here is the definitions for enumeration types clipart = 0, ambiguous = 1, normal-clipart = 2, good-clipart = 3. Non-LineDrawing = 0,LineDrawing = 1.", + "schema": { + "$ref": "#/definitions/ImageAnalysis" } + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/ComputerVisionError" + } + } } + } }, - "x-ms-paths": { - "/analyze?overload=stream": { - "post": { - "description": "This operation extracts a rich set of visual features based on the image content.", - "operationId": "AnalyzeImageInStream", - "consumes": [ - "application/octet-stream" - ], - "produces": [ - "application/json" - ], - "parameters": [ - { - "$ref": "#/parameters/VisualFeatures" - }, - { - "name": "details", - "in": "query", - "description": "A string indicating which domain-specific details to return. Multiple values should be comma-separated. Valid visual feature types include:Celebrities - identifies celebrities if detected in the image.", - "type": "string", - "required": false, - "enum": [ - "Celebrities", - "Landmarks" - ] - }, - { - "name": "language", - "in": "query", - "description": "A string indicating which language to return. The service will return recognition results in specified language. 
If this parameter is not specified, the default value is \"en\". Supported languages: en - English (default), zh - Simplified Chinese.",
            "type": "string",
            "required": false,
            "default": "en",
            "enum": [
              "en",
              "zh"
            ]
          },
          {
            "$ref": "../../Common/Parameters.json#/parameters/ImageStream"
          }
        ],
        "responses": {
          "200": {
            "description": "The response includes the extracted features in JSON format. Here are the definitions for the enumeration types. ClipartType: non-clipart = 0, ambiguous = 1, normal-clipart = 2, good-clipart = 3. LineDrawingType: non-line-drawing = 0, line-drawing = 1.",
            "schema": {
              "$ref": "#/definitions/ImageAnalysis"
            }
          },
          "default": {
            "description": "Error response.",
            "schema": {
              "$ref": "#/definitions/ComputerVisionError"
            }
          }
        }
      }
    },
    "/generateThumbnail?overload=stream": {
      "post": {
        "description": "This operation generates a thumbnail image with the user-specified width and height. By default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates based on the ROI. Smart cropping helps when you specify an aspect ratio that differs from that of the input image. A successful response contains the thumbnail image binary. If the request failed, the response contains an error code and a message to help determine what went wrong.",
        "operationId": "GenerateThumbnailInStream",
        "consumes": [
          "application/octet-stream"
        ],
        "produces": [
          "application/octet-stream",
          "application/json"
        ],
        "parameters": [
          {
            "name": "width",
            "type": "integer",
            "in": "query",
            "required": true,
            "minimum": 1,
            "maximum": 1023,
            "description": "Width of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50."
          },
          {
            "name": "height",
            "type": "integer",
            "in": "query",
            "required": true,
            "minimum": 1,
            "maximum": 1023,
            "description": "Height of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50." 
- }, - { - "$ref": "../../Common/Parameters.json#/parameters/ImageStream" - }, - { - "name": "smartCropping", - "type": "boolean", - "in": "query", - "required": false, - "default": false, - "description": "Boolean flag for enabling smart cropping." - } - ], - "responses": { - "200": { - "description": "The generated thumbnail in binary format.", - "schema": { - "type": "file" - } - }, - "default": { - "description": "Error response.", - "schema": { - "$ref": "#/definitions/ComputerVisionError" - } - } - } + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/ComputerVisionError" } - }, - "/ocr?overload=stream": { - "post": { - "description": "Optical Character Recognition (OCR) detects printed text in an image and extracts the recognized characters into a machine-usable character stream. Upon success, the OCR results will be returned. Upon failure, the error code together with an error message will be returned. The error code can be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or InternalServerError.", - "operationId": "RecognizePrintedTextInStream", - "consumes": [ - "application/octet-stream" - ], - "produces": [ - "application/json" - ], - "parameters": [ - { - "$ref": "#/parameters/OcrLanguage" - }, - { - "$ref": "#/parameters/DetectOrientation" - }, - { - "$ref": "../../Common/Parameters.json#/parameters/ImageStream" - } - ], - "responses": { - "200": { - "description": "The OCR results in the hierarchy of region/line/word. The results include text, bounding box for regions, lines and words. The angle, in degrees, of the detected text with respect to the closest horizontal or vertical direction. After rotating the input image clockwise by this angle, the recognized text lines become horizontal or vertical. In combination with the orientation property it can be used to overlay recognition results correctly on the original image, by rotating either the original image or recognition results by a suitable angle around the center of the original image. If the angle cannot be confidently detected, this property is not present. If the image contains text at different angles, only part of the text will be recognized correctly.", - "schema": { - "$ref": "#/definitions/OcrResult" - } - }, - "default": { - "description": "Error response.", - "schema": { - "$ref": "#/definitions/ComputerVisionError" - } - } - } + } + } + } + }, + "/ocr?overload=stream": { + "post": { + "description": "Optical Character Recognition (OCR) detects printed text in an image and extracts the recognized characters into a machine-usable character stream. Upon success, the OCR results will be returned. Upon failure, the error code together with an error message will be returned. The error code can be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or InternalServerError.", + "operationId": "RecognizePrintedTextInStream", + "consumes": [ + "application/octet-stream" + ], + "produces": [ + "application/json" + ], + "parameters": [ + { + "$ref": "#/parameters/OcrLanguage" + }, + { + "$ref": "#/parameters/DetectOrientation" + }, + { + "$ref": "../../Common/Parameters.json#/parameters/ImageStream" + } + ], + "responses": { + "200": { + "description": "The OCR results in the hierarchy of region/line/word. The results include text, bounding box for regions, lines and words. 
The angle, in degrees, of the detected text with respect to the closest horizontal or vertical direction. After rotating the input image clockwise by this angle, the recognized text lines become horizontal or vertical. In combination with the orientation property it can be used to overlay recognition results correctly on the original image, by rotating either the original image or recognition results by a suitable angle around the center of the original image. If the angle cannot be confidently detected, this property is not present. If the image contains text at different angles, only part of the text will be recognized correctly.", + "schema": { + "$ref": "#/definitions/OcrResult" } - }, - "/describe?overload=stream": { - "post": { - "description": "This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong.", - "operationId": "DescribeImageInStream", - "consumes": [ - "application/octet-stream" - ], - "produces": [ - "application/json" - ], - "parameters": [ - { - "name": "maxCandidates", - "in": "query", - "description": "Maximum number of candidate descriptions to be returned. The default is 1.", - "type": "string", - "required": false, - "default": "1" - }, - { - "$ref": "../../Common/Parameters.json#/parameters/ImageStream" - } - ], - "responses": { - "200": { - "description": "Image description object.", - "schema": { - "$ref": "#/definitions/ImageDescription" - } - }, - "default": { - "description": "Error response.", - "schema": { - "$ref": "#/definitions/ComputerVisionError" - } - } - } + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/ComputerVisionError" } - }, - "/tag?overload=stream": { - "post": { - "description": "This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag “cello” may be accompanied by the hint “musical instrument”. All tags are in English.", - "operationId": "TagImageInStream", - "consumes": [ - "application/octet-stream" - ], - "produces": [ - "application/json" - ], - "parameters": [ - { - "$ref": "../../Common/Parameters.json#/parameters/ImageStream" - } - ], - "responses": { - "200": { - "description": "Image tags object.", - "schema": { - "$ref": "#/definitions/TagResult" - } - }, - "default": { - "description": "Error response.", - "schema": { - "$ref": "#/definitions/ComputerVisionError" - } - } - } + } + } + } + }, + "/describe?overload=stream": { + "post": { + "description": "This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. 
More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong.", + "operationId": "DescribeImageInStream", + "consumes": [ + "application/octet-stream" + ], + "produces": [ + "application/json" + ], + "parameters": [ + { + "name": "maxCandidates", + "in": "query", + "description": "Maximum number of candidate descriptions to be returned. The default is 1.", + "type": "string", + "required": false, + "default": "1" + }, + { + "$ref": "../../Common/Parameters.json#/parameters/ImageStream" + } + ], + "responses": { + "200": { + "description": "Image description object.", + "schema": { + "$ref": "#/definitions/ImageDescription" } - }, - "/models/{model}/analyze?overload=stream": { - "post": { - "description": "This operation recognizes content within an image by applying a domain-specific model. The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET request. Currently, the API only provides a single domain-specific model: celebrities. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong.", - "operationId": "AnalyzeImageByDomainInStream", - "consumes": [ - "application/octet-stream" - ], - "produces": [ - "application/json" - ], - "parameters": [ - { - "name": "model", - "in": "path", - "description": "The domain-specific content to recognize.", - "required": true, - "type": "string" - }, - { - "$ref": "../../Common/Parameters.json#/parameters/ImageStream" - } - ], - "responses": { - "200": { - "description": "Analysis result based on the domain model", - "schema": { - "$ref": "#/definitions/DomainModelResults" - } - }, - "default": { - "description": "Error response.", - "schema": { - "$ref": "#/definitions/ComputerVisionError" - } - } - } + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/ComputerVisionError" } - }, - "/recognizeText?overload=stream": { - "post": { - "description": "Recognize Text operation. When you use the Recognize Text interface, the response contains a field called “Operation-Location”. The “Operation-Location” field contains the URL that you must use for your Get Handwritten Text Operation Result operation.", - "operationId": "RecognizeTextInStream", - "parameters": [ - { - "$ref": "#/parameters/HandwritingBoolean" - }, - { - "$ref": "../../Common/Parameters.json#/parameters/ImageStream" - } - ], - "consumes": [ - "application/octet-stream" - ], - "produces": [ - "application/json" - ], - "responses": { - "202": { - "description": "The service has accepted the request and will start processing later.", - "headers": { - "Operation-Location": { - "description": "URL to query for status of the operation. The operation ID will expire in 48 hours. 
", - "type": "string" - } - } - }, - "default": { - "description": "Error response.", - "schema": { - "$ref": "#/definitions/ComputerVisionError" - } - } - } + } + } + } + }, + "/tag?overload=stream": { + "post": { + "description": "This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag “cello” may be accompanied by the hint “musical instrument”. All tags are in English.", + "operationId": "TagImageInStream", + "consumes": [ + "application/octet-stream" + ], + "produces": [ + "application/json" + ], + "parameters": [ + { + "$ref": "../../Common/Parameters.json#/parameters/ImageStream" + } + ], + "responses": { + "200": { + "description": "Image tags object.", + "schema": { + "$ref": "#/definitions/TagResult" + } + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/ComputerVisionError" } + } } + } }, - "definitions": { - "TextOperationResult": { - "type": "object", - "properties": { - "status": { - "type": "string", - "description": "Status of the text operation.", - "enum": [ - "Not Started", - "Running", - "Failed", - "Succeeded" - ], - "x-ms-enum": { - "name": "TextOperationStatusCodes", - "modelAsString": false - }, - "x-nullable": false - }, - "recognitionResult": { - "$ref": "#/definitions/RecognitionResult" - } + "/models/{model}/analyze?overload=stream": { + "post": { + "description": "This operation recognizes content within an image by applying a domain-specific model. The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET request. Currently, the API only provides a single domain-specific model: celebrities. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong.", + "operationId": "AnalyzeImageByDomainInStream", + "consumes": [ + "application/octet-stream" + ], + "produces": [ + "application/json" + ], + "parameters": [ + { + "name": "model", + "in": "path", + "description": "The domain-specific content to recognize.", + "required": true, + "type": "string" + }, + { + "$ref": "../../Common/Parameters.json#/parameters/ImageStream" + } + ], + "responses": { + "200": { + "description": "Analysis result based on the domain model", + "schema": { + "$ref": "#/definitions/DomainModelResults" } - }, - "RecognitionResult": { - "type": "object", - "properties": { - "lines": { - "type": "array", - "items": { - "$ref": "#/definitions/Line" - } - } + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/ComputerVisionError" } - }, - "Line": { - "type": "object", - "properties": { - "boundingBox": { - "$ref": "#/definitions/BoundingBox" - }, - "text": { - "type": "string" - }, - "words": { - "type": "array", - "items": { - "$ref": "#/definitions/Word" - } - } + } + } + } + }, + "/recognizeText?overload=stream": { + "post": { + "description": "Recognize Text operation. When you use the Recognize Text interface, the response contains a field called “Operation-Location”. 
The “Operation-Location” field contains the URL that you must use for your Get Handwritten Text Operation Result operation.", + "operationId": "RecognizeTextInStream", + "parameters": [ + { + "$ref": "#/parameters/HandwritingBoolean" + }, + { + "$ref": "../../Common/Parameters.json#/parameters/ImageStream" + } + ], + "consumes": [ + "application/octet-stream" + ], + "produces": [ + "application/json" + ], + "responses": { + "202": { + "description": "The service has accepted the request and will start processing later.", + "headers": { + "Operation-Location": { + "description": "URL to query for status of the operation. The operation ID will expire in 48 hours. ", + "type": "string" + } } - }, - "Word": { - "type": "object", - "properties": { - "boundingBox": { - "$ref": "#/definitions/BoundingBox" - }, - "text": { - "type": "string" - } + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/ComputerVisionError" } + } + } + } + } + }, + "definitions": { + "TextOperationResult": { + "type": "object", + "properties": { + "status": { + "type": "string", + "description": "Status of the text operation.", + "enum": [ + "Not Started", + "Running", + "Failed", + "Succeeded" + ], + "x-ms-enum": { + "name": "TextOperationStatusCodes", + "modelAsString": false + }, + "x-nullable": false }, - "BoundingBox": { - "type": "array", - "items": { - "type": "integer", - "x-nullable": false - } + "recognitionResult": { + "$ref": "#/definitions/RecognitionResult" + } + } + }, + "RecognitionResult": { + "type": "object", + "properties": { + "lines": { + "type": "array", + "items": { + "$ref": "#/definitions/Line" + } + } + } + }, + "Line": { + "type": "object", + "properties": { + "boundingBox": { + "$ref": "#/definitions/BoundingBox" }, - "ImageAnalysis": { - "type": "object", - "description": "Result of AnalyzeImage operation.", - "properties": { - "categories": { - "type": "array", - "description": "An array indicating identified categories.", - "items": { - "$ref": "#/definitions/Category" - } - }, - "adult": { - "description": "A property scoring on whether the image is adult-oriented and/or racy.", - "$ref": "#/definitions/AdultInfo" - }, - "color": { - "description": "A property scoring on color spectrums.", - "$ref": "#/definitions/ColorInfo" - }, - "imageType": { - "description": "A property indicating type of image (whether it's clipart or line drawing)", - "$ref": "#/definitions/ImageType" - }, - "tags": { - "type": "array", - "description": "A list of tags with confidence level.", - "items": { - "$ref": "#/definitions/ImageTag" - } - }, - "description": { - "description": "Description of the image.", - "$ref": "#/definitions/ImageDescriptionDetails" - }, - "faces": { - "type": "array", - "description": "An array of possible faces within the image.", - "items": { - "$ref": "#/definitions/FaceDescription" - } - }, - "requestId": { - "type": "string", - "description": "Id of the request for tracking purposes." - }, - "metadata": { - "description": "Image metadata", - "$ref": "#/definitions/ImageMetadata" - } - } + "text": { + "type": "string" }, - "OcrResult": { - "type": "object", - "properties": { - "language": { - "$ref": "#/definitions/OcrResult" - }, - "textAngle": { - "type": "number", - "format": "double", - "description": "The angle, in degrees, of the detected text with respect to the closest horizontal or vertical direction. After rotating the input image clockwise by this angle, the recognized text lines become horizontal or vertical. 
In combination with the orientation property it can be used to overlay recognition results correctly on the original image, by rotating either the original image or recognition results by a suitable angle around the center of the original image. If the angle cannot be confidently detected, this property is not present. If the image contains text at different angles, only part of the text will be recognized correctly." - }, - "orientation": { - "type": "string", - "description": "Orientation of the text recognized in the image. The value (up,down,left, or right) refers to the direction that the top of the recognized text is facing, after the image has been rotated around its center according to the detected text angle (see textAngle property)." - }, - "regions": { - "type": "array", - "description": "An array of objects, where each object represents a region of recognized text.", - "items": { - "$ref": "#/definitions/OcrRegion" - } - } - } + "words": { + "type": "array", + "items": { + "$ref": "#/definitions/Word" + } + } + } + }, + "Word": { + "type": "object", + "properties": { + "boundingBox": { + "$ref": "#/definitions/BoundingBox" }, - "OcrRegion": { - "type": "object", - "description": "A region consists of multiple lines (e.g. a column of text in a multi-column document).", - "properties": { - "boundingBox": { - "type": "string", - "description": "Bounding box of a recognized region. The four integers represent the x-coordinate of the left edge, the y-coordinate of the top edge, width, and height of the bounding box, in the coordinate system of the input image, after it has been rotated around its center according to the detected text angle (see textAngle property), with the origin at the top-left corner, and the y-axis pointing down." - }, - "lines": { - "type": "array", - "items": { - "$ref": "#/definitions/OcrLine" - } - } - } + "text": { + "type": "string" + } + } + }, + "BoundingBox": { + "type": "array", + "items": { + "type": "integer", + "x-nullable": false + } + }, + "ImageAnalysis": { + "type": "object", + "description": "Result of AnalyzeImage operation.", + "properties": { + "categories": { + "type": "array", + "description": "An array indicating identified categories.", + "items": { + "$ref": "#/definitions/Category" + } }, - "OcrLine": { - "type": "object", - "description": "An object describing a single recognized line of text.", - "properties": { - "boundingBox": { - "type": "string", - "description": "Bounding box of a recognized line. The four integers represent the x-coordinate of the left edge, the y-coordinate of the top edge, width, and height of the bounding box, in the coordinate system of the input image, after it has been rotated around its center according to the detected text angle (see textAngle property), with the origin at the top-left corner, and the y-axis pointing down." - }, - "words": { - "type": "array", - "description": "An array of objects, where each object represents a recognized word.", - "items": { - "$ref": "#/definitions/OcrWord" - } - } - } + "adult": { + "description": "A property scoring on whether the image is adult-oriented and/or racy.", + "$ref": "#/definitions/AdultInfo" }, - "OcrWord": { - "type": "object", - "description": "Information on a recognized word.", - "properties": { - "boundingBox": { - "type": "string", - "description": "Bounding box of a recognized word. 
The four integers represent the x-coordinate of the left edge, the y-coordinate of the top edge, width, and height of the bounding box, in the coordinate system of the input image, after it has been rotated around its center according to the detected text angle (see textAngle property), with the origin at the top-left corner, and the y-axis pointing down." - }, - "text": { - "type": "string", - "description": "String value of a recognized word." - } - } + "color": { + "description": "A property scoring on color spectrums.", + "$ref": "#/definitions/ColorInfo" }, - "ListModelsResult": { - "type": "object", - "description": "Result of the List Domain Models operation.", - "properties": { - "models": { - "type": "array", - "readOnly": true, - "description": "An array of supported models.", - "items": { - "$ref": "#/definitions/ModelDescription" - } - } - } + "imageType": { + "description": "A property indicating type of image (whether it's clipart or line drawing)", + "$ref": "#/definitions/ImageType" }, - "ModelDescription": { - "type": "object", - "description": "An object describing supported model by name and categories.", - "properties": { - "name": { - "type": "string" - }, - "categories": { - "type": "array", - "items": { - "type": "string" - } - } - } + "tags": { + "type": "array", + "description": "A list of tags with confidence level.", + "items": { + "$ref": "#/definitions/ImageTag" + } }, - "DomainModelResults": { - "type": "object", - "description": "Result of image analysis using a specific domain model including additional metadata.", - "properties": { - "result": { - "description": "Result of the image analysis.", - "x-ms-client-flatten": true, - "$ref": "#/definitions/DomainModelResult" - }, - "requestId": { - "type": "string", - "description": "Id of the REST API request." - }, - "metadata": { - "description": "Additional image metadata", - "$ref": "#/definitions/ImageMetadata" - } - } + "description": { + "description": "Description of the image.", + "$ref": "#/definitions/ImageDescriptionDetails" }, - "DomainModelResult": { - "type": "object", - "properties": { - "celebrities": { - "type": "array", - "description": "An array of possible celebritied identified in the image.", - "items": { - "$ref": "#/definitions/CelebritiesModel" - } - } - } + "faces": { + "type": "array", + "description": "An array of possible faces within the image.", + "items": { + "$ref": "#/definitions/FaceDescription" + } }, - "ImageDescription": { - "type": "object", - "description": "A collection of content tags, along with a list of captions sorted by confidence level, and image metadata.", - "properties": { - "description": { - "x-ms-client-flatten": true, - "$ref": "#/definitions/ImageDescriptionDetails" - } - } + "requestId": { + "type": "string", + "description": "Id of the request for tracking purposes." }, - "TagResult": { - "type": "object", - "description": "The results of a image tag operation, including any tags and image metadata.", - "properties": { - "tags": { - "type": "array", - "description": "A list of tags with confidence level.", - "items": { - "$ref": "#/definitions/ImageTag" - } - }, - "requestId": { - "type": "string", - "description": "Id of the REST API request." 
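The stream overloads added above post raw image bytes, and Recognize Text answers only with a 202 plus an Operation-Location header to poll. A minimal sketch of that flow, using the third-party requests package; the region and key are placeholders, and it assumes the "?overload=stream" suffix is only a Swagger path disambiguator so the service URL is plain /recognizeText:

import time
import requests

region, key = "westus", "{API key}"  # placeholder credentials
base = "https://" + region + ".api.cognitive.microsoft.com/vision/v1.0"

with open("handwriting.jpg", "rb") as f:  # hypothetical local image
    image_bytes = f.read()

# RecognizeTextInStream: octet-stream body plus the detectHandwriting query flag.
resp = requests.post(
    base + "/recognizeText",
    params={"detectHandwriting": "true"},
    headers={"Ocp-Apim-Subscription-Key": key,
             "Content-Type": "application/octet-stream"},
    data=image_bytes,
)
resp.raise_for_status()  # the service answers 202 Accepted on success

# Poll the URL from the Operation-Location header; the operation ID expires
# after 48 hours, and status follows the TextOperationStatusCodes enum.
op_url = resp.headers["Operation-Location"]
result = {"status": "Running"}
while result["status"] not in ("Succeeded", "Failed"):
    time.sleep(1)
    result = requests.get(op_url, headers={"Ocp-Apim-Subscription-Key": key}).json()

if result["status"] == "Succeeded":
    for line in result["recognitionResult"]["lines"]:
        print(line["text"])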
- }, - "metadata": { - "description": "Image metadata", - "$ref": "#/definitions/ImageMetadata" - } - } + "metadata": { + "description": "Image metadata", + "$ref": "#/definitions/ImageMetadata" + } + } + }, + "OcrResult": { + "type": "object", + "properties": { + "language": { + "$ref": "#/definitions/OcrResult" }, - "ImageDescriptionDetails": { - "type": "object", - "description": "A collection of content tags, along with a list of captions sorted by confidence level, and image metadata.", - "properties": { - "tags": { - "type": "array", - "description": "A collection of image tags.", - "items": { - "type": "string" - } - }, - "captions": { - "type": "array", - "description": "A list of captions, sorted by confidence level.", - "items": { - "$ref": "#/definitions/ImageCaption" - } - }, - "requestId": { - "type": "string", - "description": "Id of the REST API request." - }, - "metadata": { - "description": "Image metadata", - "$ref": "#/definitions/ImageMetadata" - } - } + "textAngle": { + "type": "number", + "format": "double", + "description": "The angle, in degrees, of the detected text with respect to the closest horizontal or vertical direction. After rotating the input image clockwise by this angle, the recognized text lines become horizontal or vertical. In combination with the orientation property it can be used to overlay recognition results correctly on the original image, by rotating either the original image or recognition results by a suitable angle around the center of the original image. If the angle cannot be confidently detected, this property is not present. If the image contains text at different angles, only part of the text will be recognized correctly." }, - "ImageCaption": { - "type": "object", - "description": "An image caption, i.e. a brief description of what the image depicts.", - "properties": { - "text": { - "type": "string", - "description": "The text of the caption" - }, - "confidence": { - "type": "number", - "format": "double", - "description": "The level of confidence the service has in the caption" - } - } + "orientation": { + "type": "string", + "description": "Orientation of the text recognized in the image. The value (up,down,left, or right) refers to the direction that the top of the recognized text is facing, after the image has been rotated around its center according to the detected text angle (see textAngle property)." }, - "ImageTag": { - "type": "object", - "description": "An image caption, i.e. a brief description of what the image depicts.", - "properties": { - "name": { - "type": "string", - "description": "The tag value" - }, - "confidence": { - "type": "number", - "format": "double", - "description": "The level of confidence the service has in the caption" - } - } + "regions": { + "type": "array", + "description": "An array of objects, where each object represents a region of recognized text.", + "items": { + "$ref": "#/definitions/OcrRegion" + } + } + } + }, + "OcrRegion": { + "type": "object", + "description": "A region consists of multiple lines (e.g. a column of text in a multi-column document).", + "properties": { + "boundingBox": { + "type": "string", + "description": "Bounding box of a recognized region. 
The four integers represent the x-coordinate of the left edge, the y-coordinate of the top edge, width, and height of the bounding box, in the coordinate system of the input image, after it has been rotated around its center according to the detected text angle (see textAngle property), with the origin at the top-left corner, and the y-axis pointing down." }, - "ImageMetadata": { - "type": "object", - "description": "Image metadata", - "properties": { - "width": { - "type": "integer", - "format": "int32", - "description": "Image width" - }, - "height": { - "type": "integer", - "format": "int32", - "description": "Image height" - }, - "format": { - "type": "string", - "description": "Image format" - } - } + "lines": { + "type": "array", + "items": { + "$ref": "#/definitions/OcrLine" + } + } + } + }, + "OcrLine": { + "type": "object", + "description": "An object describing a single recognized line of text.", + "properties": { + "boundingBox": { + "type": "string", + "description": "Bounding box of a recognized line. The four integers represent the x-coordinate of the left edge, the y-coordinate of the top edge, width, and height of the bounding box, in the coordinate system of the input image, after it has been rotated around its center according to the detected text angle (see textAngle property), with the origin at the top-left corner, and the y-axis pointing down." }, - "CelebritiesModel": { - "type": "object", - "description": "An object describing possible celebrity identification.", - "properties": { - "name": { - "type": "string", - "description": "Name of the celebrity." - }, - "confidence": { - "type": "number", - "format": "double", - "description": "Level of confidence ranging from 0 to 1." - }, - "faceRectangle": { - "$ref": "#/definitions/FaceRectangle" - } - } + "words": { + "type": "array", + "description": "An array of objects, where each object represents a recognized word.", + "items": { + "$ref": "#/definitions/OcrWord" + } + } + } + }, + "OcrWord": { + "type": "object", + "description": "Information on a recognized word.", + "properties": { + "boundingBox": { + "type": "string", + "description": "Bounding box of a recognized word. The four integers represent the x-coordinate of the left edge, the y-coordinate of the top edge, width, and height of the bounding box, in the coordinate system of the input image, after it has been rotated around its center according to the detected text angle (see textAngle property), with the origin at the top-left corner, and the y-axis pointing down." }, - "FaceRectangle": { - "type": "object", - "description": "An object describing face rectangle.", - "properties": { - "left": { - "type": "integer", - "description": "X-coordinate of the top left point of the face." - }, - "top": { - "type": "integer", - "description": "Y-coordinate of the top left point of the face." - }, - "width": { - "type": "integer", - "description": "Width measured from the top-left point of the face." - }, - "height": { - "type": "integer", - "description": "Height measured from the top-left point of the face." - } - } + "text": { + "type": "string", + "description": "String value of a recognized word." 
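Note that OcrRegion, OcrLine and OcrWord all serialize boundingBox as a comma-separated string of four integers (left x, top y, width, height) in the rotated image's coordinate system, unlike the integer-array BoundingBox used by the text operations. A small parsing sketch, using a value taken from the OCR example later in this patch:

def parse_ocr_bounding_box(box: str) -> tuple:
    """Split e.g. "462,379,497,258" into (x, y, width, height)."""
    x, y, w, h = (int(v) for v in box.split(","))
    return x, y, w, h

x, y, w, h = parse_ocr_bounding_box("462,379,497,258")
print("top-left:", (x, y), "bottom-right:", (x + w, y + h))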
+ } + } + }, + "ListModelsResult": { + "type": "object", + "description": "Result of the List Domain Models operation.", + "properties": { + "models": { + "type": "array", + "readOnly": true, + "description": "An array of supported models.", + "items": { + "$ref": "#/definitions/ModelDescription" + } + } + } + }, + "ModelDescription": { + "type": "object", + "description": "An object describing supported model by name and categories.", + "properties": { + "name": { + "type": "string" }, - "FaceDescription": { - "type": "object", - "description": "An object describing a face identified in the image.", - "properties": { - "age": { - "type": "integer", - "description": "Possible age of the face." - }, - "gender": { - "type": "string", - "description": "Possible gender of the face.", - "enum": [ - "Male", - "Female" - ] - }, - "faceRectangle": { - "$ref": "#/definitions/FaceRectangle" - } - } + "categories": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "DomainModelResults": { + "type": "object", + "description": "Result of image analysis using a specific domain model including additional metadata.", + "properties": { + "result": { + "description": "Result of the image analysis.", + "x-ms-client-flatten": true, + "$ref": "#/definitions/DomainModelResult" }, - "ImageType": { - "type": "object", - "description": "An object providing possible image types and matching confidence levels.", - "properties": { - "clipArtType": { - "type": "number", - "description": "Confidence level that the image is a clip art." - }, - "lineDrawingType": { - "type": "number", - "description": "Confidence level that the image is a line drawing." - } - } + "requestId": { + "type": "string", + "description": "Id of the REST API request." }, - "ColorInfo": { - "type": "object", - "description": "An object providing additional metadata describing color attributes.", - "properties": { - "dominantColorForeground": { - "type": "string", - "description": "Possible dominant foreground color." - }, - "dominantColorBackground": { - "type": "string", - "description": "Possible dominant background color." - }, - "dominantColors": { - "type": "array", - "description": "An array of possible dominant colors.", - "items": { - "type": "string" - } - }, - "accentColor": { - "type": "string", - "description": "Possible accent color." - }, - "isBWImg": { - "type": "boolean", - "description": "A value indicating if the image is black and white." 
- } - } + "metadata": { + "description": "Additional image metadata", + "$ref": "#/definitions/ImageMetadata" + } + } + }, + "DomainModelResult": { + "type": "object", + "properties": { + "celebrities": { + "type": "array", + "description": "An array of possible celebritied identified in the image.", + "items": { + "$ref": "#/definitions/CelebritiesModel" + } + } + } + }, + "ImageDescription": { + "type": "object", + "description": "A collection of content tags, along with a list of captions sorted by confidence level, and image metadata.", + "properties": { + "description": { + "x-ms-client-flatten": true, + "$ref": "#/definitions/ImageDescriptionDetails" + } + } + }, + "TagResult": { + "type": "object", + "description": "The results of a image tag operation, including any tags and image metadata.", + "properties": { + "tags": { + "type": "array", + "description": "A list of tags with confidence level.", + "items": { + "$ref": "#/definitions/ImageTag" + } }, - "AdultInfo": { - "type": "object", - "description": "An object describing whether the image contains adult-oriented content and/or is racy.", - "properties": { - "isAdultContent": { - "type": "boolean", - "x-nullable" : false, - "description": "A value indicating if the image contains adult-oriented content." - }, - "isRacyContent": { - "type": "boolean", - "x-nullable" : false, - "description": "A value indicating if the image is race." - }, - "adultScore": { - "type": "number", - "format": "double", - "x-nullable" : false, - "description": "Score from 0 to 1 that indicates how much of adult content is within the image." - }, - "racyScore": { - "type": "number", - "format": "double", - "x-nullable" : false, - "description": "Score from 0 to 1 that indicates how suggestive is the image." - } - } + "requestId": { + "type": "string", + "description": "Id of the REST API request." }, - "Category": { - "type": "object", - "description": "An object describing identified category.", - "properties": { - "name": { - "type": "string", - "description": "Name of the category." - }, - "score": { - "type": "number", - "format": "double", - "description": "Scoring of the category." 
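The DomainModelResults/DomainModelResult shapes here are what the /models/{model}/analyze operation defined earlier returns. A hedged sketch of that call for the celebrities model, with the region, key, and image URL as placeholders:

import requests

region, key = "westus", "{API key}"  # placeholders
resp = requests.post(
    "https://" + region + ".api.cognitive.microsoft.com/vision/v1.0/models/celebrities/analyze",
    headers={"Ocp-Apim-Subscription-Key": key, "Content-Type": "application/json"},
    json={"url": "{Image Url here}"},
)
# DomainModelResults: flattened result plus requestId and metadata.
for celeb in resp.json().get("result", {}).get("celebrities", []):
    print(celeb["name"], celeb["confidence"], celeb["faceRectangle"])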
- }, - "detail": { - "type": "object", - "description": "Additional category detail if available.", - "$ref": "#/definitions/CategoryDetail" - } - } + "metadata": { + "description": "Image metadata", + "$ref": "#/definitions/ImageMetadata" + } + } + }, + "ImageDescriptionDetails": { + "type": "object", + "description": "A collection of content tags, along with a list of captions sorted by confidence level, and image metadata.", + "properties": { + "tags": { + "type": "array", + "description": "A collection of image tags.", + "items": { + "type": "string" + } }, - "CategoryDetail": { - "type": "object", - "description": "An object describing additional category details.", - "properties": { - "celebrities": { - "type": "array", - "description": "An array of celebrities if any identified.", - "items": { - "$ref": "#/definitions/CelebritiesModel" - } - } - } + "captions": { + "type": "array", + "description": "A list of captions, sorted by confidence level.", + "items": { + "$ref": "#/definitions/ImageCaption" + } }, - "ComputerVisionError": { - "type": "object", - "required": [ - "code", - "message" - ], - "properties": { - "code": { - "type": "string", - "description": "The error code.", - "enum": [ - "InvalidImageUrl", - "InvalidImageFormat", - "InvalidImageSize", - "NotSupportedVisualFeature", - "NotSupportedImage", - "InvalidDetails", - "NotSupportedLanguage", - "BadArgument", - "FailedToProcess", - "Timeout", - "InternalServerError", - "Unspecified", - "StorageException" - ], - "x-ms-enum": { - "name": "ComputerVisionErrorCodes", - "modelAsString": false - } - }, - "message": { - "type": "string", - "description": "A message explaining the error reported by the service." - }, - "requestId": { - "type": "string", - "description": "A unique request identifier." - } - } + "requestId": { + "type": "string", + "description": "Id of the REST API request." + }, + "metadata": { + "description": "Image metadata", + "$ref": "#/definitions/ImageMetadata" } + } }, - "parameters": { - "VisualFeatures": { - "name": "visualFeatures", - "in": "query", - "description": "A string indicating what visual feature types to return. Multiple values should be comma-separated. Valid visual feature types include:Categories - categorizes image content according to a taxonomy defined in documentation. Tags - tags the image with a detailed list of words related to the image content. Description - describes the image content with a complete English sentence. Faces - detects if faces are present. If present, generate coordinates, gender and age. ImageType - detects if image is clipart or a line drawing. Color - determines the accent color, dominant color, and whether an image is black&white.Adult - detects if the image is pornographic in nature (depicts nudity or a sex act). Sexually suggestive content is also detected.", - "type": "array", - "x-ms-parameter-location": "method", - "required": false, - "collectionFormat": "csv", - "items": { - "type": "string", - "x-nullable": false, - "x-ms-enum": { - "name": "VisualFeatureTypes", - "modelAsString": false - }, - "enum": [ - "ImageType", - "Faces", - "Adult", - "Categories", - "Color", - "Tags", - "Description" - ] - } + "ImageCaption": { + "type": "object", + "description": "An image caption, i.e. 
a brief description of what the image depicts.",
+      "properties": {
+        "text": {
+          "type": "string",
+          "description": "The text of the caption"
+        },
+        "confidence": {
+          "type": "number",
+          "format": "double",
+          "description": "The level of confidence the service has in the caption"
+        }
+      }
+    },
+    "ImageTag": {
+      "type": "object",
+      "description": "An image tag, i.e. a word describing the image content.",
+      "properties": {
+        "name": {
+          "type": "string",
+          "description": "The tag value"
+        },
+        "confidence": {
+          "type": "number",
+          "format": "double",
+          "description": "The level of confidence the service has in the tag"
+        }
+      }
+    },
+    "ImageMetadata": {
+      "type": "object",
+      "description": "Image metadata",
+      "properties": {
+        "width": {
+          "type": "integer",
+          "format": "int32",
+          "description": "Image width"
+        },
+        "height": {
+          "type": "integer",
+          "format": "int32",
+          "description": "Image height"
+        },
+        "format": {
+          "type": "string",
+          "description": "Image format"
+        }
+      }
+    },
+    "CelebritiesModel": {
+      "type": "object",
+      "description": "An object describing possible celebrity identification.",
+      "properties": {
+        "name": {
+          "type": "string",
+          "description": "Name of the celebrity."
+        },
+        "confidence": {
+          "type": "number",
+          "format": "double",
+          "description": "Level of confidence ranging from 0 to 1."
+        },
+        "faceRectangle": {
+          "$ref": "#/definitions/FaceRectangle"
+        }
+      }
+    },
+    "FaceRectangle": {
+      "type": "object",
+      "description": "An object describing a face rectangle.",
+      "properties": {
+        "left": {
+          "type": "integer",
+          "description": "X-coordinate of the top-left point of the face."
+        },
+        "top": {
+          "type": "integer",
+          "description": "Y-coordinate of the top-left point of the face."
+        },
+        "width": {
+          "type": "integer",
+          "description": "Width measured from the top-left point of the face."
+        },
+        "height": {
+          "type": "integer",
+          "description": "Height measured from the top-left point of the face."
+        }
+      }
+    },
+    "FaceDescription": {
+      "type": "object",
+      "description": "An object describing a face identified in the image.",
+      "properties": {
+        "age": {
+          "type": "integer",
+          "description": "Possible age of the face."
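FaceRectangle above reports left/top plus width/height rather than two corner points, which is what drawing and cropping code usually wants. A tiny conversion sketch, using the face rectangle from the analyze example later in this patch:

def rectangle_corners(rect: dict) -> tuple:
    """Return (left, top, right, bottom) for a FaceRectangle-shaped dict."""
    return (rect["left"], rect["top"],
            rect["left"] + rect["width"], rect["top"] + rect["height"])

print(rectangle_corners({"left": 593, "top": 160, "width": 250, "height": 250}))
# (593, 160, 843, 410)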
+        },
+        "gender": {
+          "type": "string",
+          "description": "Possible gender of the face.",
+          "enum": [
+            "Male",
+            "Female"
+          ]
+        },
+        "faceRectangle": {
+          "$ref": "#/definitions/FaceRectangle"
+        }
+      }
+    },
+    "ImageType": {
+      "type": "object",
+      "description": "An object providing possible image types and matching confidence levels.",
+      "properties": {
+        "clipArtType": {
+          "type": "number",
+          "description": "Confidence level that the image is clip art."
+        },
+        "lineDrawingType": {
+          "type": "number",
+          "description": "Confidence level that the image is a line drawing."
+        }
+      }
+    },
+    "ColorInfo": {
+      "type": "object",
+      "description": "An object providing additional metadata describing color attributes.",
+      "properties": {
+        "dominantColorForeground": {
+          "type": "string",
+          "description": "Possible dominant foreground color."
+        },
+        "dominantColorBackground": {
+          "type": "string",
+          "description": "Possible dominant background color."
+        },
+        "dominantColors": {
+          "type": "array",
+          "description": "An array of possible dominant colors.",
+          "items": {
+            "type": "string"
+          }
+        },
+        "accentColor": {
+          "type": "string",
+          "description": "Possible accent color."
+        },
+        "isBWImg": {
+          "type": "boolean",
+          "description": "A value indicating if the image is black and white."
+        }
+      }
+    },
+    "AdultInfo": {
+      "type": "object",
+      "description": "An object describing whether the image contains adult-oriented content and/or is racy.",
+      "properties": {
+        "isAdultContent": {
+          "type": "boolean",
+          "x-nullable": false,
+          "description": "A value indicating if the image contains adult-oriented content."
+        },
+        "isRacyContent": {
+          "type": "boolean",
+          "x-nullable": false,
+          "description": "A value indicating if the image is racy."
+        },
+        "adultScore": {
+          "type": "number",
+          "format": "double",
+          "x-nullable": false,
+          "description": "Score from 0 to 1 that indicates how much adult content is in the image."
+        },
+        "racyScore": {
+          "type": "number",
+          "format": "double",
+          "x-nullable": false,
+          "description": "Score from 0 to 1 that indicates how suggestive the image is."
+        }
+      }
+    },
+    "Category": {
+      "type": "object",
+      "description": "An object describing an identified category.",
+      "properties": {
+        "name": {
+          "type": "string",
+          "description": "Name of the category."
+        },
+        "score": {
+          "type": "number",
+          "format": "double",
+          "description": "Scoring of the category."
+        },
+        "detail": {
+          "type": "object",
+          "description": "Additional category detail if available.",
+          "$ref": "#/definitions/CategoryDetail"
+        }
+      }
+    },
+    "CategoryDetail": {
+      "type": "object",
+      "description": "An object describing additional category details.",
+      "properties": {
+        "celebrities": {
+          "type": "array",
+          "description": "An array of celebrities, if any are identified.",
+          "items": {
+            "$ref": "#/definitions/CelebritiesModel"
+          }
+        }
+      }
+    },
+    "ComputerVisionError": {
+      "type": "object",
+      "required": [
+        "code",
+        "message"
+      ],
+      "properties": {
+        "code": {
+          "type": "string",
+          "description": "The error code.",
+          "enum": [
+            "InvalidImageUrl",
+            "InvalidImageFormat",
+            "InvalidImageSize",
+            "NotSupportedVisualFeature",
+            "NotSupportedImage",
+            "InvalidDetails",
+            "NotSupportedLanguage",
+            "BadArgument",
+            "FailedToProcess",
+            "Timeout",
+            "InternalServerError",
+            "Unspecified",
+            "StorageException"
+          ],
+          "x-ms-enum": {
+            "name": "ComputerVisionErrorCodes",
+            "modelAsString": false
+          }
+        },
+        "message": {
+          "type": "string",
+          "description": "A message explaining the error reported by the service."
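Every operation in this spec declares the ComputerVisionError shape just defined as its default response. A small defensive helper, assuming the requests library, where resp is any Response from the endpoints above:

import requests

def raise_for_vision_error(resp: requests.Response) -> None:
    """Surface a non-2xx Computer Vision response as a readable exception."""
    if resp.ok:
        return
    try:
        # Expected error shape: {"code": ..., "message": ..., "requestId": ...}
        err = resp.json()
        detail = "%s: %s (requestId=%s)" % (
            err.get("code"), err.get("message"), err.get("requestId"))
    except ValueError:
        detail = resp.text  # body was not JSON
    raise RuntimeError(
        "Computer Vision call failed (%s): %s" % (resp.status_code, detail))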
+        },
+        "requestId": {
+          "type": "string",
+          "description": "A unique request identifier."
+        }
+      }
+    }
+  },
+  "parameters": {
+    "VisualFeatures": {
+      "name": "visualFeatures",
+      "in": "query",
+      "description": "A string indicating what visual feature types to return. Multiple values should be comma-separated. Valid visual feature types include: Categories - categorizes image content according to a taxonomy defined in documentation. Tags - tags the image with a detailed list of words related to the image content. Description - describes the image content with a complete English sentence. Faces - detects if faces are present. If present, generates coordinates, gender and age. ImageType - detects if the image is clip art or a line drawing. Color - determines the accent color, dominant color, and whether an image is black & white. Adult - detects if the image is pornographic in nature (depicts nudity or a sex act). Sexually suggestive content is also detected.",
+      "type": "array",
+      "x-ms-parameter-location": "method",
+      "required": false,
+      "collectionFormat": "csv",
+      "items": {
+        "type": "string",
+        "x-nullable": false,
+        "x-ms-enum": {
+          "name": "VisualFeatureTypes",
+          "modelAsString": false
+        },
+        "enum": [
+          "ImageType",
+          "Faces",
+          "Adult",
+          "Categories",
+          "Color",
+          "Tags",
+          "Description"
+        ]
+      }
+    },
+    "OcrLanguage": {
+      "name": "language",
+      "in": "query",
+      "description": "The BCP-47 language code of the text to be detected in the image. The default value is 'unk'.",
+      "type": "string",
+      "required": false,
+      "x-ms-parameter-location": "method",
+      "x-nullable": false,
+      "x-ms-enum": {
+        "name": "OcrLanguages",
+        "modelAsString": false
+      },
+      "default": "unk",
+      "enum": [
+        "unk",
+        "zh-Hans",
+        "zh-Hant",
+        "cs",
+        "da",
+        "nl",
+        "en",
+        "fi",
+        "fr",
+        "de",
+        "el",
+        "hu",
+        "it",
+        "ja",
+        "ko",
+        "nb",
+        "pl",
+        "pt",
+        "ru",
+        "es",
+        "sv",
+        "tr",
+        "ar",
+        "ro",
+        "sr-Cyrl",
+        "sr-Latn",
+        "sk"
+      ]
+    },
+    "DetectOrientation": {
+      "name": "detectOrientation",
+      "in": "query",
+      "description": "Whether to detect the text orientation in the image. With detectOrientation=true the OCR service tries to detect the image orientation and correct it before further processing (e.g. if it's upside-down).",
+      "required": true,
+      "x-ms-parameter-location": "method",
+      "type": "boolean",
+      "default": true
+    },
+    "HandwritingBoolean": {
+      "name": "detectHandwriting",
+      "in": "query",
+      "description": "If “true” is specified, handwriting recognition is performed. 
If this parameter is set to “false” or is not specified, printed text recognition is performed.", + "required": false, + "x-ms-parameter-location": "method", + "type": "boolean", + "default": false } + } } \ No newline at end of file diff --git a/specification/cognitiveservices/data-plane/ComputerVision/v1.0/examples/SuccessfulAnalyzeWithUrl.json b/specification/cognitiveservices/data-plane/ComputerVision/v1.0/examples/SuccessfulAnalyzeWithUrl.json index cdba1be88c6e..7421bbd6fc7b 100644 --- a/specification/cognitiveservices/data-plane/ComputerVision/v1.0/examples/SuccessfulAnalyzeWithUrl.json +++ b/specification/cognitiveservices/data-plane/ComputerVision/v1.0/examples/SuccessfulAnalyzeWithUrl.json @@ -1,120 +1,120 @@ { - "parameters": { - "Content-Type": "application/json", - "Ocp-Apim-Subscription-Key": "{API key}", - "visualFeatures": "Categories,Adult,Tags,Description,Faces,Color,ImageType", - "details": "Celebrities", - "language": "en", - "body": { - "url": "{Image Url here}" - } - }, - "responses": { - "200": { - "headers": {}, - "body": { - "categories": [ - { - "name": "abstract_", - "score": 0.00390625 - }, - { - "name": "people_", - "score": 0.83984375, - "detail": { - "celebrities": [ - { - "name": "Satya Nadella", - "faceRectangle": { - "left": 597, - "top": 162, - "width": 248, - "height": 248 - }, - "confidence": 0.999028444 - } - ], - "landmarks": [ - { - "name": "Forbidden City", - "confidence": 0.9978346 - } - ] - } - } - ], - "adult": { - "isAdultContent": false, - "isRacyContent": false, - "adultScore": 0.0934349000453949, - "racyScore": 0.068613491952419281 - }, - "tags": [ - { - "name": "person", - "confidence": 0.98979085683822632 - }, - { - "name": "man", - "confidence": 0.94493889808654785 - }, - { - "name": "outdoor", - "confidence": 0.938492476940155 - }, - { - "name": "window", - "confidence": 0.89513939619064331 - } - ], - "description": { - "tags": [ - "person", - "man", - "outdoor", - "window", - "glasses" - ], - "captions": [ - { - "text": "Satya Nadella sitting on a bench", - "confidence": 0.48293603002174407 - } - ] - }, - "requestId": "0dbec5ad-a3d3-4f7e-96b4-dfd57efe967d", - "metadata": { - "width": 1500, - "height": 1000, - "format": "Jpeg" - }, - "faces": [ - { - "age": 44, - "gender": "Male", - "faceRectangle": { - "left": 593, - "top": 160, - "width": 250, - "height": 250 - } - } - ], - "color": { - "dominantColorForeground": "Brown", - "dominantColorBackground": "Brown", - "dominantColors": [ - "Brown", - "Black" - ], - "accentColor": "873B59", - "isBWImg": false - }, - "imageType": { - "clipArtType": 0, - "lineDrawingType": 0 + "parameters": { + "Content-Type": "application/json", + "Ocp-Apim-Subscription-Key": "{API key}", + "visualFeatures": "Categories,Adult,Tags,Description,Faces,Color,ImageType", + "details": "Celebrities", + "language": "en", + "body": { + "url": "{Image Url here}" + } + }, + "responses": { + "200": { + "headers": {}, + "body": { + "categories": [ + { + "name": "abstract_", + "score": 0.00390625 + }, + { + "name": "people_", + "score": 0.83984375, + "detail": { + "celebrities": [ + { + "name": "Satya Nadella", + "faceRectangle": { + "left": 597, + "top": 162, + "width": 248, + "height": 248 + }, + "confidence": 0.999028444 + } + ], + "landmarks": [ + { + "name": "Forbidden City", + "confidence": 0.9978346 } + ] + } + } + ], + "adult": { + "isAdultContent": false, + "isRacyContent": false, + "adultScore": 0.0934349000453949, + "racyScore": 0.068613491952419281 + }, + "tags": [ + { + "name": "person", + "confidence": 
0.98979085683822632 + }, + { + "name": "man", + "confidence": 0.94493889808654785 + }, + { + "name": "outdoor", + "confidence": 0.938492476940155 + }, + { + "name": "window", + "confidence": 0.89513939619064331 + } + ], + "description": { + "tags": [ + "person", + "man", + "outdoor", + "window", + "glasses" + ], + "captions": [ + { + "text": "Satya Nadella sitting on a bench", + "confidence": 0.48293603002174407 + } + ] + }, + "requestId": "0dbec5ad-a3d3-4f7e-96b4-dfd57efe967d", + "metadata": { + "width": 1500, + "height": 1000, + "format": "Jpeg" + }, + "faces": [ + { + "age": 44, + "gender": "Male", + "faceRectangle": { + "left": 593, + "top": 160, + "width": 250, + "height": 250 } + } + ], + "color": { + "dominantColorForeground": "Brown", + "dominantColorBackground": "Brown", + "dominantColors": [ + "Brown", + "Black" + ], + "accentColor": "873B59", + "isBWImg": false + }, + "imageType": { + "clipArtType": 0, + "lineDrawingType": 0 } + } } + } } \ No newline at end of file diff --git a/specification/cognitiveservices/data-plane/ComputerVision/v1.0/examples/SuccessfulDomainModelWithUrl.json b/specification/cognitiveservices/data-plane/ComputerVision/v1.0/examples/SuccessfulDomainModelWithUrl.json index be85863f21d8..f55a0c2dd911 100644 --- a/specification/cognitiveservices/data-plane/ComputerVision/v1.0/examples/SuccessfulDomainModelWithUrl.json +++ b/specification/cognitiveservices/data-plane/ComputerVision/v1.0/examples/SuccessfulDomainModelWithUrl.json @@ -1,36 +1,36 @@ { - "parameters": { - "Content-Type": "application/json", - "Ocp-Apim-Subscription-Key": "{API key}", - "Model": "Celebrities", - "body": { - "url": "{Image Url here}" - } - }, - "responses": { - "200": { - "body": { - "requestId": "f0027b4b-dc0d-4082-9228-1545ed246b03", - "metadata": { - "width": 1500, - "height": 1000, - "format": "Jpeg" - }, - "result": { - "celebrities": [ - { - "name": "Satya Nadella", - "faceRectangle": { - "left": 597, - "top": 162, - "width": 248, - "height": 248 - }, - "confidence": 0.999028444 - } - ] - } + "parameters": { + "Content-Type": "application/json", + "Ocp-Apim-Subscription-Key": "{API key}", + "Model": "Celebrities", + "body": { + "url": "{Image Url here}" + } + }, + "responses": { + "200": { + "body": { + "requestId": "f0027b4b-dc0d-4082-9228-1545ed246b03", + "metadata": { + "width": 1500, + "height": 1000, + "format": "Jpeg" + }, + "result": { + "celebrities": [ + { + "name": "Satya Nadella", + "faceRectangle": { + "left": 597, + "top": 162, + "width": 248, + "height": 248 + }, + "confidence": 0.999028444 } + ] } + } } + } } \ No newline at end of file diff --git a/specification/cognitiveservices/data-plane/ComputerVision/v1.0/examples/SuccessfulGenerateThumbnail.json b/specification/cognitiveservices/data-plane/ComputerVision/v1.0/examples/SuccessfulGenerateThumbnail.json index 66a31ca34b7f..369dd2f57ace 100644 --- a/specification/cognitiveservices/data-plane/ComputerVision/v1.0/examples/SuccessfulGenerateThumbnail.json +++ b/specification/cognitiveservices/data-plane/ComputerVision/v1.0/examples/SuccessfulGenerateThumbnail.json @@ -1,19 +1,18 @@ { - "parameters": { - "Content-Type": "application/json", - "Ocp-Apim-Subscription-Key": "{API key}", - "width": "500", - "height": "500", - "smartCropping": true, - "body": { - "url": "{Image Url here}" - } - }, - "responses": { - "200": { - "headers": { - }, - "body": "{Binary}" - } + "parameters": { + "Content-Type": "application/json", + "Ocp-Apim-Subscription-Key": "{API key}", + "width": "500", + "height": "500", + 
"smartCropping": true, + "body": { + "url": "{Image Url here}" } + }, + "responses": { + "200": { + "headers": {}, + "body": "{Binary}" + } + } } \ No newline at end of file diff --git a/specification/cognitiveservices/data-plane/ComputerVision/v1.0/examples/SuccessfulGetTextOperationResult.json b/specification/cognitiveservices/data-plane/ComputerVision/v1.0/examples/SuccessfulGetTextOperationResult.json index e7bc74e262a6..538741cae352 100644 --- a/specification/cognitiveservices/data-plane/ComputerVision/v1.0/examples/SuccessfulGetTextOperationResult.json +++ b/specification/cognitiveservices/data-plane/ComputerVision/v1.0/examples/SuccessfulGetTextOperationResult.json @@ -1,271 +1,271 @@ { - "parameters": { - "Ocp-Apim-Subscription-Key": "{API key}", - "operationId": "49a36324-fc4b-4387-aa06-090cfbf0064f" - }, - "responses": { - "200": { - "header": {}, - "body": { - "status": "Succeeded", - "recognitionResult": { - "lines": [ - { - "boundingBox": [ - 202, - 618, - 2047, - 643, - 2046, - 840, - 200, - 813 - ], - "text": "Our greatest glory is not", - "words": [ - { - "boundingBox": [ - 204, - 627, - 481, - 628, - 481, - 830, - 204, - 829 - ], - "text": "Our" - }, - { - "boundingBox": [ - 519, - 628, - 1057, - 630, - 1057, - 832, - 518, - 830 - ], - "text": "greatest" - }, - { - "boundingBox": [ - 1114, - 630, - 1549, - 631, - 1548, - 833, - 1114, - 832 - ], - "text": "glory" - }, - { - "boundingBox": [ - 1586, - 631, - 1785, - 632, - 1784, - 834, - 1586, - 833 - ], - "text": "is" - }, - { - "boundingBox": [ - 1822, - 632, - 2115, - 633, - 2115, - 835, - 1822, - 834 - ], - "text": "not" - } - ] - }, - { - "boundingBox": [ - 420, - 1273, - 2954, - 1250, - 2958, - 1488, - 422, - 1511 - ], - "text": "but in rising every time we fall", - "words": [ - { - "boundingBox": [ - 423, - 1269, - 634, - 1268, - 635, - 1507, - 424, - 1508 - ], - "text": "but" - }, - { - "boundingBox": [ - 667, - 1268, - 808, - 1268, - 809, - 1506, - 668, - 1507 - ], - "text": "in" - }, - { - "boundingBox": [ - 874, - 1267, - 1289, - 1265, - 1290, - 1504, - 875, - 1506 - ], - "text": "rising" - }, - { - "boundingBox": [ - 1331, - 1265, - 1771, - 1263, - 1772, - 1502, - 1332, - 1504 - ], - "text": "every" - }, - { - "boundingBox": [ - 1812, - 1263, - 2178, - 1261, - 2179, - 1500, - 1813, - 1502 - ], - "text": "time" - }, - { - "boundingBox": [ - 2219, - 1261, - 2510, - 1260, - 2511, - 1498, - 2220, - 1500 - ], - "text": "we" - }, - { - "boundingBox": [ - 2551, - 1260, - 3016, - 1258, - 3017, - 1496, - 2552, - 1498 - ], - "text": "fall" - } - ] - }, - { - "boundingBox": [ - 1612, - 903, - 2744, - 935, - 2738, - 1139, - 1607, - 1107 - ], - "text": "in never failing ,", - "words": [ - { - "boundingBox": [ - 1611, - 934, - 1707, - 933, - 1708, - 1147, - 1613, - 1147 - ], - "text": "in" - }, - { - "boundingBox": [ - 1753, - 933, - 2132, - 930, - 2133, - 1144, - 1754, - 1146 - ], - "text": "never" - }, - { - "boundingBox": [ - 2162, - 930, - 2673, - 927, - 2674, - 1140, - 2164, - 1144 - ], - "text": "failing" - }, - { - "boundingBox": [ - 2703, - 926, - 2788, - 926, - 2790, - 1139, - 2705, - 1140 - ], - "text": "," - } - ] - } - ] + "parameters": { + "Ocp-Apim-Subscription-Key": "{API key}", + "operationId": "49a36324-fc4b-4387-aa06-090cfbf0064f" + }, + "responses": { + "200": { + "header": {}, + "body": { + "status": "Succeeded", + "recognitionResult": { + "lines": [ + { + "boundingBox": [ + 202, + 618, + 2047, + 643, + 2046, + 840, + 200, + 813 + ], + "text": "Our greatest glory is not", + "words": [ + { + "boundingBox": [ 
+ 204, + 627, + 481, + 628, + 481, + 830, + 204, + 829 + ], + "text": "Our" + }, + { + "boundingBox": [ + 519, + 628, + 1057, + 630, + 1057, + 832, + 518, + 830 + ], + "text": "greatest" + }, + { + "boundingBox": [ + 1114, + 630, + 1549, + 631, + 1548, + 833, + 1114, + 832 + ], + "text": "glory" + }, + { + "boundingBox": [ + 1586, + 631, + 1785, + 632, + 1784, + 834, + 1586, + 833 + ], + "text": "is" + }, + { + "boundingBox": [ + 1822, + 632, + 2115, + 633, + 2115, + 835, + 1822, + 834 + ], + "text": "not" } + ] + }, + { + "boundingBox": [ + 420, + 1273, + 2954, + 1250, + 2958, + 1488, + 422, + 1511 + ], + "text": "but in rising every time we fall", + "words": [ + { + "boundingBox": [ + 423, + 1269, + 634, + 1268, + 635, + 1507, + 424, + 1508 + ], + "text": "but" + }, + { + "boundingBox": [ + 667, + 1268, + 808, + 1268, + 809, + 1506, + 668, + 1507 + ], + "text": "in" + }, + { + "boundingBox": [ + 874, + 1267, + 1289, + 1265, + 1290, + 1504, + 875, + 1506 + ], + "text": "rising" + }, + { + "boundingBox": [ + 1331, + 1265, + 1771, + 1263, + 1772, + 1502, + 1332, + 1504 + ], + "text": "every" + }, + { + "boundingBox": [ + 1812, + 1263, + 2178, + 1261, + 2179, + 1500, + 1813, + 1502 + ], + "text": "time" + }, + { + "boundingBox": [ + 2219, + 1261, + 2510, + 1260, + 2511, + 1498, + 2220, + 1500 + ], + "text": "we" + }, + { + "boundingBox": [ + 2551, + 1260, + 3016, + 1258, + 3017, + 1496, + 2552, + 1498 + ], + "text": "fall" + } + ] + }, + { + "boundingBox": [ + 1612, + 903, + 2744, + 935, + 2738, + 1139, + 1607, + 1107 + ], + "text": "in never failing ,", + "words": [ + { + "boundingBox": [ + 1611, + 934, + 1707, + 933, + 1708, + 1147, + 1613, + 1147 + ], + "text": "in" + }, + { + "boundingBox": [ + 1753, + 933, + 2132, + 930, + 2133, + 1144, + 1754, + 1146 + ], + "text": "never" + }, + { + "boundingBox": [ + 2162, + 930, + 2673, + 927, + 2674, + 1140, + 2164, + 1144 + ], + "text": "failing" + }, + { + "boundingBox": [ + 2703, + 926, + 2788, + 926, + 2790, + 1139, + 2705, + 1140 + ], + "text": "," + } + ] } + ] } + } } + } } \ No newline at end of file diff --git a/specification/cognitiveservices/data-plane/ComputerVision/v1.0/examples/SuccessfulListDomainModels.json b/specification/cognitiveservices/data-plane/ComputerVision/v1.0/examples/SuccessfulListDomainModels.json index d25429ae8baf..47cac7ddbc40 100644 --- a/specification/cognitiveservices/data-plane/ComputerVision/v1.0/examples/SuccessfulListDomainModels.json +++ b/specification/cognitiveservices/data-plane/ComputerVision/v1.0/examples/SuccessfulListDomainModels.json @@ -1,27 +1,27 @@ { - "parameters": { - "Ocp-Apim-Subscription-Key": "{API key}", - "body": {} - }, - "responses": { - "200": { - "headers": {}, - "body": { - "models": [ - { - "name": "celebrities", - "categories": [ - "people_" - ] - }, - { - "name": "landmarks", - "categories": [ - "building_" - ] - } - ] - } - } + "parameters": { + "Ocp-Apim-Subscription-Key": "{API key}", + "body": {} + }, + "responses": { + "200": { + "headers": {}, + "body": { + "models": [ + { + "name": "celebrities", + "categories": [ + "people_" + ] + }, + { + "name": "landmarks", + "categories": [ + "building_" + ] + } + ] + } } + } } \ No newline at end of file diff --git a/specification/cognitiveservices/data-plane/ComputerVision/v1.0/examples/SuccessfulOcrWithUrl.json b/specification/cognitiveservices/data-plane/ComputerVision/v1.0/examples/SuccessfulOcrWithUrl.json index a8700a91923a..838fb4cce107 100644 --- 
a/specification/cognitiveservices/data-plane/ComputerVision/v1.0/examples/SuccessfulOcrWithUrl.json +++ b/specification/cognitiveservices/data-plane/ComputerVision/v1.0/examples/SuccessfulOcrWithUrl.json @@ -1,79 +1,79 @@ { - "parameters": { - "Content-Type": "application/json", - "Ocp-Apim-Subscription-Key": "{API key}", - "detectOrientation": "true", + "parameters": { + "Content-Type": "application/json", + "Ocp-Apim-Subscription-Key": "{API key}", + "detectOrientation": "true", + "language": "en", + "body": { + "url": "{Image Url here}" + } + }, + "responses": { + "200": { + "headers": {}, + "body": { "language": "en", - "body": { - "url": "{Image Url here}" - } - }, - "responses": { - "200": { - "headers": {}, - "body": { - "language": "en", - "textAngle": -2.0000000000000338, - "orientation": "Up", - "regions": [ + "textAngle": -2.0000000000000338, + "orientation": "Up", + "regions": [ + { + "boundingBox": "462,379,497,258", + "lines": [ + { + "boundingBox": "462,379,497,74", + "words": [ + { + "boundingBox": "462,379,41,73", + "text": "A" + }, + { + "boundingBox": "523,379,153,73", + "text": "GOAL" + }, + { + "boundingBox": "694,379,265,74", + "text": "WITHOUT" + } + ] + }, + { + "boundingBox": "565,471,289,74", + "words": [ + { + "boundingBox": "565,471,41,73", + "text": "A" + }, + { + "boundingBox": "626,471,150,73", + "text": "PLAN" + }, + { + "boundingBox": "801,472,53,73", + "text": "IS" + } + ] + }, + { + "boundingBox": "519,563,375,74", + "words": [ + { + "boundingBox": "519,563,149,74", + "text": "JUST" + }, + { + "boundingBox": "683,564,41,72", + "text": "A" + }, { - "boundingBox": "462,379,497,258", - "lines": [ - { - "boundingBox": "462,379,497,74", - "words": [ - { - "boundingBox": "462,379,41,73", - "text": "A" - }, - { - "boundingBox": "523,379,153,73", - "text": "GOAL" - }, - { - "boundingBox": "694,379,265,74", - "text": "WITHOUT" - } - ] - }, - { - "boundingBox": "565,471,289,74", - "words": [ - { - "boundingBox": "565,471,41,73", - "text": "A" - }, - { - "boundingBox": "626,471,150,73", - "text": "PLAN" - }, - { - "boundingBox": "801,472,53,73", - "text": "IS" - } - ] - }, - { - "boundingBox": "519,563,375,74", - "words": [ - { - "boundingBox": "519,563,149,74", - "text": "JUST" - }, - { - "boundingBox": "683,564,41,72", - "text": "A" - }, - { - "boundingBox": "741,564,153,73", - "text": "WISH" - } - ] - } - ] + "boundingBox": "741,564,153,73", + "text": "WISH" } ] } - } + ] + } + ] + } } + } } \ No newline at end of file diff --git a/specification/cognitiveservices/data-plane/ComputerVision/v1.0/examples/SuccessfulRecognizeTextWithUrl.json b/specification/cognitiveservices/data-plane/ComputerVision/v1.0/examples/SuccessfulRecognizeTextWithUrl.json index 502731ae719f..0706d2c3a501 100644 --- a/specification/cognitiveservices/data-plane/ComputerVision/v1.0/examples/SuccessfulRecognizeTextWithUrl.json +++ b/specification/cognitiveservices/data-plane/ComputerVision/v1.0/examples/SuccessfulRecognizeTextWithUrl.json @@ -1,17 +1,17 @@ { - "parameters": { - "Content-Type": "application/json", - "Ocp-Apim-Subscription-Key": "{API key}", - "Handwriting": "true", - "body": { - "url": "{Image Url here}" - } - }, - "responses": { - "202": { - "header": { - "Operation-Location":"https://{domain}/vision/v1.0/textOperations/49a36324-fc4b-4387-aa06-090cfbf0064f" - } - } + "parameters": { + "Content-Type": "application/json", + "Ocp-Apim-Subscription-Key": "{API key}", + "Handwriting": "true", + "body": { + "url": "{Image Url here}" } + }, + "responses": { + "202": { + "header": { + 
"Operation-Location": "https://{domain}/vision/v1.0/textOperations/49a36324-fc4b-4387-aa06-090cfbf0064f" + } + } + } } \ No newline at end of file diff --git a/specification/cognitiveservices/data-plane/EntitySearch/v1.0/EntitySearch.json b/specification/cognitiveservices/data-plane/EntitySearch/v1.0/EntitySearch.json index 04ee20594e4a..9c12d3575215 100644 --- a/specification/cognitiveservices/data-plane/EntitySearch/v1.0/EntitySearch.json +++ b/specification/cognitiveservices/data-plane/EntitySearch/v1.0/EntitySearch.json @@ -42,7 +42,9 @@ "EntitySearch" ], "parameters": [ - { "$ref": "#/parameters/x-bingapis-sdk" }, + { + "$ref": "#/parameters/x-bingapis-sdk" + }, { "name": "Accept", "in": "header", @@ -933,13 +935,15 @@ } }, "parameters": { - "x-bingapis-sdk" :{ + "x-bingapis-sdk": { "name": "X-BingApis-SDK", "in": "header", "description": "Activate Swagger compliance", "required": true, "type": "string", - "enum": ["true"], + "enum": [ + "true" + ], "x-ms-enum": { "name": "XBingApisSDK", "modelAsString": true @@ -947,4 +951,4 @@ "x-ms-parameter-location": "method" } } -} +} \ No newline at end of file diff --git a/specification/cognitiveservices/data-plane/Face/v1.0/Face.json b/specification/cognitiveservices/data-plane/Face/v1.0/Face.json index 501250e4143c..2a862af09369 100644 --- a/specification/cognitiveservices/data-plane/Face/v1.0/Face.json +++ b/specification/cognitiveservices/data-plane/Face/v1.0/Face.json @@ -1,2511 +1,2511 @@ { - "swagger": "2.0", - "info": { - "version": "1.0", - "title": "Face API", - "description": "An API for face detection, verification, and identification." - }, - "securityDefinitions": { - "apim_key": { - "type": "apiKey", - "name": "Ocp-Apim-Subscription-Key", - "in": "header" - } - }, - "security": [ - { - "apim_key": [] - } - ], - "x-ms-parameterized-host": { - "hostTemplate": "{AzureRegion}.api.cognitive.microsoft.com", - "parameters": [ - { - "$ref": "../../Common/ExtendedRegions.json#/parameters/AzureRegion" - } - ] - }, - "basePath": "/face/v1.0", - "schemes": [ - "https" - ], - "paths": { - "/findsimilars": { - "post": { - "description": "Given query face's faceId, find the similar-looking faces from a faceId array or a faceListId.", - "operationId": "Face_FindSimilar", + "swagger": "2.0", + "info": { + "version": "1.0", + "title": "Face API", + "description": "An API for face detection, verification, and identification." 
+ }, + "securityDefinitions": { + "apim_key": { + "type": "apiKey", + "name": "Ocp-Apim-Subscription-Key", + "in": "header" + } + }, + "security": [ + { + "apim_key": [] + } + ], + "x-ms-parameterized-host": { + "hostTemplate": "{AzureRegion}.api.cognitive.microsoft.com", "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "x-ms-client-flatten": true, - "schema": { - "$ref": "#/definitions/FindSimilarRequest" - } - } - ], - "consumes": [ - "application/json" - ], - "responses": { - "200": { - "description": "A successful call returns an array of the most similar faces represented in faceId if the input parameter is faceIds or persistedFaceId if the input parameter is faceListId.", - "schema": { - "$ref": "#/definitions/SimilarFaceResults" - }, - "examples": { - "application/json": [ - { - "persistedFaceId": "015839fb-fbd9-4f79-ace9-7675fc2f1dd9", - "confidence": 0.82 - } - ] + { + "$ref": "../../Common/ExtendedRegions.json#/parameters/AzureRegion" } - }, - "default": { - "description": "Error response.", - "schema": { - "$ref": "#/definitions/APIError" - } - } - }, - "produces": [ - "application/json" ] - } }, - "/group": { - "post": { - "description": "Divide candidate faces into groups based on face similarity.", - "operationId": "Face_Group", - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "x-ms-client-flatten": true, - "schema": { - "$ref": "#/definitions/GroupRequest" + "basePath": "/face/v1.0", + "schemes": [ + "https" + ], + "paths": { + "/findsimilars": { + "post": { + "description": "Given query face's faceId, find the similar-looking faces from a faceId array or a faceListId.", + "operationId": "Face_FindSimilar", + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "x-ms-client-flatten": true, + "schema": { + "$ref": "#/definitions/FindSimilarRequest" + } + } + ], + "consumes": [ + "application/json" + ], + "responses": { + "200": { + "description": "A successful call returns an array of the most similar faces represented in faceId if the input parameter is faceIds or persistedFaceId if the input parameter is faceListId.", + "schema": { + "$ref": "#/definitions/SimilarFaceResults" + }, + "examples": { + "application/json": [ + { + "persistedFaceId": "015839fb-fbd9-4f79-ace9-7675fc2f1dd9", + "confidence": 0.82 + } + ] + } + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/APIError" + } + } + }, + "produces": [ + "application/json" + ] } - } - ], - "consumes": [ - "application/json" - ], - "responses": { - "200": { - "description": "A successful call returns one or more groups of similar faces (rank by group size) and a messyGroup.", - "schema": { - "$ref": "#/definitions/GroupResponse" - }, - "examples": { - "application/json": { - "groups": [ - [ - "c5c24a82-6845-4031-9d5d-978df9175426", - "015839fb-fbd9-4f79-ace9-7675fc2f1dd9", - "fce92aed-d578-4d2e-8114-068f8af4492e", - "b64d5e15-8257-4af2-b20a-5a750f8940e7" - ], - [ - "65d083d4-9447-47d1-af30-b626144bf0fb", - "30ea1073-cc9e-4652-b1e3-d08fb7b95315" - ] + }, + "/group": { + "post": { + "description": "Divide candidate faces into groups based on face similarity.", + "operationId": "Face_Group", + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "x-ms-client-flatten": true, + "schema": { + "$ref": "#/definitions/GroupRequest" + } + } + ], + "consumes": [ + "application/json" ], - "messyGroup": [ - "be386ab3-af91-4104-9e6d-4dae4c9fddb7" + "responses": { + "200": { + "description": "A 
successful call returns one or more groups of similar faces (rank by group size) and a messyGroup.", + "schema": { + "$ref": "#/definitions/GroupResponse" + }, + "examples": { + "application/json": { + "groups": [ + [ + "c5c24a82-6845-4031-9d5d-978df9175426", + "015839fb-fbd9-4f79-ace9-7675fc2f1dd9", + "fce92aed-d578-4d2e-8114-068f8af4492e", + "b64d5e15-8257-4af2-b20a-5a750f8940e7" + ], + [ + "65d083d4-9447-47d1-af30-b626144bf0fb", + "30ea1073-cc9e-4652-b1e3-d08fb7b95315" + ] + ], + "messyGroup": [ + "be386ab3-af91-4104-9e6d-4dae4c9fddb7" + ] + } + } + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/APIError" + } + } + }, + "produces": [ + "application/json" ] - } } - }, - "default": { - "description": "Error response.", - "schema": { - "$ref": "#/definitions/APIError" + }, + "/identify": { + "post": { + "description": "Identify unknown faces from a person group.", + "operationId": "Face_Identify", + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "x-ms-client-flatten": true, + "schema": { + "$ref": "#/definitions/IdentifyRequest" + } + } + ], + "consumes": [ + "application/json" + ], + "responses": { + "200": { + "description": "A successful call returns the identified candidate person(s) for each query face.", + "schema": { + "$ref": "#/definitions/IdentifyResult" + }, + "examples": { + "application/json": [ + { + "faceId": "c5c24a82-6845-4031-9d5d-978df9175426", + "candidates": [ + { + "personId": "25985303-c537-4467-b41d-bdb45cd95ca1", + "confidence": 0.92 + } + ] + }, + { + "faceId": "65d083d4-9447-47d1-af30-b626144bf0fb", + "candidates": [ + { + "personId": "2ae4935b-9659-44c3-977f-61fac20d0538", + "confidence": 0.89 + } + ] + } + ] + } + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/APIError" + } + } + }, + "produces": [ + "application/json" + ] } - } }, - "produces": [ - "application/json" - ] - } - }, - "/identify": { - "post": { - "description": "Identify unknown faces from a person group.", - "operationId": "Face_Identify", - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "x-ms-client-flatten": true, - "schema": { - "$ref": "#/definitions/IdentifyRequest" + "/verify": { + "post": { + "description": "Verify whether two faces belong to a same person or whether one face belongs to a person.", + "operationId": "Face_Verify", + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "x-ms-client-flatten": true, + "schema": { + "$ref": "#/definitions/VerifyRequest" + } + } + ], + "consumes": [ + "application/json" + ], + "responses": { + "200": { + "description": "A successful call returns the verification result.", + "schema": { + "$ref": "#/definitions/VerifyResult" + }, + "examples": { + "application/json": { + "isIdentical": true, + "confidence": 0.9 + } + } + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/APIError" + } + } + }, + "produces": [ + "application/json" + ] } - } - ], - "consumes": [ - "application/json" - ], - "responses": { - "200": { - "description": "A successful call returns the identified candidate person(s) for each query face.", - "schema": { - "$ref": "#/definitions/IdentifyResult" - }, - "examples": { - "application/json": [ - { - "faceId": "c5c24a82-6845-4031-9d5d-978df9175426", - "candidates": [ + }, + "/persongroups/{personGroupId}/persons": { + "post": { + "description": "Create a new person in a specified person group.", + "operationId": 
"Person_Create", + "parameters": [ + { + "name": "personGroupId", + "in": "path", + "description": "Specifying the target person group to create the person.", + "required": true, + "type": "string" + }, { - "personId": "25985303-c537-4467-b41d-bdb45cd95ca1", - "confidence": 0.92 + "name": "body", + "in": "body", + "required": true, + "x-ms-client-flatten": true, + "schema": { + "$ref": "#/definitions/CreatePersonRequest" + } + } + ], + "consumes": [ + "application/json" + ], + "responses": { + "200": { + "description": "A successful call returns a new personId created.", + "schema": { + "$ref": "#/definitions/CreatePersonResult" + }, + "examples": { + "application/json": { + "personId": "25985303-c537-4467-b41d-bdb45cd95ca1" + } + } + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/APIError" + } } - ] }, - { - "faceId": "65d083d4-9447-47d1-af30-b626144bf0fb", - "candidates": [ + "produces": [ + "application/json" + ] + }, + "get": { + "description": "List all persons in a person group, and retrieve person information (including personId, name, userData and persistedFaceIds of registered faces of the person).", + "operationId": "Person_List", + "parameters": [ { - "personId": "2ae4935b-9659-44c3-977f-61fac20d0538", - "confidence": 0.89 + "name": "personGroupId", + "in": "path", + "description": "personGroupId of the target person group.", + "required": true, + "type": "string" } - ] - } - ] - } - }, - "default": { - "description": "Error response.", - "schema": { - "$ref": "#/definitions/APIError" + ], + "responses": { + "200": { + "description": "A successful call returns an array of person information that belong to the person group.", + "schema": { + "$ref": "#/definitions/PersonResults" + }, + "examples": { + "application/json": [ + { + "personId": "25985303-c537-4467-b41d-bdb45cd95ca1", + "name": "Ryan", + "userData": "User-provided data attached to the person", + "persistedFaceIds": [ + "015839fb-fbd9-4f79-ace9-7675fc2f1dd9", + "fce92aed-d578-4d2e-8114-068f8af4492e", + "b64d5e15-8257-4af2-b20a-5a750f8940e7" + ] + }, + { + "personId": "2ae4935b-9659-44c3-977f-61fac20d0538", + "name": "David", + "userData": "User-provided data attached to the person", + "persistedFaceIds": [ + "30ea1073-cc9e-4652-b1e3-d08fb7b95315", + "fbd2a038-dbff-452c-8e79-2ee81b1aa84e" + ] + } + ] + } + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/APIError" + } + } + }, + "produces": [ + "application/json" + ] } - } }, - "produces": [ - "application/json" - ] - } - }, - "/verify": { - "post": { - "description": "Verify whether two faces belong to a same person or whether one face belongs to a person.", - "operationId": "Face_Verify", - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "x-ms-client-flatten": true, - "schema": { - "$ref": "#/definitions/VerifyRequest" + "/persongroups/{personGroupId}/persons/{personId}": { + "delete": { + "description": "Delete an existing person from a person group. 
Persisted face images of the person will also be deleted.", + "operationId": "Person_Delete", + "parameters": [ + { + "name": "personGroupId", + "in": "path", + "description": "Specifying the person group containing the person.", + "required": true, + "type": "string" + }, + { + "name": "personId", + "in": "path", + "description": "The target personId to delete.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "A successful call returns an empty response body." + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/APIError" + } + } + }, + "produces": [ + "application/json" + ] + }, + "get": { + "description": "Retrieve a person's information, including registered persisted faces, name and userData.", + "operationId": "Person_Get", + "parameters": [ + { + "name": "personGroupId", + "in": "path", + "description": "Specifying the person group containing the target person.", + "required": true, + "type": "string" + }, + { + "name": "personId", + "in": "path", + "description": "Specifying the target person.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "A successful call returns the person's information.", + "schema": { + "$ref": "#/definitions/PersonResult" + }, + "examples": { + "application/json": { + "personId": "25985303-c537-4467-b41d-bdb45cd95ca1", + "persistedFaceIds": [ + "015839fb-fbd9-4f79-ace9-7675fc2f1dd9", + "fce92aed-d578-4d2e-8114-068f8af4492e", + "b64d5e15-8257-4af2-b20a-5a750f8940e7" + ], + "name": "Ryan", + "userData": "User-provided data attached to the person" + } + } + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/APIError" + } + } + }, + "produces": [ + "application/json" + ] + }, + "patch": { + "description": "Update name or userData of a person.", + "operationId": "Person_Update", + "parameters": [ + { + "name": "personGroupId", + "in": "path", + "description": "Specifying the person group containing the target person.", + "required": true, + "type": "string" + }, + { + "name": "personId", + "in": "path", + "description": "personId of the target person.", + "required": true, + "type": "string" + }, + { + "name": "body", + "in": "body", + "required": true, + "x-ms-client-flatten": true, + "schema": { + "$ref": "#/definitions/CreatePersonRequest" + } + } + ], + "consumes": [ + "application/json" + ], + "responses": { + "200": { + "description": "A successful call returns an empty response body." + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/APIError" + } + } + }, + "produces": [ + "application/json" + ] } - } - ], - "consumes": [ - "application/json" - ], - "responses": { - "200": { - "description": "A successful call returns the verification result.", - "schema": { - "$ref": "#/definitions/VerifyResult" + }, + "/persongroups/{personGroupId}/persons/{personId}/persistedFaces/{persistedFaceId}": { + "delete": { + "description": "Delete a face from a person. 
Relative image for the persisted face will also be deleted.", + "operationId": "Person_DeleteFace", + "parameters": [ + { + "name": "personGroupId", + "in": "path", + "description": "Specifying the person group containing the target person.", + "required": true, + "type": "string" + }, + { + "name": "personId", + "in": "path", + "description": "Specifying the person that the target persisted face belong to.", + "required": true, + "type": "string" + }, + { + "name": "persistedFaceId", + "in": "path", + "description": "The persisted face to remove.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "A successful call returns an empty response body." + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/APIError" + } + } + }, + "produces": [ + "application/json" + ] + }, + "get": { + "description": "Retrieve information about a persisted face (specified by persistedFaceId, personId and its belonging personGroupId).", + "operationId": "Person_GetFace", + "parameters": [ + { + "name": "personGroupId", + "in": "path", + "description": "Specifying the person group containing the target person.", + "required": true, + "type": "string" + }, + { + "name": "personId", + "in": "path", + "description": "Specifying the target person that the face belongs to.", + "required": true, + "type": "string" + }, + { + "name": "persistedFaceId", + "in": "path", + "description": "The persistedFaceId of the target persisted face of the person.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "A successful call returns target persisted face's information (persistedFaceId and userData).", + "schema": { + "$ref": "#/definitions/PersonFaceResult" + }, + "examples": { + "application/json": { + "persistedFaceId": "015839fb-fbd9-4f79-ace9-7675fc2f1dd9", + "userData": "User-provided data attached to the person" + } + } + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/APIError" + } + } + }, + "produces": [ + "application/json" + ] }, - "examples": { - "application/json": { - "isIdentical": true, - "confidence": 0.9 - } + "patch": { + "description": "Update a person persisted face's userData field.", + "operationId": "Person_UpdateFace", + "parameters": [ + { + "name": "personGroupId", + "in": "path", + "description": "Specifying the person group containing the target person.", + "required": true, + "type": "string" + }, + { + "name": "personId", + "in": "path", + "description": "personId of the target person.", + "required": true, + "type": "string" + }, + { + "name": "persistedFaceId", + "in": "path", + "description": "persistedFaceId of target face, which is persisted and will not expire.", + "required": true, + "type": "string" + }, + { + "name": "body", + "in": "body", + "required": true, + "x-ms-client-flatten": true, + "schema": { + "$ref": "#/definitions/UpdatePersonFaceDataRequest" + } + } + ], + "consumes": [ + "application/json" + ], + "responses": { + "200": { + "description": "A successful call returns an empty response body." 
+ }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/APIError" + } + } + }, + "produces": [ + "application/json" + ] } - }, - "default": { - "description": "Error response.", - "schema": { - "$ref": "#/definitions/APIError" + }, + "/persongroups/{personGroupId}": { + "put": { + "description": "Create a new person group with specified personGroupId, name and user-provided userData.", + "operationId": "PersonGroup_Create", + "parameters": [ + { + "name": "personGroupId", + "in": "path", + "description": "User-provided personGroupId as a string.", + "required": true, + "type": "string", + "maxLength": 64, + "pattern": "^[a-z0-9-_]+$" + }, + { + "name": "body", + "in": "body", + "required": true, + "x-ms-client-flatten": true, + "schema": { + "$ref": "#/definitions/CreatePersonGroupRequest" + } + } + ], + "consumes": [ + "application/json" + ], + "responses": { + "200": { + "description": "A successful call returns an empty response body." + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/APIError" + } + } + }, + "produces": [ + "application/json" + ] + }, + "delete": { + "description": "Delete an existing person group. Persisted face images of all people in the person group will also be deleted.", + "operationId": "PersonGroup_Delete", + "parameters": [ + { + "name": "personGroupId", + "in": "path", + "description": "The personGroupId of the person group to be deleted.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "A successful call returns an empty response body." + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/APIError" + } + } + }, + "produces": [ + "application/json" + ] + }, + "get": { + "description": "Retrieve the information of a person group, including its name and userData.", + "operationId": "PersonGroup_Get", + "parameters": [ + { + "name": "personGroupId", + "in": "path", + "description": "personGroupId of the target person group.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "A successful call returns the person group's information.", + "schema": { + "$ref": "#/definitions/PersonGroupResult" + }, + "examples": { + "application/json": { + "personGroupId": "sample_group", + "name": "group1", + "userData": "User-provided data attached to the person group" + } + } + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/APIError" + } + } + }, + "produces": [ + "application/json" + ] + }, + "patch": { + "description": "Update an existing person group's display name and userData. The properties which does not appear in request body will not be updated.", + "operationId": "PersonGroup_Update", + "parameters": [ + { + "name": "personGroupId", + "in": "path", + "description": "personGroupId of the person group to be updated.", + "required": true, + "type": "string" + }, + { + "name": "body", + "in": "body", + "required": true, + "x-ms-client-flatten": true, + "schema": { + "$ref": "#/definitions/CreatePersonGroupRequest" + } + } + ], + "consumes": [ + "application/json" + ], + "responses": { + "200": { + "description": "A successful call returns an empty response body." 
+ }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/APIError" + } + } + }, + "produces": [ + "application/json" + ] } - } }, - "produces": [ - "application/json" - ] - } - }, - "/persongroups/{personGroupId}/persons": { - "post": { - "description": "Create a new person in a specified person group.", - "operationId": "Person_Create", - "parameters": [ - { - "name": "personGroupId", - "in": "path", - "description": "Specifying the target person group to create the person.", - "required": true, - "type": "string" - }, - { - "name": "body", - "in": "body", - "required": true, - "x-ms-client-flatten": true, - "schema": { - "$ref": "#/definitions/CreatePersonRequest" + "/persongroups/{personGroupId}/training": { + "get": { + "description": "Retrieve the training status of a person group (completed or ongoing).", + "operationId": "PersonGroup_GetTrainingStatus", + "parameters": [ + { + "name": "personGroupId", + "in": "path", + "description": "personGroupId of target person group.", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "A successful call returns the person group's training status.", + "schema": { + "$ref": "#/definitions/TrainingStatus" + }, + "examples": { + "application/json": { + "status": "succeeded", + "createdDateTime": "1/3/2017 4:11:35 AM", + "lastActionDateTime": null, + "message": null + } + } + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/APIError" + } + } + }, + "produces": [ + "application/json" + ] } - } - ], - "consumes": [ - "application/json" - ], - "responses": { - "200": { - "description": "A successful call returns a new personId created.", - "schema": { - "$ref": "#/definitions/CreatePersonResult" - }, - "examples": { - "application/json": { - "personId": "25985303-c537-4467-b41d-bdb45cd95ca1" - } + }, + "/persongroups": { + "get": { + "description": "List person groups and their information.", + "operationId": "PersonGroup_List", + "parameters": [ + { + "name": "start", + "in": "query", + "description": "List person groups from the least personGroupId greater than the \"start\".", + "type": "string", + "maxLength": 64 + }, + { + "name": "top", + "in": "query", + "description": "The number of person groups to list.", + "type": "integer", + "minimum": 1, + "maximum": 1000, + "default": 1000 + } + ], + "responses": { + "200": { + "description": "A successful call returns an array of person groups and their information.", + "schema": { + "$ref": "#/definitions/PersonGroupResults" + }, + "examples": { + "application/json": [ + { + "personGroupId": "sample_group", + "name": "group1", + "userData": "User-provideddataattachedtothepersongroup" + }, + { + "personGroupId": "sample_group2", + "name": "group2", + "userData": "User-provideddataattachedtothepersongroup" + } + ] + } + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/APIError" + } + } + }, + "produces": [ + "application/json" + ] } - }, - "default": { - "description": "Error response.", - "schema": { - "$ref": "#/definitions/APIError" + }, + "/persongroups/{personGroupId}/train": { + "post": { + "description": "Queue a person group training task, the training task may not be started immediately.", + "operationId": "PersonGroup_Train", + "parameters": [ + { + "name": "personGroupId", + "in": "path", + "description": "Target person group to be trained.", + "required": true, + "type": "string" + } + ], + "responses": { + "202": { + 
"description": "The training task was queued successfully." + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/APIError" + } + } + }, + "produces": [ + "application/json" + ] } - } }, - "produces": [ - "application/json" - ] - }, - "get": { - "description": "List all persons in a person group, and retrieve person information (including personId, name, userData and persistedFaceIds of registered faces of the person).", - "operationId": "Person_List", - "parameters": [ - { - "name": "personGroupId", - "in": "path", - "description": "personGroupId of the target person group.", - "required": true, - "type": "string" - } - ], - "responses": { - "200": { - "description": "A successful call returns an array of person information that belong to the person group.", - "schema": { - "$ref": "#/definitions/PersonResults" + "/facelists/{faceListId}": { + "put": { + "description": "Create an empty face list. Up to 64 face lists are allowed to exist in one subscription.", + "operationId": "FaceList_Create", + "parameters": [ + { + "name": "faceListId", + "in": "path", + "description": "Id referencing a particular face list.", + "required": true, + "type": "string", + "maxLength": 64, + "pattern": "^[a-z0-9-_]+$" + }, + { + "name": "body", + "in": "body", + "required": true, + "x-ms-client-flatten": true, + "schema": { + "$ref": "#/definitions/CreateFaceListRequest" + } + } + ], + "consumes": [ + "application/json" + ], + "responses": { + "200": { + "description": "A successful call returns an empty response body." + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/APIError" + } + } + }, + "produces": [ + "application/json" + ] }, - "examples": { - "application/json": [ - { - "personId": "25985303-c537-4467-b41d-bdb45cd95ca1", - "name": "Ryan", - "userData": "User-provided data attached to the person", - "persistedFaceIds": [ - "015839fb-fbd9-4f79-ace9-7675fc2f1dd9", - "fce92aed-d578-4d2e-8114-068f8af4492e", - "b64d5e15-8257-4af2-b20a-5a750f8940e7" - ] - }, - { - "personId": "2ae4935b-9659-44c3-977f-61fac20d0538", - "name": "David", - "userData": "User-provided data attached to the person", - "persistedFaceIds": [ - "30ea1073-cc9e-4652-b1e3-d08fb7b95315", - "fbd2a038-dbff-452c-8e79-2ee81b1aa84e" - ] - } - ] + "get": { + "description": "Retrieve a face list's information.", + "operationId": "FaceList_Get", + "parameters": [ + { + "name": "faceListId", + "in": "path", + "description": "Id referencing a Face List.", + "required": true, + "type": "string", + "maxLength": 64, + "pattern": "^[a-z0-9-_]+$" + } + ], + "responses": { + "200": { + "description": "A successful call returns the face list's information.", + "schema": { + "$ref": "#/definitions/GetFaceListResult" + }, + "examples": { + "application/json": { + "faceListId": "sample_list", + "name": "list1", + "userData": "User-provideddataattachedtothefacelist", + "persistedFaces": [ + { + "persistedFaceId": "B8D802CF-DD8F-4E61-B15C-9E6C5844CCBD", + "userData": "User-provideddataattachedtotheface" + } + ] + } + } + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/APIError" + } + } + }, + "produces": [ + "application/json" + ] + }, + "patch": { + "description": "Update information of a face list. 
", + "operationId": "FaceList_Update", + "parameters": [ + { + "name": "faceListId", + "in": "path", + "description": "Id referencing a Face List.", + "required": true, + "type": "string", + "maxLength": 64, + "pattern": "^[a-z0-9-_]+$" + }, + { + "name": "body", + "in": "body", + "required": true, + "x-ms-client-flatten": true, + "schema": { + "$ref": "#/definitions/CreateFaceListRequest" + } + } + ], + "consumes": [ + "application/json" + ], + "responses": { + "200": { + "description": "A successful call returns an empty response body." + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/APIError" + } + } + }, + "produces": [ + "application/json" + ] + }, + "delete": { + "description": "Delete an existing face list according to faceListId. Persisted face images in the face list will also be deleted.", + "operationId": "FaceList_Delete", + "parameters": [ + { + "name": "faceListId", + "in": "path", + "description": "Id referencing a Face List.", + "required": true, + "type": "string", + "pattern": "^[a-z0-9-_]+$", + "maxLength": 64 + } + ], + "responses": { + "200": { + "description": "A successful call returns an empty response body." + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/APIError" + } + } + }, + "produces": [ + "application/json" + ] } - }, - "default": { - "description": "Error response.", - "schema": { - "$ref": "#/definitions/APIError" + }, + "/facelists": { + "get": { + "description": "Retrieve information about all existing face lists. Only faceListId, name and userData will be returned.", + "operationId": "FaceList_List", + "responses": { + "200": { + "description": "A successful call returns an array of faceList.", + "schema": { + "$ref": "#/definitions/FaceListResults" + }, + "examples": { + "application/json": [ + { + "faceListId": "sample_list", + "name": "list1", + "userData": "User-provideddataattachedtothefacelist" + } + ] + } + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/APIError" + } + } + }, + "produces": [ + "application/json" + ] } - } }, - "produces": [ - "application/json" - ] - } + "/facelists/{faceListId}/persistedFaces/{persistedFaceId}": { + "delete": { + "description": "Delete an existing face from a face list (given by a persisitedFaceId and a faceListId). Persisted image related to the face will also be deleted.", + "operationId": "FaceList_DeleteFace", + "parameters": [ + { + "name": "faceListId", + "in": "path", + "description": "faceListId of an existing face list.", + "required": true, + "type": "string", + "maxLength": 64, + "pattern": "^[a-z0-9-_]+$" + }, + { + "name": "persistedFaceId", + "in": "path", + "description": "persistedFaceId of an existing face. ", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "A successful call returns an empty response body." + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/APIError" + } + } + }, + "produces": [ + "application/json" + ] + } + } }, - "/persongroups/{personGroupId}/persons/{personId}": { - "delete": { - "description": "Delete an existing person from a person group. 
Persisted face images of the person will also be deleted.", - "operationId": "Person_Delete", - "parameters": [ - { - "name": "personGroupId", - "in": "path", - "description": "Specifying the person group containing the person.", - "required": true, - "type": "string" - }, - { - "name": "personId", - "in": "path", - "description": "The target personId to delete.", - "required": true, - "type": "string" - } - ], - "responses": { - "200": { - "description": "A successful call returns an empty response body." - }, - "default": { - "description": "Error response.", - "schema": { - "$ref": "#/definitions/APIError" + "x-ms-paths": { + "/detect?overload=url": { + "post": { + "description": "Detect human faces in an image and returns face locations, and optionally with faceIds, landmarks, and attributes.", + "operationId": "Face_Detect", + "parameters": [ + { + "name": "returnFaceId", + "in": "query", + "description": "A value indicating whether the operation should return faceIds of detected faces.", + "type": "boolean", + "default": true + }, + { + "name": "returnFaceLandmarks", + "in": "query", + "description": "A value indicating whether the operation should return landmarks of the detected faces.", + "type": "boolean", + "default": false + }, + { + "name": "returnFaceAttributes", + "in": "query", + "description": "Analyze and return the one or more specified face attributes in the comma-separated string like \"returnFaceAttributes=age,gender\". Supported face attributes include age, gender, headPose, smile, facialHair, glasses and emotion. Note that each face attribute analysis has additional computational and time cost.", + "type": "string" + }, + { + "$ref": "../../Common/Parameters.json#/parameters/ImageUrl" + } + ], + "consumes": [ + "application/json" + ], + "responses": { + "200": { + "description": "A successful call returns an array of face entries ranked by face rectangle size in descendingorder. An empty response indicates no faces detected. 
", + "schema": { + "$ref": "#/definitions/DetectionResult" + }, + "examples": { + "application/json": [ + { + "faceId": "c5c24a82-6845-4031-9d5d-978df9175426", + "faceRectangle": { + "width": 78, + "height": 78, + "left": 394, + "top": 54 + }, + "faceLandmarks": { + "pupilLeft": { + "x": 412, + "y": 78 + }, + "pupilRight": { + "x": 446, + "y": 74 + }, + "noseTip": { + "x": 437, + "y": 92 + }, + "mouthLeft": { + "x": 417, + "y": 114 + }, + "mouthRight": { + "x": 451, + "y": 109 + }, + "eyebrowLeftOuter": { + "x": 397, + "y": 78 + }, + "eyebrowLeftInner": { + "x": 425, + "y": 70 + }, + "eyeLeftOuter": { + "x": 406, + "y": 80 + }, + "eyeLeftTop": { + "x": 412, + "y": 76 + }, + "eyeLeftBottom": { + "x": 413, + "y": 80 + }, + "eyeLeftInner": { + "x": 418, + "y": 78 + }, + "eyebrowRightInner": { + "x": 4, + "y": 69 + }, + "eyebrowRightOuter": { + "x": 5, + "y": 68 + }, + "eyeRightInner": { + "x": 441, + "y": 75 + }, + "eyeRightTop": { + "x": 446, + "y": 71 + }, + "eyeRightBottom": { + "x": 447, + "y": 75 + }, + "eyeRightOuter": { + "x": 451, + "y": 73 + }, + "noseRootLeft": { + "x": 428, + "y": 77 + }, + "noseRootRight": { + "x": 435, + "y": 75 + }, + "noseLeftAlarTop": { + "x": 428, + "y": 89 + }, + "noseRightAlarTop": { + "x": 442, + "y": 87 + }, + "noseLeftAlarOutTip": { + "x": 424, + "y": 96 + }, + "noseRightAlarOutTip": { + "x": 446, + "y": 92 + }, + "upperLipTop": { + "x": 437, + "y": 105 + }, + "upperLipBottom": { + "x": 437, + "y": 108 + }, + "underLipTop": { + "x": 436, + "y": 111 + }, + "underLipBottom": { + "x": 437, + "y": 114 + } + }, + "faceAttributes": { + "age": 71, + "gender": "male", + "smile": 0.88, + "facialHair": { + "mustache": 0.8, + "beard": 0.1, + "sideburns": 0.02 + } + }, + "glasses": "sunglasses", + "headPose": { + "roll": 2.1, + "yaw": 3, + "pitch": 0 + }, + "emotion": { + "anger": 0.575, + "contempt": 0, + "disgust": 0.006, + "fear": 0.008, + "happiness": 0.394, + "neutral": 0.013, + "sadness": 0, + "surprise": 0.004 + } + } + ] + } + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/APIError" + } + } + }, + "produces": [ + "application/json" + ] } - } }, - "produces": [ - "application/json" - ] - }, - "get": { - "description": "Retrieve a person's information, including registered persisted faces, name and userData.", - "operationId": "Person_Get", - "parameters": [ - { - "name": "personGroupId", - "in": "path", - "description": "Specifying the person group containing the target person.", - "required": true, - "type": "string" - }, - { - "name": "personId", - "in": "path", - "description": "Specifying the target person.", - "required": true, - "type": "string" - } - ], - "responses": { - "200": { - "description": "A successful call returns the person's information.", - "schema": { - "$ref": "#/definitions/PersonResult" - }, - "examples": { - "application/json": { - "personId": "25985303-c537-4467-b41d-bdb45cd95ca1", - "persistedFaceIds": [ - "015839fb-fbd9-4f79-ace9-7675fc2f1dd9", - "fce92aed-d578-4d2e-8114-068f8af4492e", - "b64d5e15-8257-4af2-b20a-5a750f8940e7" + "/detect?overload=stream": { + "post": { + "description": "Detect human faces in an image and returns face locations, and optionally with faceIds, landmarks, and attributes.", + "operationId": "Face_DetectInStream", + "parameters": [ + { + "name": "returnFaceId", + "in": "query", + "description": "A value indicating whether the operation should return faceIds of detected faces.", + "type": "boolean", + "default": true + }, + { + "name": "returnFaceLandmarks", + 
"in": "query", + "description": "A value indicating whether the operation should return landmarks of the detected faces.", + "type": "boolean", + "default": false + }, + { + "name": "returnFaceAttributes", + "in": "query", + "description": "Analyze and return the one or more specified face attributes in the comma-separated string like \"returnFaceAttributes=age,gender\". Supported face attributes include age, gender, headPose, smile, facialHair, glasses and emotion. Note that each face attribute analysis has additional computational and time cost.", + "type": "string" + }, + { + "$ref": "../../Common/Parameters.json#/parameters/ImageStream" + } + ], + "consumes": [ + "application/octet-stream" ], - "name": "Ryan", - "userData": "User-provided data attached to the person" - } + "responses": { + "200": { + "description": "A successful call returns an array of face entries ranked by face rectangle size in descendingorder. An empty response indicates no faces detected. ", + "schema": { + "$ref": "#/definitions/DetectionResult" + }, + "examples": { + "application/json": [ + { + "faceId": "c5c24a82-6845-4031-9d5d-978df9175426", + "faceRectangle": { + "width": 78, + "height": 78, + "left": 394, + "top": 54 + }, + "faceLandmarks": { + "pupilLeft": { + "x": 412, + "y": 78 + }, + "pupilRight": { + "x": 446, + "y": 74 + }, + "noseTip": { + "x": 437, + "y": 92 + }, + "mouthLeft": { + "x": 417, + "y": 114 + }, + "mouthRight": { + "x": 451, + "y": 109 + }, + "eyebrowLeftOuter": { + "x": 397, + "y": 78 + }, + "eyebrowLeftInner": { + "x": 425, + "y": 70 + }, + "eyeLeftOuter": { + "x": 406, + "y": 80 + }, + "eyeLeftTop": { + "x": 412, + "y": 76 + }, + "eyeLeftBottom": { + "x": 413, + "y": 80 + }, + "eyeLeftInner": { + "x": 418, + "y": 78 + }, + "eyebrowRightInner": { + "x": 4, + "y": 69 + }, + "eyebrowRightOuter": { + "x": 5, + "y": 68 + }, + "eyeRightInner": { + "x": 441, + "y": 75 + }, + "eyeRightTop": { + "x": 446, + "y": 71 + }, + "eyeRightBottom": { + "x": 447, + "y": 75 + }, + "eyeRightOuter": { + "x": 451, + "y": 73 + }, + "noseRootLeft": { + "x": 428, + "y": 77 + }, + "noseRootRight": { + "x": 435, + "y": 75 + }, + "noseLeftAlarTop": { + "x": 428, + "y": 89 + }, + "noseRightAlarTop": { + "x": 442, + "y": 87 + }, + "noseLeftAlarOutTip": { + "x": 424, + "y": 96 + }, + "noseRightAlarOutTip": { + "x": 446, + "y": 92 + }, + "upperLipTop": { + "x": 437, + "y": 105 + }, + "upperLipBottom": { + "x": 437, + "y": 108 + }, + "underLipTop": { + "x": 436, + "y": 111 + }, + "underLipBottom": { + "x": 437, + "y": 114 + } + }, + "faceAttributes": { + "age": 71, + "gender": "male", + "smile": 0.88, + "facialHair": { + "mustache": 0.8, + "beard": 0.1, + "sideburns": 0.02 + } + }, + "glasses": "sunglasses", + "headPose": { + "roll": 2.1, + "yaw": 3, + "pitch": 0 + }, + "emotion": { + "anger": 0.575, + "contempt": 0, + "disgust": 0.006, + "fear": 0.008, + "happiness": 0.394, + "neutral": 0.013, + "sadness": 0, + "surprise": 0.004 + } + } + ] + } + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/APIError" + } + } + }, + "produces": [ + "application/json" + ] } - }, - "default": { - "description": "Error response.", - "schema": { - "$ref": "#/definitions/APIError" + }, + "/persongroups/{personGroupId}/persons/{personId}/persistedFaces?overload=url": { + "post": { + "description": "Add a representative face to a person for identification. 
The input face is specified as an image with a targetFace rectangle.", + "operationId": "Person_AddFace", + "parameters": [ + { + "name": "personGroupId", + "in": "path", + "description": "Specifying the person group containing the target person.", + "required": true, + "type": "string" + }, + { + "name": "personId", + "in": "path", + "description": "Target person that the face is added to.", + "required": true, + "type": "string" + }, + { + "name": "userData", + "in": "query", + "description": "User-specified data about the target face to add for any purpose. The maximum length is 1KB. ", + "type": "string" + }, + { + "name": "targetFace", + "in": "query", + "description": "A face rectangle to specify the target face to be added to a person in the format of \"targetFace=left,top,width,height\". E.g. \"targetFace=10,10,100,100\". If there is more than one face in the image, targetFace is required to specify which face to add. No targetFace means there is only one face detected in the entire image. ", + "type": "string" + } + ], + "consumes": [ + "application/json", + "application/octet-stream" + ], + "responses": { + "200": { + "description": "A successful call returns the new persistedFaceId.", + "examples": { + "application/json": { + "persistedFaceId": "B8D802CF-DD8F-4E61-B15C-9E6C5844CCBA" + } + } + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/APIError" + } + } + }, + "produces": [ + "application/json" + ] } - } }, - "produces": [ - "application/json" - ] - }, - "patch": { - "description": "Update name or userData of a person.", - "operationId": "Person_Update", - "parameters": [ - { - "name": "personGroupId", - "in": "path", - "description": "Specifying the person group containing the target person.", - "required": true, - "type": "string" - }, - { - "name": "personId", - "in": "path", - "description": "personId of the target person.", - "required": true, - "type": "string" - }, - { - "name": "body", - "in": "body", - "required": true, - "x-ms-client-flatten": true, - "schema": { - "$ref": "#/definitions/CreatePersonRequest" + "/persongroups/{personGroupId}/persons/{personId}/persistedFaces?overload=stream": { + "post": { + "description": "Add a representative face to a person for identification. The input face is specified as an image with a targetFace rectangle.", + "operationId": "Person_AddFaceFromStream", + "parameters": [ + { + "name": "personGroupId", + "in": "path", + "description": "Specifying the person group containing the target person.", + "required": true, + "type": "string" + }, + { + "name": "personId", + "in": "path", + "description": "Target person that the face is added to.", + "required": true, + "type": "string" + }, + { + "name": "userData", + "in": "query", + "description": "User-specified data about the target face to add for any purpose. The maximum length is 1KB. ", + "type": "string" + }, + { + "name": "targetFace", + "in": "query", + "description": "A face rectangle to specify the target face to be added to a person, in the format of \"targetFace=left,top,width,height\". E.g. \"targetFace=10,10,100,100\". If there is more than one face in the image, targetFace is required to specify which face to add. No targetFace means there is only one face detected in the entire image. 
", + "type": "string" + } + ], + "consumes": [ + "application/json", + "application/octet-stream" + ], + "responses": { + "200": { + "description": "A successful call returns the new persistedFaceId.", + "examples": { + "application/json": { + "persistedFaceId": "B8D802CF-DD8F-4E61-B15C-9E6C5844CCBA" + } + } + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/APIError" + } + } + }, + "produces": [ + "application/json" + ] } - } - ], - "consumes": [ - "application/json" - ], - "responses": { - "200": { - "description": "A successful call returns an empty response body." - }, - "default": { - "description": "Error response.", - "schema": { - "$ref": "#/definitions/APIError" + }, + "/facelists/{faceListId}/persistedFaces?overload=url": { + "post": { + "description": "Add a face to a face list. The input face is specified as an image with a targetFace rectangle. It returns a persistedFaceId representing the added face, and persistedFaceId will not expire. ", + "operationId": "FaceList_AddFace", + "parameters": [ + { + "name": "faceListId", + "in": "path", + "description": "Id referencing a Face List.", + "required": true, + "type": "string", + "maxLength": 64, + "pattern": "^[a-z0-9-_]+$" + }, + { + "name": "userData", + "in": "query", + "description": "User-specified data about the face list for any purpose. The maximum length is 1KB.", + "type": "string" + }, + { + "name": "targetFace", + "in": "query", + "description": "A face rectangle to specify the target face to be added into the face list, in the format of \"targetFace=left,top,width,height\". E.g. \"targetFace=10,10,100,100\". If there is more than one face in the image, targetFace is required to specify which face to add. No targetFace means there is only one face detected in the entire image.", + "type": "string" + } + ], + "consumes": [ + "application/json", + "application/octet-stream" + ], + "responses": { + "200": { + "description": "A successful call returns a new persistedFaceId.", + "examples": { + "application/json": { + "persistedFaceId": "B8D802CF-DD8F-4E61-B15C-9E6C5844CCBA" + } + } + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/APIError" + } + } + }, + "produces": [ + "application/json" + ] } - } }, - "produces": [ - "application/json" - ] - } + "/facelists/{faceListId}/persistedFaces?overload=stream": { + "post": { + "description": "Add a face to a face list. The input face is specified as an image with a targetFace rectangle. It returns a persistedFaceId representing the added face, and persistedFaceId will not expire.", + "operationId": "FaceList_AddFaceFromStream", + "parameters": [ + { + "name": "faceListId", + "in": "path", + "description": "Id referencing a Face List.", + "required": true, + "type": "string", + "maxLength": 64, + "pattern": "^[a-z0-9-_]+$" + }, + { + "name": "userData", + "in": "query", + "description": "User-specified data about the face list for any purpose. The maximum length is 1KB.", + "type": "string" + }, + { + "name": "targetFace", + "in": "query", + "description": "A face rectangle to specify the target face to be added into the face list, in the format of \"targetFace=left,top,width,height\". E.g. \"targetFace=10,10,100,100\". If there is more than one face in the image, targetFace is required to specify which face to add. 
No targetFace means there is only one face detected in the entire image.", + "type": "string" + } + ], + "consumes": [ + "application/json", + "application/octet-stream" + ], + "responses": { + "200": { + "description": "A successful call returns a new persistedFaceId.", + "examples": { + "application/json": { + "persistedFaceId": "B8D802CF-DD8F-4E61-B15C-9E6C5844CCBA" + } + } + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/APIError" + } + } + }, + "produces": [ + "application/json" + ] + } + } }, - "/persongroups/{personGroupId}/persons/{personId}/persistedFaces/{persistedFaceId}": { - "delete": { - "description": "Delete a face from a person. Relative image for the persisted face will also be deleted.", - "operationId": "Person_DeleteFace", - "parameters": [ - { - "name": "personGroupId", - "in": "path", - "description": "Specifying the person group containing the target person.", - "required": true, - "type": "string" - }, - { - "name": "personId", - "in": "path", - "description": "Specifying the person that the target persisted face belong to.", - "required": true, - "type": "string" - }, - { - "name": "persistedFaceId", - "in": "path", - "description": "The persisted face to remove.", - "required": true, - "type": "string" - } - ], - "responses": { - "200": { - "description": "A successful call returns an empty response body." - }, - "default": { - "description": "Error response.", - "schema": { - "$ref": "#/definitions/APIError" + "definitions": { + "APIError": { + "type": "object", + "description": "Error information returned by the API", + "properties": { + "error": { + "$ref": "#/definitions/Error" + } } - } }, - "produces": [ - "application/json" - ] - }, - "get": { - "description": "Retrieve information about a persisted face (specified by persistedFaceId, personId and its belonging personGroupId).", - "operationId": "Person_GetFace", - "parameters": [ - { - "name": "personGroupId", - "in": "path", - "description": "Specifying the person group containing the target person.", - "required": true, - "type": "string" - }, - { - "name": "personId", - "in": "path", - "description": "Specifying the target person that the face belongs to.", - "required": true, - "type": "string" - }, - { - "name": "persistedFaceId", - "in": "path", - "description": "The persistedFaceId of the target persisted face of the person.", - "required": true, - "type": "string" - } - ], - "responses": { - "200": { - "description": "A successful call returns target persisted face's information (persistedFaceId and userData).", - "schema": { - "$ref": "#/definitions/PersonFaceResult" - }, - "examples": { - "application/json": { - "persistedFaceId": "015839fb-fbd9-4f79-ace9-7675fc2f1dd9", - "userData": "User-provided data attached to the person" - } + "Error": { + "type": "object", + "description": "Error body.", + "properties": { + "code": { + "type": "string" + }, + "message": { + "type": "string" + } } - }, - "default": { - "description": "Error response.", - "schema": { - "$ref": "#/definitions/APIError" + }, + "DetectionResult": { + "type": "array", + "items": { + "$ref": "#/definitions/DetectedFace" } - } }, - "produces": [ - "application/json" - ] - }, - "patch": { - "description": "Update a person persisted face's userData field.", - "operationId": "Person_UpdateFace", - "parameters": [ - { - "name": "personGroupId", - "in": "path", - "description": "Specifying the person group containing the target person.", - "required": true, - "type": "string" - }, - { - 
"name": "personId", - "in": "path", - "description": "personId of the target person.", - "required": true, - "type": "string" - }, - { - "name": "persistedFaceId", - "in": "path", - "description": "persistedFaceId of target face, which is persisted and will not expire.", - "required": true, - "type": "string" - }, - { - "name": "body", - "in": "body", - "required": true, - "x-ms-client-flatten": true, - "schema": { - "$ref": "#/definitions/UpdatePersonFaceDataRequest" + "DetectedFace": { + "type": "object", + "required": [ + "faceRectangle" + ], + "description": "Detected Face object.", + "properties": { + "faceId": { + "type": "string", + "maxLength": 64 + }, + "faceRectangle": { + "$ref": "#/definitions/FaceRectangle" + }, + "faceLandmarks": { + "$ref": "#/definitions/FaceLandmarks" + } } - } - ], - "consumes": [ - "application/json" - ], - "responses": { - "200": { - "description": "A successful call returns an empty response body." - }, - "default": { - "description": "Error response.", - "schema": { - "$ref": "#/definitions/APIError" + }, + "FaceRectangle": { + "type": "object", + "required": [ + "width", + "height", + "left", + "top" + ], + "description": "A rectangle within which a face can be found", + "properties": { + "width": { + "type": "integer", + "format": "int32", + "description": "The width of the rectangle, in pixels." + }, + "height": { + "type": "integer", + "format": "int32", + "description": "The height of the rectangle, in pixels." + }, + "left": { + "type": "integer", + "format": "int32", + "description": "The distance from the left edge if the image to the left edge of the rectangle, in pixels." + }, + "top": { + "type": "integer", + "format": "int32", + "description": "The distance from the top edge if the image to the top edge of the rectangle, in pixels." + } } - } }, - "produces": [ - "application/json" - ] - } - }, - "/persongroups/{personGroupId}": { - "put": { - "description": "Create a new person group with specified personGroupId, name and user-provided userData.", - "operationId": "PersonGroup_Create", - "parameters": [ - { - "name": "personGroupId", - "in": "path", - "description": "User-provided personGroupId as a string.", - "required": true, - "type": "string", - "maxLength": 64, - "pattern": "^[a-z0-9-_]+$" - }, - { - "name": "body", - "in": "body", - "required": true, - "x-ms-client-flatten": true, - "schema": { - "$ref": "#/definitions/CreatePersonGroupRequest" + "Position": { + "type": "object", + "required": [ + "x", + "y" + ], + "description": "Coordinates within an image", + "properties": { + "x": { + "type": "integer", + "format": "int32", + "description": "The horizontal component, in pixels." + }, + "y": { + "type": "integer", + "format": "int32", + "description": "The vertical component, in pixels." + } } - } - ], - "consumes": [ - "application/json" - ], - "responses": { - "200": { - "description": "A successful call returns an empty response body." 
- }, - "default": { - "description": "Error response.", - "schema": { - "$ref": "#/definitions/APIError" + }, + "FaceLandmarks": { + "type": "object", + "description": "A collection of 27-point face landmarks pointing to the important positions of face components.", + "properties": { + "pupilLeft": { + "$ref": "#/definitions/Position" + }, + "pupilRight": { + "$ref": "#/definitions/Position" + }, + "noseTip": { + "$ref": "#/definitions/Position" + }, + "mouthLeft": { + "$ref": "#/definitions/Position" + }, + "mouthRight": { + "$ref": "#/definitions/Position" + }, + "eyebrowLeftOuter": { + "$ref": "#/definitions/Position" + }, + "eyebrowLeftInner": { + "$ref": "#/definitions/Position" + }, + "eyeLeftOuter": { + "$ref": "#/definitions/Position" + }, + "eyeLeftTop": { + "$ref": "#/definitions/Position" + }, + "eyeLeftBottom": { + "$ref": "#/definitions/Position" + }, + "eyeLeftInner": { + "$ref": "#/definitions/Position" + }, + "eyebrowRightInner": { + "$ref": "#/definitions/Position" + }, + "eyebrowRightOuter": { + "$ref": "#/definitions/Position" + }, + "eyeRightInner": { + "$ref": "#/definitions/Position" + }, + "eyeRightTop": { + "$ref": "#/definitions/Position" + }, + "eyeRightBottom": { + "$ref": "#/definitions/Position" + }, + "eyeRightOuter": { + "$ref": "#/definitions/Position" + }, + "noseRootLeft": { + "$ref": "#/definitions/Position" + }, + "noseRootRight": { + "$ref": "#/definitions/Position" + }, + "noseLeftAlarTop": { + "$ref": "#/definitions/Position" + }, + "noseRightAlarTop": { + "$ref": "#/definitions/Position" + }, + "noseLeftAlarOutTip": { + "$ref": "#/definitions/Position" + }, + "noseRightAlarOutTip": { + "$ref": "#/definitions/Position" + }, + "upperLipTop": { + "$ref": "#/definitions/Position" + }, + "upperLipBottom": { + "$ref": "#/definitions/Position" + }, + "underLipTop": { + "$ref": "#/definitions/Position" + }, + "underLipBottom": { + "$ref": "#/definitions/Position" + } } - } }, - "produces": [ - "application/json" - ] - }, - "delete": { - "description": "Delete an existing person group. Persisted face images of all people in the person group will also be deleted.", - "operationId": "PersonGroup_Delete", - "parameters": [ - { - "name": "personGroupId", - "in": "path", - "description": "The personGroupId of the person group to be deleted.", - "required": true, - "type": "string" - } - ], - "responses": { - "200": { - "description": "A successful call returns an empty response body." - }, - "default": { - "description": "Error response.", - "schema": { - "$ref": "#/definitions/APIError" + "FaceAttributes": { + "type": "object", + "description": "Face Attributes", + "properties": { + "age": { + "type": "number", + "description": "Age in years" + }, + "gender": { + "type": "string", + "description": "Gender: male or female", + "enum": [ + "male", + "female" + ] + }, + "smile": { + "type": "number", + "description": "Smile intensity, a number between [0,1] ", + "minimum": 0, + "maximum": 1 + }, + "glasses": { + "type": "string", + "description": "Glasses type. Possible values are 'noGlasses', 'readingGlasses', 'sunglasses', 'swimmingGoggles'. 
", + "enum": [ + "noGlasses", + "readingGlasses", + "sunglasses", + "swimmingGoggles" + ] + }, + "facialHair": { + "$ref": "#/definitions/FacialHairProperties" + }, + "headPose": { + "$ref": "#/definitions/HeadPoseProperties" + }, + "emotion": { + "$ref": "#/definitions/EmotionProperties" + } } - } }, - "produces": [ - "application/json" - ] - }, - "get": { - "description": "Retrieve the information of a person group, including its name and userData.", - "operationId": "PersonGroup_Get", - "parameters": [ - { - "name": "personGroupId", - "in": "path", - "description": "personGroupId of the target person group.", - "required": true, - "type": "string" - } - ], - "responses": { - "200": { - "description": "A successful call returns the person group's information.", - "schema": { - "$ref": "#/definitions/PersonGroupResult" - }, - "examples": { - "application/json": { - "personGroupId": "sample_group", - "name": "group1", - "userData": "User-provided data attached to the person group" - } + "FacialHairProperties": { + "type": "object", + "description": "Properties describing facial hair attributes.", + "properties": { + "mustache": { + "type": "number", + "minimum": 0, + "maximum": 1 + }, + "beard": { + "type": "number", + "minimum": 0, + "maximum": 1 + }, + "sideburns": { + "type": "number", + "minimum": 0, + "maximum": 1 + } } - }, - "default": { - "description": "Error response.", - "schema": { - "$ref": "#/definitions/APIError" + }, + "HeadPoseProperties": { + "type": "object", + "description": "Properties indicating head pose of the face.", + "properties": { + "roll": { + "type": "number" + }, + "yaw": { + "type": "number" + }, + "pitch": { + "type": "number" + } } - } }, - "produces": [ - "application/json" - ] - }, - "patch": { - "description": "Update an existing person group's display name and userData. The properties which does not appear in request body will not be updated.", - "operationId": "PersonGroup_Update", - "parameters": [ - { - "name": "personGroupId", - "in": "path", - "description": "personGroupId of the person group to be updated.", - "required": true, - "type": "string" - }, - { - "name": "body", - "in": "body", - "required": true, - "x-ms-client-flatten": true, - "schema": { - "$ref": "#/definitions/CreatePersonGroupRequest" + "EmotionProperties": { + "type": "object", + "description": "Properties describing facial emotion.", + "properties": { + "anger": { + "type": "number", + "minimum": 0, + "maximum": 1 + }, + "contempt": { + "type": "number", + "minimum": 0, + "maximum": 1 + }, + "disgust": { + "type": "number", + "minimum": 0, + "maximum": 1 + }, + "fear": { + "type": "number", + "minimum": 0, + "maximum": 1 + }, + "happiness": { + "type": "number", + "minimum": 0, + "maximum": 1 + }, + "neutral": { + "type": "number", + "minimum": 0, + "maximum": 1 + }, + "sadness": { + "type": "number", + "minimum": 0, + "maximum": 1 + }, + "surprise": { + "type": "number", + "minimum": 0, + "maximum": 1 + } } - } - ], - "consumes": [ - "application/json" - ], - "responses": { - "200": { - "description": "A successful call returns an empty response body." - }, - "default": { - "description": "Error response.", - "schema": { - "$ref": "#/definitions/APIError" + }, + "FindSimilarRequest": { + "type": "object", + "required": [ + "faceId" + ], + "description": "Request body for find similar operation.", + "properties": { + "faceId": { + "type": "string", + "description": "FaceId of the query face. User needs to call Face - Detect first to get a valid faceId. 
Note that this faceId is not persisted and will expire 24 hours after the detection call.",
+          "maxLength": 64
+        },
+        "faceListId": {
+          "type": "string",
+          "description": "An existing user-specified unique candidate face list, created in Face List - Create a Face List. A face list contains a set of persistedFaceIds which are persisted and will never expire. Parameter faceListId and faceIds should not be provided at the same time.",
+          "maxLength": 64,
+          "pattern": "^[a-z0-9-_]+$"
+        },
+        "faceIds": {
+          "type": "array",
+          "description": "An array of candidate faceIds. All of them are created by Face - Detect and the faceIds will expire 24 hours after the detection call.",
+          "maxItems": 1000,
+          "items": {
+            "type": "string",
+            "maxLength": 64
+          }
+        },
+        "maxNumOfCandidatesReturned": {
+          "type": "integer",
+          "description": "The number of top similar faces returned. The valid range is [1, 1000].",
+          "default": 20,
+          "minimum": 1,
+          "maximum": 1000
+        },
+        "mode": {
+          "type": "string",
+          "description": "Similar face searching mode. It can be \"matchPerson\" or \"matchFace\".",
+          "default": "matchPerson",
+          "enum": [
+            "matchPerson",
+            "matchFace"
+          ]
+        }
+      }
+    },
+    "SimilarFaceResults": {
+      "type": "array",
+      "items": {
+        "$ref": "#/definitions/SimilarFaceResult"
+      }
+    },
+    "SimilarFaceResult": {
+      "type": "object",
+      "required": [
+        "faceId",
+        "persistedFaceId"
+      ],
+      "description": "Response body for find similar face operation.",
+      "properties": {
+        "faceId": {
+          "type": "string",
+          "description": "faceId of the candidate face when found by faceIds. faceId is created by Face - Detect and will expire 24 hours after the detection call.",
+          "maxLength": 64
+        },
+        "persistedFaceId": {
+          "type": "string",
+          "description": "persistedFaceId of the candidate face when found by faceListId. A persistedFaceId in a face list is persisted and will not expire, as shown in the response below."
+        },
+        "confidence": {
+          "type": "number",
+          "description": "Similarity confidence of the candidate face. The higher the confidence, the more similar the faces. The range is [0,1]."
+        }
+      }
+    },
+    "GroupRequest": {
+      "type": "object",
+      "required": [
+        "faceIds"
+      ],
+      "description": "Request body for group request.",
+      "properties": {
+        "faceIds": {
+          "type": "array",
+          "description": "Array of candidate faceIds created by Face - Detect. The maximum is 1000 faces.",
+          "maxItems": 1000,
+          "items": {
+            "type": "string",
+            "maxLength": 64
+          }
+        }
+      }
+    },
+    "GroupResponse": {
+      "type": "object",
+      "required": [
+        "groups"
+      ],
+      "description": "An array of face groups based on face similarity.",
+      "properties": {
+        "groups": {
+          "type": "array",
+          "description": "A partition of the original faces based on face similarity. 
Groups are ranked by number of faces.",
+                    "items": {
+                        "type": "array",
+                        "items": {
+                            "type": "string"
+                        }
+                    }
+                },
+                "messyGroup": {
+                    "type": "array",
+                    "description": "Array of faceIds for faces that could not be matched to any similar face among the original faces.",
+                    "items": {
+                        "type": "string"
+                    }
+                }
+            }
        },
-      "produces": [
-        "application/json"
-      ]
-    },
-    "get": {
-      "description": "Retrieve a face list's information.",
-      "operationId": "FaceList_Get",
-      "parameters": [
-        {
-          "name": "faceListId",
-          "in": "path",
-          "description": "Id referencing a Face List.",
-          "required": true,
-          "type": "string",
-          "maxLength": 64,
-          "pattern": "^[a-z0-9-_]+$"
-        }
-      ],
-      "responses": {
-        "200": {
-          "description": "A successful call returns the face list's information.",
-          "schema": {
-            "$ref": "#/definitions/GetFaceListResult"
-          },
-          "examples": {
-            "application/json": {
-              "faceListId": "sample_list",
-              "name": "list1",
-              "userData": "User-provided data attached to the face list",
-              "persistedFaces": [
-                {
-                  "persistedFaceId": "B8D802CF-DD8F-4E61-B15C-9E6C5844CCBD",
-                  "userData": "User-provided data attached to the face"
-                }
-              ]
-            }
-          }
-        },
-        "default": {
-          "description": "Error response.",
-          "schema": {
-            "$ref": "#/definitions/APIError"
+        "IdentifyRequest": {
+            "type": "object",
+            "required": [
+                "personGroupId",
+                "faceIds"
+            ],
+            "description": "Request body for identify face operation.",
+            "properties": {
+                "personGroupId": {
+                    "type": "string",
+                    "description": "personGroupId of the target person group, created by PersonGroups.Create"
+                },
+                "faceIds": {
+                    "type": "array",
+                    "description": "Array of candidate faceIds created by Face - Detect.",
+                    "maxItems": 1000,
+                    "items": {
+                        "type": "string"
+                    }
+                },
+                "maxNumOfCandidatesReturned": {
+                    "type": "integer",
+                    "description": "The number of top similar faces returned.",
+                    "default": 1,
+                    "minimum": 1,
+                    "maximum": 1000
+                },
+                "confidenceThreshold": {
+                    "type": "number",
+                    "description": "Confidence threshold of identification, used to judge whether one face belongs to one person.",
+                    "minimum": 0,
+                    "maximum": 1
+                }
+            }
        },
-      "produces": [
-        "application/json"
-      ]
-    },
-    "patch": {
-      "description": "Update information of a face list.",
-      "operationId": "FaceList_Update",
-      "parameters": [
-        {
-          "name": "faceListId",
-          "in": "path",
-          "description": "Id referencing a Face List.",
-          "required": true,
-          "type": "string",
-          "maxLength": 64,
-          "pattern": "^[a-z0-9-_]+$"
-        },
-        {
-          "name": "body",
-          "in": "body",
-          "required": true,
-          "x-ms-client-flatten": true,
-          "schema": {
-            "$ref": "#/definitions/CreateFaceListRequest"
-          }
-        }
-      ],
-      "consumes": [
-        "application/json"
-      ],
-      "responses": {
-        "200": {
-          "description": "A successful call returns an empty response body."
-        },
-        "default": {
-          "description": "Error response.",
-          "schema": {
-            "$ref": "#/definitions/APIError"
+        "IdentifyResult": {
+            "type": "array",
+            "items": {
+                "$ref": "#/definitions/IdentifyResultItem"
            }
        },
-      "produces": [
-        "application/json"
-      ]
-    },
-    "delete": {
-      "description": "Delete an existing face list according to faceListId. Persisted face images in the face list will also be deleted.",
-      "operationId": "FaceList_Delete",
-      "parameters": [
-        {
-          "name": "faceListId",
-          "in": "path",
-          "description": "Id referencing a Face List.",
-          "required": true,
-          "type": "string",
-          "pattern": "^[a-z0-9-_]+$",
-          "maxLength": 64
-        }
-      ],
-      "responses": {
-        "200": {
-          "description": "A successful call returns an empty response body." 
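The /facelists/{faceListId} path above pairs PUT, PATCH, and DELETE into a simple lifecycle. A hedged sketch, reusing the ENDPOINT and HEADERS placeholders from the Find Similar example; the list id and payloads are illustrative only.

```python
LIST_ID = "sample_list"  # must match ^[a-z0-9-_]+$, max length 64

# Create an empty face list (up to 64 lists per subscription).
requests.put(
    f"{ENDPOINT}/facelists/{LIST_ID}",
    headers=HEADERS,
    json={"name": "list1", "userData": "User-provided data attached to the face list"},
)

# Update name/userData; the body reuses the CreateFaceListRequest shape.
requests.patch(
    f"{ENDPOINT}/facelists/{LIST_ID}",
    headers=HEADERS,
    json={"name": "list1-renamed"},
)

# Delete the list; its persisted face images are deleted with it.
requests.delete(f"{ENDPOINT}/facelists/{LIST_ID}", headers=HEADERS)
```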
-        },
-        "default": {
-          "description": "Error response.",
-          "schema": {
-            "$ref": "#/definitions/APIError"
+        "IdentifyResultItem": {
+            "type": "object",
+            "required": [
+                "faceId",
+                "candidates"
+            ],
+            "description": "Response body for identify face operation.",
+            "properties": {
+                "faceId": {
+                    "type": "string",
+                    "description": "faceId of the query face",
+                    "maxLength": 64
+                },
+                "candidates": {
+                    "type": "array",
+                    "items": {
+                        "$ref": "#/definitions/IdentifyResultCandidate"
+                    }
+                }
+            }
        },
-      "produces": [
-        "application/json"
-      ]
-    }
-  },
-  "/facelists": {
-    "get": {
-      "description": "Retrieve information about all existing face lists. Only faceListId, name and userData will be returned.",
-      "operationId": "FaceList_List",
-      "responses": {
-        "200": {
-          "description": "A successful call returns an array of face lists.",
-          "schema": {
-            "$ref": "#/definitions/FaceListResults"
-          },
-          "examples": {
-            "application/json": [
-              {
-                "faceListId": "sample_list",
-                "name": "list1",
-                "userData": "User-provided data attached to the face list"
+        "IdentifyResultCandidate": {
+            "type": "object",
+            "required": [
+                "personId",
+                "confidence"
+            ],
+            "description": "All possible faces that may qualify.",
+            "properties": {
+                "personId": {
+                    "type": "string",
+                    "description": "Id of the candidate person"
+                },
+                "confidence": {
+                    "type": "number",
+                    "description": "Confidence level in the candidate person: a float number between 0.0 and 1.0.",
+                    "minimum": 0,
+                    "maximum": 1
                }
-              ]
-            }
-          },
-          "default": {
-            "description": "Error response.",
-            "schema": {
-              "$ref": "#/definitions/APIError"
            }
-          }
        },
-      "produces": [
-        "application/json"
-      ]
-    }
-  },
-  "/facelists/{faceListId}/persistedFaces/{persistedFaceId}": {
-    "delete": {
-      "description": "Delete an existing face from a face list (given by a persistedFaceId and a faceListId). The persisted image related to the face will also be deleted.",
-      "operationId": "FaceList_DeleteFace",
-      "parameters": [
-        {
-          "name": "faceListId",
-          "in": "path",
-          "description": "faceListId of an existing face list.",
-          "required": true,
-          "type": "string",
-          "maxLength": 64,
-          "pattern": "^[a-z0-9-_]+$"
-        },
-        {
-          "name": "persistedFaceId",
-          "in": "path",
-          "description": "persistedFaceId of an existing face.",
-          "required": true,
-          "type": "string"
-        }
-      ],
-      "responses": {
-        "200": {
-          "description": "A successful call returns an empty response body."
-        },
-        "default": {
-          "description": "Error response.",
-          "schema": {
-            "$ref": "#/definitions/APIError"
+        "VerifyRequest": {
+            "type": "object",
+            "required": [
+                "faceId",
+                "personId",
+                "personGroupId"
+            ],
+            "description": "Request body for verify operation.",
+            "properties": {
+                "faceId": {
+                    "type": "string",
+                    "description": "faceId of the face, comes from Face - Detect",
+                    "maxLength": 64
+                },
+                "personId": {
+                    "type": "string",
+                    "description": "Specify a certain person in a person group. personId is created in Persons.Create."
+                },
+                "personGroupId": {
+                    "type": "string",
+                    "description": "Using existing personGroupId and personId for fast loading a specified person. personGroupId is created in Person Groups.Create." 
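A sketch pairing the Identify and Verify contracts defined above: Identify proposes candidate persons for each query faceId, and Verify checks a single face/person pair. The /identify and /verify routes are assumptions from Face API conventions (they are defined outside this hunk); ids are illustrative.

```python
identify_body = {
    "personGroupId": "sample_group",
    "faceIds": ["c5c24a82-6845-4031-9d5d-978df9175426"],
    "maxNumOfCandidatesReturned": 1,
    "confidenceThreshold": 0.5,
}
results = requests.post(
    f"{ENDPOINT}/identify", headers=HEADERS, json=identify_body
).json()

# IdentifyResult: one IdentifyResultItem per query faceId.
for item in results:
    for cand in item["candidates"]:  # IdentifyResultCandidate
        verify_body = {
            "faceId": item["faceId"],
            "personId": cand["personId"],
            "personGroupId": "sample_group",
        }
        check = requests.post(
            f"{ENDPOINT}/verify", headers=HEADERS, json=verify_body
        ).json()
        print(cand["personId"], check["isIdentical"], check["confidence"])
```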
+ } } - } }, - "produces": [ - "application/json" - ] - } - } - }, - "x-ms-paths": { - "/detect?overload=url": { - "post": { - "description": "Detect human faces in an image and returns face locations, and optionally with faceIds, landmarks, and attributes.", - "operationId": "Face_Detect", - "parameters": [ - { - "name": "returnFaceId", - "in": "query", - "description": "A value indicating whether the operation should return faceIds of detected faces.", - "type": "boolean", - "default": true - }, - { - "name": "returnFaceLandmarks", - "in": "query", - "description": "A value indicating whether the operation should return landmarks of the detected faces.", - "type": "boolean", - "default": false - }, - { - "name": "returnFaceAttributes", - "in": "query", - "description": "Analyze and return the one or more specified face attributes in the comma-separated string like \"returnFaceAttributes=age,gender\". Supported face attributes include age, gender, headPose, smile, facialHair, glasses and emotion. Note that each face attribute analysis has additional computational and time cost.", - "type": "string" - }, - { - "$ref": "../../Common/Parameters.json#/parameters/ImageUrl" - } - ], - "consumes": [ - "application/json" - ], - "responses": { - "200": { - "description": "A successful call returns an array of face entries ranked by face rectangle size in descendingorder. An empty response indicates no faces detected. ", - "schema": { - "$ref": "#/definitions/DetectionResult" - }, - "examples": { - "application/json": [ - { - "faceId": "c5c24a82-6845-4031-9d5d-978df9175426", - "faceRectangle": { - "width": 78, - "height": 78, - "left": 394, - "top": 54 - }, - "faceLandmarks": { - "pupilLeft": { - "x": 412, - "y": 78 - }, - "pupilRight": { - "x": 446, - "y": 74 - }, - "noseTip": { - "x": 437, - "y": 92 - }, - "mouthLeft": { - "x": 417, - "y": 114 - }, - "mouthRight": { - "x": 451, - "y": 109 - }, - "eyebrowLeftOuter": { - "x": 397, - "y": 78 - }, - "eyebrowLeftInner": { - "x": 425, - "y": 70 - }, - "eyeLeftOuter": { - "x": 406, - "y": 80 - }, - "eyeLeftTop": { - "x": 412, - "y": 76 - }, - "eyeLeftBottom": { - "x": 413, - "y": 80 - }, - "eyeLeftInner": { - "x": 418, - "y": 78 - }, - "eyebrowRightInner": { - "x": 4, - "y": 69 - }, - "eyebrowRightOuter": { - "x": 5, - "y": 68 - }, - "eyeRightInner": { - "x": 441, - "y": 75 - }, - "eyeRightTop": { - "x": 446, - "y": 71 - }, - "eyeRightBottom": { - "x": 447, - "y": 75 - }, - "eyeRightOuter": { - "x": 451, - "y": 73 - }, - "noseRootLeft": { - "x": 428, - "y": 77 - }, - "noseRootRight": { - "x": 435, - "y": 75 - }, - "noseLeftAlarTop": { - "x": 428, - "y": 89 - }, - "noseRightAlarTop": { - "x": 442, - "y": 87 - }, - "noseLeftAlarOutTip": { - "x": 424, - "y": 96 - }, - "noseRightAlarOutTip": { - "x": 446, - "y": 92 - }, - "upperLipTop": { - "x": 437, - "y": 105 - }, - "upperLipBottom": { - "x": 437, - "y": 108 - }, - "underLipTop": { - "x": 436, - "y": 111 - }, - "underLipBottom": { - "x": 437, - "y": 114 - } - }, - "faceAttributes": { - "age": 71, - "gender": "male", - "smile": 0.88, - "facialHair": { - "mustache": 0.8, - "beard": 0.1, - "sideburns": 0.02 - } - }, - "glasses": "sunglasses", - "headPose": { - "roll": 2.1, - "yaw": 3, - "pitch": 0 - }, - "emotion": { - "anger": 0.575, - "contempt": 0, - "disgust": 0.006, - "fear": 0.008, - "happiness": 0.394, - "neutral": 0.013, - "sadness": 0, - "surprise": 0.004 - } + "VerifyResult": { + "type": "object", + "required": [ + "isIdentical" + ], + "description": "Result of the verify operation.", + 
"properties": { + "isIdentical": { + "type": "boolean", + "description": "True if the two faces belong to the same person or the face belongs to the person, otherwise false." + }, + "confidence": { + "type": "number", + "description": "\"A number indicates the similarity confidence of whether two faces belong to the same person, or whether the face belongs to the person. By default, isIdentical is set to True if similarity confidence is greater than or equal to 0.5. This is useful for advanced users to override \"isIdentical\" and fine-tune the result on their own data\"", + "minimum": 0, + "maximum": 1 } - ] - } - }, - "default": { - "description": "Error response.", - "schema": { - "$ref": "#/definitions/APIError" } - } }, - "produces": [ - "application/json" - ] - } - }, - "/detect?overload=stream": { - "post": { - "description": "Detect human faces in an image and returns face locations, and optionally with faceIds, landmarks, and attributes.", - "operationId": "Face_DetectInStream", - "parameters": [ - { - "name": "returnFaceId", - "in": "query", - "description": "A value indicating whether the operation should return faceIds of detected faces.", - "type": "boolean", - "default": true - }, - { - "name": "returnFaceLandmarks", - "in": "query", - "description": "A value indicating whether the operation should return landmarks of the detected faces.", - "type": "boolean", - "default": false - }, - { - "name": "returnFaceAttributes", - "in": "query", - "description": "Analyze and return the one or more specified face attributes in the comma-separated string like \"returnFaceAttributes=age,gender\". Supported face attributes include age, gender, headPose, smile, facialHair, glasses and emotion. Note that each face attribute analysis has additional computational and time cost.", - "type": "string" - }, - { - "$ref": "../../Common/Parameters.json#/parameters/ImageStream" - } - ], - "consumes": [ - "application/octet-stream" - ], - "responses": { - "200": { - "description": "A successful call returns an array of face entries ranked by face rectangle size in descendingorder. An empty response indicates no faces detected. 
", - "schema": { - "$ref": "#/definitions/DetectionResult" - }, - "examples": { - "application/json": [ - { - "faceId": "c5c24a82-6845-4031-9d5d-978df9175426", - "faceRectangle": { - "width": 78, - "height": 78, - "left": 394, - "top": 54 - }, - "faceLandmarks": { - "pupilLeft": { - "x": 412, - "y": 78 - }, - "pupilRight": { - "x": 446, - "y": 74 - }, - "noseTip": { - "x": 437, - "y": 92 - }, - "mouthLeft": { - "x": 417, - "y": 114 - }, - "mouthRight": { - "x": 451, - "y": 109 - }, - "eyebrowLeftOuter": { - "x": 397, - "y": 78 - }, - "eyebrowLeftInner": { - "x": 425, - "y": 70 - }, - "eyeLeftOuter": { - "x": 406, - "y": 80 - }, - "eyeLeftTop": { - "x": 412, - "y": 76 - }, - "eyeLeftBottom": { - "x": 413, - "y": 80 - }, - "eyeLeftInner": { - "x": 418, - "y": 78 - }, - "eyebrowRightInner": { - "x": 4, - "y": 69 - }, - "eyebrowRightOuter": { - "x": 5, - "y": 68 - }, - "eyeRightInner": { - "x": 441, - "y": 75 - }, - "eyeRightTop": { - "x": 446, - "y": 71 - }, - "eyeRightBottom": { - "x": 447, - "y": 75 - }, - "eyeRightOuter": { - "x": 451, - "y": 73 - }, - "noseRootLeft": { - "x": 428, - "y": 77 - }, - "noseRootRight": { - "x": 435, - "y": 75 - }, - "noseLeftAlarTop": { - "x": 428, - "y": 89 - }, - "noseRightAlarTop": { - "x": 442, - "y": 87 - }, - "noseLeftAlarOutTip": { - "x": 424, - "y": 96 - }, - "noseRightAlarOutTip": { - "x": 446, - "y": 92 - }, - "upperLipTop": { - "x": 437, - "y": 105 - }, - "upperLipBottom": { - "x": 437, - "y": 108 - }, - "underLipTop": { - "x": 436, - "y": 111 - }, - "underLipBottom": { - "x": 437, - "y": 114 - } - }, - "faceAttributes": { - "age": 71, - "gender": "male", - "smile": 0.88, - "facialHair": { - "mustache": 0.8, - "beard": 0.1, - "sideburns": 0.02 - } - }, - "glasses": "sunglasses", - "headPose": { - "roll": 2.1, - "yaw": 3, - "pitch": 0 - }, - "emotion": { - "anger": 0.575, - "contempt": 0, - "disgust": 0.006, - "fear": 0.008, - "happiness": 0.394, - "neutral": 0.013, - "sadness": 0, - "surprise": 0.004 - } + "CreateFaceListRequest": { + "type": "object", + "description": "Request to create a face list.", + "properties": { + "name": { + "type": "string", + "description": "Name of the face list, maximum length is 128.", + "maxLength": 128 + }, + "userData": { + "type": "string", + "description": "Optional user defined data for the face list. Length should not exceed 16KB.", + "maxLength": 16384 } - ] - } - }, - "default": { - "description": "Error response.", - "schema": { - "$ref": "#/definitions/APIError" } - } }, - "produces": [ - "application/json" - ] - } - }, - "/persongroups/{personGroupId}/persons/{personId}/persistedFaces?overload=url": { - "post": { - "description": "Add a representative face to a person for identification. The input face is specified as an image with a targetFace rectangle.", - "operationId": "Person_AddFace", - "parameters": [ - { - "name": "personGroupId", - "in": "path", - "description": "Specifying the person group containing the target person.", - "required": true, - "type": "string" - }, - { - "name": "personId", - "in": "path", - "description": "Target person that the face is added to.", - "required": true, - "type": "string" - }, - { - "name": "userData", - "in": "query", - "description": "User-specified data about the target face to add for any purpose. The maximum length is 1KB. ", - "type": "string" - }, - { - "name": "targetFace", - "in": "query", - "description": "A face rectangle to specify the target face to be added to a person in the format of \"targetFace=left,top,width,height\". E.g. 
\"targetFace=10,10,100,100\". If there is more than one face in the image, targetFace is required to specify which face to add. No targetFace means there is only one face detected in the entire image. ", - "type": "string" - } - ], - "consumes": [ - "application/json", - "application/octet-stream" - ], - "responses": { - "200": { - "description": "A successful call returns the new persistedFaceId.", - "examples": { - "application/json": { - "persistedFaceId": "B8D802CF-DD8F-4E61-B15C-9E6C5844CCBA" - } - } - }, - "default": { - "description": "Error response.", - "schema": { - "$ref": "#/definitions/APIError" + "GetFaceListResult": { + "type": "object", + "required": [ + "faceListId" + ], + "description": "Result of the GetFaceList operation.", + "properties": { + "faceListId": { + "type": "string", + "description": "faceListId of the target face list.", + "maxLength": 64, + "pattern": "^[a-z0-9-_]+$" + }, + "name": { + "type": "string", + "description": "Face list's display name.", + "maxLength": 128 + }, + "userData": { + "type": "string", + "description": "User-provided data attached to this face list.", + "maxLength": 16384 + } } - } }, - "produces": [ - "application/json" - ] - } - }, - "/persongroups/{personGroupId}/persons/{personId}/persistedFaces?overload=stream": { - "post": { - "description": "Add a representative face to a person for identification. The input face is specified as an image with a targetFace rectangle.", - "operationId": "Person_AddFaceFromStream", - "parameters": [ - { - "name": "personGroupId", - "in": "path", - "description": "Specifying the person group containing the target person.", - "required": true, - "type": "string" - }, - { - "name": "personId", - "in": "path", - "description": "Target person that the face is added to.", - "required": true, - "type": "string" - }, - { - "name": "userData", - "in": "query", - "description": "User-specified data about the target face to add for any purpose. The maximum length is 1KB. ", - "type": "string" - }, - { - "name": "targetFace", - "in": "query", - "description": "A face rectangle to specify the target face to be added to a person, in the format of \"targetFace=left,top,width,height\". E.g. \"targetFace=10,10,100,100\". If there is more than one face in the image, targetFace is required to specify which face to add. No targetFace means there is only one face detected in the entire image. ", - "type": "string" - } - ], - "consumes": [ - "application/json", - "application/octet-stream" - ], - "responses": { - "200": { - "description": "A successful call returns the new persistedFaceId.", - "examples": { - "application/json": { - "persistedFaceId": "B8D802CF-DD8F-4E61-B15C-9E6C5844CCBA" - } - } - }, - "default": { - "description": "Error response.", - "schema": { - "$ref": "#/definitions/APIError" + "FaceListResults": { + "type": "array", + "description": "An array of face list results.", + "items": { + "$ref": "#/definitions/GetFaceListResult" } - } }, - "produces": [ - "application/json" - ] - } - }, - "/facelists/{faceListId}/persistedFaces?overload=url": { - "post": { - "description": "Add a face to a face list. The input face is specified as an image with a targetFace rectangle. It returns a persistedFaceId representing the added face, and persistedFaceId will not expire. 
", - "operationId": "FaceList_AddFace", - "parameters": [ - { - "name": "faceListId", - "in": "path", - "description": "Id referencing a Face List.", - "required": true, - "type": "string", - "maxLength": 64, - "pattern": "^[a-z0-9-_]+$" - }, - { - "name": "userData", - "in": "query", - "description": "User-specified data about the face list for any purpose. The maximum length is 1KB.", - "type": "string" - }, - { - "name": "targetFace", - "in": "query", - "description": "A face rectangle to specify the target face to be added into the face list, in the format of \"targetFace=left,top,width,height\". E.g. \"targetFace=10,10,100,100\". If there is more than one face in the image, targetFace is required to specify which face to add. No targetFace means there is only one face detected in the entire image.", - "type": "string" - } - ], - "consumes": [ - "application/json", - "application/octet-stream" - ], - "responses": { - "200": { - "description": "A successful call returns a new persistedFaceId.", - "examples": { - "application/json": { - "persistedFaceId": "B8D802CF-DD8F-4E61-B15C-9E6C5844CCBA" - } - } - }, - "default": { - "description": "Error response.", - "schema": { - "$ref": "#/definitions/APIError" + "CreatePersonGroupRequest": { + "type": "object", + "description": "Request to create a person group.", + "properties": { + "name": { + "type": "string", + "description": "Name of the face list, maximum length is 128.", + "maxLength": 128 + }, + "userData": { + "type": "string", + "description": "Optional user defined data for the face list. Length should not exceed 16KB.", + "maxLength": 16384 + } } - } }, - "produces": [ - "application/json" - ] - } - }, - "/facelists/{faceListId}/persistedFaces?overload=stream": { - "post": { - "description": "Add a face to a face list. The input face is specified as an image with a targetFace rectangle. It returns a persistedFaceId representing the added face, and persistedFaceId will not expire.", - "operationId": "FaceList_AddFaceFromStream", - "parameters": [ - { - "name": "faceListId", - "in": "path", - "description": "Id referencing a Face List.", - "required": true, - "type": "string", - "maxLength": 64, - "pattern": "^[a-z0-9-_]+$" - }, - { - "name": "userData", - "in": "query", - "description": "User-specified data about the face list for any purpose. The maximum length is 1KB.", - "type": "string" - }, - { - "name": "targetFace", - "in": "query", - "description": "A face rectangle to specify the target face to be added into the face list, in the format of \"targetFace=left,top,width,height\". E.g. \"targetFace=10,10,100,100\". If there is more than one face in the image, targetFace is required to specify which face to add. 
No targetFace means there is only one face detected in the entire image.", - "type": "string" - } - ], - "consumes": [ - "application/json", - "application/octet-stream" - ], - "responses": { - "200": { - "description": "A successful call returns a new persistedFaceId.", - "examples": { - "application/json": { - "persistedFaceId": "B8D802CF-DD8F-4E61-B15C-9E6C5844CCBA" - } - } - }, - "default": { - "description": "Error response.", - "schema": { - "$ref": "#/definitions/APIError" + "PersonGroupResult": { + "type": "object", + "required": [ + "personGroupId" + ], + "description": "Person group object.", + "properties": { + "personGroupId": { + "type": "string", + "description": "faceListId of the target face list.", + "maxLength": 128 + }, + "name": { + "type": "string", + "description": "Face list's display name.", + "maxLength": 128 + }, + "userData": { + "type": "string", + "description": "User-provided data attached to this face list.", + "maxLength": 16384 + } } - } - }, - "produces": [ - "application/json" - ] - } - } - }, - "definitions": { - "APIError": { - "type": "object", - "description": "Error information returned by the API", - "properties": { - "error": { - "$ref": "#/definitions/Error" - } - } - }, - "Error": { - "type": "object", - "description": "Error body.", - "properties": { - "code": { - "type": "string" - }, - "message": { - "type": "string" - } - } - }, - "DetectionResult": { - "type": "array", - "items": { - "$ref": "#/definitions/DetectedFace" - } - }, - "DetectedFace": { - "type": "object", - "required": [ - "faceRectangle" - ], - "description": "Detected Face object.", - "properties": { - "faceId": { - "type": "string", - "maxLength": 64 - }, - "faceRectangle": { - "$ref": "#/definitions/FaceRectangle" - }, - "faceLandmarks": { - "$ref": "#/definitions/FaceLandmarks" - } - } - }, - "FaceRectangle": { - "type": "object", - "required": [ - "width", - "height", - "left", - "top" - ], - "description": "A rectangle within which a face can be found", - "properties": { - "width": { - "type": "integer", - "format": "int32", - "description": "The width of the rectangle, in pixels." - }, - "height": { - "type": "integer", - "format": "int32", - "description": "The height of the rectangle, in pixels." - }, - "left": { - "type": "integer", - "format": "int32", - "description": "The distance from the left edge if the image to the left edge of the rectangle, in pixels." - }, - "top": { - "type": "integer", - "format": "int32", - "description": "The distance from the top edge if the image to the top edge of the rectangle, in pixels." - } - } - }, - "Position": { - "type": "object", - "required": [ - "x", - "y" - ], - "description": "Coordinates within an image", - "properties": { - "x": { - "type": "integer", - "format": "int32", - "description": "The horizontal component, in pixels." - }, - "y": { - "type": "integer", - "format": "int32", - "description": "The vertical component, in pixels." 
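The add-face operations above take targetFace as a "left,top,width,height" query value, while Face - Detect returns a FaceRectangle with the same four fields, so the two compose directly. A hedged sketch against the stream overload of FaceList - Add Face, reusing the earlier ENDPOINT and HEADERS placeholders; the file name and userData are illustrative.

```python
# FaceRectangle from a previous detection result (sample values).
face_rectangle = {"width": 78, "height": 78, "left": 394, "top": 54}
target_face = "{left},{top},{width},{height}".format(**face_rectangle)

params = {"targetFace": target_face, "userData": "profile photo"}
with open("photo.jpg", "rb") as f:
    # The stream overload consumes application/octet-stream.
    requests.post(
        f"{ENDPOINT}/facelists/sample_list/persistedFaces",
        headers={**HEADERS, "Content-Type": "application/octet-stream"},
        params=params,
        data=f.read(),
    )
```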
- } - } - }, - "FaceLandmarks": { - "type": "object", - "description": "A collection of 27-point face landmarks pointing to the important positions of face components.", - "properties": { - "pupilLeft": { - "$ref": "#/definitions/Position" - }, - "pupilRight": { - "$ref": "#/definitions/Position" - }, - "noseTip": { - "$ref": "#/definitions/Position" - }, - "mouthLeft": { - "$ref": "#/definitions/Position" - }, - "mouthRight": { - "$ref": "#/definitions/Position" - }, - "eyebrowLeftOuter": { - "$ref": "#/definitions/Position" - }, - "eyebrowLeftInner": { - "$ref": "#/definitions/Position" - }, - "eyeLeftOuter": { - "$ref": "#/definitions/Position" - }, - "eyeLeftTop": { - "$ref": "#/definitions/Position" - }, - "eyeLeftBottom": { - "$ref": "#/definitions/Position" - }, - "eyeLeftInner": { - "$ref": "#/definitions/Position" - }, - "eyebrowRightInner": { - "$ref": "#/definitions/Position" }, - "eyebrowRightOuter": { - "$ref": "#/definitions/Position" - }, - "eyeRightInner": { - "$ref": "#/definitions/Position" - }, - "eyeRightTop": { - "$ref": "#/definitions/Position" - }, - "eyeRightBottom": { - "$ref": "#/definitions/Position" - }, - "eyeRightOuter": { - "$ref": "#/definitions/Position" - }, - "noseRootLeft": { - "$ref": "#/definitions/Position" - }, - "noseRootRight": { - "$ref": "#/definitions/Position" - }, - "noseLeftAlarTop": { - "$ref": "#/definitions/Position" - }, - "noseRightAlarTop": { - "$ref": "#/definitions/Position" - }, - "noseLeftAlarOutTip": { - "$ref": "#/definitions/Position" - }, - "noseRightAlarOutTip": { - "$ref": "#/definitions/Position" - }, - "upperLipTop": { - "$ref": "#/definitions/Position" - }, - "upperLipBottom": { - "$ref": "#/definitions/Position" - }, - "underLipTop": { - "$ref": "#/definitions/Position" - }, - "underLipBottom": { - "$ref": "#/definitions/Position" - } - } - }, - "FaceAttributes": { - "type": "object", - "description": "Face Attributes", - "properties": { - "age": { - "type": "number", - "description": "Age in years" - }, - "gender": { - "type": "string", - "description": "Gender: male or female", - "enum": [ - "male", - "female" - ] - }, - "smile": { - "type": "number", - "description": "Smile intensity, a number between [0,1] ", - "minimum": 0, - "maximum": 1 - }, - "glasses": { - "type": "string", - "description": "Glasses type. Possible values are 'noGlasses', 'readingGlasses', 'sunglasses', 'swimmingGoggles'. 
", - "enum": [ - "noGlasses", - "readingGlasses", - "sunglasses", - "swimmingGoggles" - ] - }, - "facialHair": { - "$ref": "#/definitions/FacialHairProperties" - }, - "headPose": { - "$ref": "#/definitions/HeadPoseProperties" - }, - "emotion": { - "$ref": "#/definitions/EmotionProperties" - } - } - }, - "FacialHairProperties": { - "type": "object", - "description": "Properties describing facial hair attributes.", - "properties": { - "mustache": { - "type": "number", - "minimum": 0, - "maximum": 1 - }, - "beard": { - "type": "number", - "minimum": 0, - "maximum": 1 - }, - "sideburns": { - "type": "number", - "minimum": 0, - "maximum": 1 - } - } - }, - "HeadPoseProperties": { - "type": "object", - "description": "Properties indicating head pose of the face.", - "properties": { - "roll": { - "type": "number" - }, - "yaw": { - "type": "number" - }, - "pitch": { - "type": "number" - } - } - }, - "EmotionProperties": { - "type": "object", - "description": "Properties describing facial emotion.", - "properties": { - "anger": { - "type": "number", - "minimum": 0, - "maximum": 1 - }, - "contempt": { - "type": "number", - "minimum": 0, - "maximum": 1 - }, - "disgust": { - "type": "number", - "minimum": 0, - "maximum": 1 - }, - "fear": { - "type": "number", - "minimum": 0, - "maximum": 1 - }, - "happiness": { - "type": "number", - "minimum": 0, - "maximum": 1 - }, - "neutral": { - "type": "number", - "minimum": 0, - "maximum": 1 - }, - "sadness": { - "type": "number", - "minimum": 0, - "maximum": 1 - }, - "surprise": { - "type": "number", - "minimum": 0, - "maximum": 1 - } - } - }, - "FindSimilarRequest": { - "type": "object", - "required": [ - "faceId" - ], - "description": "Request body for find similar operation.", - "properties": { - "faceId": { - "type": "string", - "description": "FaceId of the query face. User needs to call Face - Detect first to get a valid faceId. Note that this faceId is not persisted and will expire 24 hours after the detection call", - "maxLength": 64 - }, - "faceListId": { - "type": "string", - "description": "An existing user-specified unique candidate face list, created in Face List - Create a Face List. Face list contains a set of persistedFaceIds which are persisted and will never expire. Parameter faceListId and faceIds should not be provided at the same time", - "maxLength": 64, - "pattern": "^[a-z0-9-_]+$" - }, - "faceIds": { - "type": "array", - "description": "An array of candidate faceIds. All of them are created by Face - Detect and the faceIds will expire 24 hours after the detection call.", - "maxItems": 1000, - "items": { - "type": "string", - "maxLength": 64 - } - }, - "maxNumOfCandidatesReturned": { - "type": "integer", - "description": "The number of top similar faces returned. The valid range is [1, 1000].", - "default": 20, - "minimum": 1, - "maximum": 1000 - }, - "mode": { - "type": "string", - "description": "Similar face searching mode. It can be \"matchPerson\" or \"matchFace\".", - "default": "matchPerson", - "enum": [ - "matchPerson", - "matchFace" - ] - } - } - }, - "SimilarFaceResults": { - "type": "array", - "items": { - "$ref": "#/definitions/SimilarFaceResult" - } - }, - "SimilarFaceResult": { - "type": "object", - "required": [ - "faceId", - "persistedFaceId" - ], - "description": "Response body for find similar face operation.", - "properties": { - "faceId": { - "type": "string", - "description": "faceId of candidate face when find by faceIds. 
faceId is created by Face - Detect and will expire 24 hours after the detection call", - "maxLength": 64 - }, - "persistedFaceId": { - "type": "string", - "description": "persistedFaceId of candidate face when find by faceListId. persistedFaceId in face list is persisted and will not expire. As showed in below response" - }, - "confidence": { - "type": "number", - "description": "Similarity confidence of the candidate face. The higher confidence, the more similar. Range between [0,1" - } - } - }, - "GroupRequest": { - "type": "object", - "required": [ - "faceIds" - ], - "description": "Request body for group request.", - "properties": { - "faceIds": { - "type": "array", - "description": "Array of candidate faceId created by Face - Detect. The maximum is 1000 faces", - "maxItems": 1000, - "items": { - "type": "string", - "maxLength": 64 - } - } - } - }, - "GroupResponse": { - "type": "object", - "required": [ - "groups" - ], - "description": "An array of face groups based on face similarity.", - "properties": { - "groups": { - "type": "array", - "description": "A partition of the original faces based on face similarity. Groups are ranked by number of faces", - "items": { + "PersonGroupResults": { "type": "array", + "description": "An array of person groups.", "items": { - "type": "string" + "$ref": "#/definitions/PersonGroupResult" } - } - }, - "messyGroup": { - "type": "array", - "description": "Face ids array of faces that cannot find any similar faces from original faces.", - "items": { - "type": "string" - } - } - } - }, - "IdentifyRequest": { - "type": "object", - "required": [ - "personGroupId", - "faceIds" - ], - "description": "Request body for identify face operation.", - "properties": { - "personGroupId": { - "type": "string", - "description": "personGroupId of the target person group, created by PersonGroups.Create" - }, - "faceIds": { - "type": "array", - "description": "Array of candidate faceId created by Face - Detect.", - "maxItems": 1000, - "items": { - "type": "string" - } - }, - "maxNumOfCandidatesReturned": { - "type": "integer", - "description": "The number of top similar faces returned.", - "default": 1, - "minimum": 1, - "maximum": 1000 - }, - "confidenceThreshold": { - "type": "number", - "description": "Confidence threshold of identification, used to judge whether one face belong to one person.", - "minimum": 0, - "maximum": 1 - } - } - }, - "IdentifyResult": { - "type": "array", - "items": { - "$ref": "#/definitions/IdentifyResultItem" - } - }, - "IdentifyResultItem": { - "type": "object", - "required": [ - "faceId", - "candidates" - ], - "description": "Response body for identify face operation.", - "properties": { - "faceId": { - "type": "string", - "description": "faceId of the query face", - "maxLength": 64 - }, - "candidates": { - "type": "array", - "items": { - "$ref": "#/definitions/IdentifyResultCandidate" - } - } - } - }, - "IdentifyResultCandidate": { - "type": "object", - "required": [ - "personId", - "confidence" - ], - "description": "All possible faces that may qualify.", - "properties": { - "personId": { - "type": "string", - "description": "Id of candidate" - }, - "confidence": { - "type": "number", - "description": "Confidence level in the candidate person: a float number between 0.0 and 1.0.", - "minimum": 0, - "maximum": 1 - } - } - }, - "VerifyRequest": { - "type": "object", - "required": [ - "faceId", - "personId", - "personGroupId" - ], - "description": "Request body for verify operation.", - "properties": { - "faceId": { - "type": "string", - 
"description": "faceId the face, comes from Face - Detect", - "maxLength": 64 - }, - "personId": { - "type": "string", - "description": "Specify a certain person in a person group. personId is created in Persons.Create." - }, - "personGroupId": { - "type": "string", - "description": "Using existing personGroupId and personId for fast loading a specified person. personGroupId is created in Person Groups.Create." - } - } - }, - "VerifyResult": { - "type": "object", - "required": [ - "isIdentical" - ], - "description": "Result of the verify operation.", - "properties": { - "isIdentical": { - "type": "boolean", - "description": "True if the two faces belong to the same person or the face belongs to the person, otherwise false." - }, - "confidence": { - "type": "number", - "description": "\"A number indicates the similarity confidence of whether two faces belong to the same person, or whether the face belongs to the person. By default, isIdentical is set to True if similarity confidence is greater than or equal to 0.5. This is useful for advanced users to override \"isIdentical\" and fine-tune the result on their own data\"", - "minimum": 0, - "maximum": 1 - } - } - }, - "CreateFaceListRequest": { - "type": "object", - "description": "Request to create a face list.", - "properties": { - "name": { - "type": "string", - "description": "Name of the face list, maximum length is 128.", - "maxLength": 128 - }, - "userData": { - "type": "string", - "description": "Optional user defined data for the face list. Length should not exceed 16KB.", - "maxLength": 16384 - } - } - }, - "GetFaceListResult": { - "type": "object", - "required": [ - "faceListId" - ], - "description": "Result of the GetFaceList operation.", - "properties": { - "faceListId": { - "type": "string", - "description": "faceListId of the target face list.", - "maxLength": 64, - "pattern": "^[a-z0-9-_]+$" - }, - "name": { - "type": "string", - "description": "Face list's display name.", - "maxLength": 128 - }, - "userData": { - "type": "string", - "description": "User-provided data attached to this face list.", - "maxLength": 16384 - } - } - }, - "FaceListResults": { - "type": "array", - "description": "An array of face list results.", - "items": { - "$ref": "#/definitions/GetFaceListResult" - } - }, - "CreatePersonGroupRequest": { - "type": "object", - "description": "Request to create a person group.", - "properties": { - "name": { - "type": "string", - "description": "Name of the face list, maximum length is 128.", - "maxLength": 128 - }, - "userData": { - "type": "string", - "description": "Optional user defined data for the face list. 
Length should not exceed 16KB.",
+                    "maxLength": 16384
+                }
+            }
        },
+        "PersonGroupResult": {
+            "type": "object",
+            "required": [
+                "personGroupId"
+            ],
+            "description": "Person group object.",
+            "properties": {
+                "personGroupId": {
+                    "type": "string",
+                    "description": "personGroupId of the target person group.",
+                    "maxLength": 128
+                },
+                "name": {
+                    "type": "string",
+                    "description": "Person group's display name.",
+                    "maxLength": 128
+                },
+                "userData": {
+                    "type": "string",
+                    "description": "User-provided data attached to this person group.",
+                    "maxLength": 16384
+                }
+            }
        },
+        "PersonGroupResults": {
+            "type": "array",
+            "description": "An array of person groups.",
+            "items": {
+                "$ref": "#/definitions/PersonGroupResult"
+            }
        },
+        "CreatePersonRequest": {
+            "type": "object",
+            "description": "Request to create a person object.",
+            "properties": {
+                "name": {
+                    "type": "string",
+                    "description": "Display name of the target person. The maximum length is 128.",
+                    "maxLength": 128
+                },
+                "userData": {
+                    "type": "string",
+                    "description": "Optional fields for user-provided data attached to a person. Size limit is 16KB.",
+                    "maxLength": 16384
+                }
+            }
        },
+        "CreatePersonResult": {
+            "type": "object",
+            "required": [
+                "personId"
+            ],
+            "description": "Result of creating person.",
+            "properties": {
+                "personId": {
+                    "type": "string",
+                    "description": "personId of the newly created person."
+                }
+            }
        },
+        "PersonResult": {
+            "type": "object",
+            "required": [
+                "personId"
+            ],
+            "description": "Person object.",
+            "properties": {
+                "personId": {
+                    "type": "string",
+                    "description": "personId of the target person."
+                },
+                "persistedFaceIds": {
+                    "type": "array",
+                    "description": "persistedFaceIds of registered faces in the person. These persistedFaceIds are returned from Person - Add a Person Face, and will not expire.",
+                    "items": {
+                        "type": "string"
+                    }
+                },
+                "name": {
+                    "type": "string",
+                    "description": "Person's display name."
+                },
+                "userData": {
+                    "type": "string",
+                    "description": "User-provided data attached to this person."
+                }
+            }
        },
+        "PersonResults": {
+            "type": "array",
+            "description": "An array of PersonResults.",
+            "items": {
+                "$ref": "#/definitions/PersonResult"
+            }
        },
+        "PersonFaceResult": {
+            "type": "object",
+            "required": [
+                "persistedFaceId"
+            ],
+            "description": "PersonFace object.",
+            "properties": {
+                "persistedFaceId": {
+                    "type": "string",
+                    "description": "The persistedFaceId of the target face, which is persisted and will not expire. Unlike the faceId created by Face - Detect, which expires 24 hours after the detection call."
+                },
+                "userData": {
+                    "type": "string",
+                    "description": "User-provided data attached to the face."
+                }
+            }
        },
+        "UpdatePersonFaceDataRequest": {
+            "type": "object",
+            "description": "Request to update person face data.",
+            "properties": {
+                "userData": {
+                    "type": "string",
+                    "description": "User-provided data attached to the face. The size limit is 1KB.",
+                    "maxLength": 1024
+                }
+            }
        },
+        "TrainingStatus": {
+            "type": "object",
+            "required": [
+                "status"
+            ],
+            "description": "Training status object.",
+            "properties": {
+                "status": {
+                    "type": "string",
+                    "description": "Training status: notstarted, running, succeeded, failed. If the training process is waiting to start, the status is notstarted. If the training is ongoing, the status is running. Status succeeded means this person group is ready for Face - Identify. 
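The TrainingStatus object above is consumed by a train-then-poll workflow against the /persongroups/{personGroupId}/train and /persongroups/{personGroupId}/training paths shown earlier in this diff. A hedged sketch, reusing the earlier placeholders; the polling interval is an arbitrary illustrative choice.

```python
import time

group = "sample_group"

# Queue the training task; a successful call returns 202 with an empty body.
requests.post(f"{ENDPOINT}/persongroups/{group}/train", headers=HEADERS)

# Poll until training reaches a terminal state. Note the spec's enum spells
# the waiting state "nonstarted" even though the description says notstarted.
while True:
    status = requests.get(
        f"{ENDPOINT}/persongroups/{group}/training", headers=HEADERS
    ).json()
    if status["status"] in ("succeeded", "failed"):
        break
    time.sleep(1)

if status["status"] == "failed":
    # message is populated only when training failed.
    raise RuntimeError(status["message"])
```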
Status failed is often caused by no person or no persisted face exist in the person group", + "enum": [ + "nonstarted", + "running", + "succeeded", + "failed" + ] + }, + "createdDateTime": { + "type": "string", + "format": "date-time", + "description": "A combined UTC date and time string that describes person group created time.", + "x-ms-client-name": "created" + }, + "lastActionDateTime": { + "type": "string", + "format": "date-time", + "description": "Person group last modify time in the UTC, could be null value when the person group is not successfully trained.", + "x-ms-client-name": "lastAction" + }, + "message": { + "type": "string", + "description": "Show failure message when training failed (omitted when training succeed)." + } + } } - } } - } } \ No newline at end of file diff --git a/specification/cognitiveservices/data-plane/TextAnalytics/v2.0/TextAnalytics.json b/specification/cognitiveservices/data-plane/TextAnalytics/v2.0/TextAnalytics.json index f31fb9caf95b..e4f59b844ca8 100644 --- a/specification/cognitiveservices/data-plane/TextAnalytics/v2.0/TextAnalytics.json +++ b/specification/cognitiveservices/data-plane/TextAnalytics/v2.0/TextAnalytics.json @@ -1,454 +1,454 @@ { - "swagger": "2.0", - "info": { - "version": "v2.0", - "contact": { - "name": "Microsoft Cognitive Services", - "url": "https://azure.microsoft.com/en-us/services/cognitive-services/text-analytics/", - "email": "mlapi@microsoft.com" + "swagger": "2.0", + "info": { + "version": "v2.0", + "contact": { + "name": "Microsoft Cognitive Services", + "url": "https://azure.microsoft.com/en-us/services/cognitive-services/text-analytics/", + "email": "mlapi@microsoft.com" + }, + "title": "Text Analytics API", + "description": "The Text Analytics API is a suite of text analytics web services built with best-in-class Microsoft machine learning algorithms. The API can be used to analyze unstructured text for tasks such as sentiment analysis, key phrase extraction and language detection. No training data is needed to use this API; just bring your text data. This API uses advanced natural language processing techniques to deliver best in class predictions. Further documentation can be found in https://docs.microsoft.com/en-us/azure/cognitive-services/text-analytics/overview" + }, + "x-ms-parameterized-host": { + "hostTemplate": "{AzureRegion}.api.cognitive.microsoft.com", + "parameters": [ + { + "$ref": "../../Common/BasicRegions.json#/parameters/AzureRegion" + } + ] + }, + "securityDefinitions": { + "apim_key": { + "type": "apiKey", + "name": "Ocp-Apim-Subscription-Key", + "in": "header" + } + }, + "security": [ + { + "apim_key": [] + } + ], + "basePath": "/text/analytics", + "schemes": [ + "https" + ], + "paths": { + "/v2.0/keyPhrases": { + "post": { + "tags": [ + "Key Phrases" + ], + "summary": "The API returns a list of strings denoting the key talking points in the input text.", + "description": "We employ techniques from Microsoft Office's sophisticated Natural Language Processing toolkit. See the Text Analytics Documentation for details about the languages that are supported by key phrase extraction.", + "operationId": "Key Phrases", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "parameters": [ + { + "name": "input", + "in": "body", + "description": "Collection of documents to analyze. 
Documents can now contain a language field to indicate the text language", + "required": true, + "schema": { + "$ref": "#/definitions/MultiLanguageBatchInput" + } + } + ], + "responses": { + "200": { + "description": "A successful response results in 0 or more key phrases identified in each valid document", + "schema": { + "$ref": "#/definitions/KeyPhraseBatchResult" + } + }, + "default": { + "description": "Error Response", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-ms-examples": { + "Successful Key Phrase request": { + "$ref": "./examples//SuccessfulKeyPhrasesRequest.json" + } }, - "title": "Text Analytics API", - "description": "The Text Analytics API is a suite of text analytics web services built with best-in-class Microsoft machine learning algorithms. The API can be used to analyze unstructured text for tasks such as sentiment analysis, key phrase extraction and language detection. No training data is needed to use this API; just bring your text data. This API uses advanced natural language processing techniques to deliver best in class predictions. Further documentation can be found in https://docs.microsoft.com/en-us/azure/cognitive-services/text-analytics/overview" + "deprecated": false + } }, - "x-ms-parameterized-host": { - "hostTemplate": "{AzureRegion}.api.cognitive.microsoft.com", + "/v2.0/languages": { + "post": { + "tags": [ + "Detect Language" + ], + "summary": "The API returns the detected language and a numeric score between 0 and 1.", + "description": "Scores close to 1 indicate 100% certainty that the identified language is true. A total of 120 languages are supported.", + "operationId": "Detect Language", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], "parameters": [ - { - "$ref": "../../Common/BasicRegions.json#/parameters/AzureRegion" + { + "name": "input", + "in": "body", + "description": "Collection of documents to analyze.", + "required": true, + "schema": { + "$ref": "#/definitions/BatchInput" } - ] + }, + { + "name": "numberOfLanguagesToDetect", + "in": "query", + "description": "(Optional. Deprecated) Number of languages to detect. Set to 1 by default. Irrespective of the value, the language with the highest score is returned.", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "responses": { + "200": { + "description": "A successful call results in the detected language with the highest probability for each valid document", + "schema": { + "$ref": "#/definitions/LanguageBatchResult" + } + }, + "default": { + "description": "Error Response", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-ms-examples": { + "Successful Key Phrase request": { + "$ref": "./examples//SuccessfulLanguagesRequest.json" + } + }, + "deprecated": false + } }, - "securityDefinitions": { - "apim_key": { - "type": "apiKey", - "name": "Ocp-Apim-Subscription-Key", - "in": "header" + "/v2.0/sentiment": { + "post": { + "tags": [ + "Sentiment" + ], + "summary": "The API returns a numeric score between 0 and 1.", + "description": "Scores close to 1 indicate positive sentiment, while scores close to 0 indicate negative sentiment. Sentiment score is generated using classification techniques. The input features to the classifier include n-grams, features generated from part-of-speech tags, and word embeddings. 
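A minimal sketch of the Key Phrases call defined above, assuming the same region-host convention as the Face examples; TA, the key header, and the sample document are placeholders. The request body is a MultiLanguageBatchInput and the response a KeyPhraseBatchResult, both defined later in this file.

```python
import requests

TA = "https://westus.api.cognitive.microsoft.com/text/analytics"
HEADERS = {"Ocp-Apim-Subscription-Key": "<subscription-key>"}

body = {
    "documents": [
        {
            "id": "1",
            "language": "en",
            "text": "Hello world. This is some input text that I love.",
        }
    ]
}
resp = requests.post(f"{TA}/v2.0/keyPhrases", headers=HEADERS, json=body).json()

# Each KeyPhraseBatchResultItem pairs a document id with its key phrases.
for doc in resp["documents"]:
    print(doc["id"], doc["keyPhrases"])
```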
See the Text Analytics Documentation for details about the languages that are supported by sentiment analysis.", + "operationId": "Sentiment", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "parameters": [ + { + "name": "input", + "in": "body", + "description": "Collection of documents to analyze.", + "required": true, + "schema": { + "$ref": "#/definitions/MultiLanguageBatchInput" + } + } + ], + "responses": { + "200": { + "description": "A successful call results in each valid document getting a sentiment score between 0 and 1", + "schema": { + "$ref": "#/definitions/SentimentBatchResult" + } + }, + "default": { + "description": "Error Response", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-ms-examples": { + "Successful Key Phrase request": { + "$ref": "./examples//SuccessfulSentimentRequest.json" + } + }, + "deprecated": false + } + } + }, + "definitions": { + "MultiLanguageBatchInput": { + "type": "object", + "properties": { + "documents": { + "type": "array", + "items": { + "$ref": "#/definitions/MultiLanguageInput" + } } + }, + "example": { + "documents": [ + { + "language": "en", + "id": "1", + "text": "Hello world. This is some input text that I love." + }, + { + "language": "fr", + "id": "2", + "text": "Bonjour tout le monde" + }, + { + "language": "es", + "id": "3", + "text": "La carretera estaba atascada. Había mucho tráfico el día de ayer." + } + ] + } }, - "security": [ - { - "apim_key": [] - } - ], - "basePath": "/text/analytics", - "schemes": [ - "https" - ], - "paths": { - "/v2.0/keyPhrases": { - "post": { - "tags": [ - "Key Phrases" - ], - "summary": "The API returns a list of strings denoting the key talking points in the input text.", - "description": "We employ techniques from Microsoft Office's sophisticated Natural Language Processing toolkit. See the Text Analytics Documentation for details about the languages that are supported by key phrase extraction.", - "operationId": "Key Phrases", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "parameters": [ - { - "name": "input", - "in": "body", - "description": "Collection of documents to analyze. Documents can now contain a language field to indicate the text language", - "required": true, - "schema": { - "$ref": "#/definitions/MultiLanguageBatchInput" - } - } - ], - "responses": { - "200": { - "description": "A successful response results in 0 or more key phrases identified in each valid document", - "schema": { - "$ref": "#/definitions/KeyPhraseBatchResult" - } - }, - "default": { - "description": "Error Response", - "schema": { - "$ref": "#/definitions/ErrorResponse" - } - } - }, - "x-ms-examples": { - "Successful Key Phrase request": { - "$ref": "./examples//SuccessfulKeyPhrasesRequest.json" - } - }, - "deprecated": false - } + "MultiLanguageInput": { + "type": "object", + "properties": { + "language": { + "description": "This is the 2 letter ISO 639-1 representation of a language. For example, use \"en\" for English; \"es\" for Spanish etc.,", + "type": "string" }, - "/v2.0/languages": { - "post": { - "tags": [ - "Detect Language" - ], - "summary": "The API returns the detected language and a numeric score between 0 and 1.", - "description": "Scores close to 1 indicate 100% certainty that the identified language is true. 
A total of 120 languages are supported.", - "operationId": "Detect Language", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "parameters": [ - { - "name": "input", - "in": "body", - "description": "Collection of documents to analyze.", - "required": true, - "schema": { - "$ref": "#/definitions/BatchInput" - } - }, - { - "name": "numberOfLanguagesToDetect", - "in": "query", - "description": "(Optional. Deprecated) Number of languages to detect. Set to 1 by default. Irrespective of the value, the language with the highest score is returned.", - "required": false, - "type": "integer", - "format": "int32" - } - ], - "responses": { - "200": { - "description": "A successful call results in the detected language with the highest probability for each valid document", - "schema": { - "$ref": "#/definitions/LanguageBatchResult" - } - }, - "default": { - "description": "Error Response", - "schema": { - "$ref": "#/definitions/ErrorResponse" - } - } - }, - "x-ms-examples": { - "Successful Key Phrase request": { - "$ref": "./examples//SuccessfulLanguagesRequest.json" - } - }, - "deprecated": false - } + "id": { + "description": "Unique, non-empty document identifier.", + "type": "string" }, - "/v2.0/sentiment": { - "post": { - "tags": [ - "Sentiment" - ], - "summary": "The API returns a numeric score between 0 and 1.", - "description": "Scores close to 1 indicate positive sentiment, while scores close to 0 indicate negative sentiment. Sentiment score is generated using classification techniques. The input features to the classifier include n-grams, features generated from part-of-speech tags, and word embeddings. See the Text Analytics Documentation for details about the languages that are supported by sentiment analysis.", - "operationId": "Sentiment", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "parameters": [ - { - "name": "input", - "in": "body", - "description": "Collection of documents to analyze.", - "required": true, - "schema": { - "$ref": "#/definitions/MultiLanguageBatchInput" - } - } - ], - "responses": { - "200": { - "description": "A successful call results in each valid document getting a sentiment score between 0 and 1", - "schema": { - "$ref": "#/definitions/SentimentBatchResult" - } - }, - "default": { - "description": "Error Response", - "schema": { - "$ref": "#/definitions/ErrorResponse" - } - } - }, - "x-ms-examples": { - "Successful Key Phrase request": { - "$ref": "./examples//SuccessfulSentimentRequest.json" - } - }, - "deprecated": false - } + "text": { + "type": "string" } + } }, - "definitions": { - "MultiLanguageBatchInput": { - "type": "object", - "properties": { - "documents": { - "type": "array", - "items": { - "$ref": "#/definitions/MultiLanguageInput" - } - } - }, - "example": { - "documents": [ - { - "language": "en", - "id": "1", - "text": "Hello world. This is some input text that I love." - }, - { - "language": "fr", - "id": "2", - "text": "Bonjour tout le monde" - }, - { - "language": "es", - "id": "3", - "text": "La carretera estaba atascada. Había mucho tráfico el día de ayer." 
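For completeness, the Detect Language and Sentiment operations described above can be exercised the same way; TA and HEADERS are the placeholders from the previous sketch. The per-document sentiment score field is assumed from the v2.0 result shape rather than stated in this hunk.

```python
# Detect Language takes plain BatchInput documents (id + text only).
docs = {"documents": [{"id": "1", "text": "Bonjour tout le monde"}]}
langs = requests.post(f"{TA}/v2.0/languages", headers=HEADERS, json=docs).json()
print(langs)  # LanguageBatchResult: detected language(s) per document

# Sentiment takes MultiLanguageBatchInput: each document carries its language.
scored = {
    "documents": [
        {
            "id": "1",
            "language": "en",
            "text": "Hello world. This is some input text that I love.",
        }
    ]
}
sentiment = requests.post(f"{TA}/v2.0/sentiment", headers=HEADERS, json=scored).json()
for doc in sentiment["documents"]:
    # Assumed field name: score near 1 is positive, near 0 is negative.
    print(doc["id"], doc["score"])
```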
-                    }
-                ]
-            }
-        },
+    "MultiLanguageInput": {
+      "type": "object",
+      "properties": {
+        "language": {
+          "description": "This is the 2 letter ISO 639-1 representation of a language. For example, use \"en\" for English; \"es\" for Spanish etc.,",
+          "type": "string"
+        },
+        "id": {
+          "description": "Unique, non-empty document identifier.",
+          "type": "string"
+        },
+        "text": {
+          "type": "string"
+        }
+      }
+    },
-        "MultiLanguageInput": {
-            "type": "object",
-            "properties": {
-                "language": {
-                    "description": "This is the 2 letter ISO 639-1 representation of a language. For example, use \"en\" for English; \"es\" for Spanish etc.,",
-                    "type": "string"
-                },
-                "id": {
-                    "description": "Unique, non-empty document identifier.",
-                    "type": "string"
-                },
-                "text": {
-                    "type": "string"
-                }
-            }
-        },
+    "Object": {
+      "type": "object",
+      "properties": {}
+    },
-        "Object": {
-            "type": "object",
-            "properties": {}
-        },
+    "KeyPhraseBatchResult": {
+      "type": "object",
+      "properties": {
+        "documents": {
+          "type": "array",
+          "readOnly": true,
+          "items": {
+            "$ref": "#/definitions/KeyPhraseBatchResultItem"
+          }
+        },
+        "errors": {
+          "type": "array",
+          "readOnly": true,
+          "items": {
+            "$ref": "#/definitions/ErrorRecord"
+          }
+        }
+      }
+    },
-        "KeyPhraseBatchResult": {
-            "type": "object",
-            "properties": {
-                "documents": {
-                    "type": "array",
-                    "readOnly": true,
-                    "items": {
-                        "$ref": "#/definitions/KeyPhraseBatchResultItem"
-                    }
-                },
-                "errors": {
-                    "type": "array",
-                    "readOnly": true,
-                    "items": {
-                        "$ref": "#/definitions/ErrorRecord"
-                    }
-                }
-            }
-        },
+    "KeyPhraseBatchResultItem": {
+      "type": "object",
+      "properties": {
+        "keyPhrases": {
+          "description": "A list of representative words or phrases. The number of key phrases returned is proportional to the number of words in the input document.",
+          "type": "array",
+          "readOnly": true,
+          "items": {
+            "type": "string"
+          }
+        },
+        "id": {
+          "description": "Unique document identifier.",
+          "type": "string",
+          "readOnly": true
+        }
+      }
+    },
-        "KeyPhraseBatchResultItem": {
-            "type": "object",
-            "properties": {
-                "keyPhrases": {
-                    "description": "A list of representative words or phrases. The number of key phrases returned is proportional to the number of words in the input document.",
-                    "type": "array",
-                    "readOnly": true,
-                    "items": {
-                        "type": "string"
-                    }
-                },
-                "id": {
-                    "description": "Unique document identifier.",
-                    "type": "string",
-                    "readOnly": true
-                }
-            }
-        },
+    "ErrorRecord": {
+      "type": "object",
+      "properties": {
+        "id": {
+          "description": "Input document unique identifier the error refers to.",
+          "type": "string"
+        },
+        "message": {
+          "description": "Error message.",
+          "type": "string"
+        }
+      }
+    },
-        "ErrorRecord": {
-            "type": "object",
-            "properties": {
-                "id": {
-                    "description": "Input document unique identifier the error refers to.",
-                    "type": "string"
-                },
-                "message": {
-                    "description": "Error message.",
-                    "type": "string"
-                }
-            }
-        },
+    "ErrorResponse": {
+      "type": "object",
+      "properties": {
+        "code": {
+          "type": "string"
+        },
+        "message": {
+          "type": "string"
+        },
+        "target": {
+          "type": "string"
+        },
+        "innerError": {
+          "$ref": "#/definitions/InternalError"
+        }
+      }
+    },
-        "ErrorResponse": {
-            "type": "object",
-            "properties": {
-                "code": {
-                    "type": "string"
-                },
-                "message": {
-                    "type": "string"
-                },
-                "target": {
-                    "type": "string"
-                },
-                "innerError": {
-                    "$ref": "#/definitions/InternalError"
-                }
-            }
-        },
+    "InternalError": {
+      "type": "object",
+      "properties": {
+        "code": {
+          "type": "string"
+        },
+        "message": {
+          "type": "string"
+        },
+        "innerError": {
+          "$ref": "#/definitions/InternalError"
+        }
+      }
+    },
-        "InternalError": {
-            "type": "object",
-            "properties": {
-                "code": {
-                    "type": "string"
-                },
-                "message": {
-                    "type": "string"
-                },
-                "innerError": {
-                    "$ref": "#/definitions/InternalError"
-                }
-            }
-        },
+    "BatchInput": {
+      "type": "object",
+      "properties": {
+        "documents": {
+          "type": "array",
+          "items": {
+            "$ref": "#/definitions/Input"
+          }
+        }
+      },
+      "example": {
+        "documents": [
+          {
+            "id": "1",
+            "text": "Hello world"
+          },
+          {
+            "id": "2",
+            "text": "Bonjour tout le monde"
+          },
+          {
+            "id": "3",
+            "text": "La carretera estaba atascada. Había mucho tráfico el día de ayer."
+          },
+          {
+            "id": "4",
+            "text": ":) :( :D"
+          }
+        ]
+      }
+    },
-        "BatchInput": {
-            "type": "object",
-            "properties": {
-                "documents": {
-                    "type": "array",
-                    "items": {
-                        "$ref": "#/definitions/Input"
-                    }
-                }
-            },
-            "example": {
-                "documents": [
-                    {
-                        "id": "1",
-                        "text": "Hello world"
-                    },
-                    {
-                        "id": "2",
-                        "text": "Bonjour tout le monde"
-                    },
-                    {
-                        "id": "3",
-                        "text": "La carretera estaba atascada. Había mucho tráfico el día de ayer."
-                    },
-                    {
-                        "id": "4",
-                        "text": ":) :( :D"
-                    }
-                ]
-            }
-        },
+    "Input": {
+      "type": "object",
+      "properties": {
+        "id": {
+          "description": "Unique, non-empty document identifier.",
+          "type": "string"
+        },
+        "text": {
+          "type": "string"
+        }
+      }
+    },
-        "Input": {
-            "type": "object",
-            "properties": {
-                "id": {
-                    "description": "Unique, non-empty document identifier.",
-                    "type": "string"
-                },
-                "text": {
-                    "type": "string"
-                }
-            }
-        },
+    "LanguageBatchResult": {
+      "type": "object",
+      "properties": {
+        "documents": {
+          "type": "array",
+          "readOnly": true,
+          "items": {
+            "$ref": "#/definitions/LanguageBatchResultItem"
+          }
+        },
+        "errors": {
+          "type": "array",
+          "readOnly": true,
+          "items": {
+            "$ref": "#/definitions/ErrorRecord"
+          }
+        }
+      }
+    },
-        "LanguageBatchResult": {
-            "type": "object",
-            "properties": {
-                "documents": {
-                    "type": "array",
-                    "readOnly": true,
-                    "items": {
-                        "$ref": "#/definitions/LanguageBatchResultItem"
-                    }
-                },
-                "errors": {
-                    "type": "array",
-                    "readOnly": true,
-                    "items": {
-                        "$ref": "#/definitions/ErrorRecord"
-                    }
-                }
-            }
-        },
+    "LanguageBatchResultItem": {
+      "type": "object",
+      "properties": {
+        "id": {
+          "description": "Unique document identifier.",
+          "readOnly": true,
+          "type": "string"
+        },
+        "detectedLanguages": {
+          "description": "A list of extracted languages.",
+          "type": "array",
+          "readOnly": true,
+          "items": {
+            "$ref": "#/definitions/DetectedLanguage"
+          }
+        }
+      }
+    },
-        "LanguageBatchResultItem": {
-            "type": "object",
-            "properties": {
-                "id": {
-                    "description": "Unique document identifier.",
-                    "readOnly": true,
-                    "type": "string"
-                },
-                "detectedLanguages": {
-                    "description": "A list of extracted languages.",
-                    "type": "array",
-                    "readOnly": true,
-                    "items": {
-                        "$ref": "#/definitions/DetectedLanguage"
-                    }
-                }
-            }
-        },
+    "DetectedLanguage": {
+      "type": "object",
+      "properties": {
+        "name": {
+          "description": "Long name of a detected language (e.g. English, French).",
+          "type": "string"
+        },
+        "iso6391Name": {
+          "description": "A two letter representation of the detected language according to the ISO 639-1 standard (e.g. en, fr).",
+          "type": "string"
+        },
+        "score": {
+          "format": "double",
+          "description": "A confidence score between 0 and 1. Scores close to 1 indicate 100% certainty that the identified language is true.",
+          "type": "number"
+        }
+      }
+    },
-        "DetectedLanguage": {
-            "type": "object",
-            "properties": {
-                "name": {
-                    "description": "Long name of a detected language (e.g. English, French).",
-                    "type": "string"
-                },
-                "iso6391Name": {
-                    "description": "A two letter representation of the detected language according to the ISO 639-1 standard (e.g. en, fr).",
-                    "type": "string"
-                },
-                "score": {
-                    "format": "double",
-                    "description": "A confidence score between 0 and 1. Scores close to 1 indicate 100% certainty that the identified language is true.",
-                    "type": "number"
-                }
-            }
-        },
+    "SentimentBatchResult": {
+      "type": "object",
+      "properties": {
+        "documents": {
+          "type": "array",
+          "readOnly": true,
+          "items": {
+            "$ref": "#/definitions/SentimentBatchResultItem"
+          }
+        },
+        "errors": {
+          "type": "array",
+          "readOnly": true,
+          "items": {
+            "$ref": "#/definitions/ErrorRecord"
+          }
+        }
+      }
+    },
-        "SentimentBatchResult": {
-            "type": "object",
-            "properties": {
-                "documents": {
-                    "type": "array",
-                    "readOnly": true,
-                    "items": {
-                        "$ref": "#/definitions/SentimentBatchResultItem"
-                    }
-                },
-                "errors": {
-                    "type": "array",
-                    "readOnly": true,
-                    "items": {
-                        "$ref": "#/definitions/ErrorRecord"
-                    }
-                }
-            }
-        },
+    "SentimentBatchResultItem": {
+      "type": "object",
+      "properties": {
+        "score": {
+          "format": "double",
+          "description": "A decimal number between 0 and 1 denoting the sentiment of the document. A score above 0.7 usually refers to a positive document while a score below 0.3 normally has a negative connotation. Mid values refer to neutral text.",
+          "type": "number",
+          "readOnly": true
+        },
+        "id": {
+          "description": "Unique document identifier.",
+          "readOnly": true,
+          "type": "string"
+        }
+      }
+    }
+  }
-        "SentimentBatchResultItem": {
-            "type": "object",
-            "properties": {
-                "score": {
-                    "format": "double",
-                    "description": "A decimal number between 0 and 1 denoting the sentiment of the document. A score above 0.7 usually refers to a positive document while a score below 0.3 normally has a negative connotation. Mid values refer to neutral text.",
-                    "type": "number",
-                    "readOnly": true
-                },
-                "id": {
-                    "description": "Unique document identifier.",
-                    "readOnly": true,
-                    "type": "string"
-                }
-            }
-        }
-    }
 }
\ No newline at end of file
diff --git a/specification/cognitiveservices/data-plane/TextAnalytics/v2.0/examples/SuccessfulKeyPhrasesRequest.json b/specification/cognitiveservices/data-plane/TextAnalytics/v2.0/examples/SuccessfulKeyPhrasesRequest.json
index 5ac76f9af706..130d51f95106 100644
--- a/specification/cognitiveservices/data-plane/TextAnalytics/v2.0/examples/SuccessfulKeyPhrasesRequest.json
+++ b/specification/cognitiveservices/data-plane/TextAnalytics/v2.0/examples/SuccessfulKeyPhrasesRequest.json
@@ -1,63 +1,63 @@
 {
-    "parameters": {
-        "Ocp-Apim-Subscription-Key": "{API key}",
-        "body": {
-            "documents": [
-                {
-                    "id": "1",
-                    "text": "If this demo doesn't work I will be super sad",
-                    "language": "en"
-                },
-                {
-                    "id": "2",
-                    "text": "Español es muy difícil de aprender",
-                    "language": "es"
-                },
-                {
-                    "id": "3",
-                    "text": "I really really love machine learning",
-                    "language": "en"
-                },
-                {
-                    "id": "4",
-                    "text": "Le soleil se lève à l'Est",
-                    "language": "fr"
-                }
-            ]
-        }
-    },
-    "responses": {
-        "200": {
-            "headers": {},
-            "body": {
-                "documents": [
-                    {
-                        "keyPhrases": [
-                            "demo"
-                        ],
-                        "id": "1"
-                    },
-                    {
-                        "keyPhrases": [
-                            "Español"
-                        ],
-                        "id": "2"
-                    },
-                    {
-                        "keyPhrases": [
-                            "machine"
-                        ],
-                        "id": "3"
-                    },
-                    {
-                        "keyPhrases": [
-                            "soleil"
-                        ],
-                        "id": "4"
-                    }
-                ],
-                "errors": []
-            }
-        }
-    }
+  "parameters": {
+    "Ocp-Apim-Subscription-Key": "{API key}",
+    "body": {
+      "documents": [
+        {
+          "id": "1",
+          "text": "If this demo doesn't work I will be super sad",
+          "language": "en"
+        },
+        {
+          "id": "2",
+          "text": "Español es muy difícil de aprender",
+          "language": "es"
+        },
+        {
+          "id": "3",
+          "text": "I really really love machine learning",
+          "language": "en"
+        },
+        {
+          "id": "4",
+          "text": "Le soleil se lève à l'Est",
+          "language": "fr"
+        }
+      ]
+    }
+  },
+  "responses": {
+    "200": {
+      "headers": {},
+      "body": {
+        "documents": [
+          {
+            "keyPhrases": [
+              "demo"
+            ],
+            "id": "1"
+          },
+          {
+            "keyPhrases": [
+              "Español"
+            ],
+            "id": "2"
+          },
+          {
+            "keyPhrases": [
+              "machine"
+            ],
+            "id": "3"
+          },
+          {
+            "keyPhrases": [
+              "soleil"
+            ],
+            "id": "4"
+          }
+        ],
+        "errors": []
+      }
+    }
+  }
 }
\ No newline at end of file
diff --git a/specification/cognitiveservices/data-plane/TextAnalytics/v2.0/examples/SuccessfulLanguagesRequest.json b/specification/cognitiveservices/data-plane/TextAnalytics/v2.0/examples/SuccessfulLanguagesRequest.json
index 9a9babedde4f..25bc96107ef6 100644
--- a/specification/cognitiveservices/data-plane/TextAnalytics/v2.0/examples/SuccessfulLanguagesRequest.json
+++ b/specification/cognitiveservices/data-plane/TextAnalytics/v2.0/examples/SuccessfulLanguagesRequest.json
@@ -1,75 +1,75 @@
 {
-    "parameters": {
-        "Ocp-Apim-Subscription-Key": "{API key}",
-        "body": {
-            "documents": [
-                {
-                    "id": "1",
-                    "text": "If this demo doesn't work I will be super sad"
-                },
-                {
-                    "id": "2",
-                    "text": "Español es muy difícil de aprender"
-                },
-                {
-                    "id": "3",
-                    "text": "I really really love machine learning"
-                },
-                {
-                    "id": "4",
-                    "text": "Le soleil se lève à l'Est"
-                }
-            ]
-        }
-    },
-    "responses": {
-        "200": {
-            "headers": {},
-            "body": {
-                "documents": [
-                    {
-                        "id": "1",
-                        "detectedLanguages": [
-                            {
-                                "name": "English",
-                                "iso6391Name": "en",
-                                "score": 1.0
-                            }
-                        ]
-                    },
-                    {
-                        "id": "2",
-                        "detectedLanguages": [
-                            {
-                                "name": "Spanish",
-                                "iso6391Name": "es",
-                                "score": 1.0
-                            }
-                        ]
-                    },
-                    {
-                        "id": "3",
-                        "detectedLanguages": [
-                            {
-                                "name": "English",
-                                "iso6391Name": "en",
-                                "score": 1.0
-                            }
-                        ]
-                    },
-                    {
-                        "id": "4",
-                        "detectedLanguages": [
-                            {
-                                "name": "French",
-                                "iso6391Name": "fr",
-                                "score": 1.0
-                            }
-                        ]
-                    }
-                ],
-                "errors": []
-            }
-        }
-    }
+  "parameters": {
+    "Ocp-Apim-Subscription-Key": "{API key}",
+    "body": {
+      "documents": [
+        {
+          "id": "1",
+          "text": "If this demo doesn't work I will be super sad"
+        },
+        {
+          "id": "2",
+          "text": "Español es muy difícil de aprender"
+        },
+        {
+          "id": "3",
+          "text": "I really really love machine learning"
+        },
+        {
+          "id": "4",
+          "text": "Le soleil se lève à l'Est"
+        }
+      ]
+    }
+  },
+  "responses": {
+    "200": {
+      "headers": {},
+      "body": {
+        "documents": [
+          {
+            "id": "1",
+            "detectedLanguages": [
+              {
+                "name": "English",
+                "iso6391Name": "en",
+                "score": 1.0
+              }
+            ]
+          },
+          {
+            "id": "2",
+            "detectedLanguages": [
+              {
+                "name": "Spanish",
+                "iso6391Name": "es",
+                "score": 1.0
+              }
+            ]
+          },
+          {
+            "id": "3",
+            "detectedLanguages": [
+              {
+                "name": "English",
+                "iso6391Name": "en",
+                "score": 1.0
+              }
+            ]
+          },
+          {
+            "id": "4",
+            "detectedLanguages": [
+              {
+                "name": "French",
+                "iso6391Name": "fr",
+                "score": 1.0
+              }
+            ]
+          }
+        ],
+        "errors": []
+      }
+    }
+  }
 }
\ No newline at end of file
diff --git a/specification/cognitiveservices/data-plane/TextAnalytics/v2.0/examples/SuccessfulSentimentRequest.json b/specification/cognitiveservices/data-plane/TextAnalytics/v2.0/examples/SuccessfulSentimentRequest.json
index 641937bac70f..8bda71709e96 100644
--- a/specification/cognitiveservices/data-plane/TextAnalytics/v2.0/examples/SuccessfulSentimentRequest.json
+++ b/specification/cognitiveservices/data-plane/TextAnalytics/v2.0/examples/SuccessfulSentimentRequest.json
@@ -1,55 +1,55 @@
 {
-    "parameters": {
-        "Ocp-Apim-Subscription-Key": "{API key}",
-        "body": {
-            "documents": [
-                {
-                    "id": "1",
-                    "text": "If this demo doesn't work I will be super sad",
-                    "language": "en"
-                },
-                {
-                    "id": "2",
-                    "text": "Español es muy difícil de aprender",
-                    "language": "es"
-                },
-                {
-                    "id": "3",
-                    "text": "I really really love machine learning",
-                    "language": "en"
-                },
-                {
-                    "id": "4",
-                    "text": "Le soleil se lève à l'Est",
-                    "language": "fr"
-                }
-            ]
-        }
-    },
-    "responses": {
-        "200": {
-            "headers": {},
-            "body": {
-                "documents": [
-                    {
-                        "score": 0.0065730810165405273,
-                        "id": "1"
-                    },
-                    {
-                        "score": 0.97333031892776489,
-                        "id": "3"
-                    },
-                    {
-                        "score": 0.44219660758972168,
-                        "id": "2"
-                    },
-                    {
-                        "score": 0.53425812721252441,
-                        "id": "4"
-                    }
-                ],
-                "errors": []
-            }
-        }
-    }
+  "parameters": {
+    "Ocp-Apim-Subscription-Key": "{API key}",
+    "body": {
+      "documents": [
+        {
+          "id": "1",
+          "text": "If this demo doesn't work I will be super sad",
+          "language": "en"
+        },
+        {
+          "id": "2",
+          "text": "Español es muy difícil de aprender",
+          "language": "es"
+        },
+        {
+          "id": "3",
+          "text": "I really really love machine learning",
+          "language": "en"
+        },
+        {
+          "id": "4",
+          "text": "Le soleil se lève à l'Est",
+          "language": "fr"
+        }
+      ]
+    }
+  },
+  "responses": {
+    "200": {
+      "headers": {},
+      "body": {
+        "documents": [
+          {
+            "score": 0.0065730810165405273,
+            "id": "1"
+          },
+          {
+            "score": 0.97333031892776489,
+            "id": "3"
+          },
+          {
+            "score": 0.44219660758972168,
+            "id": "2"
+          },
+          {
+            "score": 0.53425812721252441,
+            "id": "4"
+          }
+        ],
+        "errors": []
+      }
+    }
+  }
 }
\ No newline at end of file