From b25328b19b9053dab443cb13413d3551923857b8 Mon Sep 17 00:00:00 2001
From: Gerardo Lecaros
Date: Wed, 21 Jun 2023 15:11:23 -0700
Subject: [PATCH] Fixing enum to have the proper values + reenabling csharp generation. (#24538)

---
 .../models/chat.completions.tsp               |   2 +
 .../models/completions.common.tsp             |  11 +-
 .../models/completions.create.tsp             |   2 +
 .../models/embeddings.create.tsp              |   2 +
 .../OpenAI.Inference/models/images.tsp        |   2 +-
 .../OpenAI.Inference/tspconfig.yaml           |  11 +-
 .../preview/2023-06-01-preview/generated.json |  30 +++
 .../stable/2022-12-01/generated.json          | 221 ++++++++++++++++++
 .../stable/2023-05-15/generated.json          |  30 +++
 9 files changed, 298 insertions(+), 13 deletions(-)

diff --git a/specification/cognitiveservices/OpenAI.Inference/models/chat.completions.tsp b/specification/cognitiveservices/OpenAI.Inference/models/chat.completions.tsp
index 8a4b284a414d..5405b21497b2 100644
--- a/specification/cognitiveservices/OpenAI.Inference/models/chat.completions.tsp
+++ b/specification/cognitiveservices/OpenAI.Inference/models/chat.completions.tsp
@@ -5,6 +5,8 @@ import "./completions.common.tsp";
 using TypeSpec.Rest;
 using TypeSpec.Http;
 
+namespace Azure.OpenAI;
+
 @doc("A description of the intended purpose of a message within a chat completions interaction.")
 enum ChatRole {
   @doc("The role that instructs or sets the behavior of the assistant.")
diff --git a/specification/cognitiveservices/OpenAI.Inference/models/completions.common.tsp b/specification/cognitiveservices/OpenAI.Inference/models/completions.common.tsp
index 41b3ab740b2f..da919b0052bd 100644
--- a/specification/cognitiveservices/OpenAI.Inference/models/completions.common.tsp
+++ b/specification/cognitiveservices/OpenAI.Inference/models/completions.common.tsp
@@ -1,3 +1,5 @@
+namespace Azure.OpenAI;
+
 @doc("""
 Representation of the token counts processed for a completions request.
 Counts consider all tokens across prompts, choices, choice alternates, best_of generations, and
@@ -22,17 +24,14 @@ Representation of the manner in which a completions response concluded.
 """)
 enum CompletionsFinishReason {
   @doc("Completions ended normally and reached its end of token generation.")
-  @projectedName("json", "stop")
-  stopped,
+  stopped: "stop",
 
   @doc("Completions exhausted available token limits before generation could complete.")
-  @projectedName("json", "length")
-  tokenLimitReached,
+  tokenLimitReached: "length",
 
   @doc("""
 Completions generated a response that was identified as potentially sensitive per content
 moderation policies.
 """)
-  @projectedName("json", "content_filter")
-  contentFiltered,
+  contentFiltered: "content_filter",
 }
diff --git a/specification/cognitiveservices/OpenAI.Inference/models/completions.create.tsp b/specification/cognitiveservices/OpenAI.Inference/models/completions.create.tsp
index 595f0aa01a21..2b899a49589a 100644
--- a/specification/cognitiveservices/OpenAI.Inference/models/completions.create.tsp
+++ b/specification/cognitiveservices/OpenAI.Inference/models/completions.create.tsp
@@ -5,6 +5,8 @@ import "./completions.common.tsp";
 using TypeSpec.Rest;
 using TypeSpec.Http;
 
+namespace Azure.OpenAI;
+
 @doc("""
 The configuration information for a completions request.
 Completions support a wide variety of tasks and generate text that continues from or "completes"
diff --git a/specification/cognitiveservices/OpenAI.Inference/models/embeddings.create.tsp b/specification/cognitiveservices/OpenAI.Inference/models/embeddings.create.tsp
index f80455c67866..506615ae7491 100644
--- a/specification/cognitiveservices/OpenAI.Inference/models/embeddings.create.tsp
+++ b/specification/cognitiveservices/OpenAI.Inference/models/embeddings.create.tsp
@@ -4,6 +4,8 @@ import "@typespec/http";
 using TypeSpec.Rest;
 using TypeSpec.Http;
 
+namespace Azure.OpenAI;
+
 @doc("""
 The configuration information for an embeddings request.
 Embeddings measure the relatedness of text strings and are commonly used for search, clustering,
diff --git a/specification/cognitiveservices/OpenAI.Inference/models/images.tsp b/specification/cognitiveservices/OpenAI.Inference/models/images.tsp
index d77aea2df4e7..c990a9ff2c24 100644
--- a/specification/cognitiveservices/OpenAI.Inference/models/images.tsp
+++ b/specification/cognitiveservices/OpenAI.Inference/models/images.tsp
@@ -126,7 +126,7 @@ op OaiLongRunningRpcOperation<
     Azure.Core.Traits.TraitLocation.Parameters
   >,
   Azure.Core.Foundations.AcceptedResponse &
-    ImageOperationResponse &
+    TResponse &
     Foundations.LongRunningStatusLocation &
     Azure.Core.Traits.Private.TraitProperties<
       Traits,
diff --git a/specification/cognitiveservices/OpenAI.Inference/tspconfig.yaml b/specification/cognitiveservices/OpenAI.Inference/tspconfig.yaml
index 74745bf89ff5..cbc2af3360ce 100644
--- a/specification/cognitiveservices/OpenAI.Inference/tspconfig.yaml
+++ b/specification/cognitiveservices/OpenAI.Inference/tspconfig.yaml
@@ -20,12 +20,11 @@ options:
   # Uncomment this line and add "@azure-tools/cadl-java" to your package.json to generate Java code
   # "@azure-tools/cadl-java": true
   # Uncomment this line and add "@azure-tools/cadl-csharp" to your package.json to generate C# code
-  # https://github.com/Azure/azure-rest-api-specs/issues/24496
-  # "@azure-tools/typespec-csharp":
-  #   package-dir: "Azure.AI.OpenAI"
-  #   namespace: "Azure.AI.OpenAI"
-  #   clear-output-folder: true
-  #   model-namespace: false
+  "@azure-tools/typespec-csharp":
+    package-dir: "Azure.AI.OpenAI"
+    namespace: "Azure.AI.OpenAI"
+    clear-output-folder: true
+    model-namespace: false
   # https://github.com/Azure/azure-rest-api-specs/issues/24497
   # "@azure-tools/typespec-java":
   #   package-dir: "azure-ai-openai"
diff --git a/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2023-06-01-preview/generated.json b/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2023-06-01-preview/generated.json
index 7eb2eb6afcce..408442988421 100644
--- a/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2023-06-01-preview/generated.json
+++ b/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2023-06-01-preview/generated.json
@@ -632,6 +632,36 @@
         "usage"
       ]
     },
+    "CompletionsFinishReason": {
+      "type": "string",
+      "description": "Representation of the manner in which a completions response concluded.",
+      "enum": [
+        "stop",
+        "length",
+        "content_filter"
+      ],
+      "x-ms-enum": {
+        "name": "CompletionsFinishReason",
+        "modelAsString": true,
+        "values": [
+          {
+            "name": "stopped",
+            "value": "stop",
+            "description": "Completions ended normally and reached its end of token generation."
+          },
+          {
+            "name": "tokenLimitReached",
+            "value": "length",
+            "description": "Completions exhausted available token limits before generation could complete."
+          },
+          {
+            "name": "contentFiltered",
+            "value": "content_filter",
+            "description": "Completions generated a response that was identified as potentially sensitive per content\nmoderation policies."
+          }
+        ]
+      }
+    },
     "CompletionsLogProbabilityModel": {
       "type": "object",
       "properties": {
diff --git a/specification/cognitiveservices/data-plane/AzureOpenAI/inference/stable/2022-12-01/generated.json b/specification/cognitiveservices/data-plane/AzureOpenAI/inference/stable/2022-12-01/generated.json
index bc4abd3ea214..13f6ad8e1121 100644
--- a/specification/cognitiveservices/data-plane/AzureOpenAI/inference/stable/2022-12-01/generated.json
+++ b/specification/cognitiveservices/data-plane/AzureOpenAI/inference/stable/2022-12-01/generated.json
@@ -225,6 +225,197 @@
       },
       "description": "An object containing more specific information about the error. As per Microsoft One API guidelines - https://github.com/Microsoft/api-guidelines/blob/vNext/Guidelines.md#7102-error-condition-responses."
     },
+    "ChatChoice": {
+      "type": "object",
+      "properties": {
+        "message": {
+          "$ref": "#/definitions/ChatMessage",
+          "description": "The chat message for a given chat completions prompt."
+        },
+        "index": {
+          "type": "integer",
+          "format": "int32",
+          "description": "The ordered index associated with this chat completions choice."
+        },
+        "finish_reason": {
+          "x-typespec-name": "CompletionsFinishReason | null",
+          "description": "The reason that this chat completions choice completed its generation.",
+          "x-ms-client-name": "finishReason"
+        },
+        "delta": {
+          "$ref": "#/definitions/ChatMessage",
+          "description": "The delta message content for a streaming response."
+        }
+      },
+      "description": "The representation of a single prompt completion as part of an overall chat completions request.\nGenerally, `n` choices are generated per provided prompt with a default value of 1.\nToken limits and other settings may limit the number of choices generated.",
+      "required": [
+        "index",
+        "finish_reason"
+      ]
+    },
+    "ChatCompletions": {
+      "type": "object",
+      "properties": {
+        "id": {
+          "type": "string",
+          "description": "A unique identifier associated with this chat completions response."
+        },
+        "created": {
+          "type": "integer",
+          "format": "int32",
+          "description": "The first timestamp associated with generation activity for this completions response,\nrepresented as seconds since the beginning of the Unix epoch of 00:00 on 1 Jan 1970."
+        },
+        "choices": {
+          "type": "array",
+          "items": {
+            "$ref": "#/definitions/ChatChoice"
+          },
+          "x-ms-identifiers": [],
+          "description": "The collection of completions choices associated with this completions response.\nGenerally, `n` choices are generated per provided prompt with a default value of 1.\nToken limits and other settings may limit the number of choices generated.",
+          "x-typespec-name": "ChatChoice[]"
+        },
+        "usage": {
+          "$ref": "#/definitions/CompletionsUsage",
+          "description": "Usage information for tokens processed and generated as part of this completions operation."
+        }
+      },
+      "description": "Representation of the response data from a chat completions request.\nCompletions support a wide variety of tasks and generate text that continues from or \"completes\"\nprovided prompt data.",
+      "required": [
+        "id",
+        "created",
+        "choices",
+        "usage"
+      ]
+    },
+    "ChatCompletionsOptions": {
+      "type": "object",
+      "properties": {
+        "messages": {
+          "type": "array",
+          "items": {
+            "$ref": "#/definitions/ChatMessage"
+          },
+          "x-ms-identifiers": [],
+          "description": "The collection of context messages associated with this chat completions request.\nTypical usage begins with a chat message for the System role that provides instructions for\nthe behavior of the assistant, followed by alternating messages between the User and\nAssistant roles.",
+          "x-typespec-name": "ChatMessage[]"
+        },
+        "max_tokens": {
+          "type": "integer",
+          "format": "int32",
+          "description": "The maximum number of tokens to generate.",
+          "x-ms-client-name": "maxTokens"
+        },
+        "temperature": {
+          "type": "number",
+          "format": "float",
+          "description": "The sampling temperature to use that controls the apparent creativity of generated completions.\nHigher values will make output more random while lower values will make results more focused\nand deterministic.\nIt is not recommended to modify temperature and top_p for the same completions request as the\ninteraction of these two settings is difficult to predict."
+        },
+        "top_p": {
+          "type": "number",
+          "format": "float",
+          "description": "An alternative to sampling with temperature called nucleus sampling. This value causes the\nmodel to consider the results of tokens with the provided probability mass. As an example, a\nvalue of 0.15 will cause only the tokens comprising the top 15% of probability mass to be\nconsidered.\nIt is not recommended to modify temperature and top_p for the same completions request as the\ninteraction of these two settings is difficult to predict.",
+          "x-ms-client-name": "topP"
+        },
+        "logit_bias": {
+          "type": "object",
+          "additionalProperties": {
+            "type": "integer",
+            "format": "int32"
+          },
+          "description": "A map between GPT token IDs and bias scores that influences the probability of specific tokens\nappearing in a completions response. Token IDs are computed via external tokenizer tools, while\nbias scores reside in the range of -100 to 100 with minimum and maximum values corresponding to\na full ban or exclusive selection of a token, respectively. The exact behavior of a given bias\nscore varies by model.",
+          "x-typespec-name": "Record<int32>",
+          "x-ms-client-name": "logitBias"
+        },
+        "user": {
+          "type": "string",
+          "description": "An identifier for the caller or end user of the operation. This may be used for tracking\nor rate-limiting purposes."
+        },
+        "n": {
+          "type": "integer",
+          "format": "int32",
+          "description": "The number of chat completions choices that should be generated for a chat completions\nresponse.\nBecause this setting can generate many completions, it may quickly consume your token quota.\nUse carefully and ensure reasonable settings for max_tokens and stop."
+        },
+        "stop": {
+          "type": "array",
+          "items": {
+            "type": "string"
+          },
+          "description": "A collection of textual sequences that will end completions generation.",
+          "x-typespec-name": "string[]"
+        },
+        "presence_penalty": {
+          "type": "number",
+          "format": "float",
+          "description": "A value that influences the probability of generated tokens appearing based on their existing\npresence in generated text.\nPositive values will make tokens less likely to appear when they already exist and increase the\nmodel's likelihood to output new topics.",
+          "x-ms-client-name": "presencePenalty"
+        },
+        "frequency_penalty": {
+          "type": "number",
+          "format": "float",
+          "description": "A value that influences the probability of generated tokens appearing based on their cumulative\nfrequency in generated text.\nPositive values will make tokens less likely to appear as their frequency increases and\ndecrease the likelihood of the model repeating the same statements verbatim.",
+          "x-ms-client-name": "frequencyPenalty"
+        },
+        "stream": {
+          "type": "boolean",
+          "description": "A value indicating whether chat completions should be streamed for this request."
+        },
+        "model": {
+          "type": "string",
+          "description": "The model name to provide as part of this completions request.\nNot applicable to Azure OpenAI, where deployment information should be included in the Azure\nresource URI that's connected to."
+        }
+      },
+      "description": "The configuration information for a chat completions request.\nCompletions support a wide variety of tasks and generate text that continues from or \"completes\"\nprovided prompt data.",
+      "required": [
+        "messages"
+      ]
+    },
+    "ChatMessage": {
+      "type": "object",
+      "properties": {
+        "role": {
+          "$ref": "#/definitions/ChatRole",
+          "description": "The role associated with this message payload."
+        },
+        "content": {
+          "type": "string",
+          "description": "The text associated with this message payload."
+        }
+      },
+      "description": "A single, role-attributed message within a chat completion interaction.",
+      "required": [
+        "role"
+      ]
+    },
+    "ChatRole": {
+      "type": "string",
+      "description": "A description of the intended purpose of a message within a chat completions interaction.",
+      "enum": [
+        "system",
+        "assistant",
+        "user"
+      ],
+      "x-ms-enum": {
+        "name": "ChatRole",
+        "modelAsString": true,
+        "values": [
+          {
+            "name": "system",
+            "value": "system",
+            "description": "The role that instructs or sets the behavior of the assistant."
+          },
+          {
+            "name": "assistant",
+            "value": "assistant",
+            "description": "The role that provides responses to system-instructed, user-prompted input."
+          },
+          {
+            "name": "user",
+            "value": "user",
+            "description": "The role that provides input for chat completions."
+          }
+        ]
+      }
+    },
     "Choice": {
       "type": "object",
       "properties": {
@@ -296,6 +487,36 @@
         "usage"
       ]
     },
+    "CompletionsFinishReason": {
+      "type": "string",
+      "description": "Representation of the manner in which a completions response concluded.",
+      "enum": [
+        "stop",
+        "length",
+        "content_filter"
+      ],
+      "x-ms-enum": {
+        "name": "CompletionsFinishReason",
+        "modelAsString": true,
+        "values": [
+          {
+            "name": "stopped",
+            "value": "stop",
+            "description": "Completions ended normally and reached its end of token generation."
+          },
+          {
+            "name": "tokenLimitReached",
+            "value": "length",
+            "description": "Completions exhausted available token limits before generation could complete."
+          },
+          {
+            "name": "contentFiltered",
+            "value": "content_filter",
+            "description": "Completions generated a response that was identified as potentially sensitive per content\nmoderation policies."
+          }
+        ]
+      }
+    },
     "CompletionsLogProbabilityModel": {
       "type": "object",
       "properties": {
diff --git a/specification/cognitiveservices/data-plane/AzureOpenAI/inference/stable/2023-05-15/generated.json b/specification/cognitiveservices/data-plane/AzureOpenAI/inference/stable/2023-05-15/generated.json
index 2d0b4674d2d3..78fd2fe57232 100644
--- a/specification/cognitiveservices/data-plane/AzureOpenAI/inference/stable/2023-05-15/generated.json
+++ b/specification/cognitiveservices/data-plane/AzureOpenAI/inference/stable/2023-05-15/generated.json
@@ -538,6 +538,36 @@
         "usage"
       ]
     },
+    "CompletionsFinishReason": {
+      "type": "string",
+      "description": "Representation of the manner in which a completions response concluded.",
+      "enum": [
+        "stop",
+        "length",
+        "content_filter"
+      ],
+      "x-ms-enum": {
+        "name": "CompletionsFinishReason",
+        "modelAsString": true,
+        "values": [
+          {
+            "name": "stopped",
+            "value": "stop",
+            "description": "Completions ended normally and reached its end of token generation."
+          },
+          {
+            "name": "tokenLimitReached",
+            "value": "length",
+            "description": "Completions exhausted available token limits before generation could complete."
+          },
+          {
+            "name": "contentFiltered",
+            "value": "content_filter",
+            "description": "Completions generated a response that was identified as potentially sensitive per content\nmoderation policies."
+          }
+        ]
+      }
+    },
     "CompletionsLogProbabilityModel": {
       "type": "object",
       "properties": {
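
Note on the enum pattern: a TypeSpec enum member may declare its wire value inline
(member: "value"), which is what this patch uses in place of the older
@projectedName("json", "...") decorator. A minimal sketch of the pattern, using a
hypothetical enum that is not part of this spec:

  @doc("Representation of how a hypothetical job run concluded.")
  enum JobFinishReason {
    @doc("The run completed normally.")
    succeeded: "success",

    @doc("The run exhausted its time budget.")
    timedOut: "timeout",
  }

Because the wire value travels with the member itself, emitters can pair each client
name (succeeded) with its serialized value ("success") without per-member projections;
that pairing is what populates the x-ms-enum "values" entries in the generated.json
files above. With "@azure-tools/typespec-csharp" re-enabled in tspconfig.yaml, the same
name/value mapping should flow into the generated C# enum when the spec is compiled,
e.g. via: tsp compile . --emit @azure-tools/typespec-csharp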