From ee511a93c654fe9cb7f4b2c59c9de6b0d5f6b437 Mon Sep 17 00:00:00 2001 From: Brandon Miller Date: Thu, 29 Jun 2023 17:57:27 -0700 Subject: [PATCH] move Filter definitions to common --- .../models/chat.completions.tsp | 52 +----------------- .../models/completions.common.tsp | 55 +++++++++++++++++++ .../models/completions.create.tsp | 10 ++++ 3 files changed, 67 insertions(+), 50 deletions(-) diff --git a/specification/cognitiveservices/OpenAI.Inference/models/chat.completions.tsp b/specification/cognitiveservices/OpenAI.Inference/models/chat.completions.tsp index 771f4fb384d1..ad4b98572503 100644 --- a/specification/cognitiveservices/OpenAI.Inference/models/chat.completions.tsp +++ b/specification/cognitiveservices/OpenAI.Inference/models/chat.completions.tsp @@ -178,55 +178,6 @@ model ChatChoice { contentFilterResults?: ContentFilterResults; } -@doc("Ratings for the intensity and risk level of harmful content.") -enum ContentFilterSeverity { - @doc("General content or related content in generic or non-harmful contexts.") - safe: "safe"; - - @doc("Harmful content at a low intensity and risk level.") - low: "low"; - - @doc("Harmful content at a medium intensity and risk level.") - medium: "medium"; - - @doc("Harmful content at a high intensity and risk level.") - high: "high"; -} - -@doc("Information about filtered content severity level and if it has been filtered or not.") -model ContentFilterResult { - @doc("") - @projectedName("json", "severity") - severity: ContentFilterSeverity; - - @doc("") - @projectedName("json", "filtered") - filtered: boolean; - -} - -@doc("Information about the content filtering category, if it has been detected.") -model ContentFilterResults { - sexual: ContentFilterResult; - violence: ContentFilterResult; - hate: ContentFilterResult; - selfHarm: ContentFilterResult; - //error? -} - -@doc(""" -Content filtering results for a single prompt in the request. -""") -model PromptFilterResult { - @doc("") - @projectedName("json", "prompt_index") - promptIndex: int32; - - @doc("asdfasdfasdf") - @projectedName("json", "contentFilterResults") - contentFilterResults?: ContentFilterResults; -} - @doc(""" Representation of the response data from a chat completions request. Completions support a wide variety of tasks and generate text that continues from or "completes" @@ -257,7 +208,8 @@ model ChatCompletions { Content filtering results for zero or more prompts in the request. In a streaming request, results for different prompts may arrive at different times or in different orders. """) - @projectedName("json", "promptFilterResults") + @added(ServiceApiVersions.v2023_06_01_Preview) + @projectedName("json", "promptFilterResults") // TODO: or "prompt_filter_results"? promptFilterResults?: PromptFilterResult[]; @doc(""" diff --git a/specification/cognitiveservices/OpenAI.Inference/models/completions.common.tsp b/specification/cognitiveservices/OpenAI.Inference/models/completions.common.tsp index da919b0052bd..4939f2ee9237 100644 --- a/specification/cognitiveservices/OpenAI.Inference/models/completions.common.tsp +++ b/specification/cognitiveservices/OpenAI.Inference/models/completions.common.tsp @@ -1,5 +1,7 @@ namespace Azure.OpenAI; +using TypeSpec.Versioning; + @doc(""" Representation of the token counts processed for a completions request. 
 Counts consider all tokens across prompts, choices, choice alternates, best_of generations, and
@@ -35,3 +37,56 @@ enum CompletionsFinishReason {
   """)
   contentFiltered: "content_filter",
 }
+
+@added(ServiceApiVersions.v2023_06_01_Preview)
+@doc("Ratings for the intensity and risk level of harmful content.")
+enum ContentFilterSeverity {
+  @doc("General content or related content in generic or non-harmful contexts.")
+  safe: "safe";
+
+  @doc("Harmful content at a low intensity and risk level.")
+  low: "low";
+
+  @doc("Harmful content at a medium intensity and risk level.")
+  medium: "medium";
+
+  @doc("Harmful content at a high intensity and risk level.")
+  high: "high";
+}
+
+@added(ServiceApiVersions.v2023_06_01_Preview)
+@doc("Information about a content filtering category's detected severity and whether the content was filtered.")
+model ContentFilterResult {
+  @doc("The detected severity level of the content.")
+  @projectedName("json", "severity")
+  severity: ContentFilterSeverity;
+
+  @doc("A value indicating whether or not the content has been filtered.")
+  @projectedName("json", "filtered")
+  filtered: boolean;
+
+}
+
+@added(ServiceApiVersions.v2023_06_01_Preview)
+@doc("Information about the content filtering categories, if they have been detected.")
+model ContentFilterResults {
+  sexual: ContentFilterResult;
+  violence: ContentFilterResult;
+  hate: ContentFilterResult;
+  selfHarm: ContentFilterResult;
+  //error?
+}
+
+@added(ServiceApiVersions.v2023_06_01_Preview)
+@doc("""
+Content filtering results for a single prompt in the request.
+""")
+model PromptFilterResult {
+  @doc("The index of this prompt in the set of prompt results.")
+  @projectedName("json", "prompt_index")
+  promptIndex: int32;
+
+  @doc("Content filtering results for this prompt.")
+  @projectedName("json", "contentFilterResults")
+  contentFilterResults?: ContentFilterResults;
+}
\ No newline at end of file
diff --git a/specification/cognitiveservices/OpenAI.Inference/models/completions.create.tsp b/specification/cognitiveservices/OpenAI.Inference/models/completions.create.tsp
index c3f695e16ecf..2869b5464d50 100644
--- a/specification/cognitiveservices/OpenAI.Inference/models/completions.create.tsp
+++ b/specification/cognitiveservices/OpenAI.Inference/models/completions.create.tsp
@@ -1,9 +1,11 @@
 import "@typespec/rest";
 import "@typespec/http";
+import "@typespec/versioning";
 import "./completions.common.tsp";
 
 using TypeSpec.Rest;
 using TypeSpec.Http;
+using TypeSpec.Versioning;
 
 namespace Azure.OpenAI;
 
@@ -159,6 +161,14 @@ model Completions {
   @projectedName("csharp", "InternalCreatedSecondsAfterUnixEpoch")
   created: int32;
 
+  @doc("""
+  Content filtering results for zero or more prompts in the request. In a streaming request,
+  results for different prompts may arrive at different times or in different orders.
+  """)
+  @added(ServiceApiVersions.v2023_06_01_Preview)
+  @projectedName("json", "promptFilterResults")
+  promptFilterResults?: PromptFilterResult[];
+
   @doc("""
   The collection of completions choices associated with this completions response.
   Generally, `n` choices are generated per provided prompt with a default value of 1.
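
Reviewer note: below is a minimal, illustrative sketch of the response fragment these moved
models describe; it is not authoritative service output. It follows the projections exactly as
written in this patch: snake_case for `prompt_index`, camelCase for `promptFilterResults` and
`contentFilterResults` (the inline TODO leaves the final wire names open), and the default
`selfHarm` member name, which has no projection declared.

    // Hypothetical payload shape under this patch's projections; the casing of
    // the top-level keys may change if the TODO resolves to snake_case.
    const examplePromptFilterResults = {
      promptFilterResults: [
        {
          prompt_index: 0, // projected from promptIndex
          contentFilterResults: {
            sexual: { severity: "safe", filtered: false },
            violence: { severity: "safe", filtered: false },
            hate: { severity: "safe", filtered: false },
            // default member name; no @projectedName is declared for selfHarm
            selfHarm: { severity: "medium", filtered: true },
          },
        },
      ],
    };

If the TODO resolves to snake_case, the top-level keys would become `prompt_filter_results`
and `content_filter_results` instead.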