diff --git a/specification/cognitiveservices/OpenAI.Inference/models/chat.completions.tsp b/specification/cognitiveservices/OpenAI.Inference/models/chat.completions.tsp index 0333f1635dae..6c93e8aa0463 100644 --- a/specification/cognitiveservices/OpenAI.Inference/models/chat.completions.tsp +++ b/specification/cognitiveservices/OpenAI.Inference/models/chat.completions.tsp @@ -49,6 +49,14 @@ model ChatCompletionsOptions { @projectedName("json", "max_tokens") maxTokens?: int32; + @doc(""" + Information about the content filtering category (hate, sexual, violence, self_harm), if it + has been detected, as well as the severity level (very_low, low, medium, high-scale that + determines the intensity and risk level of harmful content) and if it has been filtered or not. + """) + @projectedName("json","contentFilterResults") + annotation?: string; + @doc(""" The sampling temperature to use that controls the apparent creativity of generated completions. Higher values will make output more random while lower values will make results more focused