diff --git a/sdk/openai/azure-ai-openai/src/main/java/com/azure/ai/openai/OpenAIAsyncClient.java b/sdk/openai/azure-ai-openai/src/main/java/com/azure/ai/openai/OpenAIAsyncClient.java index 5e19032203e12..bf87cb5f7bcc4 100644 --- a/sdk/openai/azure-ai-openai/src/main/java/com/azure/ai/openai/OpenAIAsyncClient.java +++ b/sdk/openai/azure-ai-openai/src/main/java/com/azure/ai/openai/OpenAIAsyncClient.java @@ -13,9 +13,7 @@ import com.azure.ai.openai.models.CompletionsOptions; import com.azure.ai.openai.models.Embeddings; import com.azure.ai.openai.models.EmbeddingsOptions; -import com.azure.ai.openai.models.ImageGenerationOptions; import com.azure.ai.openai.models.ImageOperationResponse; -import com.azure.ai.openai.models.ImageOperationStatus; import com.azure.core.annotation.Generated; import com.azure.core.annotation.ReturnType; import com.azure.core.annotation.ServiceClient; @@ -24,7 +22,6 @@ import com.azure.core.exception.HttpResponseException; import com.azure.core.exception.ResourceModifiedException; import com.azure.core.exception.ResourceNotFoundException; -import com.azure.core.experimental.models.PollResult; import com.azure.core.http.rest.RequestOptions; import com.azure.core.http.rest.Response; import com.azure.core.util.BinaryData; @@ -482,7 +479,7 @@ public Flux getChatCompletionsStream( * } * ] * } - * status: String(notRunning/running/succeeded/canceled/failed/deleted) (Required) + * status: String(notRunning/running/succeeded/canceled/failed) (Required) * error (Optional): (recursive schema, see error above) * } * } @@ -548,7 +545,7 @@ public Mono> getImageOperationStatusWithResponse( */ @Generated @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) - public PollerFlux beginStartGenerateImage( + PollerFlux beginStartGenerateImage( BinaryData imageGenerationOptions, RequestOptions requestOptions) { return this.serviceClient.beginStartGenerateImageAsync(imageGenerationOptions, requestOptions); } @@ -574,26 +571,4 @@ public Mono getImageOperationStatus(String operationId) .flatMap(FluxUtil::toMono) .map(protocolMethodData -> protocolMethodData.toObject(ImageOperationResponse.class)); } - - /** - * Starts the generation of a batch of images from a text caption. - * - * @param imageGenerationOptions Represents the request data used to generate images. - * @throws IllegalArgumentException thrown if parameters fail the validation. - * @throws HttpResponseException thrown if the request is rejected by server. - * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. - * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. - * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. - * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. - * @return the {@link PollerFlux} for polling of status details for long running operations. 
- */ - @Generated - @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) - public PollerFlux beginStartGenerateImage( - ImageGenerationOptions imageGenerationOptions) { - // Generated convenience method for beginStartGenerateImageWithModel - RequestOptions requestOptions = new RequestOptions(); - return serviceClient.beginStartGenerateImageWithModelAsync( - BinaryData.fromObject(imageGenerationOptions), requestOptions); - } } diff --git a/sdk/openai/azure-ai-openai/src/main/java/com/azure/ai/openai/OpenAIClient.java b/sdk/openai/azure-ai-openai/src/main/java/com/azure/ai/openai/OpenAIClient.java index 257f0022261fb..49be4d675e873 100644 --- a/sdk/openai/azure-ai-openai/src/main/java/com/azure/ai/openai/OpenAIClient.java +++ b/sdk/openai/azure-ai-openai/src/main/java/com/azure/ai/openai/OpenAIClient.java @@ -13,9 +13,7 @@ import com.azure.ai.openai.models.CompletionsOptions; import com.azure.ai.openai.models.Embeddings; import com.azure.ai.openai.models.EmbeddingsOptions; -import com.azure.ai.openai.models.ImageGenerationOptions; import com.azure.ai.openai.models.ImageOperationResponse; -import com.azure.ai.openai.models.ImageOperationStatus; import com.azure.core.annotation.Generated; import com.azure.core.annotation.ReturnType; import com.azure.core.annotation.ServiceClient; @@ -24,7 +22,6 @@ import com.azure.core.exception.HttpResponseException; import com.azure.core.exception.ResourceModifiedException; import com.azure.core.exception.ResourceNotFoundException; -import com.azure.core.experimental.models.PollResult; import com.azure.core.http.rest.RequestOptions; import com.azure.core.http.rest.Response; import com.azure.core.util.BinaryData; @@ -474,7 +471,7 @@ public IterableStream getChatCompletionsStream( * } * ] * } - * status: String(notRunning/running/succeeded/canceled/failed/deleted) (Required) + * status: String(notRunning/running/succeeded/canceled/failed) (Required) * error (Optional): (recursive schema, see error above) * } * } @@ -538,7 +535,7 @@ public Response getImageOperationStatusWithResponse(String operation */ @Generated @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) - public SyncPoller beginStartGenerateImage( + SyncPoller beginStartGenerateImage( BinaryData imageGenerationOptions, RequestOptions requestOptions) { return this.serviceClient.beginStartGenerateImage(imageGenerationOptions, requestOptions); } @@ -564,26 +561,4 @@ public ImageOperationResponse getImageOperationStatus(String operationId) { .getValue() .toObject(ImageOperationResponse.class); } - - /** - * Starts the generation of a batch of images from a text caption. - * - * @param imageGenerationOptions Represents the request data used to generate images. - * @throws IllegalArgumentException thrown if parameters fail the validation. - * @throws HttpResponseException thrown if the request is rejected by server. - * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. - * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. - * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. - * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. - * @return the {@link SyncPoller} for polling of status details for long running operations. 
- */ - @Generated - @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) - public SyncPoller beginStartGenerateImage( - ImageGenerationOptions imageGenerationOptions) { - // Generated convenience method for beginStartGenerateImageWithModel - RequestOptions requestOptions = new RequestOptions(); - return serviceClient.beginStartGenerateImageWithModel( - BinaryData.fromObject(imageGenerationOptions), requestOptions); - } } diff --git a/sdk/openai/azure-ai-openai/src/main/java/com/azure/ai/openai/implementation/OpenAIClientImpl.java b/sdk/openai/azure-ai-openai/src/main/java/com/azure/ai/openai/implementation/OpenAIClientImpl.java index 4f2ee8fe0b2a8..1035b16362eec 100644 --- a/sdk/openai/azure-ai-openai/src/main/java/com/azure/ai/openai/implementation/OpenAIClientImpl.java +++ b/sdk/openai/azure-ai-openai/src/main/java/com/azure/ai/openai/implementation/OpenAIClientImpl.java @@ -5,7 +5,6 @@ package com.azure.ai.openai.implementation; import com.azure.ai.openai.OpenAIServiceVersion; -import com.azure.ai.openai.models.ImageOperationStatus; import com.azure.core.annotation.BodyParam; import com.azure.core.annotation.ExpectedResponses; import com.azure.core.annotation.Get; @@ -23,7 +22,6 @@ import com.azure.core.exception.HttpResponseException; import com.azure.core.exception.ResourceModifiedException; import com.azure.core.exception.ResourceNotFoundException; -import com.azure.core.experimental.models.PollResult; import com.azure.core.http.HttpPipeline; import com.azure.core.http.HttpPipelineBuilder; import com.azure.core.http.policy.CookiePolicy; @@ -548,7 +546,7 @@ public Response getEmbeddingsWithResponse( * int (Required) * ] * } - * finish_reason: String(stopped/tokenLimitReached/contentFiltered) (Required) + * finish_reason: String(stop/length/content_filter) (Required) * } * ] * usage (Required): { @@ -645,7 +643,7 @@ public Mono> getCompletionsWithResponseAsync( * int (Required) * ] * } - * finish_reason: String(stopped/tokenLimitReached/contentFiltered) (Required) + * finish_reason: String(stop/length/content_filter) (Required) * } * ] * usage (Required): { @@ -726,7 +724,7 @@ public Response getCompletionsWithResponse( * content: String (Optional) * } * index: int (Required) - * finish_reason: String(stopped/tokenLimitReached/contentFiltered) (Required) + * finish_reason: String(stop/length/content_filter) (Required) * delta (Optional): (recursive schema, see delta above) * } * ] @@ -811,7 +809,7 @@ public Mono> getChatCompletionsWithResponseAsync( * content: String (Optional) * } * index: int (Required) - * finish_reason: String(stopped/tokenLimitReached/contentFiltered) (Required) + * finish_reason: String(stop/length/content_filter) (Required) * delta (Optional): (recursive schema, see delta above) * } * ] @@ -878,7 +876,7 @@ public Response getChatCompletionsWithResponse( * } * ] * } - * status: String(notRunning/running/succeeded/canceled/failed/deleted) (Required) + * status: String(notRunning/running/succeeded/canceled/failed) (Required) * error (Optional): (recursive schema, see error above) * } * } @@ -937,7 +935,7 @@ public Mono> getImageOperationStatusWithResponseAsync( * } * ] * } - * status: String(notRunning/running/succeeded/canceled/failed/deleted) (Required) + * status: String(notRunning/running/succeeded/canceled/failed) (Required) * error (Optional): (recursive schema, see error above) * } * } @@ -1196,124 +1194,4 @@ public SyncPoller beginStartGenerateImage( TypeReference.createInstance(BinaryData.class), TypeReference.createInstance(BinaryData.class)); } - 
-    /**
-     * Starts the generation of a batch of images from a text caption.
-     *
-     * <p><strong>Request Body Schema</strong>
-     *
-     * <pre>{@code
-     * {
-     *     prompt: String (Required)
-     *     n: Integer (Optional)
-     *     size: String(256x256/512x512/1024x1024) (Optional)
-     *     user: String (Optional)
-     * }
-     * }</pre>
-     *
-     * <p><strong>Response Body Schema</strong>
-     *
-     * <pre>{@code
-     * {
-     *     id: String (Required)
-     *     status: String (Required)
-     *     error (Optional): {
-     *         code: String (Required)
-     *         message: String (Required)
-     *         target: String (Optional)
-     *         details (Optional): [
-     *             (recursive schema, see above)
-     *         ]
-     *         innererror (Optional): {
-     *             code: String (Optional)
-     *             innererror (Optional): (recursive schema, see innererror above)
-     *         }
-     *     }
-     * }
-     * }</pre>
-     *
-     * @param imageGenerationOptions Represents the request data used to generate images.
-     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
-     * @throws HttpResponseException thrown if the request is rejected by server.
-     * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
-     * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
-     * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
-     * @return the {@link PollerFlux} for polling of status details for long running operations.
-     */
-    @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION)
-    public PollerFlux<PollResult, ImageOperationStatus> beginStartGenerateImageWithModelAsync(
-            BinaryData imageGenerationOptions, RequestOptions requestOptions) {
-        return PollerFlux.create(
-                Duration.ofSeconds(1),
-                () -> this.startGenerateImageWithResponseAsync(imageGenerationOptions, requestOptions),
-                new DefaultPollingStrategy<>(
-                        new PollingStrategyOptions(this.getHttpPipeline())
-                                .setEndpoint("{endpoint}/openai".replace("{endpoint}", this.getEndpoint()))
-                                .setContext(
-                                        requestOptions != null && requestOptions.getContext() != null
-                                                ? requestOptions.getContext()
-                                                : Context.NONE)),
-                TypeReference.createInstance(PollResult.class),
-                TypeReference.createInstance(ImageOperationStatus.class));
-    }
-
-    /**
-     * Starts the generation of a batch of images from a text caption.
-     *
-     * <p><strong>Request Body Schema</strong>
-     *
-     * <pre>{@code
-     * {
-     *     prompt: String (Required)
-     *     n: Integer (Optional)
-     *     size: String(256x256/512x512/1024x1024) (Optional)
-     *     user: String (Optional)
-     * }
-     * }</pre>
-     *
-     * <p><strong>Response Body Schema</strong>
-     *
-     * <pre>{@code
-     * {
-     *     id: String (Required)
-     *     status: String (Required)
-     *     error (Optional): {
-     *         code: String (Required)
-     *         message: String (Required)
-     *         target: String (Optional)
-     *         details (Optional): [
-     *             (recursive schema, see above)
-     *         ]
-     *         innererror (Optional): {
-     *             code: String (Optional)
-     *             innererror (Optional): (recursive schema, see innererror above)
-     *         }
-     *     }
-     * }
-     * }</pre>
-     *
-     * @param imageGenerationOptions Represents the request data used to generate images.
-     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
-     * @throws HttpResponseException thrown if the request is rejected by server.
-     * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
-     * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
-     * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
-     * @return the {@link SyncPoller} for polling of status details for long running operations.
-     */
-    @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION)
-    public SyncPoller<PollResult, ImageOperationStatus> beginStartGenerateImageWithModel(
-            BinaryData imageGenerationOptions, RequestOptions requestOptions) {
-        return SyncPoller.createPoller(
-                Duration.ofSeconds(1),
-                () -> this.startGenerateImageWithResponse(imageGenerationOptions, requestOptions),
-                new SyncDefaultPollingStrategy<>(
-                        new PollingStrategyOptions(this.getHttpPipeline())
-                                .setEndpoint("{endpoint}/openai".replace("{endpoint}", this.getEndpoint()))
-                                .setContext(
-                                        requestOptions != null && requestOptions.getContext() != null
-                                                ? requestOptions.getContext()
-                                                : Context.NONE)),
-                TypeReference.createInstance(PollResult.class),
-                TypeReference.createInstance(ImageOperationStatus.class));
-    }
 }
diff --git a/sdk/openai/azure-ai-openai/src/main/java/com/azure/ai/openai/models/ChatCompletions.java b/sdk/openai/azure-ai-openai/src/main/java/com/azure/ai/openai/models/ChatCompletions.java
index 431e79f27c63f..9c2491022b66f 100644
--- a/sdk/openai/azure-ai-openai/src/main/java/com/azure/ai/openai/models/ChatCompletions.java
+++ b/sdk/openai/azure-ai-openai/src/main/java/com/azure/ai/openai/models/ChatCompletions.java
@@ -23,14 +23,6 @@ public final class ChatCompletions {
     @JsonProperty(value = "id")
     private String id;
 
-    /*
-     * The first timestamp associated with generation activity for this completions response,
-     * represented as seconds since the beginning of the Unix epoch of 00:00 on 1 Jan 1970.
-     */
-    @Generated
-    @JsonProperty(value = "created")
-    private int created;
-
     /*
      * The collection of completions choices associated with this completions response.
      * Generally, `n` choices are generated per provided prompt with a default value of 1.
@@ -51,7 +43,7 @@ public final class ChatCompletions {
      * Creates an instance of ChatCompletions class.
      *
      * @param id the id value to set.
-     * @param created the created value to set.
+     * @param createdAt the createdAt value to set.
      * @param choices the choices value to set.
      * @param usage the usage value to set.
      */
@@ -59,11 +51,11 @@ public final class ChatCompletions {
     @JsonCreator
     private ChatCompletions(
             @JsonProperty(value = "id") String id,
-            @JsonProperty(value = "created") int created,
+            @JsonProperty(value = "created") int createdAt,
             @JsonProperty(value = "choices") List<ChatChoice> choices,
             @JsonProperty(value = "usage") CompletionsUsage usage) {
         this.id = id;
-        this.created = created;
+        this.createdAt = createdAt;
         this.choices = choices;
         this.usage = usage;
     }
@@ -78,17 +70,6 @@ public String getId() {
         return this.id;
     }
 
-    /**
-     * Get the created property: The first timestamp associated with generation activity for this completions response,
-     * represented as seconds since the beginning of the Unix epoch of 00:00 on 1 Jan 1970.
-     *
-     * @return the created value.
- */ - @Generated - public int getCreated() { - return this.created; - } - /** * Get the choices property: The collection of completions choices associated with this completions response. * Generally, `n` choices are generated per provided prompt with a default value of 1. Token limits and other @@ -111,4 +92,23 @@ public List getChoices() { public CompletionsUsage getUsage() { return this.usage; } + + /* + * The first timestamp associated with generation activity for this completions response, + * represented as seconds since the beginning of the Unix epoch of 00:00 on 1 Jan 1970. + */ + @Generated + @JsonProperty(value = "created") + private int createdAt; + + /** + * Get the createdAt property: The first timestamp associated with generation activity for this completions + * response, represented as seconds since the beginning of the Unix epoch of 00:00 on 1 Jan 1970. + * + * @return the createdAt value. + */ + @Generated + public int getCreatedAt() { + return this.createdAt; + } } diff --git a/sdk/openai/azure-ai-openai/src/main/java/com/azure/ai/openai/models/CompletionsFinishReason.java b/sdk/openai/azure-ai-openai/src/main/java/com/azure/ai/openai/models/CompletionsFinishReason.java index f82f7226e8510..c267098eca5b4 100644 --- a/sdk/openai/azure-ai-openai/src/main/java/com/azure/ai/openai/models/CompletionsFinishReason.java +++ b/sdk/openai/azure-ai-openai/src/main/java/com/azure/ai/openai/models/CompletionsFinishReason.java @@ -12,15 +12,15 @@ public final class CompletionsFinishReason extends ExpandableStringEnum { /** Completions ended normally and reached its end of token generation. */ - @Generated public static final CompletionsFinishReason STOPPED = fromString("stopped"); + @Generated public static final CompletionsFinishReason STOPPED = fromString("stop"); /** Completions exhausted available token limits before generation could complete. */ - @Generated public static final CompletionsFinishReason TOKEN_LIMIT_REACHED = fromString("tokenLimitReached"); + @Generated public static final CompletionsFinishReason TOKEN_LIMIT_REACHED = fromString("length"); /** * Completions generated a response that was identified as potentially sensitive per content moderation policies. */ - @Generated public static final CompletionsFinishReason CONTENT_FILTERED = fromString("contentFiltered"); + @Generated public static final CompletionsFinishReason CONTENT_FILTERED = fromString("content_filter"); /** * Creates a new instance of CompletionsFinishReason value. diff --git a/sdk/openai/azure-ai-openai/src/main/java/com/azure/ai/openai/models/CompletionsLogProbabilityModel.java b/sdk/openai/azure-ai-openai/src/main/java/com/azure/ai/openai/models/CompletionsLogProbabilityModel.java index af195175a85e2..52f10e15a54ad 100644 --- a/sdk/openai/azure-ai-openai/src/main/java/com/azure/ai/openai/models/CompletionsLogProbabilityModel.java +++ b/sdk/openai/azure-ai-openai/src/main/java/com/azure/ai/openai/models/CompletionsLogProbabilityModel.java @@ -21,46 +21,25 @@ public final class CompletionsLogProbabilityModel { @JsonProperty(value = "tokens") private List tokens; - /* - * A collection of log probability values for the tokens in this completions data. - */ - @Generated - @JsonProperty(value = "token_logprobs") - private List tokenLogprobs; - - /* - * A mapping of tokens to maximum log probability values in this completions data. - */ - @Generated - @JsonProperty(value = "top_logprobs") - private List> topLogprobs; - - /* - * The text offsets associated with tokens in this completions data. 
- */ - @Generated - @JsonProperty(value = "text_offset") - private List textOffset; - /** * Creates an instance of CompletionsLogProbabilityModel class. * * @param tokens the tokens value to set. - * @param tokenLogprobs the tokenLogprobs value to set. - * @param topLogprobs the topLogprobs value to set. - * @param textOffset the textOffset value to set. + * @param tokenLogProbabilities the tokenLogProbabilities value to set. + * @param topLogProbabilities the topLogProbabilities value to set. + * @param textOffsets the textOffsets value to set. */ @Generated @JsonCreator private CompletionsLogProbabilityModel( @JsonProperty(value = "tokens") List tokens, - @JsonProperty(value = "token_logprobs") List tokenLogprobs, - @JsonProperty(value = "top_logprobs") List> topLogprobs, - @JsonProperty(value = "text_offset") List textOffset) { + @JsonProperty(value = "token_logprobs") List tokenLogProbabilities, + @JsonProperty(value = "top_logprobs") List> topLogProbabilities, + @JsonProperty(value = "text_offset") List textOffsets) { this.tokens = tokens; - this.tokenLogprobs = tokenLogprobs; - this.topLogprobs = topLogprobs; - this.textOffset = textOffset; + this.tokenLogProbabilities = tokenLogProbabilities; + this.topLogProbabilities = topLogProbabilities; + this.textOffsets = textOffsets; } /** @@ -73,33 +52,56 @@ public List getTokens() { return this.tokens; } + /* + * A collection of log probability values for the tokens in this completions data. + */ + @Generated + @JsonProperty(value = "token_logprobs") + private List tokenLogProbabilities; + + /* + * A mapping of tokens to maximum log probability values in this completions data. + */ + @Generated + @JsonProperty(value = "top_logprobs") + private List> topLogProbabilities; + + /* + * The text offsets associated with tokens in this completions data. + */ + @Generated + @JsonProperty(value = "text_offset") + private List textOffsets; + /** - * Get the tokenLogprobs property: A collection of log probability values for the tokens in this completions data. + * Get the tokenLogProbabilities property: A collection of log probability values for the tokens in this completions + * data. * - * @return the tokenLogprobs value. + * @return the tokenLogProbabilities value. */ @Generated - public List getTokenLogprobs() { - return this.tokenLogprobs; + public List getTokenLogProbabilities() { + return this.tokenLogProbabilities; } /** - * Get the topLogprobs property: A mapping of tokens to maximum log probability values in this completions data. + * Get the topLogProbabilities property: A mapping of tokens to maximum log probability values in this completions + * data. * - * @return the topLogprobs value. + * @return the topLogProbabilities value. */ @Generated - public List> getTopLogprobs() { - return this.topLogprobs; + public List> getTopLogProbabilities() { + return this.topLogProbabilities; } /** - * Get the textOffset property: The text offsets associated with tokens in this completions data. + * Get the textOffsets property: The text offsets associated with tokens in this completions data. * - * @return the textOffset value. + * @return the textOffsets value. 
*/ @Generated - public List getTextOffset() { - return this.textOffset; + public List getTextOffsets() { + return this.textOffsets; } } diff --git a/sdk/openai/azure-ai-openai/src/main/java/com/azure/ai/openai/models/EmbeddingItem.java b/sdk/openai/azure-ai-openai/src/main/java/com/azure/ai/openai/models/EmbeddingItem.java index 4403acc88ec7f..307130d189e48 100644 --- a/sdk/openai/azure-ai-openai/src/main/java/com/azure/ai/openai/models/EmbeddingItem.java +++ b/sdk/openai/azure-ai-openai/src/main/java/com/azure/ai/openai/models/EmbeddingItem.java @@ -21,25 +21,18 @@ public final class EmbeddingItem { @JsonProperty(value = "embedding") private List embedding; - /* - * Index of the prompt to which the EmbeddingItem corresponds. - */ - @Generated - @JsonProperty(value = "index") - private int index; - /** * Creates an instance of EmbeddingItem class. * * @param embedding the embedding value to set. - * @param index the index value to set. + * @param promptIndex the promptIndex value to set. */ @Generated @JsonCreator private EmbeddingItem( - @JsonProperty(value = "embedding") List embedding, @JsonProperty(value = "index") int index) { + @JsonProperty(value = "embedding") List embedding, @JsonProperty(value = "index") int promptIndex) { this.embedding = embedding; - this.index = index; + this.promptIndex = promptIndex; } /** @@ -53,13 +46,20 @@ public List getEmbedding() { return this.embedding; } + /* + * Index of the prompt to which the EmbeddingItem corresponds. + */ + @Generated + @JsonProperty(value = "index") + private int promptIndex; + /** - * Get the index property: Index of the prompt to which the EmbeddingItem corresponds. + * Get the promptIndex property: Index of the prompt to which the EmbeddingItem corresponds. * - * @return the index value. + * @return the promptIndex value. */ @Generated - public int getIndex() { - return this.index; + public int getPromptIndex() { + return this.promptIndex; } } diff --git a/sdk/openai/azure-ai-openai/src/main/java/com/azure/ai/openai/models/State.java b/sdk/openai/azure-ai-openai/src/main/java/com/azure/ai/openai/models/State.java index 7662a08ca9a6d..94ef70482c837 100644 --- a/sdk/openai/azure-ai-openai/src/main/java/com/azure/ai/openai/models/State.java +++ b/sdk/openai/azure-ai-openai/src/main/java/com/azure/ai/openai/models/State.java @@ -26,9 +26,6 @@ public final class State extends ExpandableStringEnum { /** The operation has completed processing with a failure and cannot be further consumed. */ @Generated public static final State FAILED = fromString("failed"); - /** The entity has been deleted but may still be referenced by other entities predating the deletion. */ - @Generated public static final State DELETED = fromString("deleted"); - /** * Creates a new instance of State value. * diff --git a/sdk/openai/azure-ai-openai/tsp-location.yaml b/sdk/openai/azure-ai-openai/tsp-location.yaml index a8e2785e61e86..83666729f2c40 100644 --- a/sdk/openai/azure-ai-openai/tsp-location.yaml +++ b/sdk/openai/azure-ai-openai/tsp-location.yaml @@ -1,5 +1,5 @@ -directory: specification/cognitiveservices/OpenAI.Inference -additionalDirectories: - - specification/cognitiveservices/OpenAI.Authoring -commit: fe056966cf070be84e92dd2dc1b566bae35002cf +commit: 5e0f4a156eef6f01d210cd79641bff6938bcd2d2 repo: Azure/azure-rest-api-specs +directory: specification/cognitiveservices/OpenAI.Inference +additionalDirectories: [] +