feat(api): add incomplete state (#846)
stainless-bot committed May 14, 2024
1 parent 3f4b743 commit 8701aa6
Showing 7 changed files with 61 additions and 39 deletions.
2 changes: 1 addition & 1 deletion .stats.yml
@@ -1,2 +1,2 @@
configured_endpoints: 64
- openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-47007cc1aa5bc7b74107a99b377925978a0bd376ed67bdae724e80d5d0b63d57.yml
+ openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-363dd904e5d6e65b3a323fc88e6b502fb23a6aa319be219273e3ee47c7530993.yml
9 changes: 6 additions & 3 deletions src/resources/batches.ts
@@ -215,9 +215,11 @@ export interface BatchCreateParams {

/**
* The endpoint to be used for all requests in the batch. Currently
- * `/v1/chat/completions` and `/v1/embeddings` are supported.
+ * `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported.
+ * Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000
+ * embedding inputs across all requests in the batch.
*/
- endpoint: '/v1/chat/completions' | '/v1/embeddings';
+ endpoint: '/v1/chat/completions' | '/v1/embeddings' | '/v1/completions';

/**
* The ID of an uploaded file that contains requests for the new batch.
@@ -227,7 +229,8 @@ export interface BatchCreateParams {
*
* Your input file must be formatted as a
* [JSONL file](https://platform.openai.com/docs/api-reference/batch/requestInput),
- * and must be uploaded with the purpose `batch`.
+ * and must be uploaded with the purpose `batch`. The file can contain up to 50,000
+ * requests, and can be up to 100 MB in size.
*/
input_file_id: string;

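
As an aside, a minimal sketch of how the widened `endpoint` union could be exercised from this SDK; the input file ID is a placeholder and the client is assumed to read `OPENAI_API_KEY` from the environment.

```ts
import OpenAI from 'openai';

const client = new OpenAI(); // reads OPENAI_API_KEY from the environment

async function main() {
  // `/v1/completions` is newly accepted alongside `/v1/chat/completions` and `/v1/embeddings`.
  const batch = await client.batches.create({
    endpoint: '/v1/completions',
    input_file_id: 'file-abc123', // placeholder ID of a previously uploaded `batch` file
    completion_window: '24h',
  });
  console.log(batch.id, batch.status);
}

main();
```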
15 changes: 9 additions & 6 deletions src/resources/beta/assistants.ts
@@ -144,8 +144,9 @@ export interface Assistant {

/**
* Specifies the format that the model must output. Compatible with
- * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
- * all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
+ * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
+ * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
*
* Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
* message the model generates is valid JSON.
@@ -1047,8 +1048,9 @@ export interface AssistantCreateParams {

/**
* Specifies the format that the model must output. Compatible with
- * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
- * all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
+ * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
+ * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
*
* Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
* message the model generates is valid JSON.
@@ -1193,8 +1195,9 @@ export interface AssistantUpdateParams {

/**
* Specifies the format that the model must output. Compatible with
- * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
- * all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
+ * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
+ * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
*
* Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
* message the model generates is valid JSON.
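
For illustration, a rough sketch of opting an assistant into JSON mode via the `response_format` option documented above; the model and instructions are placeholder choices.

```ts
import OpenAI from 'openai';

const client = new OpenAI();

async function main() {
  // GPT-4o is now listed as compatible with `response_format`; JSON mode
  // guarantees the generated message is valid JSON.
  const assistant = await client.beta.assistants.create({
    model: 'gpt-4o',
    instructions: 'Reply with a JSON object describing the user request.',
    response_format: { type: 'json_object' },
  });
  console.log(assistant.id);
}

main();
```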
35 changes: 21 additions & 14 deletions src/resources/beta/threads/runs/runs.ts
@@ -176,6 +176,7 @@ export class Runs extends APIResource {
break;
//We return the run in any terminal state.
case 'requires_action':
+ case 'incomplete':
case 'cancelled':
case 'completed':
case 'failed':
@@ -409,8 +410,9 @@ export interface Run {

/**
* Specifies the format that the model must output. Compatible with
- * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
- * all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
+ * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
+ * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
*
* Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
* message the model generates is valid JSON.
@@ -432,8 +434,8 @@ export interface Run {

/**
* The status of the run, which can be either `queued`, `in_progress`,
- * `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, or
- * `expired`.
+ * `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`,
+ * `incomplete`, or `expired`.
*/
status: RunStatus;

@@ -584,8 +586,8 @@ export namespace Run {

/**
* The status of the run, which can be either `queued`, `in_progress`,
- * `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, or
- * `expired`.
+ * `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`,
+ * `incomplete`, or `expired`.
*/
export type RunStatus =
| 'queued'
@@ -595,6 +597,7 @@ export type RunStatus =
| 'cancelled'
| 'failed'
| 'completed'
+ | 'incomplete'
| 'expired';

export type RunCreateParams = RunCreateParamsNonStreaming | RunCreateParamsStreaming;
@@ -684,8 +687,9 @@ export interface RunCreateParamsBase {

/**
* Specifies the format that the model must output. Compatible with
- * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
- * all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
+ * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
+ * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
*
* Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
* message the model generates is valid JSON.
@@ -945,8 +949,9 @@ export interface RunCreateAndPollParams {

/**
* Specifies the format that the model must output. Compatible with
- * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
- * all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
+ * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
+ * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
*
* Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
* message the model generates is valid JSON.
@@ -1152,8 +1157,9 @@ export interface RunCreateAndStreamParams {

/**
* Specifies the format that the model must output. Compatible with
- * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
- * all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
+ * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
+ * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
*
* Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
* message the model generates is valid JSON.
@@ -1359,8 +1365,9 @@ export interface RunStreamParams {

/**
* Specifies the format that the model must output. Compatible with
- * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
- * all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
+ * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
+ * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
*
* Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
* message the model generates is valid JSON.
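
A sketch of how the new `incomplete` terminal status might surface when polling a run with this SDK; the thread and assistant IDs are placeholders.

```ts
import OpenAI from 'openai';

const client = new OpenAI();

async function main() {
  // createAndPoll resolves once the run reaches a terminal state, which now
  // includes `incomplete` (for example, when a token limit cuts the run short).
  const run = await client.beta.threads.runs.createAndPoll('thread_abc123', {
    assistant_id: 'asst_abc123',
  });

  if (run.status === 'incomplete') {
    console.log('Run ended early without completing');
  } else if (run.status === 'completed') {
    console.log('Run completed normally');
  }
}

main();
```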
20 changes: 12 additions & 8 deletions src/resources/beta/threads/threads.ts
@@ -130,8 +130,9 @@ export interface AssistantResponseFormat {

/**
* Specifies the format that the model must output. Compatible with
- * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
- * all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
+ * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
+ * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
*
* Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
* message the model generates is valid JSON.
@@ -516,8 +517,9 @@ export interface ThreadCreateAndRunParamsBase {

/**
* Specifies the format that the model must output. Compatible with
- * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
- * all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
+ * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
+ * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
*
* Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
* message the model generates is valid JSON.
@@ -875,8 +877,9 @@ export interface ThreadCreateAndRunPollParams {

/**
* Specifies the format that the model must output. Compatible with
- * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
- * all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
+ * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
+ * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
*
* Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
* message the model generates is valid JSON.
@@ -1206,8 +1209,9 @@ export interface ThreadCreateAndRunStreamParams {

/**
* Specifies the format that the model must output. Compatible with
- * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
- * all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
+ * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
+ * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
*
* Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
* message the model generates is valid JSON.
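
A similar sketch for the one-shot thread-and-run path; the assistant ID and message content are placeholders, assuming `response_format` is passed to `createAndRun` as documented above.

```ts
import OpenAI from 'openai';

const client = new OpenAI();

async function main() {
  // Create a thread and start a run in one call, requesting JSON mode.
  const run = await client.beta.threads.createAndRun({
    assistant_id: 'asst_abc123', // placeholder assistant ID
    thread: {
      messages: [{ role: 'user', content: 'Summarise this request as JSON.' }],
    },
    response_format: { type: 'json_object' },
  });
  console.log(run.id, run.status);
}

main();
```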
1 change: 1 addition & 0 deletions src/resources/beta/vector-stores/file-batches.ts
@@ -138,6 +138,7 @@ export class FileBatches extends APIResource {
await sleep(sleepInterval);
break;
case 'failed':
+ case 'cancelled':
case 'completed':
return batch;
}
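
A sketch of polling a vector store file batch, where a cancelled batch is now returned as a terminal result rather than being polled further; the vector store and file IDs are placeholders.

```ts
import OpenAI from 'openai';

const client = new OpenAI();

async function main() {
  // createAndPoll resolves once the batch reaches a terminal state
  // (completed, failed, or — with this change — cancelled).
  const batch = await client.beta.vectorStores.fileBatches.createAndPoll('vs_abc123', {
    file_ids: ['file-abc123'], // placeholder file ID
  });
  console.log(batch.status, batch.file_counts);
}

main();
```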
18 changes: 11 additions & 7 deletions src/resources/files.ts
@@ -12,14 +12,18 @@ import { Page } from '../pagination';

export class Files extends APIResource {
/**
- * Upload a file that can be used across various endpoints. The size of all the
- * files uploaded by one organization can be up to 100 GB.
+ * Upload a file that can be used across various endpoints. Individual files can be
+ * up to 512 MB, and the size of all files uploaded by one organization can be up
+ * to 100 GB.
*
- * The size of individual files can be a maximum of 512 MB or 2 million tokens for
- * Assistants. See the
- * [Assistants Tools guide](https://platform.openai.com/docs/assistants/tools) to
- * learn more about the types of files supported. The Fine-tuning API only supports
- * `.jsonl` files.
+ * The Assistants API supports files up to 2 million tokens and of specific file
+ * types. See the
+ * [Assistants Tools guide](https://platform.openai.com/docs/assistants/tools) for
+ * details.
+ *
+ * The Fine-tuning API only supports `.jsonl` files.
+ *
+ * The Batch API only supports `.jsonl` files up to 100 MB in size.
*
* Please [contact us](https://help.openai.com/) if you need to increase these
* storage limits.
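
A sketch of uploading a `.jsonl` file for the Batch API under the limits documented above; the local path is a placeholder.

```ts
import fs from 'fs';
import OpenAI from 'openai';

const client = new OpenAI();

async function main() {
  // Per the docs above, a Batch API file must be `.jsonl`, at most 100 MB,
  // and is limited to 50,000 requests per batch.
  const file = await client.files.create({
    file: fs.createReadStream('requests.jsonl'), // placeholder local path
    purpose: 'batch',
  });
  console.log(file.id, file.bytes);
}

main();
```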
