From 12374f256d93377a8e20084f724305bb239c9e87 Mon Sep 17 00:00:00 2001
From: fern-api <115122769+fern-api[bot]@users.noreply.github.com>
Date: Thu, 12 Sep 2024 14:42:26 +0000
Subject: [PATCH] Release 0.0.1-beta20
---
pyproject.toml | 2 +-
reference.md | 248 +--
src/gooey/__init__.py | 122 +-
src/gooey/client.py | 1365 +++++++++--------
src/gooey/copilot/__init__.py | 44 +-
src/gooey/copilot/client.py | 399 ++---
src/gooey/copilot/types/__init__.py | 46 +-
.../copilot_completion_request_asr_model.py | 23 +
...pilot_completion_request_citation_style.py | 25 +
...ilot_completion_request_embedding_model.py | 18 +
...pilot_completion_request_functions_item.py | 24 +
...mpletion_request_functions_item_trigger.py | 5 +
...opilot_completion_request_lipsync_model.py | 5 +
...lot_completion_request_openai_tts_model.py | 5 +
...ot_completion_request_openai_voice_name.py | 7 +
...completion_request_response_format_type.py | 5 +
...t_completion_request_sadtalker_settings.py | 42 +
...n_request_sadtalker_settings_preprocess.py | 7 +
...ot_completion_request_translation_model.py | 5 +
...copilot_completion_request_tts_provider.py | 7 +
src/gooey/core/client_wrapper.py | 2 +-
src/gooey/types/__init__.py | 102 ++
src/gooey/types/asr_page_request.py | 43 +
src/gooey/types/bulk_runner_page_request.py | 55 +
.../types/compare_upscaler_page_request.py | 37 +
src/gooey/types/doc_extract_page_request.py | 43 +
src/gooey/types/doc_summary_page_request.py | 43 +
...oc_summary_request_response_format_type.py | 5 +
.../doc_summary_request_selected_asr_model.py | 23 +
.../types/face_inpainting_page_request.py | 42 +
.../types/image_segmentation_page_request.py | 36 +
src/gooey/types/img2img_page_request.py | 43 +
src/gooey/types/lipsync_page_request.py | 37 +
.../types/lipsync_request_selected_model.py | 5 +
src/gooey/types/lipsync_tts_page_request.py | 62 +
.../lipsync_tts_request_openai_tts_model.py | 5 +
.../lipsync_tts_request_openai_voice_name.py | 7 +
.../lipsync_tts_request_selected_model.py | 5 +
.../types/lipsync_tts_request_tts_provider.py | 7 +
.../types/object_inpainting_page_request.py | 43 +
.../types/portrait_request_selected_model.py | 5 +
.../product_image_request_selected_model.py | 5 +
.../types/qr_code_generator_page_request.py | 66 +
...est_image_prompt_controlnet_models_item.py | 20 +
src/gooey/types/qr_code_request_scheduler.py | 23 +
..._request_selected_controlnet_model_item.py | 20 +
.../types/qr_code_request_selected_model.py | 22 +
src/gooey/types/recipe_function.py | 8 +-
...image_request_selected_controlnet_model.py | 19 +
..._request_selected_controlnet_model_item.py | 20 +
.../remix_image_request_selected_model.py | 21 +
...emove_background_request_selected_model.py | 5 +
src/gooey/types/sad_talker_settings.py | 12 +-
...peech_recognition_request_output_format.py | 5 +
...eech_recognition_request_selected_model.py | 23 +
...h_recognition_request_translation_model.py | 5 +
...esize_data_request_response_format_type.py | 5 +
...thesize_data_request_selected_asr_model.py | 23 +
.../types/translate_request_selected_model.py | 5 +
src/gooey/types/translation_page_request.py | 33 +
.../upscale_request_selected_models_item.py | 7 +
src/gooey/types/video_bots_page_request.py | 131 ++
.../video_bots_page_request_asr_model.py | 0
.../video_bots_page_request_citation_style.py | 0
...video_bots_page_request_embedding_model.py | 0
.../video_bots_page_request_functions_item.py | 24 +
...ots_page_request_functions_item_trigger.py | 5 +
.../video_bots_page_request_lipsync_model.py | 0
...ideo_bots_page_request_openai_tts_model.py | 0
...deo_bots_page_request_openai_voice_name.py | 0
..._bots_page_request_response_format_type.py | 0
...eo_bots_page_request_sadtalker_settings.py | 40 +
...e_request_sadtalker_settings_preprocess.py | 7 +
...deo_bots_page_request_translation_model.py | 0
.../video_bots_page_request_tts_provider.py | 0
75 files changed, 2565 insertions(+), 1043 deletions(-)
create mode 100644 src/gooey/copilot/types/copilot_completion_request_asr_model.py
create mode 100644 src/gooey/copilot/types/copilot_completion_request_citation_style.py
create mode 100644 src/gooey/copilot/types/copilot_completion_request_embedding_model.py
create mode 100644 src/gooey/copilot/types/copilot_completion_request_functions_item.py
create mode 100644 src/gooey/copilot/types/copilot_completion_request_functions_item_trigger.py
create mode 100644 src/gooey/copilot/types/copilot_completion_request_lipsync_model.py
create mode 100644 src/gooey/copilot/types/copilot_completion_request_openai_tts_model.py
create mode 100644 src/gooey/copilot/types/copilot_completion_request_openai_voice_name.py
create mode 100644 src/gooey/copilot/types/copilot_completion_request_response_format_type.py
create mode 100644 src/gooey/copilot/types/copilot_completion_request_sadtalker_settings.py
create mode 100644 src/gooey/copilot/types/copilot_completion_request_sadtalker_settings_preprocess.py
create mode 100644 src/gooey/copilot/types/copilot_completion_request_translation_model.py
create mode 100644 src/gooey/copilot/types/copilot_completion_request_tts_provider.py
create mode 100644 src/gooey/types/asr_page_request.py
create mode 100644 src/gooey/types/bulk_runner_page_request.py
create mode 100644 src/gooey/types/compare_upscaler_page_request.py
create mode 100644 src/gooey/types/doc_extract_page_request.py
create mode 100644 src/gooey/types/doc_summary_page_request.py
create mode 100644 src/gooey/types/doc_summary_request_response_format_type.py
create mode 100644 src/gooey/types/doc_summary_request_selected_asr_model.py
create mode 100644 src/gooey/types/face_inpainting_page_request.py
create mode 100644 src/gooey/types/image_segmentation_page_request.py
create mode 100644 src/gooey/types/img2img_page_request.py
create mode 100644 src/gooey/types/lipsync_page_request.py
create mode 100644 src/gooey/types/lipsync_request_selected_model.py
create mode 100644 src/gooey/types/lipsync_tts_page_request.py
create mode 100644 src/gooey/types/lipsync_tts_request_openai_tts_model.py
create mode 100644 src/gooey/types/lipsync_tts_request_openai_voice_name.py
create mode 100644 src/gooey/types/lipsync_tts_request_selected_model.py
create mode 100644 src/gooey/types/lipsync_tts_request_tts_provider.py
create mode 100644 src/gooey/types/object_inpainting_page_request.py
create mode 100644 src/gooey/types/portrait_request_selected_model.py
create mode 100644 src/gooey/types/product_image_request_selected_model.py
create mode 100644 src/gooey/types/qr_code_generator_page_request.py
create mode 100644 src/gooey/types/qr_code_request_image_prompt_controlnet_models_item.py
create mode 100644 src/gooey/types/qr_code_request_scheduler.py
create mode 100644 src/gooey/types/qr_code_request_selected_controlnet_model_item.py
create mode 100644 src/gooey/types/qr_code_request_selected_model.py
create mode 100644 src/gooey/types/remix_image_request_selected_controlnet_model.py
create mode 100644 src/gooey/types/remix_image_request_selected_controlnet_model_item.py
create mode 100644 src/gooey/types/remix_image_request_selected_model.py
create mode 100644 src/gooey/types/remove_background_request_selected_model.py
create mode 100644 src/gooey/types/speech_recognition_request_output_format.py
create mode 100644 src/gooey/types/speech_recognition_request_selected_model.py
create mode 100644 src/gooey/types/speech_recognition_request_translation_model.py
create mode 100644 src/gooey/types/synthesize_data_request_response_format_type.py
create mode 100644 src/gooey/types/synthesize_data_request_selected_asr_model.py
create mode 100644 src/gooey/types/translate_request_selected_model.py
create mode 100644 src/gooey/types/translation_page_request.py
create mode 100644 src/gooey/types/upscale_request_selected_models_item.py
create mode 100644 src/gooey/types/video_bots_page_request.py
rename src/gooey/{copilot => }/types/video_bots_page_request_asr_model.py (100%)
rename src/gooey/{copilot => }/types/video_bots_page_request_citation_style.py (100%)
rename src/gooey/{copilot => }/types/video_bots_page_request_embedding_model.py (100%)
create mode 100644 src/gooey/types/video_bots_page_request_functions_item.py
create mode 100644 src/gooey/types/video_bots_page_request_functions_item_trigger.py
rename src/gooey/{copilot => }/types/video_bots_page_request_lipsync_model.py (100%)
rename src/gooey/{copilot => }/types/video_bots_page_request_openai_tts_model.py (100%)
rename src/gooey/{copilot => }/types/video_bots_page_request_openai_voice_name.py (100%)
rename src/gooey/{copilot => }/types/video_bots_page_request_response_format_type.py (100%)
create mode 100644 src/gooey/types/video_bots_page_request_sadtalker_settings.py
create mode 100644 src/gooey/types/video_bots_page_request_sadtalker_settings_preprocess.py
rename src/gooey/{copilot => }/types/video_bots_page_request_translation_model.py (100%)
rename src/gooey/{copilot => }/types/video_bots_page_request_tts_provider.py (100%)
diff --git a/pyproject.toml b/pyproject.toml
index 7863965..94bf3d1 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "gooeyai"
-version = "0.0.1-beta19"
+version = "0.0.1-beta20"
description = ""
readme = "README.md"
authors = []
diff --git a/reference.md b/reference.md
index 7222382..a067ff0 100644
--- a/reference.md
+++ b/reference.md
@@ -232,7 +232,7 @@ client.qr_code(
-
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+**functions:** `typing.Optional[typing.List[RecipeFunction]]`
@@ -256,7 +256,9 @@ client.qr_code(
-
-**qr_code_input_image:** `typing.Optional[str]`
+**qr_code_input_image:** `from __future__ import annotations
+
+typing.Optional[core.File]` — See core.File for more documentation
@@ -272,7 +274,9 @@ client.qr_code(
-
-**qr_code_file:** `typing.Optional[str]`
+**qr_code_file:** `from __future__ import annotations
+
+typing.Optional[core.File]` — See core.File for more documentation
@@ -304,9 +308,7 @@ client.qr_code(
-
-**image_prompt_controlnet_models:** `typing.Optional[
- typing.Sequence[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem]
-]`
+**image_prompt_controlnet_models:** `typing.Optional[typing.List[QrCodeRequestImagePromptControlnetModelsItem]]`
@@ -346,7 +348,7 @@ client.qr_code(
-
-**selected_model:** `typing.Optional[QrCodeGeneratorPageRequestSelectedModel]`
+**selected_model:** `typing.Optional[QrCodeRequestSelectedModel]`
@@ -354,9 +356,7 @@ client.qr_code(
-
-**selected_controlnet_model:** `typing.Optional[
- typing.Sequence[QrCodeGeneratorPageRequestSelectedControlnetModelItem]
-]`
+**selected_controlnet_model:** `typing.Optional[typing.List[QrCodeRequestSelectedControlnetModelItem]]`
@@ -388,7 +388,7 @@ client.qr_code(
-
-**controlnet_conditioning_scale:** `typing.Optional[typing.Sequence[float]]`
+**controlnet_conditioning_scale:** `typing.Optional[typing.List[float]]`
@@ -412,7 +412,7 @@ client.qr_code(
-
-**scheduler:** `typing.Optional[QrCodeGeneratorPageRequestScheduler]`
+**scheduler:** `typing.Optional[QrCodeRequestScheduler]`
@@ -1378,7 +1378,6 @@ client = Gooey(
api_key="YOUR_API_KEY",
)
client.bulk_run(
- documents=["documents"],
run_urls=["run_urls"],
input_columns={"key": "value"},
output_columns={"key": "value"},
@@ -1398,13 +1397,9 @@ client.bulk_run(
-
-**documents:** `typing.Sequence[str]`
-
+**documents:** `from __future__ import annotations
-Upload or link to a CSV or google sheet that contains your sample input data.
-For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs.
-Remember to includes header names in your CSV too.
-
+typing.List[core.File]` — See core.File for more documentation
@@ -1412,7 +1407,7 @@ Remember to includes header names in your CSV too.
-
-**run_urls:** `typing.Sequence[str]`
+**run_urls:** `typing.List[str]`
Provide one or more Gooey.AI workflow runs.
@@ -1457,7 +1452,7 @@ For each output field in the Gooey.AI workflow, specify the column name that you
-
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+**functions:** `typing.Optional[typing.List[RecipeFunction]]`
@@ -1473,7 +1468,7 @@ For each output field in the Gooey.AI workflow, specify the column name that you
-
-**eval_urls:** `typing.Optional[typing.Sequence[str]]`
+**eval_urls:** `typing.Optional[typing.List[str]]`
_(optional)_ Add one or more Gooey.AI Evaluator Workflows to evaluate the results of your runs.
@@ -1698,9 +1693,7 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.synthesize_data(
- documents=["documents"],
-)
+client.synthesize_data()
```
@@ -1716,7 +1709,9 @@ client.synthesize_data(
-
-**documents:** `typing.Sequence[str]`
+**documents:** `from __future__ import annotations
+
+typing.List[core.File]` — See core.File for more documentation
@@ -1732,7 +1727,7 @@ client.synthesize_data(
-
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+**functions:** `typing.Optional[typing.List[RecipeFunction]]`
@@ -1748,7 +1743,9 @@ client.synthesize_data(
-
-**sheet_url:** `typing.Optional[str]`
+**sheet_url:** `from __future__ import annotations
+
+typing.Optional[core.File]` — See core.File for more documentation
@@ -1756,7 +1753,7 @@ client.synthesize_data(
-
-**selected_asr_model:** `typing.Optional[DocExtractPageRequestSelectedAsrModel]`
+**selected_asr_model:** `typing.Optional[SynthesizeDataRequestSelectedAsrModel]`
@@ -1772,10 +1769,9 @@ client.synthesize_data(
-
-**glossary_document:** `typing.Optional[str]`
+**glossary_document:** `from __future__ import annotations
-Provide a glossary to customize translation and improve accuracy of domain-specific terms.
-If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
+typing.Optional[core.File]` — See core.File for more documentation
@@ -1839,7 +1835,7 @@ If not specified or invalid, no glossary will be used. Read about the expected f
-
-**response_format_type:** `typing.Optional[DocExtractPageRequestResponseFormatType]`
+**response_format_type:** `typing.Optional[SynthesizeDataRequestResponseFormatType]`
@@ -2432,9 +2428,7 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.doc_summary(
- documents=["documents"],
-)
+client.doc_summary()
```
@@ -2450,7 +2444,9 @@ client.doc_summary(
-
-**documents:** `typing.Sequence[str]`
+**documents:** `from __future__ import annotations
+
+typing.List[core.File]` — See core.File for more documentation
@@ -2466,7 +2462,7 @@ client.doc_summary(
-
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+**functions:** `typing.Optional[typing.List[RecipeFunction]]`
@@ -2514,7 +2510,7 @@ client.doc_summary(
-
-**selected_asr_model:** `typing.Optional[DocSummaryPageRequestSelectedAsrModel]`
+**selected_asr_model:** `typing.Optional[DocSummaryRequestSelectedAsrModel]`
@@ -2570,7 +2566,7 @@ client.doc_summary(
-
-**response_format_type:** `typing.Optional[DocSummaryPageRequestResponseFormatType]`
+**response_format_type:** `typing.Optional[DocSummaryRequestResponseFormatType]`
@@ -2718,7 +2714,7 @@ client.lipsync()
-
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+**functions:** `typing.Optional[typing.List[RecipeFunction]]`
@@ -2734,7 +2730,9 @@ client.lipsync()
-
-**input_face:** `typing.Optional[str]`
+**input_face:** `from __future__ import annotations
+
+typing.Optional[core.File]` — See core.File for more documentation
@@ -2782,7 +2780,7 @@ client.lipsync()
-
-**selected_model:** `typing.Optional[LipsyncPageRequestSelectedModel]`
+**selected_model:** `typing.Optional[LipsyncRequestSelectedModel]`
@@ -2790,7 +2788,9 @@ client.lipsync()
-
-**input_audio:** `typing.Optional[str]`
+**input_audio:** `from __future__ import annotations
+
+typing.Optional[core.File]` — See core.File for more documentation
@@ -2870,7 +2870,7 @@ client.lipsync_tts(
-
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+**functions:** `typing.Optional[typing.List[RecipeFunction]]`
@@ -2886,7 +2886,7 @@ client.lipsync_tts(
-
-**tts_provider:** `typing.Optional[LipsyncTtsPageRequestTtsProvider]`
+**tts_provider:** `typing.Optional[LipsyncTtsRequestTtsProvider]`
@@ -3014,7 +3014,7 @@ client.lipsync_tts(
-
-**openai_voice_name:** `typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName]`
+**openai_voice_name:** `typing.Optional[LipsyncTtsRequestOpenaiVoiceName]`
@@ -3022,7 +3022,7 @@ client.lipsync_tts(
-
-**openai_tts_model:** `typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel]`
+**openai_tts_model:** `typing.Optional[LipsyncTtsRequestOpenaiTtsModel]`
@@ -3030,7 +3030,9 @@ client.lipsync_tts(
-
-**input_face:** `typing.Optional[str]`
+**input_face:** `from __future__ import annotations
+
+typing.Optional[core.File]` — See core.File for more documentation
@@ -3078,7 +3080,7 @@ client.lipsync_tts(
-
-**selected_model:** `typing.Optional[LipsyncTtsPageRequestSelectedModel]`
+**selected_model:** `typing.Optional[LipsyncTtsRequestSelectedModel]`
@@ -3356,9 +3358,7 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.speech_recognition(
- documents=["documents"],
-)
+client.speech_recognition()
```
@@ -3374,7 +3374,9 @@ client.speech_recognition(
-
-**documents:** `typing.Sequence[str]`
+**documents:** `from __future__ import annotations
+
+typing.List[core.File]` — See core.File for more documentation
@@ -3390,7 +3392,7 @@ client.speech_recognition(
-
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+**functions:** `typing.Optional[typing.List[RecipeFunction]]`
@@ -3406,7 +3408,7 @@ client.speech_recognition(
-
-**selected_model:** `typing.Optional[AsrPageRequestSelectedModel]`
+**selected_model:** `typing.Optional[SpeechRecognitionRequestSelectedModel]`
@@ -3422,7 +3424,7 @@ client.speech_recognition(
-
-**translation_model:** `typing.Optional[AsrPageRequestTranslationModel]`
+**translation_model:** `typing.Optional[SpeechRecognitionRequestTranslationModel]`
@@ -3430,7 +3432,7 @@ client.speech_recognition(
-
-**output_format:** `typing.Optional[AsrPageRequestOutputFormat]`
+**output_format:** `typing.Optional[SpeechRecognitionRequestOutputFormat]`
@@ -3462,10 +3464,9 @@ client.speech_recognition(
-
-**glossary_document:** `typing.Optional[str]`
+**glossary_document:** `from __future__ import annotations
-Provide a glossary to customize translation and improve accuracy of domain-specific terms.
-If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
+typing.Optional[core.File]` — See core.File for more documentation
@@ -3687,7 +3688,7 @@ client.translate()
-
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+**functions:** `typing.Optional[typing.List[RecipeFunction]]`
@@ -3703,7 +3704,7 @@ client.translate()
-
-**texts:** `typing.Optional[typing.Sequence[str]]`
+**texts:** `typing.Optional[typing.List[str]]`
@@ -3711,7 +3712,7 @@ client.translate()
-
-**selected_model:** `typing.Optional[TranslationPageRequestSelectedModel]`
+**selected_model:** `typing.Optional[TranslateRequestSelectedModel]`
@@ -3735,10 +3736,9 @@ client.translate()
-
-**glossary_document:** `typing.Optional[str]`
+**glossary_document:** `from __future__ import annotations
-Provide a glossary to customize translation and improve accuracy of domain-specific terms.
-If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
+typing.Optional[core.File]` — See core.File for more documentation
@@ -3784,9 +3784,7 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.remix_image(
- input_image="input_image",
-)
+client.remix_image()
```
@@ -3802,7 +3800,9 @@ client.remix_image(
-
-**input_image:** `str`
+**input_image:** `from __future__ import annotations
+
+core.File` — See core.File for more documentation
@@ -3818,7 +3818,7 @@ client.remix_image(
-
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+**functions:** `typing.Optional[typing.List[RecipeFunction]]`
@@ -3842,7 +3842,7 @@ client.remix_image(
-
-**selected_model:** `typing.Optional[Img2ImgPageRequestSelectedModel]`
+**selected_model:** `typing.Optional[RemixImageRequestSelectedModel]`
@@ -3850,7 +3850,7 @@ client.remix_image(
-
-**selected_controlnet_model:** `typing.Optional[Img2ImgPageRequestSelectedControlnetModel]`
+**selected_controlnet_model:** `typing.Optional[RemixImageRequestSelectedControlnetModel]`
@@ -3914,7 +3914,7 @@ client.remix_image(
-
-**controlnet_conditioning_scale:** `typing.Optional[typing.Sequence[float]]`
+**controlnet_conditioning_scale:** `typing.Optional[typing.List[float]]`
@@ -4177,7 +4177,6 @@ client = Gooey(
api_key="YOUR_API_KEY",
)
client.product_image(
- input_image="input_image",
text_prompt="text_prompt",
)
@@ -4195,7 +4194,9 @@ client.product_image(
-
-**input_image:** `str`
+**input_image:** `from __future__ import annotations
+
+core.File` — See core.File for more documentation
@@ -4219,7 +4220,7 @@ client.product_image(
-
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+**functions:** `typing.Optional[typing.List[RecipeFunction]]`
@@ -4267,7 +4268,7 @@ client.product_image(
-
-**selected_model:** `typing.Optional[ObjectInpaintingPageRequestSelectedModel]`
+**selected_model:** `typing.Optional[ProductImageRequestSelectedModel]`
@@ -4378,8 +4379,7 @@ client = Gooey(
api_key="YOUR_API_KEY",
)
client.portrait(
- input_image="input_image",
- text_prompt="tony stark from the iron man",
+ text_prompt="text_prompt",
)
```
@@ -4396,7 +4396,9 @@ client.portrait(
-
-**input_image:** `str`
+**input_image:** `from __future__ import annotations
+
+core.File` — See core.File for more documentation
@@ -4420,7 +4422,7 @@ client.portrait(
-
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+**functions:** `typing.Optional[typing.List[RecipeFunction]]`
@@ -4460,7 +4462,7 @@ client.portrait(
-
-**selected_model:** `typing.Optional[FaceInpaintingPageRequestSelectedModel]`
+**selected_model:** `typing.Optional[PortraitRequestSelectedModel]`
@@ -5020,9 +5022,7 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.remove_background(
- input_image="input_image",
-)
+client.remove_background()
```
@@ -5038,7 +5038,9 @@ client.remove_background(
-
-**input_image:** `str`
+**input_image:** `from __future__ import annotations
+
+core.File` — See core.File for more documentation
@@ -5054,7 +5056,7 @@ client.remove_background(
-
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+**functions:** `typing.Optional[typing.List[RecipeFunction]]`
@@ -5070,7 +5072,7 @@ client.remove_background(
-
-**selected_model:** `typing.Optional[ImageSegmentationPageRequestSelectedModel]`
+**selected_model:** `typing.Optional[RemoveBackgroundRequestSelectedModel]`
@@ -5198,7 +5200,7 @@ client.upscale(
-
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+**functions:** `typing.Optional[typing.List[RecipeFunction]]`
@@ -5214,7 +5216,9 @@ client.upscale(
-
-**input_image:** `typing.Optional[str]` — Input Image
+**input_image:** `from __future__ import annotations
+
+typing.Optional[core.File]` — See core.File for more documentation
@@ -5222,7 +5226,9 @@ client.upscale(
-
-**input_video:** `typing.Optional[str]` — Input Video
+**input_video:** `from __future__ import annotations
+
+typing.Optional[core.File]` — See core.File for more documentation
@@ -5230,7 +5236,7 @@ client.upscale(
-
-**selected_models:** `typing.Optional[typing.Sequence[CompareUpscalerPageRequestSelectedModelsItem]]`
+**selected_models:** `typing.Optional[typing.List[UpscaleRequestSelectedModelsItem]]`
@@ -5720,7 +5726,7 @@ client.copilot.completion()
-
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+**functions:** `typing.Optional[typing.List[CopilotCompletionRequestFunctionsItem]]`
@@ -5752,7 +5758,9 @@ client.copilot.completion()
-
-**input_images:** `typing.Optional[typing.Sequence[str]]`
+**input_images:** `from __future__ import annotations
+
+typing.Optional[typing.List[core.File]]` — See core.File for more documentation
@@ -5760,7 +5768,9 @@ client.copilot.completion()
-
-**input_documents:** `typing.Optional[typing.Sequence[str]]`
+**input_documents:** `from __future__ import annotations
+
+typing.Optional[typing.List[core.File]]` — See core.File for more documentation
@@ -5776,7 +5786,7 @@ client.copilot.completion()
-
-**messages:** `typing.Optional[typing.Sequence[ConversationEntry]]`
+**messages:** `typing.Optional[typing.List[ConversationEntry]]`
@@ -5832,7 +5842,9 @@ client.copilot.completion()
-
-**documents:** `typing.Optional[typing.Sequence[str]]`
+**documents:** `from __future__ import annotations
+
+typing.Optional[typing.List[core.File]]` — See core.File for more documentation
@@ -5864,7 +5876,7 @@ client.copilot.completion()
-
-**embedding_model:** `typing.Optional[VideoBotsPageRequestEmbeddingModel]`
+**embedding_model:** `typing.Optional[CopilotCompletionRequestEmbeddingModel]`
@@ -5885,7 +5897,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-
-**citation_style:** `typing.Optional[VideoBotsPageRequestCitationStyle]`
+**citation_style:** `typing.Optional[CopilotCompletionRequestCitationStyle]`
@@ -5901,7 +5913,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-
-**asr_model:** `typing.Optional[VideoBotsPageRequestAsrModel]` — Choose a model to transcribe incoming audio messages to text.
+**asr_model:** `typing.Optional[CopilotCompletionRequestAsrModel]` — Choose a model to transcribe incoming audio messages to text.
@@ -5917,7 +5929,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-
-**translation_model:** `typing.Optional[VideoBotsPageRequestTranslationModel]`
+**translation_model:** `typing.Optional[CopilotCompletionRequestTranslationModel]`
@@ -5933,11 +5945,9 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-
-**input_glossary_document:** `typing.Optional[str]`
-
+**input_glossary_document:** `from __future__ import annotations
-Translation Glossary for User Langauge -> LLM Language (English)
-
+typing.Optional[core.File]` — See core.File for more documentation
@@ -5945,11 +5955,9 @@ Translation Glossary for User Langauge -> LLM Language (English)
-
-**output_glossary_document:** `typing.Optional[str]`
-
+**output_glossary_document:** `from __future__ import annotations
-Translation Glossary for LLM Language (English) -> User Langauge
-
+typing.Optional[core.File]` — See core.File for more documentation
@@ -5957,7 +5965,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-
-**lipsync_model:** `typing.Optional[VideoBotsPageRequestLipsyncModel]`
+**lipsync_model:** `typing.Optional[CopilotCompletionRequestLipsyncModel]`
@@ -5965,7 +5973,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-
-**tools:** `typing.Optional[typing.Sequence[LlmTools]]` — Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling).
+**tools:** `typing.Optional[typing.List[LlmTools]]` — Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling).
@@ -6013,7 +6021,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-
-**response_format_type:** `typing.Optional[VideoBotsPageRequestResponseFormatType]`
+**response_format_type:** `typing.Optional[CopilotCompletionRequestResponseFormatType]`
@@ -6021,7 +6029,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-
-**tts_provider:** `typing.Optional[VideoBotsPageRequestTtsProvider]`
+**tts_provider:** `typing.Optional[CopilotCompletionRequestTtsProvider]`
@@ -6149,7 +6157,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-
-**openai_voice_name:** `typing.Optional[VideoBotsPageRequestOpenaiVoiceName]`
+**openai_voice_name:** `typing.Optional[CopilotCompletionRequestOpenaiVoiceName]`
@@ -6157,7 +6165,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-
-**openai_tts_model:** `typing.Optional[VideoBotsPageRequestOpenaiTtsModel]`
+**openai_tts_model:** `typing.Optional[CopilotCompletionRequestOpenaiTtsModel]`
@@ -6165,7 +6173,9 @@ Translation Glossary for LLM Language (English) -> User Langauge
-
-**input_face:** `typing.Optional[str]`
+**input_face:** `from __future__ import annotations
+
+typing.Optional[core.File]` — See core.File for more documentation
@@ -6205,7 +6215,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-
-**sadtalker_settings:** `typing.Optional[SadTalkerSettings]`
+**sadtalker_settings:** `typing.Optional[CopilotCompletionRequestSadtalkerSettings]`
diff --git a/src/gooey/__init__.py b/src/gooey/__init__.py
index 494a2e7..86305e9 100644
--- a/src/gooey/__init__.py
+++ b/src/gooey/__init__.py
@@ -10,6 +10,7 @@
AsrOutputJson,
AsrPageOutput,
AsrPageOutputOutputTextItem,
+ AsrPageRequest,
AsrPageRequestOutputFormat,
AsrPageRequestSelectedModel,
AsrPageRequestTranslationModel,
@@ -22,6 +23,7 @@
BulkEvalPageRequestResponseFormatType,
BulkEvalPageStatusResponse,
BulkRunnerPageOutput,
+ BulkRunnerPageRequest,
BulkRunnerPageStatusResponse,
ButtonPressed,
CalledFunctionResponse,
@@ -39,6 +41,7 @@
CompareText2ImgPageRequestSelectedModelsItem,
CompareText2ImgPageStatusResponse,
CompareUpscalerPageOutput,
+ CompareUpscalerPageRequest,
CompareUpscalerPageRequestSelectedModelsItem,
CompareUpscalerPageStatusResponse,
ConsoleLogs,
@@ -65,6 +68,7 @@
DeforumSdPageRequestSelectedModel,
DeforumSdPageStatusResponse,
DocExtractPageOutput,
+ DocExtractPageRequest,
DocExtractPageRequestResponseFormatType,
DocExtractPageRequestSelectedAsrModel,
DocExtractPageStatusResponse,
@@ -75,9 +79,12 @@
DocSearchPageRequestResponseFormatType,
DocSearchPageStatusResponse,
DocSummaryPageOutput,
+ DocSummaryPageRequest,
DocSummaryPageRequestResponseFormatType,
DocSummaryPageRequestSelectedAsrModel,
DocSummaryPageStatusResponse,
+ DocSummaryRequestResponseFormatType,
+ DocSummaryRequestSelectedAsrModel,
EmailFaceInpaintingPageOutput,
EmailFaceInpaintingPageRequestSelectedModel,
EmailFaceInpaintingPageStatusResponse,
@@ -86,6 +93,7 @@
EmbeddingsPageStatusResponse,
EvalPrompt,
FaceInpaintingPageOutput,
+ FaceInpaintingPageRequest,
FaceInpaintingPageRequestSelectedModel,
FaceInpaintingPageStatusResponse,
FinalResponse,
@@ -102,11 +110,13 @@
GoogleImageGenPageStatusResponse,
HttpValidationError,
ImageSegmentationPageOutput,
+ ImageSegmentationPageRequest,
ImageSegmentationPageRequestSelectedModel,
ImageSegmentationPageStatusResponse,
ImageUrl,
ImageUrlDetail,
Img2ImgPageOutput,
+ Img2ImgPageRequest,
Img2ImgPageRequestSelectedControlnetModel,
Img2ImgPageRequestSelectedControlnetModelItem,
Img2ImgPageRequestSelectedModel,
@@ -116,27 +126,42 @@
LetterWriterPageRequest,
LetterWriterPageStatusResponse,
LipsyncPageOutput,
+ LipsyncPageRequest,
LipsyncPageRequestSelectedModel,
LipsyncPageStatusResponse,
+ LipsyncRequestSelectedModel,
LipsyncTtsPageOutput,
+ LipsyncTtsPageRequest,
LipsyncTtsPageRequestOpenaiTtsModel,
LipsyncTtsPageRequestOpenaiVoiceName,
LipsyncTtsPageRequestSelectedModel,
LipsyncTtsPageRequestTtsProvider,
LipsyncTtsPageStatusResponse,
+ LipsyncTtsRequestOpenaiTtsModel,
+ LipsyncTtsRequestOpenaiVoiceName,
+ LipsyncTtsRequestSelectedModel,
+ LipsyncTtsRequestTtsProvider,
LlmTools,
MessagePart,
ObjectInpaintingPageOutput,
+ ObjectInpaintingPageRequest,
ObjectInpaintingPageRequestSelectedModel,
ObjectInpaintingPageStatusResponse,
+ PortraitRequestSelectedModel,
+ ProductImageRequestSelectedModel,
PromptTreeNode,
PromptTreeNodePrompt,
QrCodeGeneratorPageOutput,
+ QrCodeGeneratorPageRequest,
QrCodeGeneratorPageRequestImagePromptControlnetModelsItem,
QrCodeGeneratorPageRequestScheduler,
QrCodeGeneratorPageRequestSelectedControlnetModelItem,
QrCodeGeneratorPageRequestSelectedModel,
QrCodeGeneratorPageStatusResponse,
+ QrCodeRequestImagePromptControlnetModelsItem,
+ QrCodeRequestScheduler,
+ QrCodeRequestSelectedControlnetModelItem,
+ QrCodeRequestSelectedModel,
RecipeFunction,
RecipeFunctionTrigger,
RecipeRunState,
@@ -152,6 +177,10 @@
RelatedQnAPageRequestEmbeddingModel,
RelatedQnAPageRequestResponseFormatType,
RelatedQnAPageStatusResponse,
+ RemixImageRequestSelectedControlnetModel,
+ RemixImageRequestSelectedControlnetModelItem,
+ RemixImageRequestSelectedModel,
+ RemoveBackgroundRequestSelectedModel,
ReplyButton,
ResponseModel,
ResponseModelFinalKeywordQuery,
@@ -173,7 +202,12 @@
SocialLookupEmailPageOutput,
SocialLookupEmailPageRequestResponseFormatType,
SocialLookupEmailPageStatusResponse,
+ SpeechRecognitionRequestOutputFormat,
+ SpeechRecognitionRequestSelectedModel,
+ SpeechRecognitionRequestTranslationModel,
StreamError,
+ SynthesizeDataRequestResponseFormatType,
+ SynthesizeDataRequestSelectedAsrModel,
Text2AudioPageOutput,
Text2AudioPageStatusResponse,
TextToSpeechPageOutput,
@@ -182,30 +216,51 @@
TextToSpeechPageRequestTtsProvider,
TextToSpeechPageStatusResponse,
TrainingDataModel,
+ TranslateRequestSelectedModel,
TranslationPageOutput,
+ TranslationPageRequest,
TranslationPageRequestSelectedModel,
TranslationPageStatusResponse,
+ UpscaleRequestSelectedModelsItem,
ValidationError,
ValidationErrorLocItem,
Vcard,
VideoBotsPageOutput,
VideoBotsPageOutputFinalKeywordQuery,
VideoBotsPageOutputFinalPrompt,
- VideoBotsPageStatusResponse,
-)
-from .errors import PaymentRequiredError, TooManyRequestsError, UnprocessableEntityError
-from . import copilot
-from .client import AsyncGooey, Gooey
-from .copilot import (
+ VideoBotsPageRequest,
VideoBotsPageRequestAsrModel,
VideoBotsPageRequestCitationStyle,
VideoBotsPageRequestEmbeddingModel,
+ VideoBotsPageRequestFunctionsItem,
+ VideoBotsPageRequestFunctionsItemTrigger,
VideoBotsPageRequestLipsyncModel,
VideoBotsPageRequestOpenaiTtsModel,
VideoBotsPageRequestOpenaiVoiceName,
VideoBotsPageRequestResponseFormatType,
+ VideoBotsPageRequestSadtalkerSettings,
+ VideoBotsPageRequestSadtalkerSettingsPreprocess,
VideoBotsPageRequestTranslationModel,
VideoBotsPageRequestTtsProvider,
+ VideoBotsPageStatusResponse,
+)
+from .errors import PaymentRequiredError, TooManyRequestsError, UnprocessableEntityError
+from . import copilot
+from .client import AsyncGooey, Gooey
+from .copilot import (
+ CopilotCompletionRequestAsrModel,
+ CopilotCompletionRequestCitationStyle,
+ CopilotCompletionRequestEmbeddingModel,
+ CopilotCompletionRequestFunctionsItem,
+ CopilotCompletionRequestFunctionsItemTrigger,
+ CopilotCompletionRequestLipsyncModel,
+ CopilotCompletionRequestOpenaiTtsModel,
+ CopilotCompletionRequestOpenaiVoiceName,
+ CopilotCompletionRequestResponseFormatType,
+ CopilotCompletionRequestSadtalkerSettings,
+ CopilotCompletionRequestSadtalkerSettingsPreprocess,
+ CopilotCompletionRequestTranslationModel,
+ CopilotCompletionRequestTtsProvider,
)
from .environment import GooeyEnvironment
from .version import __version__
@@ -220,6 +275,7 @@
"AsrOutputJson",
"AsrPageOutput",
"AsrPageOutputOutputTextItem",
+ "AsrPageRequest",
"AsrPageRequestOutputFormat",
"AsrPageRequestSelectedModel",
"AsrPageRequestTranslationModel",
@@ -233,6 +289,7 @@
"BulkEvalPageRequestResponseFormatType",
"BulkEvalPageStatusResponse",
"BulkRunnerPageOutput",
+ "BulkRunnerPageRequest",
"BulkRunnerPageStatusResponse",
"ButtonPressed",
"CalledFunctionResponse",
@@ -250,6 +307,7 @@
"CompareText2ImgPageRequestSelectedModelsItem",
"CompareText2ImgPageStatusResponse",
"CompareUpscalerPageOutput",
+ "CompareUpscalerPageRequest",
"CompareUpscalerPageRequestSelectedModelsItem",
"CompareUpscalerPageStatusResponse",
"ConsoleLogs",
@@ -261,6 +319,19 @@
"ConversationEntryContentItem_Text",
"ConversationEntryRole",
"ConversationStart",
+ "CopilotCompletionRequestAsrModel",
+ "CopilotCompletionRequestCitationStyle",
+ "CopilotCompletionRequestEmbeddingModel",
+ "CopilotCompletionRequestFunctionsItem",
+ "CopilotCompletionRequestFunctionsItemTrigger",
+ "CopilotCompletionRequestLipsyncModel",
+ "CopilotCompletionRequestOpenaiTtsModel",
+ "CopilotCompletionRequestOpenaiVoiceName",
+ "CopilotCompletionRequestResponseFormatType",
+ "CopilotCompletionRequestSadtalkerSettings",
+ "CopilotCompletionRequestSadtalkerSettingsPreprocess",
+ "CopilotCompletionRequestTranslationModel",
+ "CopilotCompletionRequestTtsProvider",
"CreateStreamRequest",
"CreateStreamRequestAsrModel",
"CreateStreamRequestCitationStyle",
@@ -276,6 +347,7 @@
"DeforumSdPageRequestSelectedModel",
"DeforumSdPageStatusResponse",
"DocExtractPageOutput",
+ "DocExtractPageRequest",
"DocExtractPageRequestResponseFormatType",
"DocExtractPageRequestSelectedAsrModel",
"DocExtractPageStatusResponse",
@@ -286,9 +358,12 @@
"DocSearchPageRequestResponseFormatType",
"DocSearchPageStatusResponse",
"DocSummaryPageOutput",
+ "DocSummaryPageRequest",
"DocSummaryPageRequestResponseFormatType",
"DocSummaryPageRequestSelectedAsrModel",
"DocSummaryPageStatusResponse",
+ "DocSummaryRequestResponseFormatType",
+ "DocSummaryRequestSelectedAsrModel",
"EmailFaceInpaintingPageOutput",
"EmailFaceInpaintingPageRequestSelectedModel",
"EmailFaceInpaintingPageStatusResponse",
@@ -297,6 +372,7 @@
"EmbeddingsPageStatusResponse",
"EvalPrompt",
"FaceInpaintingPageOutput",
+ "FaceInpaintingPageRequest",
"FaceInpaintingPageRequestSelectedModel",
"FaceInpaintingPageStatusResponse",
"FinalResponse",
@@ -315,11 +391,13 @@
"GoogleImageGenPageStatusResponse",
"HttpValidationError",
"ImageSegmentationPageOutput",
+ "ImageSegmentationPageRequest",
"ImageSegmentationPageRequestSelectedModel",
"ImageSegmentationPageStatusResponse",
"ImageUrl",
"ImageUrlDetail",
"Img2ImgPageOutput",
+ "Img2ImgPageRequest",
"Img2ImgPageRequestSelectedControlnetModel",
"Img2ImgPageRequestSelectedControlnetModelItem",
"Img2ImgPageRequestSelectedModel",
@@ -329,28 +407,43 @@
"LetterWriterPageRequest",
"LetterWriterPageStatusResponse",
"LipsyncPageOutput",
+ "LipsyncPageRequest",
"LipsyncPageRequestSelectedModel",
"LipsyncPageStatusResponse",
+ "LipsyncRequestSelectedModel",
"LipsyncTtsPageOutput",
+ "LipsyncTtsPageRequest",
"LipsyncTtsPageRequestOpenaiTtsModel",
"LipsyncTtsPageRequestOpenaiVoiceName",
"LipsyncTtsPageRequestSelectedModel",
"LipsyncTtsPageRequestTtsProvider",
"LipsyncTtsPageStatusResponse",
+ "LipsyncTtsRequestOpenaiTtsModel",
+ "LipsyncTtsRequestOpenaiVoiceName",
+ "LipsyncTtsRequestSelectedModel",
+ "LipsyncTtsRequestTtsProvider",
"LlmTools",
"MessagePart",
"ObjectInpaintingPageOutput",
+ "ObjectInpaintingPageRequest",
"ObjectInpaintingPageRequestSelectedModel",
"ObjectInpaintingPageStatusResponse",
"PaymentRequiredError",
+ "PortraitRequestSelectedModel",
+ "ProductImageRequestSelectedModel",
"PromptTreeNode",
"PromptTreeNodePrompt",
"QrCodeGeneratorPageOutput",
+ "QrCodeGeneratorPageRequest",
"QrCodeGeneratorPageRequestImagePromptControlnetModelsItem",
"QrCodeGeneratorPageRequestScheduler",
"QrCodeGeneratorPageRequestSelectedControlnetModelItem",
"QrCodeGeneratorPageRequestSelectedModel",
"QrCodeGeneratorPageStatusResponse",
+ "QrCodeRequestImagePromptControlnetModelsItem",
+ "QrCodeRequestScheduler",
+ "QrCodeRequestSelectedControlnetModelItem",
+ "QrCodeRequestSelectedModel",
"RecipeFunction",
"RecipeFunctionTrigger",
"RecipeRunState",
@@ -366,6 +459,10 @@
"RelatedQnAPageRequestEmbeddingModel",
"RelatedQnAPageRequestResponseFormatType",
"RelatedQnAPageStatusResponse",
+ "RemixImageRequestSelectedControlnetModel",
+ "RemixImageRequestSelectedControlnetModelItem",
+ "RemixImageRequestSelectedModel",
+ "RemoveBackgroundRequestSelectedModel",
"ReplyButton",
"ResponseModel",
"ResponseModelFinalKeywordQuery",
@@ -387,7 +484,12 @@
"SocialLookupEmailPageOutput",
"SocialLookupEmailPageRequestResponseFormatType",
"SocialLookupEmailPageStatusResponse",
+ "SpeechRecognitionRequestOutputFormat",
+ "SpeechRecognitionRequestSelectedModel",
+ "SpeechRecognitionRequestTranslationModel",
"StreamError",
+ "SynthesizeDataRequestResponseFormatType",
+ "SynthesizeDataRequestSelectedAsrModel",
"Text2AudioPageOutput",
"Text2AudioPageStatusResponse",
"TextToSpeechPageOutput",
@@ -397,23 +499,31 @@
"TextToSpeechPageStatusResponse",
"TooManyRequestsError",
"TrainingDataModel",
+ "TranslateRequestSelectedModel",
"TranslationPageOutput",
+ "TranslationPageRequest",
"TranslationPageRequestSelectedModel",
"TranslationPageStatusResponse",
"UnprocessableEntityError",
+ "UpscaleRequestSelectedModelsItem",
"ValidationError",
"ValidationErrorLocItem",
"Vcard",
"VideoBotsPageOutput",
"VideoBotsPageOutputFinalKeywordQuery",
"VideoBotsPageOutputFinalPrompt",
+ "VideoBotsPageRequest",
"VideoBotsPageRequestAsrModel",
"VideoBotsPageRequestCitationStyle",
"VideoBotsPageRequestEmbeddingModel",
+ "VideoBotsPageRequestFunctionsItem",
+ "VideoBotsPageRequestFunctionsItemTrigger",
"VideoBotsPageRequestLipsyncModel",
"VideoBotsPageRequestOpenaiTtsModel",
"VideoBotsPageRequestOpenaiVoiceName",
"VideoBotsPageRequestResponseFormatType",
+ "VideoBotsPageRequestSadtalkerSettings",
+ "VideoBotsPageRequestSadtalkerSettingsPreprocess",
"VideoBotsPageRequestTranslationModel",
"VideoBotsPageRequestTtsProvider",
"VideoBotsPageStatusResponse",
diff --git a/src/gooey/client.py b/src/gooey/client.py
index beba2f0..6767f27 100644
--- a/src/gooey/client.py
+++ b/src/gooey/client.py
@@ -20,15 +20,12 @@
from .types.http_validation_error import HttpValidationError
from .errors.too_many_requests_error import TooManyRequestsError
from json.decoder import JSONDecodeError
+from . import core
from .types.vcard import Vcard
-from .types.qr_code_generator_page_request_image_prompt_controlnet_models_item import (
- QrCodeGeneratorPageRequestImagePromptControlnetModelsItem,
-)
-from .types.qr_code_generator_page_request_selected_model import QrCodeGeneratorPageRequestSelectedModel
-from .types.qr_code_generator_page_request_selected_controlnet_model_item import (
- QrCodeGeneratorPageRequestSelectedControlnetModelItem,
-)
-from .types.qr_code_generator_page_request_scheduler import QrCodeGeneratorPageRequestScheduler
+from .types.qr_code_request_image_prompt_controlnet_models_item import QrCodeRequestImagePromptControlnetModelsItem
+from .types.qr_code_request_selected_model import QrCodeRequestSelectedModel
+from .types.qr_code_request_selected_controlnet_model_item import QrCodeRequestSelectedControlnetModelItem
+from .types.qr_code_request_scheduler import QrCodeRequestScheduler
from .types.qr_code_generator_page_output import QrCodeGeneratorPageOutput
from .types.large_language_models import LargeLanguageModels
from .types.related_qn_a_page_request_embedding_model import RelatedQnAPageRequestEmbeddingModel
@@ -48,8 +45,8 @@
from .types.agg_function import AggFunction
from .types.bulk_eval_page_request_response_format_type import BulkEvalPageRequestResponseFormatType
from .types.bulk_eval_page_output import BulkEvalPageOutput
-from .types.doc_extract_page_request_selected_asr_model import DocExtractPageRequestSelectedAsrModel
-from .types.doc_extract_page_request_response_format_type import DocExtractPageRequestResponseFormatType
+from .types.synthesize_data_request_selected_asr_model import SynthesizeDataRequestSelectedAsrModel
+from .types.synthesize_data_request_response_format_type import SynthesizeDataRequestResponseFormatType
from .types.doc_extract_page_output import DocExtractPageOutput
from .types.compare_llm_page_request_response_format_type import CompareLlmPageRequestResponseFormatType
from .types.compare_llm_page_output import CompareLlmPageOutput
@@ -60,46 +57,46 @@
from .types.doc_search_page_output import DocSearchPageOutput
from .types.smart_gpt_page_request_response_format_type import SmartGptPageRequestResponseFormatType
from .types.smart_gpt_page_output import SmartGptPageOutput
-from .types.doc_summary_page_request_selected_asr_model import DocSummaryPageRequestSelectedAsrModel
-from .types.doc_summary_page_request_response_format_type import DocSummaryPageRequestResponseFormatType
+from .types.doc_summary_request_selected_asr_model import DocSummaryRequestSelectedAsrModel
+from .types.doc_summary_request_response_format_type import DocSummaryRequestResponseFormatType
from .types.doc_summary_page_output import DocSummaryPageOutput
from .types.functions_page_output import FunctionsPageOutput
from .types.sad_talker_settings import SadTalkerSettings
-from .types.lipsync_page_request_selected_model import LipsyncPageRequestSelectedModel
+from .types.lipsync_request_selected_model import LipsyncRequestSelectedModel
from .types.lipsync_page_output import LipsyncPageOutput
-from .types.lipsync_tts_page_request_tts_provider import LipsyncTtsPageRequestTtsProvider
-from .types.lipsync_tts_page_request_openai_voice_name import LipsyncTtsPageRequestOpenaiVoiceName
-from .types.lipsync_tts_page_request_openai_tts_model import LipsyncTtsPageRequestOpenaiTtsModel
-from .types.lipsync_tts_page_request_selected_model import LipsyncTtsPageRequestSelectedModel
+from .types.lipsync_tts_request_tts_provider import LipsyncTtsRequestTtsProvider
+from .types.lipsync_tts_request_openai_voice_name import LipsyncTtsRequestOpenaiVoiceName
+from .types.lipsync_tts_request_openai_tts_model import LipsyncTtsRequestOpenaiTtsModel
+from .types.lipsync_tts_request_selected_model import LipsyncTtsRequestSelectedModel
from .types.lipsync_tts_page_output import LipsyncTtsPageOutput
from .types.text_to_speech_page_request_tts_provider import TextToSpeechPageRequestTtsProvider
from .types.text_to_speech_page_request_openai_voice_name import TextToSpeechPageRequestOpenaiVoiceName
from .types.text_to_speech_page_request_openai_tts_model import TextToSpeechPageRequestOpenaiTtsModel
from .types.text_to_speech_page_output import TextToSpeechPageOutput
-from .types.asr_page_request_selected_model import AsrPageRequestSelectedModel
-from .types.asr_page_request_translation_model import AsrPageRequestTranslationModel
-from .types.asr_page_request_output_format import AsrPageRequestOutputFormat
+from .types.speech_recognition_request_selected_model import SpeechRecognitionRequestSelectedModel
+from .types.speech_recognition_request_translation_model import SpeechRecognitionRequestTranslationModel
+from .types.speech_recognition_request_output_format import SpeechRecognitionRequestOutputFormat
from .types.asr_page_output import AsrPageOutput
from .types.text2audio_page_output import Text2AudioPageOutput
-from .types.translation_page_request_selected_model import TranslationPageRequestSelectedModel
+from .types.translate_request_selected_model import TranslateRequestSelectedModel
from .types.translation_page_output import TranslationPageOutput
-from .types.img2img_page_request_selected_model import Img2ImgPageRequestSelectedModel
-from .types.img2img_page_request_selected_controlnet_model import Img2ImgPageRequestSelectedControlnetModel
+from .types.remix_image_request_selected_model import RemixImageRequestSelectedModel
+from .types.remix_image_request_selected_controlnet_model import RemixImageRequestSelectedControlnetModel
from .types.img2img_page_output import Img2ImgPageOutput
from .types.compare_text2img_page_request_selected_models_item import CompareText2ImgPageRequestSelectedModelsItem
from .types.compare_text2img_page_request_scheduler import CompareText2ImgPageRequestScheduler
from .types.compare_text2img_page_output import CompareText2ImgPageOutput
-from .types.object_inpainting_page_request_selected_model import ObjectInpaintingPageRequestSelectedModel
+from .types.product_image_request_selected_model import ProductImageRequestSelectedModel
from .types.object_inpainting_page_output import ObjectInpaintingPageOutput
-from .types.face_inpainting_page_request_selected_model import FaceInpaintingPageRequestSelectedModel
+from .types.portrait_request_selected_model import PortraitRequestSelectedModel
from .types.face_inpainting_page_output import FaceInpaintingPageOutput
from .types.email_face_inpainting_page_request_selected_model import EmailFaceInpaintingPageRequestSelectedModel
from .types.email_face_inpainting_page_output import EmailFaceInpaintingPageOutput
from .types.google_image_gen_page_request_selected_model import GoogleImageGenPageRequestSelectedModel
from .types.google_image_gen_page_output import GoogleImageGenPageOutput
-from .types.image_segmentation_page_request_selected_model import ImageSegmentationPageRequestSelectedModel
+from .types.remove_background_request_selected_model import RemoveBackgroundRequestSelectedModel
from .types.image_segmentation_page_output import ImageSegmentationPageOutput
-from .types.compare_upscaler_page_request_selected_models_item import CompareUpscalerPageRequestSelectedModelsItem
+from .types.upscale_request_selected_models_item import UpscaleRequestSelectedModelsItem
from .types.compare_upscaler_page_output import CompareUpscalerPageOutput
from .types.embeddings_page_request_selected_model import EmbeddingsPageRequestSelectedModel
from .types.embeddings_page_output import EmbeddingsPageOutput
@@ -334,38 +331,36 @@ def qr_code(
*,
text_prompt: str,
example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- qr_code_data: typing.Optional[str] = OMIT,
- qr_code_input_image: typing.Optional[str] = OMIT,
- qr_code_vcard: typing.Optional[Vcard] = OMIT,
- qr_code_file: typing.Optional[str] = OMIT,
- use_url_shortener: typing.Optional[bool] = OMIT,
- negative_prompt: typing.Optional[str] = OMIT,
- image_prompt: typing.Optional[str] = OMIT,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
+ qr_code_data: typing.Optional[str] = None,
+ qr_code_input_image: typing.Optional[core.File] = None,
+ qr_code_vcard: typing.Optional[Vcard] = None,
+ qr_code_file: typing.Optional[core.File] = None,
+ use_url_shortener: typing.Optional[bool] = None,
+ negative_prompt: typing.Optional[str] = None,
+ image_prompt: typing.Optional[str] = None,
image_prompt_controlnet_models: typing.Optional[
- typing.Sequence[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem]
- ] = OMIT,
- image_prompt_strength: typing.Optional[float] = OMIT,
- image_prompt_scale: typing.Optional[float] = OMIT,
- image_prompt_pos_x: typing.Optional[float] = OMIT,
- image_prompt_pos_y: typing.Optional[float] = OMIT,
- selected_model: typing.Optional[QrCodeGeneratorPageRequestSelectedModel] = OMIT,
- selected_controlnet_model: typing.Optional[
- typing.Sequence[QrCodeGeneratorPageRequestSelectedControlnetModelItem]
- ] = OMIT,
- output_width: typing.Optional[int] = OMIT,
- output_height: typing.Optional[int] = OMIT,
- guidance_scale: typing.Optional[float] = OMIT,
- controlnet_conditioning_scale: typing.Optional[typing.Sequence[float]] = OMIT,
- num_outputs: typing.Optional[int] = OMIT,
- quality: typing.Optional[int] = OMIT,
- scheduler: typing.Optional[QrCodeGeneratorPageRequestScheduler] = OMIT,
- seed: typing.Optional[int] = OMIT,
- obj_scale: typing.Optional[float] = OMIT,
- obj_pos_x: typing.Optional[float] = OMIT,
- obj_pos_y: typing.Optional[float] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
+ typing.List[QrCodeRequestImagePromptControlnetModelsItem]
+ ] = None,
+ image_prompt_strength: typing.Optional[float] = None,
+ image_prompt_scale: typing.Optional[float] = None,
+ image_prompt_pos_x: typing.Optional[float] = None,
+ image_prompt_pos_y: typing.Optional[float] = None,
+ selected_model: typing.Optional[QrCodeRequestSelectedModel] = None,
+ selected_controlnet_model: typing.Optional[typing.List[QrCodeRequestSelectedControlnetModelItem]] = None,
+ output_width: typing.Optional[int] = None,
+ output_height: typing.Optional[int] = None,
+ guidance_scale: typing.Optional[float] = None,
+ controlnet_conditioning_scale: typing.Optional[typing.List[float]] = None,
+ num_outputs: typing.Optional[int] = None,
+ quality: typing.Optional[int] = None,
+ scheduler: typing.Optional[QrCodeRequestScheduler] = None,
+ seed: typing.Optional[int] = None,
+ obj_scale: typing.Optional[float] = None,
+ obj_pos_x: typing.Optional[float] = None,
+ obj_pos_y: typing.Optional[float] = None,
+ settings: typing.Optional[RunSettings] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> QrCodeGeneratorPageOutput:
"""
@@ -375,18 +370,20 @@ def qr_code(
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
+ functions : typing.Optional[typing.List[RecipeFunction]]
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
qr_code_data : typing.Optional[str]
- qr_code_input_image : typing.Optional[str]
+ qr_code_input_image : typing.Optional[core.File]
+ See core.File for more documentation
qr_code_vcard : typing.Optional[Vcard]
- qr_code_file : typing.Optional[str]
+ qr_code_file : typing.Optional[core.File]
+ See core.File for more documentation
use_url_shortener : typing.Optional[bool]
@@ -394,7 +391,7 @@ def qr_code(
image_prompt : typing.Optional[str]
- image_prompt_controlnet_models : typing.Optional[typing.Sequence[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem]]
+ image_prompt_controlnet_models : typing.Optional[typing.List[QrCodeRequestImagePromptControlnetModelsItem]]
image_prompt_strength : typing.Optional[float]
@@ -404,9 +401,9 @@ def qr_code(
image_prompt_pos_y : typing.Optional[float]
- selected_model : typing.Optional[QrCodeGeneratorPageRequestSelectedModel]
+ selected_model : typing.Optional[QrCodeRequestSelectedModel]
- selected_controlnet_model : typing.Optional[typing.Sequence[QrCodeGeneratorPageRequestSelectedControlnetModelItem]]
+ selected_controlnet_model : typing.Optional[typing.List[QrCodeRequestSelectedControlnetModelItem]]
output_width : typing.Optional[int]
@@ -414,13 +411,13 @@ def qr_code(
guidance_scale : typing.Optional[float]
- controlnet_conditioning_scale : typing.Optional[typing.Sequence[float]]
+ controlnet_conditioning_scale : typing.Optional[typing.List[float]]
num_outputs : typing.Optional[int]
quality : typing.Optional[int]
- scheduler : typing.Optional[QrCodeGeneratorPageRequestScheduler]
+ scheduler : typing.Optional[QrCodeRequestScheduler]
seed : typing.Optional[int]
@@ -457,13 +454,11 @@ def qr_code(
params={
"example_id": example_id,
},
- json={
+ data={
"functions": functions,
"variables": variables,
"qr_code_data": qr_code_data,
- "qr_code_input_image": qr_code_input_image,
"qr_code_vcard": qr_code_vcard,
- "qr_code_file": qr_code_file,
"use_url_shortener": use_url_shortener,
"text_prompt": text_prompt,
"negative_prompt": negative_prompt,
@@ -488,6 +483,10 @@ def qr_code(
"obj_pos_y": obj_pos_y,
"settings": settings,
},
+ files={
+ "qr_code_input_image": qr_code_input_image,
+ "qr_code_file": qr_code_file,
+ },
request_options=request_options,
omit=OMIT,
)
@@ -1224,28 +1223,24 @@ def personalize_email(
def bulk_run(
self,
*,
- documents: typing.Sequence[str],
- run_urls: typing.Sequence[str],
+ documents: typing.List[core.File],
+ run_urls: typing.List[str],
input_columns: typing.Dict[str, str],
output_columns: typing.Dict[str, str],
example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- eval_urls: typing.Optional[typing.Sequence[str]] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
+ eval_urls: typing.Optional[typing.List[str]] = None,
+ settings: typing.Optional[RunSettings] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> BulkRunnerPageOutput:
"""
Parameters
----------
- documents : typing.Sequence[str]
-
- Upload or link to a CSV or google sheet that contains your sample input data.
- For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs.
- Remember to includes header names in your CSV too.
+ documents : typing.List[core.File]
+ See core.File for more documentation
-
- run_urls : typing.Sequence[str]
+ run_urls : typing.List[str]
Provide one or more Gooey.AI workflow runs.
You can add multiple runs from the same recipe (e.g. two versions of your copilot) and we'll run the inputs over both of them.
@@ -1263,12 +1258,12 @@ def bulk_run(
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
+ functions : typing.Optional[typing.List[RecipeFunction]]
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
- eval_urls : typing.Optional[typing.Sequence[str]]
+ eval_urls : typing.Optional[typing.List[str]]
_(optional)_ Add one or more Gooey.AI Evaluator Workflows to evaluate the results of your runs.
@@ -1291,7 +1286,6 @@ def bulk_run(
api_key="YOUR_API_KEY",
)
client.bulk_run(
- documents=["documents"],
run_urls=["run_urls"],
input_columns={"key": "value"},
output_columns={"key": "value"},
@@ -1303,16 +1297,18 @@ def bulk_run(
params={
"example_id": example_id,
},
- json={
+ data={
"functions": functions,
"variables": variables,
- "documents": documents,
"run_urls": run_urls,
"input_columns": input_columns,
"output_columns": output_columns,
"eval_urls": eval_urls,
"settings": settings,
},
+ files={
+ "documents": documents,
+ },
request_options=request_options,
omit=OMIT,
)
@@ -1513,46 +1509,47 @@ def eval(
def synthesize_data(
self,
*,
- documents: typing.Sequence[str],
+ documents: typing.List[core.File],
example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- sheet_url: typing.Optional[str] = OMIT,
- selected_asr_model: typing.Optional[DocExtractPageRequestSelectedAsrModel] = OMIT,
- google_translate_target: typing.Optional[str] = OMIT,
- glossary_document: typing.Optional[str] = OMIT,
- task_instructions: typing.Optional[str] = OMIT,
- selected_model: typing.Optional[LargeLanguageModels] = OMIT,
- avoid_repetition: typing.Optional[bool] = OMIT,
- num_outputs: typing.Optional[int] = OMIT,
- quality: typing.Optional[float] = OMIT,
- max_tokens: typing.Optional[int] = OMIT,
- sampling_temperature: typing.Optional[float] = OMIT,
- response_format_type: typing.Optional[DocExtractPageRequestResponseFormatType] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
+ sheet_url: typing.Optional[core.File] = None,
+ selected_asr_model: typing.Optional[SynthesizeDataRequestSelectedAsrModel] = None,
+ google_translate_target: typing.Optional[str] = None,
+ glossary_document: typing.Optional[core.File] = None,
+ task_instructions: typing.Optional[str] = None,
+ selected_model: typing.Optional[LargeLanguageModels] = None,
+ avoid_repetition: typing.Optional[bool] = None,
+ num_outputs: typing.Optional[int] = None,
+ quality: typing.Optional[float] = None,
+ max_tokens: typing.Optional[int] = None,
+ sampling_temperature: typing.Optional[float] = None,
+ response_format_type: typing.Optional[SynthesizeDataRequestResponseFormatType] = None,
+ settings: typing.Optional[RunSettings] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> DocExtractPageOutput:
"""
Parameters
----------
- documents : typing.Sequence[str]
+ documents : typing.List[core.File]
+ See core.File for more documentation
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
+ functions : typing.Optional[typing.List[RecipeFunction]]
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
- sheet_url : typing.Optional[str]
+ sheet_url : typing.Optional[core.File]
+ See core.File for more documentation
- selected_asr_model : typing.Optional[DocExtractPageRequestSelectedAsrModel]
+ selected_asr_model : typing.Optional[SynthesizeDataRequestSelectedAsrModel]
google_translate_target : typing.Optional[str]
- glossary_document : typing.Optional[str]
- Provide a glossary to customize translation and improve accuracy of domain-specific terms.
- If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
+ glossary_document : typing.Optional[core.File]
+ See core.File for more documentation
task_instructions : typing.Optional[str]
@@ -1568,7 +1565,7 @@ def synthesize_data(
sampling_temperature : typing.Optional[float]
- response_format_type : typing.Optional[DocExtractPageRequestResponseFormatType]
+ response_format_type : typing.Optional[SynthesizeDataRequestResponseFormatType]
settings : typing.Optional[RunSettings]
@@ -1587,9 +1584,7 @@ def synthesize_data(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.synthesize_data(
- documents=["documents"],
- )
+ client.synthesize_data()
"""
_response = self._client_wrapper.httpx_client.request(
"v3/doc-extract/async",
@@ -1597,14 +1592,11 @@ def synthesize_data(
params={
"example_id": example_id,
},
- json={
+ data={
"functions": functions,
"variables": variables,
- "documents": documents,
- "sheet_url": sheet_url,
"selected_asr_model": selected_asr_model,
"google_translate_target": google_translate_target,
- "glossary_document": glossary_document,
"task_instructions": task_instructions,
"selected_model": selected_model,
"avoid_repetition": avoid_repetition,
@@ -1615,6 +1607,11 @@ def synthesize_data(
"response_format_type": response_format_type,
"settings": settings,
},
+ files={
+ "documents": documents,
+ "sheet_url": sheet_url,
+ "glossary_document": glossary_document,
+ },
request_options=request_options,
omit=OMIT,
)
@@ -2113,33 +2110,34 @@ def smart_gpt(
def doc_summary(
self,
*,
- documents: typing.Sequence[str],
+ documents: typing.List[core.File],
example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- task_instructions: typing.Optional[str] = OMIT,
- merge_instructions: typing.Optional[str] = OMIT,
- selected_model: typing.Optional[LargeLanguageModels] = OMIT,
- chain_type: typing.Optional[typing.Literal["map_reduce"]] = OMIT,
- selected_asr_model: typing.Optional[DocSummaryPageRequestSelectedAsrModel] = OMIT,
- google_translate_target: typing.Optional[str] = OMIT,
- avoid_repetition: typing.Optional[bool] = OMIT,
- num_outputs: typing.Optional[int] = OMIT,
- quality: typing.Optional[float] = OMIT,
- max_tokens: typing.Optional[int] = OMIT,
- sampling_temperature: typing.Optional[float] = OMIT,
- response_format_type: typing.Optional[DocSummaryPageRequestResponseFormatType] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
+ task_instructions: typing.Optional[str] = None,
+ merge_instructions: typing.Optional[str] = None,
+ selected_model: typing.Optional[LargeLanguageModels] = None,
+ chain_type: typing.Optional[typing.Literal["map_reduce"]] = None,
+ selected_asr_model: typing.Optional[DocSummaryRequestSelectedAsrModel] = None,
+ google_translate_target: typing.Optional[str] = None,
+ avoid_repetition: typing.Optional[bool] = None,
+ num_outputs: typing.Optional[int] = None,
+ quality: typing.Optional[float] = None,
+ max_tokens: typing.Optional[int] = None,
+ sampling_temperature: typing.Optional[float] = None,
+ response_format_type: typing.Optional[DocSummaryRequestResponseFormatType] = None,
+ settings: typing.Optional[RunSettings] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> DocSummaryPageOutput:
"""
Parameters
----------
- documents : typing.Sequence[str]
+ documents : typing.List[core.File]
+ See core.File for more documentation
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
+ functions : typing.Optional[typing.List[RecipeFunction]]
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
@@ -2152,7 +2150,7 @@ def doc_summary(
chain_type : typing.Optional[typing.Literal["map_reduce"]]
- selected_asr_model : typing.Optional[DocSummaryPageRequestSelectedAsrModel]
+ selected_asr_model : typing.Optional[DocSummaryRequestSelectedAsrModel]
google_translate_target : typing.Optional[str]
@@ -2166,7 +2164,7 @@ def doc_summary(
sampling_temperature : typing.Optional[float]
- response_format_type : typing.Optional[DocSummaryPageRequestResponseFormatType]
+ response_format_type : typing.Optional[DocSummaryRequestResponseFormatType]
settings : typing.Optional[RunSettings]
@@ -2185,9 +2183,7 @@ def doc_summary(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.doc_summary(
- documents=["documents"],
- )
+ client.doc_summary()
"""
_response = self._client_wrapper.httpx_client.request(
"v3/doc-summary/async",
@@ -2195,10 +2191,9 @@ def doc_summary(
params={
"example_id": example_id,
},
- json={
+ data={
"functions": functions,
"variables": variables,
- "documents": documents,
"task_instructions": task_instructions,
"merge_instructions": merge_instructions,
"selected_model": selected_model,
@@ -2213,6 +2208,9 @@ def doc_summary(
"response_format_type": response_format_type,
"settings": settings,
},
+ files={
+ "documents": documents,
+ },
request_options=request_options,
omit=OMIT,
)
@@ -2361,17 +2359,17 @@ def lipsync(
self,
*,
example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- input_face: typing.Optional[str] = OMIT,
- face_padding_top: typing.Optional[int] = OMIT,
- face_padding_bottom: typing.Optional[int] = OMIT,
- face_padding_left: typing.Optional[int] = OMIT,
- face_padding_right: typing.Optional[int] = OMIT,
- sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT,
- selected_model: typing.Optional[LipsyncPageRequestSelectedModel] = OMIT,
- input_audio: typing.Optional[str] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
+ input_face: typing.Optional[core.File] = None,
+ face_padding_top: typing.Optional[int] = None,
+ face_padding_bottom: typing.Optional[int] = None,
+ face_padding_left: typing.Optional[int] = None,
+ face_padding_right: typing.Optional[int] = None,
+ sadtalker_settings: typing.Optional[SadTalkerSettings] = None,
+ selected_model: typing.Optional[LipsyncRequestSelectedModel] = None,
+ input_audio: typing.Optional[core.File] = None,
+ settings: typing.Optional[RunSettings] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> LipsyncPageOutput:
"""
@@ -2379,12 +2377,13 @@ def lipsync(
----------
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
+ functions : typing.Optional[typing.List[RecipeFunction]]
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
- input_face : typing.Optional[str]
+ input_face : typing.Optional[core.File]
+ See core.File for more documentation
face_padding_top : typing.Optional[int]
@@ -2396,9 +2395,10 @@ def lipsync(
sadtalker_settings : typing.Optional[SadTalkerSettings]
- selected_model : typing.Optional[LipsyncPageRequestSelectedModel]
+ selected_model : typing.Optional[LipsyncRequestSelectedModel]
- input_audio : typing.Optional[str]
+ input_audio : typing.Optional[core.File]
+ See core.File for more documentation
settings : typing.Optional[RunSettings]
@@ -2425,19 +2425,21 @@ def lipsync(
params={
"example_id": example_id,
},
- json={
+ data={
"functions": functions,
"variables": variables,
- "input_face": input_face,
"face_padding_top": face_padding_top,
"face_padding_bottom": face_padding_bottom,
"face_padding_left": face_padding_left,
"face_padding_right": face_padding_right,
"sadtalker_settings": sadtalker_settings,
"selected_model": selected_model,
- "input_audio": input_audio,
"settings": settings,
},
+ files={
+ "input_face": input_face,
+ "input_audio": input_audio,
+ },
request_options=request_options,
omit=OMIT,
)
@@ -2490,34 +2492,34 @@ def lipsync_tts(
*,
text_prompt: str,
example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- tts_provider: typing.Optional[LipsyncTtsPageRequestTtsProvider] = OMIT,
- uberduck_voice_name: typing.Optional[str] = OMIT,
- uberduck_speaking_rate: typing.Optional[float] = OMIT,
- google_voice_name: typing.Optional[str] = OMIT,
- google_speaking_rate: typing.Optional[float] = OMIT,
- google_pitch: typing.Optional[float] = OMIT,
- bark_history_prompt: typing.Optional[str] = OMIT,
- elevenlabs_voice_name: typing.Optional[str] = OMIT,
- elevenlabs_api_key: typing.Optional[str] = OMIT,
- elevenlabs_voice_id: typing.Optional[str] = OMIT,
- elevenlabs_model: typing.Optional[str] = OMIT,
- elevenlabs_stability: typing.Optional[float] = OMIT,
- elevenlabs_similarity_boost: typing.Optional[float] = OMIT,
- elevenlabs_style: typing.Optional[float] = OMIT,
- elevenlabs_speaker_boost: typing.Optional[bool] = OMIT,
- azure_voice_name: typing.Optional[str] = OMIT,
- openai_voice_name: typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName] = OMIT,
- openai_tts_model: typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel] = OMIT,
- input_face: typing.Optional[str] = OMIT,
- face_padding_top: typing.Optional[int] = OMIT,
- face_padding_bottom: typing.Optional[int] = OMIT,
- face_padding_left: typing.Optional[int] = OMIT,
- face_padding_right: typing.Optional[int] = OMIT,
- sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT,
- selected_model: typing.Optional[LipsyncTtsPageRequestSelectedModel] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
+ tts_provider: typing.Optional[LipsyncTtsRequestTtsProvider] = None,
+ uberduck_voice_name: typing.Optional[str] = None,
+ uberduck_speaking_rate: typing.Optional[float] = None,
+ google_voice_name: typing.Optional[str] = None,
+ google_speaking_rate: typing.Optional[float] = None,
+ google_pitch: typing.Optional[float] = None,
+ bark_history_prompt: typing.Optional[str] = None,
+ elevenlabs_voice_name: typing.Optional[str] = None,
+ elevenlabs_api_key: typing.Optional[str] = None,
+ elevenlabs_voice_id: typing.Optional[str] = None,
+ elevenlabs_model: typing.Optional[str] = None,
+ elevenlabs_stability: typing.Optional[float] = None,
+ elevenlabs_similarity_boost: typing.Optional[float] = None,
+ elevenlabs_style: typing.Optional[float] = None,
+ elevenlabs_speaker_boost: typing.Optional[bool] = None,
+ azure_voice_name: typing.Optional[str] = None,
+ openai_voice_name: typing.Optional[LipsyncTtsRequestOpenaiVoiceName] = None,
+ openai_tts_model: typing.Optional[LipsyncTtsRequestOpenaiTtsModel] = None,
+ input_face: typing.Optional[core.File] = None,
+ face_padding_top: typing.Optional[int] = None,
+ face_padding_bottom: typing.Optional[int] = None,
+ face_padding_left: typing.Optional[int] = None,
+ face_padding_right: typing.Optional[int] = None,
+ sadtalker_settings: typing.Optional[SadTalkerSettings] = None,
+ selected_model: typing.Optional[LipsyncTtsRequestSelectedModel] = None,
+ settings: typing.Optional[RunSettings] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> LipsyncTtsPageOutput:
"""
@@ -2527,12 +2529,12 @@ def lipsync_tts(
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
+ functions : typing.Optional[typing.List[RecipeFunction]]
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
- tts_provider : typing.Optional[LipsyncTtsPageRequestTtsProvider]
+ tts_provider : typing.Optional[LipsyncTtsRequestTtsProvider]
uberduck_voice_name : typing.Optional[str]
@@ -2565,11 +2567,12 @@ def lipsync_tts(
azure_voice_name : typing.Optional[str]
- openai_voice_name : typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName]
+ openai_voice_name : typing.Optional[LipsyncTtsRequestOpenaiVoiceName]
- openai_tts_model : typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel]
+ openai_tts_model : typing.Optional[LipsyncTtsRequestOpenaiTtsModel]
- input_face : typing.Optional[str]
+ input_face : typing.Optional[core.File]
+ See core.File for more documentation
face_padding_top : typing.Optional[int]
@@ -2581,7 +2584,7 @@ def lipsync_tts(
sadtalker_settings : typing.Optional[SadTalkerSettings]
- selected_model : typing.Optional[LipsyncTtsPageRequestSelectedModel]
+ selected_model : typing.Optional[LipsyncTtsRequestSelectedModel]
settings : typing.Optional[RunSettings]
@@ -2610,7 +2613,7 @@ def lipsync_tts(
params={
"example_id": example_id,
},
- json={
+ data={
"functions": functions,
"variables": variables,
"text_prompt": text_prompt,
@@ -2632,7 +2635,6 @@ def lipsync_tts(
"azure_voice_name": azure_voice_name,
"openai_voice_name": openai_voice_name,
"openai_tts_model": openai_tts_model,
- "input_face": input_face,
"face_padding_top": face_padding_top,
"face_padding_bottom": face_padding_bottom,
"face_padding_left": face_padding_left,
@@ -2641,6 +2643,9 @@ def lipsync_tts(
"selected_model": selected_model,
"settings": settings,
},
+ files={
+ "input_face": input_face,
+ },
request_options=request_options,
omit=OMIT,
)
@@ -2866,40 +2871,41 @@ def text_to_speech(
def speech_recognition(
self,
*,
- documents: typing.Sequence[str],
+ documents: typing.List[core.File],
example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- selected_model: typing.Optional[AsrPageRequestSelectedModel] = OMIT,
- language: typing.Optional[str] = OMIT,
- translation_model: typing.Optional[AsrPageRequestTranslationModel] = OMIT,
- output_format: typing.Optional[AsrPageRequestOutputFormat] = OMIT,
- google_translate_target: typing.Optional[str] = OMIT,
- translation_source: typing.Optional[str] = OMIT,
- translation_target: typing.Optional[str] = OMIT,
- glossary_document: typing.Optional[str] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
+ selected_model: typing.Optional[SpeechRecognitionRequestSelectedModel] = None,
+ language: typing.Optional[str] = None,
+ translation_model: typing.Optional[SpeechRecognitionRequestTranslationModel] = None,
+ output_format: typing.Optional[SpeechRecognitionRequestOutputFormat] = None,
+ google_translate_target: typing.Optional[str] = None,
+ translation_source: typing.Optional[str] = None,
+ translation_target: typing.Optional[str] = None,
+ glossary_document: typing.Optional[core.File] = None,
+ settings: typing.Optional[RunSettings] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> AsrPageOutput:
"""
Parameters
----------
- documents : typing.Sequence[str]
+ documents : typing.List[core.File]
+ See core.File for more documentation
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
+ functions : typing.Optional[typing.List[RecipeFunction]]
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
- selected_model : typing.Optional[AsrPageRequestSelectedModel]
+ selected_model : typing.Optional[SpeechRecognitionRequestSelectedModel]
language : typing.Optional[str]
- translation_model : typing.Optional[AsrPageRequestTranslationModel]
+ translation_model : typing.Optional[SpeechRecognitionRequestTranslationModel]
- output_format : typing.Optional[AsrPageRequestOutputFormat]
+ output_format : typing.Optional[SpeechRecognitionRequestOutputFormat]
google_translate_target : typing.Optional[str]
use `translation_model` & `translation_target` instead.
@@ -2908,9 +2914,8 @@ def speech_recognition(
translation_target : typing.Optional[str]
- glossary_document : typing.Optional[str]
- Provide a glossary to customize translation and improve accuracy of domain-specific terms.
- If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
+ glossary_document : typing.Optional[core.File]
+ See core.File for more documentation
settings : typing.Optional[RunSettings]
@@ -2929,9 +2934,7 @@ def speech_recognition(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.speech_recognition(
- documents=["documents"],
- )
+ client.speech_recognition()
"""
_response = self._client_wrapper.httpx_client.request(
"v3/asr/async",
@@ -2939,10 +2942,9 @@ def speech_recognition(
params={
"example_id": example_id,
},
- json={
+ data={
"functions": functions,
"variables": variables,
- "documents": documents,
"selected_model": selected_model,
"language": language,
"translation_model": translation_model,
@@ -2950,9 +2952,12 @@ def speech_recognition(
"google_translate_target": google_translate_target,
"translation_source": translation_source,
"translation_target": translation_target,
- "glossary_document": glossary_document,
"settings": settings,
},
+ files={
+ "documents": documents,
+ "glossary_document": glossary_document,
+ },
request_options=request_options,
omit=OMIT,
)
@@ -3138,14 +3143,14 @@ def translate(
self,
*,
example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- texts: typing.Optional[typing.Sequence[str]] = OMIT,
- selected_model: typing.Optional[TranslationPageRequestSelectedModel] = OMIT,
- translation_source: typing.Optional[str] = OMIT,
- translation_target: typing.Optional[str] = OMIT,
- glossary_document: typing.Optional[str] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
+ texts: typing.Optional[typing.List[str]] = None,
+ selected_model: typing.Optional[TranslateRequestSelectedModel] = None,
+ translation_source: typing.Optional[str] = None,
+ translation_target: typing.Optional[str] = None,
+ glossary_document: typing.Optional[core.File] = None,
+ settings: typing.Optional[RunSettings] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> TranslationPageOutput:
"""
@@ -3153,22 +3158,21 @@ def translate(
----------
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
+ functions : typing.Optional[typing.List[RecipeFunction]]
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
- texts : typing.Optional[typing.Sequence[str]]
+ texts : typing.Optional[typing.List[str]]
- selected_model : typing.Optional[TranslationPageRequestSelectedModel]
+ selected_model : typing.Optional[TranslateRequestSelectedModel]
translation_source : typing.Optional[str]
translation_target : typing.Optional[str]
- glossary_document : typing.Optional[str]
- Provide a glossary to customize translation and improve accuracy of domain-specific terms.
- If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
+ glossary_document : typing.Optional[core.File]
+ See core.File for more documentation
settings : typing.Optional[RunSettings]
@@ -3195,16 +3199,18 @@ def translate(
params={
"example_id": example_id,
},
- json={
+ data={
"functions": functions,
"variables": variables,
"texts": texts,
"selected_model": selected_model,
"translation_source": translation_source,
"translation_target": translation_target,
- "glossary_document": glossary_document,
"settings": settings,
},
+ files={
+ "glossary_document": glossary_document,
+ },
request_options=request_options,
omit=OMIT,
)
@@ -3255,43 +3261,44 @@ def translate(
def remix_image(
self,
*,
- input_image: str,
+ input_image: core.File,
example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- text_prompt: typing.Optional[str] = OMIT,
- selected_model: typing.Optional[Img2ImgPageRequestSelectedModel] = OMIT,
- selected_controlnet_model: typing.Optional[Img2ImgPageRequestSelectedControlnetModel] = OMIT,
- negative_prompt: typing.Optional[str] = OMIT,
- num_outputs: typing.Optional[int] = OMIT,
- quality: typing.Optional[int] = OMIT,
- output_width: typing.Optional[int] = OMIT,
- output_height: typing.Optional[int] = OMIT,
- guidance_scale: typing.Optional[float] = OMIT,
- prompt_strength: typing.Optional[float] = OMIT,
- controlnet_conditioning_scale: typing.Optional[typing.Sequence[float]] = OMIT,
- seed: typing.Optional[int] = OMIT,
- image_guidance_scale: typing.Optional[float] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
+ text_prompt: typing.Optional[str] = None,
+ selected_model: typing.Optional[RemixImageRequestSelectedModel] = None,
+ selected_controlnet_model: typing.Optional[RemixImageRequestSelectedControlnetModel] = None,
+ negative_prompt: typing.Optional[str] = None,
+ num_outputs: typing.Optional[int] = None,
+ quality: typing.Optional[int] = None,
+ output_width: typing.Optional[int] = None,
+ output_height: typing.Optional[int] = None,
+ guidance_scale: typing.Optional[float] = None,
+ prompt_strength: typing.Optional[float] = None,
+ controlnet_conditioning_scale: typing.Optional[typing.List[float]] = None,
+ seed: typing.Optional[int] = None,
+ image_guidance_scale: typing.Optional[float] = None,
+ settings: typing.Optional[RunSettings] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> Img2ImgPageOutput:
"""
Parameters
----------
- input_image : str
+ input_image : core.File
+ See core.File for more documentation
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
+ functions : typing.Optional[typing.List[RecipeFunction]]
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
text_prompt : typing.Optional[str]
- selected_model : typing.Optional[Img2ImgPageRequestSelectedModel]
+ selected_model : typing.Optional[RemixImageRequestSelectedModel]
- selected_controlnet_model : typing.Optional[Img2ImgPageRequestSelectedControlnetModel]
+ selected_controlnet_model : typing.Optional[RemixImageRequestSelectedControlnetModel]
negative_prompt : typing.Optional[str]
@@ -3307,7 +3314,7 @@ def remix_image(
prompt_strength : typing.Optional[float]
- controlnet_conditioning_scale : typing.Optional[typing.Sequence[float]]
+ controlnet_conditioning_scale : typing.Optional[typing.List[float]]
seed : typing.Optional[int]
@@ -3330,9 +3337,7 @@ def remix_image(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.remix_image(
- input_image="input_image",
- )
+ client.remix_image()
"""
_response = self._client_wrapper.httpx_client.request(
"v3/Img2Img/async",
@@ -3340,10 +3345,9 @@ def remix_image(
params={
"example_id": example_id,
},
- json={
+ data={
"functions": functions,
"variables": variables,
- "input_image": input_image,
"text_prompt": text_prompt,
"selected_model": selected_model,
"selected_controlnet_model": selected_controlnet_model,
@@ -3359,6 +3363,9 @@ def remix_image(
"image_guidance_scale": image_guidance_scale,
"settings": settings,
},
+ files={
+ "input_image": input_image,
+ },
request_options=request_options,
omit=OMIT,
)
@@ -3567,37 +3574,38 @@ def text_to_image(
def product_image(
self,
*,
- input_image: str,
+ input_image: core.File,
text_prompt: str,
example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- obj_scale: typing.Optional[float] = OMIT,
- obj_pos_x: typing.Optional[float] = OMIT,
- obj_pos_y: typing.Optional[float] = OMIT,
- mask_threshold: typing.Optional[float] = OMIT,
- selected_model: typing.Optional[ObjectInpaintingPageRequestSelectedModel] = OMIT,
- negative_prompt: typing.Optional[str] = OMIT,
- num_outputs: typing.Optional[int] = OMIT,
- quality: typing.Optional[int] = OMIT,
- output_width: typing.Optional[int] = OMIT,
- output_height: typing.Optional[int] = OMIT,
- guidance_scale: typing.Optional[float] = OMIT,
- sd2upscaling: typing.Optional[bool] = OMIT,
- seed: typing.Optional[int] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
+ obj_scale: typing.Optional[float] = None,
+ obj_pos_x: typing.Optional[float] = None,
+ obj_pos_y: typing.Optional[float] = None,
+ mask_threshold: typing.Optional[float] = None,
+ selected_model: typing.Optional[ProductImageRequestSelectedModel] = None,
+ negative_prompt: typing.Optional[str] = None,
+ num_outputs: typing.Optional[int] = None,
+ quality: typing.Optional[int] = None,
+ output_width: typing.Optional[int] = None,
+ output_height: typing.Optional[int] = None,
+ guidance_scale: typing.Optional[float] = None,
+ sd2upscaling: typing.Optional[bool] = None,
+ seed: typing.Optional[int] = None,
+ settings: typing.Optional[RunSettings] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> ObjectInpaintingPageOutput:
"""
Parameters
----------
- input_image : str
+ input_image : core.File
+ See core.File for more documentation
text_prompt : str
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
+ functions : typing.Optional[typing.List[RecipeFunction]]
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
@@ -3610,7 +3618,7 @@ def product_image(
mask_threshold : typing.Optional[float]
- selected_model : typing.Optional[ObjectInpaintingPageRequestSelectedModel]
+ selected_model : typing.Optional[ProductImageRequestSelectedModel]
negative_prompt : typing.Optional[str]
@@ -3646,7 +3654,6 @@ def product_image(
api_key="YOUR_API_KEY",
)
client.product_image(
- input_image="input_image",
text_prompt="text_prompt",
)
"""
@@ -3656,10 +3663,9 @@ def product_image(
params={
"example_id": example_id,
},
- json={
+ data={
"functions": functions,
"variables": variables,
- "input_image": input_image,
"text_prompt": text_prompt,
"obj_scale": obj_scale,
"obj_pos_x": obj_pos_x,
@@ -3676,6 +3682,9 @@ def product_image(
"seed": seed,
"settings": settings,
},
+ files={
+ "input_image": input_image,
+ },
request_options=request_options,
omit=OMIT,
)
@@ -3726,36 +3735,37 @@ def product_image(
def portrait(
self,
*,
- input_image: str,
+ input_image: core.File,
text_prompt: str,
example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- face_scale: typing.Optional[float] = OMIT,
- face_pos_x: typing.Optional[float] = OMIT,
- face_pos_y: typing.Optional[float] = OMIT,
- selected_model: typing.Optional[FaceInpaintingPageRequestSelectedModel] = OMIT,
- negative_prompt: typing.Optional[str] = OMIT,
- num_outputs: typing.Optional[int] = OMIT,
- quality: typing.Optional[int] = OMIT,
- upscale_factor: typing.Optional[float] = OMIT,
- output_width: typing.Optional[int] = OMIT,
- output_height: typing.Optional[int] = OMIT,
- guidance_scale: typing.Optional[float] = OMIT,
- seed: typing.Optional[int] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
+ face_scale: typing.Optional[float] = None,
+ face_pos_x: typing.Optional[float] = None,
+ face_pos_y: typing.Optional[float] = None,
+ selected_model: typing.Optional[PortraitRequestSelectedModel] = None,
+ negative_prompt: typing.Optional[str] = None,
+ num_outputs: typing.Optional[int] = None,
+ quality: typing.Optional[int] = None,
+ upscale_factor: typing.Optional[float] = None,
+ output_width: typing.Optional[int] = None,
+ output_height: typing.Optional[int] = None,
+ guidance_scale: typing.Optional[float] = None,
+ seed: typing.Optional[int] = None,
+ settings: typing.Optional[RunSettings] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> FaceInpaintingPageOutput:
"""
Parameters
----------
- input_image : str
+ input_image : core.File
+ See core.File for more documentation
text_prompt : str
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
+ functions : typing.Optional[typing.List[RecipeFunction]]
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
@@ -3766,7 +3776,7 @@ def portrait(
face_pos_y : typing.Optional[float]
- selected_model : typing.Optional[FaceInpaintingPageRequestSelectedModel]
+ selected_model : typing.Optional[PortraitRequestSelectedModel]
negative_prompt : typing.Optional[str]
@@ -3802,8 +3812,7 @@ def portrait(
api_key="YOUR_API_KEY",
)
client.portrait(
- input_image="input_image",
- text_prompt="tony stark from the iron man",
+ text_prompt="text_prompt",
)
"""
_response = self._client_wrapper.httpx_client.request(
@@ -3812,10 +3821,9 @@ def portrait(
params={
"example_id": example_id,
},
- json={
+ data={
"functions": functions,
"variables": variables,
- "input_image": input_image,
"text_prompt": text_prompt,
"face_scale": face_scale,
"face_pos_x": face_pos_x,
@@ -3831,6 +3839,9 @@ def portrait(
"seed": seed,
"settings": settings,
},
+ files={
+ "input_image": input_image,
+ },
request_options=request_options,
omit=OMIT,
)
@@ -4224,33 +4235,34 @@ def image_from_web_search(
def remove_background(
self,
*,
- input_image: str,
+ input_image: core.File,
example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- selected_model: typing.Optional[ImageSegmentationPageRequestSelectedModel] = OMIT,
- mask_threshold: typing.Optional[float] = OMIT,
- rect_persepective_transform: typing.Optional[bool] = OMIT,
- reflection_opacity: typing.Optional[float] = OMIT,
- obj_scale: typing.Optional[float] = OMIT,
- obj_pos_x: typing.Optional[float] = OMIT,
- obj_pos_y: typing.Optional[float] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
+ selected_model: typing.Optional[RemoveBackgroundRequestSelectedModel] = None,
+ mask_threshold: typing.Optional[float] = None,
+ rect_persepective_transform: typing.Optional[bool] = None,
+ reflection_opacity: typing.Optional[float] = None,
+ obj_scale: typing.Optional[float] = None,
+ obj_pos_x: typing.Optional[float] = None,
+ obj_pos_y: typing.Optional[float] = None,
+ settings: typing.Optional[RunSettings] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> ImageSegmentationPageOutput:
"""
Parameters
----------
- input_image : str
+ input_image : core.File
+ See core.File for more documentation
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
+ functions : typing.Optional[typing.List[RecipeFunction]]
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
- selected_model : typing.Optional[ImageSegmentationPageRequestSelectedModel]
+ selected_model : typing.Optional[RemoveBackgroundRequestSelectedModel]
mask_threshold : typing.Optional[float]
@@ -4281,9 +4293,7 @@ def remove_background(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.remove_background(
- input_image="input_image",
- )
+ client.remove_background()
"""
_response = self._client_wrapper.httpx_client.request(
"v3/ImageSegmentation/async",
@@ -4291,10 +4301,9 @@ def remove_background(
params={
"example_id": example_id,
},
- json={
+ data={
"functions": functions,
"variables": variables,
- "input_image": input_image,
"selected_model": selected_model,
"mask_threshold": mask_threshold,
"rect_persepective_transform": rect_persepective_transform,
@@ -4304,6 +4313,9 @@ def remove_background(
"obj_pos_y": obj_pos_y,
"settings": settings,
},
+ files={
+ "input_image": input_image,
+ },
request_options=request_options,
omit=OMIT,
)
@@ -4356,13 +4368,13 @@ def upscale(
*,
scale: int,
example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- input_image: typing.Optional[str] = OMIT,
- input_video: typing.Optional[str] = OMIT,
- selected_models: typing.Optional[typing.Sequence[CompareUpscalerPageRequestSelectedModelsItem]] = OMIT,
- selected_bg_model: typing.Optional[typing.Literal["real_esrgan_x2"]] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
+ input_image: typing.Optional[core.File] = None,
+ input_video: typing.Optional[core.File] = None,
+ selected_models: typing.Optional[typing.List[UpscaleRequestSelectedModelsItem]] = None,
+ selected_bg_model: typing.Optional[typing.Literal["real_esrgan_x2"]] = None,
+ settings: typing.Optional[RunSettings] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> CompareUpscalerPageOutput:
"""
@@ -4373,18 +4385,18 @@ def upscale(
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
+ functions : typing.Optional[typing.List[RecipeFunction]]
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
- input_image : typing.Optional[str]
- Input Image
+ input_image : typing.Optional[core.File]
+ See core.File for more documentation
- input_video : typing.Optional[str]
- Input Video
+ input_video : typing.Optional[core.File]
+ See core.File for more documentation
- selected_models : typing.Optional[typing.Sequence[CompareUpscalerPageRequestSelectedModelsItem]]
+ selected_models : typing.Optional[typing.List[UpscaleRequestSelectedModelsItem]]
selected_bg_model : typing.Optional[typing.Literal["real_esrgan_x2"]]
@@ -4415,16 +4427,18 @@ def upscale(
params={
"example_id": example_id,
},
- json={
+ data={
"functions": functions,
"variables": variables,
- "input_image": input_image,
- "input_video": input_video,
"scale": scale,
"selected_models": selected_models,
"selected_bg_model": selected_bg_model,
"settings": settings,
},
+ files={
+ "input_image": input_image,
+ "input_video": input_video,
+ },
request_options=request_options,
omit=OMIT,
)
@@ -5041,38 +5055,36 @@ async def qr_code(
*,
text_prompt: str,
example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- qr_code_data: typing.Optional[str] = OMIT,
- qr_code_input_image: typing.Optional[str] = OMIT,
- qr_code_vcard: typing.Optional[Vcard] = OMIT,
- qr_code_file: typing.Optional[str] = OMIT,
- use_url_shortener: typing.Optional[bool] = OMIT,
- negative_prompt: typing.Optional[str] = OMIT,
- image_prompt: typing.Optional[str] = OMIT,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
+ qr_code_data: typing.Optional[str] = None,
+ qr_code_input_image: typing.Optional[core.File] = None,
+ qr_code_vcard: typing.Optional[Vcard] = None,
+ qr_code_file: typing.Optional[core.File] = None,
+ use_url_shortener: typing.Optional[bool] = None,
+ negative_prompt: typing.Optional[str] = None,
+ image_prompt: typing.Optional[str] = None,
image_prompt_controlnet_models: typing.Optional[
- typing.Sequence[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem]
- ] = OMIT,
- image_prompt_strength: typing.Optional[float] = OMIT,
- image_prompt_scale: typing.Optional[float] = OMIT,
- image_prompt_pos_x: typing.Optional[float] = OMIT,
- image_prompt_pos_y: typing.Optional[float] = OMIT,
- selected_model: typing.Optional[QrCodeGeneratorPageRequestSelectedModel] = OMIT,
- selected_controlnet_model: typing.Optional[
- typing.Sequence[QrCodeGeneratorPageRequestSelectedControlnetModelItem]
- ] = OMIT,
- output_width: typing.Optional[int] = OMIT,
- output_height: typing.Optional[int] = OMIT,
- guidance_scale: typing.Optional[float] = OMIT,
- controlnet_conditioning_scale: typing.Optional[typing.Sequence[float]] = OMIT,
- num_outputs: typing.Optional[int] = OMIT,
- quality: typing.Optional[int] = OMIT,
- scheduler: typing.Optional[QrCodeGeneratorPageRequestScheduler] = OMIT,
- seed: typing.Optional[int] = OMIT,
- obj_scale: typing.Optional[float] = OMIT,
- obj_pos_x: typing.Optional[float] = OMIT,
- obj_pos_y: typing.Optional[float] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
+ typing.List[QrCodeRequestImagePromptControlnetModelsItem]
+ ] = None,
+ image_prompt_strength: typing.Optional[float] = None,
+ image_prompt_scale: typing.Optional[float] = None,
+ image_prompt_pos_x: typing.Optional[float] = None,
+ image_prompt_pos_y: typing.Optional[float] = None,
+ selected_model: typing.Optional[QrCodeRequestSelectedModel] = None,
+ selected_controlnet_model: typing.Optional[typing.List[QrCodeRequestSelectedControlnetModelItem]] = None,
+ output_width: typing.Optional[int] = None,
+ output_height: typing.Optional[int] = None,
+ guidance_scale: typing.Optional[float] = None,
+ controlnet_conditioning_scale: typing.Optional[typing.List[float]] = None,
+ num_outputs: typing.Optional[int] = None,
+ quality: typing.Optional[int] = None,
+ scheduler: typing.Optional[QrCodeRequestScheduler] = None,
+ seed: typing.Optional[int] = None,
+ obj_scale: typing.Optional[float] = None,
+ obj_pos_x: typing.Optional[float] = None,
+ obj_pos_y: typing.Optional[float] = None,
+ settings: typing.Optional[RunSettings] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> QrCodeGeneratorPageOutput:
"""
@@ -5082,18 +5094,20 @@ async def qr_code(
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
+ functions : typing.Optional[typing.List[RecipeFunction]]
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
qr_code_data : typing.Optional[str]
- qr_code_input_image : typing.Optional[str]
+ qr_code_input_image : typing.Optional[core.File]
+ See core.File for more documentation
qr_code_vcard : typing.Optional[Vcard]
- qr_code_file : typing.Optional[str]
+ qr_code_file : typing.Optional[core.File]
+ See core.File for more documentation
use_url_shortener : typing.Optional[bool]
@@ -5101,7 +5115,7 @@ async def qr_code(
image_prompt : typing.Optional[str]
- image_prompt_controlnet_models : typing.Optional[typing.Sequence[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem]]
+ image_prompt_controlnet_models : typing.Optional[typing.List[QrCodeRequestImagePromptControlnetModelsItem]]
image_prompt_strength : typing.Optional[float]
@@ -5111,9 +5125,9 @@ async def qr_code(
image_prompt_pos_y : typing.Optional[float]
- selected_model : typing.Optional[QrCodeGeneratorPageRequestSelectedModel]
+ selected_model : typing.Optional[QrCodeRequestSelectedModel]
- selected_controlnet_model : typing.Optional[typing.Sequence[QrCodeGeneratorPageRequestSelectedControlnetModelItem]]
+ selected_controlnet_model : typing.Optional[typing.List[QrCodeRequestSelectedControlnetModelItem]]
output_width : typing.Optional[int]
@@ -5121,13 +5135,13 @@ async def qr_code(
guidance_scale : typing.Optional[float]
- controlnet_conditioning_scale : typing.Optional[typing.Sequence[float]]
+ controlnet_conditioning_scale : typing.Optional[typing.List[float]]
num_outputs : typing.Optional[int]
quality : typing.Optional[int]
- scheduler : typing.Optional[QrCodeGeneratorPageRequestScheduler]
+ scheduler : typing.Optional[QrCodeRequestScheduler]
seed : typing.Optional[int]
@@ -5172,13 +5186,11 @@ async def main() -> None:
params={
"example_id": example_id,
},
- json={
+ data={
"functions": functions,
"variables": variables,
"qr_code_data": qr_code_data,
- "qr_code_input_image": qr_code_input_image,
"qr_code_vcard": qr_code_vcard,
- "qr_code_file": qr_code_file,
"use_url_shortener": use_url_shortener,
"text_prompt": text_prompt,
"negative_prompt": negative_prompt,
@@ -5203,6 +5215,10 @@ async def main() -> None:
"obj_pos_y": obj_pos_y,
"settings": settings,
},
+ files={
+ "qr_code_input_image": qr_code_input_image,
+ "qr_code_file": qr_code_file,
+ },
request_options=request_options,
omit=OMIT,
)
@@ -5971,28 +5987,24 @@ async def main() -> None:
async def bulk_run(
self,
*,
- documents: typing.Sequence[str],
- run_urls: typing.Sequence[str],
+ documents: typing.List[core.File],
+ run_urls: typing.List[str],
input_columns: typing.Dict[str, str],
output_columns: typing.Dict[str, str],
example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- eval_urls: typing.Optional[typing.Sequence[str]] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
+ eval_urls: typing.Optional[typing.List[str]] = None,
+ settings: typing.Optional[RunSettings] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> BulkRunnerPageOutput:
"""
Parameters
----------
- documents : typing.Sequence[str]
-
- Upload or link to a CSV or google sheet that contains your sample input data.
- For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs.
- Remember to includes header names in your CSV too.
+ documents : typing.List[core.File]
+ See core.File for more documentation
-
- run_urls : typing.Sequence[str]
+ run_urls : typing.List[str]
Provide one or more Gooey.AI workflow runs.
You can add multiple runs from the same recipe (e.g. two versions of your copilot) and we'll run the inputs over both of them.
@@ -6010,12 +6022,12 @@ async def bulk_run(
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
+ functions : typing.Optional[typing.List[RecipeFunction]]
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
- eval_urls : typing.Optional[typing.Sequence[str]]
+ eval_urls : typing.Optional[typing.List[str]]
_(optional)_ Add one or more Gooey.AI Evaluator Workflows to evaluate the results of your runs.
@@ -6043,7 +6055,6 @@ async def bulk_run(
async def main() -> None:
await client.bulk_run(
- documents=["documents"],
run_urls=["run_urls"],
input_columns={"key": "value"},
output_columns={"key": "value"},
@@ -6058,16 +6069,18 @@ async def main() -> None:
params={
"example_id": example_id,
},
- json={
+ data={
"functions": functions,
"variables": variables,
- "documents": documents,
"run_urls": run_urls,
"input_columns": input_columns,
"output_columns": output_columns,
"eval_urls": eval_urls,
"settings": settings,
},
+ files={
+ "documents": documents,
+ },
request_options=request_options,
omit=OMIT,
)
@@ -6276,46 +6289,47 @@ async def main() -> None:
async def synthesize_data(
self,
*,
- documents: typing.Sequence[str],
+ documents: typing.List[core.File],
example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- sheet_url: typing.Optional[str] = OMIT,
- selected_asr_model: typing.Optional[DocExtractPageRequestSelectedAsrModel] = OMIT,
- google_translate_target: typing.Optional[str] = OMIT,
- glossary_document: typing.Optional[str] = OMIT,
- task_instructions: typing.Optional[str] = OMIT,
- selected_model: typing.Optional[LargeLanguageModels] = OMIT,
- avoid_repetition: typing.Optional[bool] = OMIT,
- num_outputs: typing.Optional[int] = OMIT,
- quality: typing.Optional[float] = OMIT,
- max_tokens: typing.Optional[int] = OMIT,
- sampling_temperature: typing.Optional[float] = OMIT,
- response_format_type: typing.Optional[DocExtractPageRequestResponseFormatType] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
+ sheet_url: typing.Optional[core.File] = None,
+ selected_asr_model: typing.Optional[SynthesizeDataRequestSelectedAsrModel] = None,
+ google_translate_target: typing.Optional[str] = None,
+ glossary_document: typing.Optional[core.File] = None,
+ task_instructions: typing.Optional[str] = None,
+ selected_model: typing.Optional[LargeLanguageModels] = None,
+ avoid_repetition: typing.Optional[bool] = None,
+ num_outputs: typing.Optional[int] = None,
+ quality: typing.Optional[float] = None,
+ max_tokens: typing.Optional[int] = None,
+ sampling_temperature: typing.Optional[float] = None,
+ response_format_type: typing.Optional[SynthesizeDataRequestResponseFormatType] = None,
+ settings: typing.Optional[RunSettings] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> DocExtractPageOutput:
"""
Parameters
----------
- documents : typing.Sequence[str]
+ documents : typing.List[core.File]
+ See core.File for more documentation
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
+ functions : typing.Optional[typing.List[RecipeFunction]]
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
- sheet_url : typing.Optional[str]
+ sheet_url : typing.Optional[core.File]
+ See core.File for more documentation
- selected_asr_model : typing.Optional[DocExtractPageRequestSelectedAsrModel]
+ selected_asr_model : typing.Optional[SynthesizeDataRequestSelectedAsrModel]
google_translate_target : typing.Optional[str]
- glossary_document : typing.Optional[str]
- Provide a glossary to customize translation and improve accuracy of domain-specific terms.
- If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
+ glossary_document : typing.Optional[core.File]
+ See core.File for more documentation
task_instructions : typing.Optional[str]
@@ -6331,7 +6345,7 @@ async def synthesize_data(
sampling_temperature : typing.Optional[float]
- response_format_type : typing.Optional[DocExtractPageRequestResponseFormatType]
+ response_format_type : typing.Optional[SynthesizeDataRequestResponseFormatType]
settings : typing.Optional[RunSettings]
@@ -6355,9 +6369,7 @@ async def synthesize_data(
async def main() -> None:
- await client.synthesize_data(
- documents=["documents"],
- )
+ await client.synthesize_data()
asyncio.run(main())
@@ -6368,14 +6380,11 @@ async def main() -> None:
params={
"example_id": example_id,
},
- json={
+ data={
"functions": functions,
"variables": variables,
- "documents": documents,
- "sheet_url": sheet_url,
"selected_asr_model": selected_asr_model,
"google_translate_target": google_translate_target,
- "glossary_document": glossary_document,
"task_instructions": task_instructions,
"selected_model": selected_model,
"avoid_repetition": avoid_repetition,
@@ -6386,6 +6395,11 @@ async def main() -> None:
"response_format_type": response_format_type,
"settings": settings,
},
+ files={
+ "documents": documents,
+ "sheet_url": sheet_url,
+ "glossary_document": glossary_document,
+ },
request_options=request_options,
omit=OMIT,
)
@@ -6908,33 +6922,34 @@ async def main() -> None:
async def doc_summary(
self,
*,
- documents: typing.Sequence[str],
+ documents: typing.List[core.File],
example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- task_instructions: typing.Optional[str] = OMIT,
- merge_instructions: typing.Optional[str] = OMIT,
- selected_model: typing.Optional[LargeLanguageModels] = OMIT,
- chain_type: typing.Optional[typing.Literal["map_reduce"]] = OMIT,
- selected_asr_model: typing.Optional[DocSummaryPageRequestSelectedAsrModel] = OMIT,
- google_translate_target: typing.Optional[str] = OMIT,
- avoid_repetition: typing.Optional[bool] = OMIT,
- num_outputs: typing.Optional[int] = OMIT,
- quality: typing.Optional[float] = OMIT,
- max_tokens: typing.Optional[int] = OMIT,
- sampling_temperature: typing.Optional[float] = OMIT,
- response_format_type: typing.Optional[DocSummaryPageRequestResponseFormatType] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
+ task_instructions: typing.Optional[str] = None,
+ merge_instructions: typing.Optional[str] = None,
+ selected_model: typing.Optional[LargeLanguageModels] = None,
+ chain_type: typing.Optional[typing.Literal["map_reduce"]] = None,
+ selected_asr_model: typing.Optional[DocSummaryRequestSelectedAsrModel] = None,
+ google_translate_target: typing.Optional[str] = None,
+ avoid_repetition: typing.Optional[bool] = None,
+ num_outputs: typing.Optional[int] = None,
+ quality: typing.Optional[float] = None,
+ max_tokens: typing.Optional[int] = None,
+ sampling_temperature: typing.Optional[float] = None,
+ response_format_type: typing.Optional[DocSummaryRequestResponseFormatType] = None,
+ settings: typing.Optional[RunSettings] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> DocSummaryPageOutput:
"""
Parameters
----------
- documents : typing.Sequence[str]
+ documents : typing.List[core.File]
+ See core.File for more documentation
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
+ functions : typing.Optional[typing.List[RecipeFunction]]
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
@@ -6947,7 +6962,7 @@ async def doc_summary(
chain_type : typing.Optional[typing.Literal["map_reduce"]]
- selected_asr_model : typing.Optional[DocSummaryPageRequestSelectedAsrModel]
+ selected_asr_model : typing.Optional[DocSummaryRequestSelectedAsrModel]
google_translate_target : typing.Optional[str]
@@ -6961,7 +6976,7 @@ async def doc_summary(
sampling_temperature : typing.Optional[float]
- response_format_type : typing.Optional[DocSummaryPageRequestResponseFormatType]
+ response_format_type : typing.Optional[DocSummaryRequestResponseFormatType]
settings : typing.Optional[RunSettings]
@@ -6985,9 +7000,7 @@ async def doc_summary(
async def main() -> None:
- await client.doc_summary(
- documents=["documents"],
- )
+ await client.doc_summary()
asyncio.run(main())
@@ -6998,10 +7011,9 @@ async def main() -> None:
params={
"example_id": example_id,
},
- json={
+ data={
"functions": functions,
"variables": variables,
- "documents": documents,
"task_instructions": task_instructions,
"merge_instructions": merge_instructions,
"selected_model": selected_model,
@@ -7016,6 +7028,9 @@ async def main() -> None:
"response_format_type": response_format_type,
"settings": settings,
},
+ files={
+ "documents": documents,
+ },
request_options=request_options,
omit=OMIT,
)
@@ -7172,17 +7187,17 @@ async def lipsync(
self,
*,
example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- input_face: typing.Optional[str] = OMIT,
- face_padding_top: typing.Optional[int] = OMIT,
- face_padding_bottom: typing.Optional[int] = OMIT,
- face_padding_left: typing.Optional[int] = OMIT,
- face_padding_right: typing.Optional[int] = OMIT,
- sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT,
- selected_model: typing.Optional[LipsyncPageRequestSelectedModel] = OMIT,
- input_audio: typing.Optional[str] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
+ input_face: typing.Optional[core.File] = None,
+ face_padding_top: typing.Optional[int] = None,
+ face_padding_bottom: typing.Optional[int] = None,
+ face_padding_left: typing.Optional[int] = None,
+ face_padding_right: typing.Optional[int] = None,
+ sadtalker_settings: typing.Optional[SadTalkerSettings] = None,
+ selected_model: typing.Optional[LipsyncRequestSelectedModel] = None,
+ input_audio: typing.Optional[core.File] = None,
+ settings: typing.Optional[RunSettings] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> LipsyncPageOutput:
"""
@@ -7190,12 +7205,13 @@ async def lipsync(
----------
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
+ functions : typing.Optional[typing.List[RecipeFunction]]
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
- input_face : typing.Optional[str]
+ input_face : typing.Optional[core.File]
+ See core.File for more documentation
face_padding_top : typing.Optional[int]
@@ -7207,9 +7223,10 @@ async def lipsync(
sadtalker_settings : typing.Optional[SadTalkerSettings]
- selected_model : typing.Optional[LipsyncPageRequestSelectedModel]
+ selected_model : typing.Optional[LipsyncRequestSelectedModel]
- input_audio : typing.Optional[str]
+ input_audio : typing.Optional[core.File]
+ See core.File for more documentation
settings : typing.Optional[RunSettings]
@@ -7244,19 +7261,21 @@ async def main() -> None:
params={
"example_id": example_id,
},
- json={
+ data={
"functions": functions,
"variables": variables,
- "input_face": input_face,
"face_padding_top": face_padding_top,
"face_padding_bottom": face_padding_bottom,
"face_padding_left": face_padding_left,
"face_padding_right": face_padding_right,
"sadtalker_settings": sadtalker_settings,
"selected_model": selected_model,
- "input_audio": input_audio,
"settings": settings,
},
+ files={
+ "input_face": input_face,
+ "input_audio": input_audio,
+ },
request_options=request_options,
omit=OMIT,
)
@@ -7309,34 +7328,34 @@ async def lipsync_tts(
*,
text_prompt: str,
example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- tts_provider: typing.Optional[LipsyncTtsPageRequestTtsProvider] = OMIT,
- uberduck_voice_name: typing.Optional[str] = OMIT,
- uberduck_speaking_rate: typing.Optional[float] = OMIT,
- google_voice_name: typing.Optional[str] = OMIT,
- google_speaking_rate: typing.Optional[float] = OMIT,
- google_pitch: typing.Optional[float] = OMIT,
- bark_history_prompt: typing.Optional[str] = OMIT,
- elevenlabs_voice_name: typing.Optional[str] = OMIT,
- elevenlabs_api_key: typing.Optional[str] = OMIT,
- elevenlabs_voice_id: typing.Optional[str] = OMIT,
- elevenlabs_model: typing.Optional[str] = OMIT,
- elevenlabs_stability: typing.Optional[float] = OMIT,
- elevenlabs_similarity_boost: typing.Optional[float] = OMIT,
- elevenlabs_style: typing.Optional[float] = OMIT,
- elevenlabs_speaker_boost: typing.Optional[bool] = OMIT,
- azure_voice_name: typing.Optional[str] = OMIT,
- openai_voice_name: typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName] = OMIT,
- openai_tts_model: typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel] = OMIT,
- input_face: typing.Optional[str] = OMIT,
- face_padding_top: typing.Optional[int] = OMIT,
- face_padding_bottom: typing.Optional[int] = OMIT,
- face_padding_left: typing.Optional[int] = OMIT,
- face_padding_right: typing.Optional[int] = OMIT,
- sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT,
- selected_model: typing.Optional[LipsyncTtsPageRequestSelectedModel] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
+ tts_provider: typing.Optional[LipsyncTtsRequestTtsProvider] = None,
+ uberduck_voice_name: typing.Optional[str] = None,
+ uberduck_speaking_rate: typing.Optional[float] = None,
+ google_voice_name: typing.Optional[str] = None,
+ google_speaking_rate: typing.Optional[float] = None,
+ google_pitch: typing.Optional[float] = None,
+ bark_history_prompt: typing.Optional[str] = None,
+ elevenlabs_voice_name: typing.Optional[str] = None,
+ elevenlabs_api_key: typing.Optional[str] = None,
+ elevenlabs_voice_id: typing.Optional[str] = None,
+ elevenlabs_model: typing.Optional[str] = None,
+ elevenlabs_stability: typing.Optional[float] = None,
+ elevenlabs_similarity_boost: typing.Optional[float] = None,
+ elevenlabs_style: typing.Optional[float] = None,
+ elevenlabs_speaker_boost: typing.Optional[bool] = None,
+ azure_voice_name: typing.Optional[str] = None,
+ openai_voice_name: typing.Optional[LipsyncTtsRequestOpenaiVoiceName] = None,
+ openai_tts_model: typing.Optional[LipsyncTtsRequestOpenaiTtsModel] = None,
+ input_face: typing.Optional[core.File] = None,
+ face_padding_top: typing.Optional[int] = None,
+ face_padding_bottom: typing.Optional[int] = None,
+ face_padding_left: typing.Optional[int] = None,
+ face_padding_right: typing.Optional[int] = None,
+ sadtalker_settings: typing.Optional[SadTalkerSettings] = None,
+ selected_model: typing.Optional[LipsyncTtsRequestSelectedModel] = None,
+ settings: typing.Optional[RunSettings] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> LipsyncTtsPageOutput:
"""
@@ -7346,12 +7365,12 @@ async def lipsync_tts(
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
+ functions : typing.Optional[typing.List[RecipeFunction]]
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
- tts_provider : typing.Optional[LipsyncTtsPageRequestTtsProvider]
+ tts_provider : typing.Optional[LipsyncTtsRequestTtsProvider]
uberduck_voice_name : typing.Optional[str]
@@ -7384,11 +7403,12 @@ async def lipsync_tts(
azure_voice_name : typing.Optional[str]
- openai_voice_name : typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName]
+ openai_voice_name : typing.Optional[LipsyncTtsRequestOpenaiVoiceName]
- openai_tts_model : typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel]
+ openai_tts_model : typing.Optional[LipsyncTtsRequestOpenaiTtsModel]
- input_face : typing.Optional[str]
+ input_face : typing.Optional[core.File]
+ See core.File for more documentation
face_padding_top : typing.Optional[int]
@@ -7400,7 +7420,7 @@ async def lipsync_tts(
sadtalker_settings : typing.Optional[SadTalkerSettings]
- selected_model : typing.Optional[LipsyncTtsPageRequestSelectedModel]
+ selected_model : typing.Optional[LipsyncTtsRequestSelectedModel]
settings : typing.Optional[RunSettings]
@@ -7437,7 +7457,7 @@ async def main() -> None:
params={
"example_id": example_id,
},
- json={
+ data={
"functions": functions,
"variables": variables,
"text_prompt": text_prompt,
@@ -7459,7 +7479,6 @@ async def main() -> None:
"azure_voice_name": azure_voice_name,
"openai_voice_name": openai_voice_name,
"openai_tts_model": openai_tts_model,
- "input_face": input_face,
"face_padding_top": face_padding_top,
"face_padding_bottom": face_padding_bottom,
"face_padding_left": face_padding_left,
@@ -7468,6 +7487,9 @@ async def main() -> None:
"selected_model": selected_model,
"settings": settings,
},
+ files={
+ "input_face": input_face,
+ },
request_options=request_options,
omit=OMIT,
)
@@ -7701,40 +7723,41 @@ async def main() -> None:
async def speech_recognition(
self,
*,
- documents: typing.Sequence[str],
+ documents: typing.List[core.File],
example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- selected_model: typing.Optional[AsrPageRequestSelectedModel] = OMIT,
- language: typing.Optional[str] = OMIT,
- translation_model: typing.Optional[AsrPageRequestTranslationModel] = OMIT,
- output_format: typing.Optional[AsrPageRequestOutputFormat] = OMIT,
- google_translate_target: typing.Optional[str] = OMIT,
- translation_source: typing.Optional[str] = OMIT,
- translation_target: typing.Optional[str] = OMIT,
- glossary_document: typing.Optional[str] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
+ selected_model: typing.Optional[SpeechRecognitionRequestSelectedModel] = None,
+ language: typing.Optional[str] = None,
+ translation_model: typing.Optional[SpeechRecognitionRequestTranslationModel] = None,
+ output_format: typing.Optional[SpeechRecognitionRequestOutputFormat] = None,
+ google_translate_target: typing.Optional[str] = None,
+ translation_source: typing.Optional[str] = None,
+ translation_target: typing.Optional[str] = None,
+ glossary_document: typing.Optional[core.File] = None,
+ settings: typing.Optional[RunSettings] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> AsrPageOutput:
"""
Parameters
----------
- documents : typing.Sequence[str]
+ documents : typing.List[core.File]
+ See core.File for more documentation
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
+ functions : typing.Optional[typing.List[RecipeFunction]]
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
- selected_model : typing.Optional[AsrPageRequestSelectedModel]
+ selected_model : typing.Optional[SpeechRecognitionRequestSelectedModel]
language : typing.Optional[str]
- translation_model : typing.Optional[AsrPageRequestTranslationModel]
+ translation_model : typing.Optional[SpeechRecognitionRequestTranslationModel]
- output_format : typing.Optional[AsrPageRequestOutputFormat]
+ output_format : typing.Optional[SpeechRecognitionRequestOutputFormat]
google_translate_target : typing.Optional[str]
use `translation_model` & `translation_target` instead.
@@ -7743,9 +7766,8 @@ async def speech_recognition(
translation_target : typing.Optional[str]
- glossary_document : typing.Optional[str]
- Provide a glossary to customize translation and improve accuracy of domain-specific terms.
- If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
+ glossary_document : typing.Optional[core.File]
+ See core.File for more documentation
settings : typing.Optional[RunSettings]
@@ -7769,9 +7791,7 @@ async def speech_recognition(
async def main() -> None:
- await client.speech_recognition(
- documents=["documents"],
- )
+ await client.speech_recognition()
asyncio.run(main())
@@ -7782,10 +7802,9 @@ async def main() -> None:
params={
"example_id": example_id,
},
- json={
+ data={
"functions": functions,
"variables": variables,
- "documents": documents,
"selected_model": selected_model,
"language": language,
"translation_model": translation_model,
@@ -7793,9 +7812,12 @@ async def main() -> None:
"google_translate_target": google_translate_target,
"translation_source": translation_source,
"translation_target": translation_target,
- "glossary_document": glossary_document,
"settings": settings,
},
+ files={
+ "documents": documents,
+ "glossary_document": glossary_document,
+ },
request_options=request_options,
omit=OMIT,
)
@@ -7989,14 +8011,14 @@ async def translate(
self,
*,
example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- texts: typing.Optional[typing.Sequence[str]] = OMIT,
- selected_model: typing.Optional[TranslationPageRequestSelectedModel] = OMIT,
- translation_source: typing.Optional[str] = OMIT,
- translation_target: typing.Optional[str] = OMIT,
- glossary_document: typing.Optional[str] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
+ texts: typing.Optional[typing.List[str]] = None,
+ selected_model: typing.Optional[TranslateRequestSelectedModel] = None,
+ translation_source: typing.Optional[str] = None,
+ translation_target: typing.Optional[str] = None,
+ glossary_document: typing.Optional[core.File] = None,
+ settings: typing.Optional[RunSettings] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> TranslationPageOutput:
"""
@@ -8004,22 +8026,21 @@ async def translate(
----------
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
+ functions : typing.Optional[typing.List[RecipeFunction]]
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
- texts : typing.Optional[typing.Sequence[str]]
+ texts : typing.Optional[typing.List[str]]
- selected_model : typing.Optional[TranslationPageRequestSelectedModel]
+ selected_model : typing.Optional[TranslateRequestSelectedModel]
translation_source : typing.Optional[str]
translation_target : typing.Optional[str]
- glossary_document : typing.Optional[str]
- Provide a glossary to customize translation and improve accuracy of domain-specific terms.
- If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
+ glossary_document : typing.Optional[core.File]
+ See core.File for more documentation
settings : typing.Optional[RunSettings]
@@ -8054,16 +8075,18 @@ async def main() -> None:
params={
"example_id": example_id,
},
- json={
+ data={
"functions": functions,
"variables": variables,
"texts": texts,
"selected_model": selected_model,
"translation_source": translation_source,
"translation_target": translation_target,
- "glossary_document": glossary_document,
"settings": settings,
},
+ files={
+ "glossary_document": glossary_document,
+ },
request_options=request_options,
omit=OMIT,
)
@@ -8114,43 +8137,44 @@ async def main() -> None:
async def remix_image(
self,
*,
- input_image: str,
+ input_image: core.File,
example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- text_prompt: typing.Optional[str] = OMIT,
- selected_model: typing.Optional[Img2ImgPageRequestSelectedModel] = OMIT,
- selected_controlnet_model: typing.Optional[Img2ImgPageRequestSelectedControlnetModel] = OMIT,
- negative_prompt: typing.Optional[str] = OMIT,
- num_outputs: typing.Optional[int] = OMIT,
- quality: typing.Optional[int] = OMIT,
- output_width: typing.Optional[int] = OMIT,
- output_height: typing.Optional[int] = OMIT,
- guidance_scale: typing.Optional[float] = OMIT,
- prompt_strength: typing.Optional[float] = OMIT,
- controlnet_conditioning_scale: typing.Optional[typing.Sequence[float]] = OMIT,
- seed: typing.Optional[int] = OMIT,
- image_guidance_scale: typing.Optional[float] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
+ text_prompt: typing.Optional[str] = None,
+ selected_model: typing.Optional[RemixImageRequestSelectedModel] = None,
+ selected_controlnet_model: typing.Optional[RemixImageRequestSelectedControlnetModel] = None,
+ negative_prompt: typing.Optional[str] = None,
+ num_outputs: typing.Optional[int] = None,
+ quality: typing.Optional[int] = None,
+ output_width: typing.Optional[int] = None,
+ output_height: typing.Optional[int] = None,
+ guidance_scale: typing.Optional[float] = None,
+ prompt_strength: typing.Optional[float] = None,
+ controlnet_conditioning_scale: typing.Optional[typing.List[float]] = None,
+ seed: typing.Optional[int] = None,
+ image_guidance_scale: typing.Optional[float] = None,
+ settings: typing.Optional[RunSettings] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> Img2ImgPageOutput:
"""
Parameters
----------
- input_image : str
+ input_image : core.File
+ See core.File for more documentation
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
+ functions : typing.Optional[typing.List[RecipeFunction]]
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
text_prompt : typing.Optional[str]
- selected_model : typing.Optional[Img2ImgPageRequestSelectedModel]
+ selected_model : typing.Optional[RemixImageRequestSelectedModel]
- selected_controlnet_model : typing.Optional[Img2ImgPageRequestSelectedControlnetModel]
+ selected_controlnet_model : typing.Optional[RemixImageRequestSelectedControlnetModel]
negative_prompt : typing.Optional[str]
@@ -8166,7 +8190,7 @@ async def remix_image(
prompt_strength : typing.Optional[float]
- controlnet_conditioning_scale : typing.Optional[typing.Sequence[float]]
+ controlnet_conditioning_scale : typing.Optional[typing.List[float]]
seed : typing.Optional[int]
@@ -8194,9 +8218,7 @@ async def remix_image(
async def main() -> None:
- await client.remix_image(
- input_image="input_image",
- )
+ await client.remix_image()
asyncio.run(main())
@@ -8207,10 +8229,9 @@ async def main() -> None:
params={
"example_id": example_id,
},
- json={
+ data={
"functions": functions,
"variables": variables,
- "input_image": input_image,
"text_prompt": text_prompt,
"selected_model": selected_model,
"selected_controlnet_model": selected_controlnet_model,
@@ -8226,6 +8247,9 @@ async def main() -> None:
"image_guidance_scale": image_guidance_scale,
"settings": settings,
},
+ files={
+ "input_image": input_image,
+ },
request_options=request_options,
omit=OMIT,
)
@@ -8442,37 +8466,38 @@ async def main() -> None:
async def product_image(
self,
*,
- input_image: str,
+ input_image: core.File,
text_prompt: str,
example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- obj_scale: typing.Optional[float] = OMIT,
- obj_pos_x: typing.Optional[float] = OMIT,
- obj_pos_y: typing.Optional[float] = OMIT,
- mask_threshold: typing.Optional[float] = OMIT,
- selected_model: typing.Optional[ObjectInpaintingPageRequestSelectedModel] = OMIT,
- negative_prompt: typing.Optional[str] = OMIT,
- num_outputs: typing.Optional[int] = OMIT,
- quality: typing.Optional[int] = OMIT,
- output_width: typing.Optional[int] = OMIT,
- output_height: typing.Optional[int] = OMIT,
- guidance_scale: typing.Optional[float] = OMIT,
- sd2upscaling: typing.Optional[bool] = OMIT,
- seed: typing.Optional[int] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
+ obj_scale: typing.Optional[float] = None,
+ obj_pos_x: typing.Optional[float] = None,
+ obj_pos_y: typing.Optional[float] = None,
+ mask_threshold: typing.Optional[float] = None,
+ selected_model: typing.Optional[ProductImageRequestSelectedModel] = None,
+ negative_prompt: typing.Optional[str] = None,
+ num_outputs: typing.Optional[int] = None,
+ quality: typing.Optional[int] = None,
+ output_width: typing.Optional[int] = None,
+ output_height: typing.Optional[int] = None,
+ guidance_scale: typing.Optional[float] = None,
+ sd2upscaling: typing.Optional[bool] = None,
+ seed: typing.Optional[int] = None,
+ settings: typing.Optional[RunSettings] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> ObjectInpaintingPageOutput:
"""
Parameters
----------
- input_image : str
+ input_image : core.File
+ See core.File for more documentation
text_prompt : str
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
+ functions : typing.Optional[typing.List[RecipeFunction]]
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
@@ -8485,7 +8510,7 @@ async def product_image(
mask_threshold : typing.Optional[float]
- selected_model : typing.Optional[ObjectInpaintingPageRequestSelectedModel]
+ selected_model : typing.Optional[ProductImageRequestSelectedModel]
negative_prompt : typing.Optional[str]
@@ -8526,7 +8551,6 @@ async def product_image(
async def main() -> None:
await client.product_image(
- input_image="input_image",
text_prompt="text_prompt",
)
@@ -8539,10 +8563,9 @@ async def main() -> None:
params={
"example_id": example_id,
},
- json={
+ data={
"functions": functions,
"variables": variables,
- "input_image": input_image,
"text_prompt": text_prompt,
"obj_scale": obj_scale,
"obj_pos_x": obj_pos_x,
@@ -8559,6 +8582,9 @@ async def main() -> None:
"seed": seed,
"settings": settings,
},
+ files={
+ "input_image": input_image,
+ },
request_options=request_options,
omit=OMIT,
)
@@ -8609,36 +8635,37 @@ async def main() -> None:
async def portrait(
self,
*,
- input_image: str,
+ input_image: core.File,
text_prompt: str,
example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- face_scale: typing.Optional[float] = OMIT,
- face_pos_x: typing.Optional[float] = OMIT,
- face_pos_y: typing.Optional[float] = OMIT,
- selected_model: typing.Optional[FaceInpaintingPageRequestSelectedModel] = OMIT,
- negative_prompt: typing.Optional[str] = OMIT,
- num_outputs: typing.Optional[int] = OMIT,
- quality: typing.Optional[int] = OMIT,
- upscale_factor: typing.Optional[float] = OMIT,
- output_width: typing.Optional[int] = OMIT,
- output_height: typing.Optional[int] = OMIT,
- guidance_scale: typing.Optional[float] = OMIT,
- seed: typing.Optional[int] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
+ face_scale: typing.Optional[float] = None,
+ face_pos_x: typing.Optional[float] = None,
+ face_pos_y: typing.Optional[float] = None,
+ selected_model: typing.Optional[PortraitRequestSelectedModel] = None,
+ negative_prompt: typing.Optional[str] = None,
+ num_outputs: typing.Optional[int] = None,
+ quality: typing.Optional[int] = None,
+ upscale_factor: typing.Optional[float] = None,
+ output_width: typing.Optional[int] = None,
+ output_height: typing.Optional[int] = None,
+ guidance_scale: typing.Optional[float] = None,
+ seed: typing.Optional[int] = None,
+ settings: typing.Optional[RunSettings] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> FaceInpaintingPageOutput:
"""
Parameters
----------
- input_image : str
+ input_image : core.File
+ See core.File for more documentation
text_prompt : str
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
+ functions : typing.Optional[typing.List[RecipeFunction]]
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
@@ -8649,7 +8676,7 @@ async def portrait(
face_pos_y : typing.Optional[float]
- selected_model : typing.Optional[FaceInpaintingPageRequestSelectedModel]
+ selected_model : typing.Optional[PortraitRequestSelectedModel]
negative_prompt : typing.Optional[str]
@@ -8690,8 +8717,7 @@ async def portrait(
async def main() -> None:
await client.portrait(
- input_image="input_image",
- text_prompt="tony stark from the iron man",
+ text_prompt="text_prompt",
)
@@ -8703,10 +8729,9 @@ async def main() -> None:
params={
"example_id": example_id,
},
- json={
+ data={
"functions": functions,
"variables": variables,
- "input_image": input_image,
"text_prompt": text_prompt,
"face_scale": face_scale,
"face_pos_x": face_pos_x,
@@ -8722,6 +8747,9 @@ async def main() -> None:
"seed": seed,
"settings": settings,
},
+ files={
+ "input_image": input_image,
+ },
request_options=request_options,
omit=OMIT,
)
@@ -9131,33 +9159,34 @@ async def main() -> None:
async def remove_background(
self,
*,
- input_image: str,
+ input_image: core.File,
example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- selected_model: typing.Optional[ImageSegmentationPageRequestSelectedModel] = OMIT,
- mask_threshold: typing.Optional[float] = OMIT,
- rect_persepective_transform: typing.Optional[bool] = OMIT,
- reflection_opacity: typing.Optional[float] = OMIT,
- obj_scale: typing.Optional[float] = OMIT,
- obj_pos_x: typing.Optional[float] = OMIT,
- obj_pos_y: typing.Optional[float] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
+ selected_model: typing.Optional[RemoveBackgroundRequestSelectedModel] = None,
+ mask_threshold: typing.Optional[float] = None,
+ rect_persepective_transform: typing.Optional[bool] = None,
+ reflection_opacity: typing.Optional[float] = None,
+ obj_scale: typing.Optional[float] = None,
+ obj_pos_x: typing.Optional[float] = None,
+ obj_pos_y: typing.Optional[float] = None,
+ settings: typing.Optional[RunSettings] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> ImageSegmentationPageOutput:
"""
Parameters
----------
- input_image : str
+ input_image : core.File
+ See core.File for more documentation
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
+ functions : typing.Optional[typing.List[RecipeFunction]]
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
- selected_model : typing.Optional[ImageSegmentationPageRequestSelectedModel]
+ selected_model : typing.Optional[RemoveBackgroundRequestSelectedModel]
mask_threshold : typing.Optional[float]
@@ -9193,9 +9222,7 @@ async def remove_background(
async def main() -> None:
- await client.remove_background(
- input_image="input_image",
- )
+ await client.remove_background()
asyncio.run(main())
@@ -9206,10 +9233,9 @@ async def main() -> None:
params={
"example_id": example_id,
},
- json={
+ data={
"functions": functions,
"variables": variables,
- "input_image": input_image,
"selected_model": selected_model,
"mask_threshold": mask_threshold,
"rect_persepective_transform": rect_persepective_transform,
@@ -9219,6 +9245,9 @@ async def main() -> None:
"obj_pos_y": obj_pos_y,
"settings": settings,
},
+ files={
+ "input_image": input_image,
+ },
request_options=request_options,
omit=OMIT,
)
@@ -9271,13 +9300,13 @@ async def upscale(
*,
scale: int,
example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- input_image: typing.Optional[str] = OMIT,
- input_video: typing.Optional[str] = OMIT,
- selected_models: typing.Optional[typing.Sequence[CompareUpscalerPageRequestSelectedModelsItem]] = OMIT,
- selected_bg_model: typing.Optional[typing.Literal["real_esrgan_x2"]] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
+ functions: typing.Optional[typing.List[RecipeFunction]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
+ input_image: typing.Optional[core.File] = None,
+ input_video: typing.Optional[core.File] = None,
+ selected_models: typing.Optional[typing.List[UpscaleRequestSelectedModelsItem]] = None,
+ selected_bg_model: typing.Optional[typing.Literal["real_esrgan_x2"]] = None,
+ settings: typing.Optional[RunSettings] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> CompareUpscalerPageOutput:
"""
@@ -9288,18 +9317,18 @@ async def upscale(
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
+ functions : typing.Optional[typing.List[RecipeFunction]]
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
- input_image : typing.Optional[str]
- Input Image
+ input_image : typing.Optional[core.File]
+ See core.File for more documentation
- input_video : typing.Optional[str]
- Input Video
+ input_video : typing.Optional[core.File]
+ See core.File for more documentation
- selected_models : typing.Optional[typing.Sequence[CompareUpscalerPageRequestSelectedModelsItem]]
+ selected_models : typing.Optional[typing.List[UpscaleRequestSelectedModelsItem]]
selected_bg_model : typing.Optional[typing.Literal["real_esrgan_x2"]]
@@ -9338,16 +9367,18 @@ async def main() -> None:
params={
"example_id": example_id,
},
- json={
+ data={
"functions": functions,
"variables": variables,
- "input_image": input_image,
- "input_video": input_video,
"scale": scale,
"selected_models": selected_models,
"selected_bg_model": selected_bg_model,
"settings": settings,
},
+ files={
+ "input_image": input_image,
+ "input_video": input_video,
+ },
request_options=request_options,
omit=OMIT,
)
diff --git a/src/gooey/copilot/__init__.py b/src/gooey/copilot/__init__.py
index db36163..3234b31 100644
--- a/src/gooey/copilot/__init__.py
+++ b/src/gooey/copilot/__init__.py
@@ -1,25 +1,33 @@
# This file was auto-generated by Fern from our API Definition.
from .types import (
- VideoBotsPageRequestAsrModel,
- VideoBotsPageRequestCitationStyle,
- VideoBotsPageRequestEmbeddingModel,
- VideoBotsPageRequestLipsyncModel,
- VideoBotsPageRequestOpenaiTtsModel,
- VideoBotsPageRequestOpenaiVoiceName,
- VideoBotsPageRequestResponseFormatType,
- VideoBotsPageRequestTranslationModel,
- VideoBotsPageRequestTtsProvider,
+ CopilotCompletionRequestAsrModel,
+ CopilotCompletionRequestCitationStyle,
+ CopilotCompletionRequestEmbeddingModel,
+ CopilotCompletionRequestFunctionsItem,
+ CopilotCompletionRequestFunctionsItemTrigger,
+ CopilotCompletionRequestLipsyncModel,
+ CopilotCompletionRequestOpenaiTtsModel,
+ CopilotCompletionRequestOpenaiVoiceName,
+ CopilotCompletionRequestResponseFormatType,
+ CopilotCompletionRequestSadtalkerSettings,
+ CopilotCompletionRequestSadtalkerSettingsPreprocess,
+ CopilotCompletionRequestTranslationModel,
+ CopilotCompletionRequestTtsProvider,
)
__all__ = [
- "VideoBotsPageRequestAsrModel",
- "VideoBotsPageRequestCitationStyle",
- "VideoBotsPageRequestEmbeddingModel",
- "VideoBotsPageRequestLipsyncModel",
- "VideoBotsPageRequestOpenaiTtsModel",
- "VideoBotsPageRequestOpenaiVoiceName",
- "VideoBotsPageRequestResponseFormatType",
- "VideoBotsPageRequestTranslationModel",
- "VideoBotsPageRequestTtsProvider",
+ "CopilotCompletionRequestAsrModel",
+ "CopilotCompletionRequestCitationStyle",
+ "CopilotCompletionRequestEmbeddingModel",
+ "CopilotCompletionRequestFunctionsItem",
+ "CopilotCompletionRequestFunctionsItemTrigger",
+ "CopilotCompletionRequestLipsyncModel",
+ "CopilotCompletionRequestOpenaiTtsModel",
+ "CopilotCompletionRequestOpenaiVoiceName",
+ "CopilotCompletionRequestResponseFormatType",
+ "CopilotCompletionRequestSadtalkerSettings",
+ "CopilotCompletionRequestSadtalkerSettingsPreprocess",
+ "CopilotCompletionRequestTranslationModel",
+ "CopilotCompletionRequestTtsProvider",
]
diff --git a/src/gooey/copilot/client.py b/src/gooey/copilot/client.py
index a27e8d5..9dcc465 100644
--- a/src/gooey/copilot/client.py
+++ b/src/gooey/copilot/client.py
@@ -2,20 +2,21 @@
import typing
from ..core.client_wrapper import SyncClientWrapper
-from ..types.recipe_function import RecipeFunction
+from .types.copilot_completion_request_functions_item import CopilotCompletionRequestFunctionsItem
+from .. import core
from ..types.conversation_entry import ConversationEntry
from ..types.large_language_models import LargeLanguageModels
-from .types.video_bots_page_request_embedding_model import VideoBotsPageRequestEmbeddingModel
-from .types.video_bots_page_request_citation_style import VideoBotsPageRequestCitationStyle
-from .types.video_bots_page_request_asr_model import VideoBotsPageRequestAsrModel
-from .types.video_bots_page_request_translation_model import VideoBotsPageRequestTranslationModel
-from .types.video_bots_page_request_lipsync_model import VideoBotsPageRequestLipsyncModel
+from .types.copilot_completion_request_embedding_model import CopilotCompletionRequestEmbeddingModel
+from .types.copilot_completion_request_citation_style import CopilotCompletionRequestCitationStyle
+from .types.copilot_completion_request_asr_model import CopilotCompletionRequestAsrModel
+from .types.copilot_completion_request_translation_model import CopilotCompletionRequestTranslationModel
+from .types.copilot_completion_request_lipsync_model import CopilotCompletionRequestLipsyncModel
from ..types.llm_tools import LlmTools
-from .types.video_bots_page_request_response_format_type import VideoBotsPageRequestResponseFormatType
-from .types.video_bots_page_request_tts_provider import VideoBotsPageRequestTtsProvider
-from .types.video_bots_page_request_openai_voice_name import VideoBotsPageRequestOpenaiVoiceName
-from .types.video_bots_page_request_openai_tts_model import VideoBotsPageRequestOpenaiTtsModel
-from ..types.sad_talker_settings import SadTalkerSettings
+from .types.copilot_completion_request_response_format_type import CopilotCompletionRequestResponseFormatType
+from .types.copilot_completion_request_tts_provider import CopilotCompletionRequestTtsProvider
+from .types.copilot_completion_request_openai_voice_name import CopilotCompletionRequestOpenaiVoiceName
+from .types.copilot_completion_request_openai_tts_model import CopilotCompletionRequestOpenaiTtsModel
+from .types.copilot_completion_request_sadtalker_settings import CopilotCompletionRequestSadtalkerSettings
from ..types.run_settings import RunSettings
from ..core.request_options import RequestOptions
from ..types.video_bots_page_output import VideoBotsPageOutput
@@ -41,67 +42,67 @@ def completion(
self,
*,
example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- input_prompt: typing.Optional[str] = OMIT,
- input_audio: typing.Optional[str] = OMIT,
- input_images: typing.Optional[typing.Sequence[str]] = OMIT,
- input_documents: typing.Optional[typing.Sequence[str]] = OMIT,
- doc_extract_url: typing.Optional[str] = OMIT,
- messages: typing.Optional[typing.Sequence[ConversationEntry]] = OMIT,
- bot_script: typing.Optional[str] = OMIT,
- selected_model: typing.Optional[LargeLanguageModels] = OMIT,
- document_model: typing.Optional[str] = OMIT,
- task_instructions: typing.Optional[str] = OMIT,
- query_instructions: typing.Optional[str] = OMIT,
- keyword_instructions: typing.Optional[str] = OMIT,
- documents: typing.Optional[typing.Sequence[str]] = OMIT,
- max_references: typing.Optional[int] = OMIT,
- max_context_words: typing.Optional[int] = OMIT,
- scroll_jump: typing.Optional[int] = OMIT,
- embedding_model: typing.Optional[VideoBotsPageRequestEmbeddingModel] = OMIT,
- dense_weight: typing.Optional[float] = OMIT,
- citation_style: typing.Optional[VideoBotsPageRequestCitationStyle] = OMIT,
- use_url_shortener: typing.Optional[bool] = OMIT,
- asr_model: typing.Optional[VideoBotsPageRequestAsrModel] = OMIT,
- asr_language: typing.Optional[str] = OMIT,
- translation_model: typing.Optional[VideoBotsPageRequestTranslationModel] = OMIT,
- user_language: typing.Optional[str] = OMIT,
- input_glossary_document: typing.Optional[str] = OMIT,
- output_glossary_document: typing.Optional[str] = OMIT,
- lipsync_model: typing.Optional[VideoBotsPageRequestLipsyncModel] = OMIT,
- tools: typing.Optional[typing.Sequence[LlmTools]] = OMIT,
- avoid_repetition: typing.Optional[bool] = OMIT,
- num_outputs: typing.Optional[int] = OMIT,
- quality: typing.Optional[float] = OMIT,
- max_tokens: typing.Optional[int] = OMIT,
- sampling_temperature: typing.Optional[float] = OMIT,
- response_format_type: typing.Optional[VideoBotsPageRequestResponseFormatType] = OMIT,
- tts_provider: typing.Optional[VideoBotsPageRequestTtsProvider] = OMIT,
- uberduck_voice_name: typing.Optional[str] = OMIT,
- uberduck_speaking_rate: typing.Optional[float] = OMIT,
- google_voice_name: typing.Optional[str] = OMIT,
- google_speaking_rate: typing.Optional[float] = OMIT,
- google_pitch: typing.Optional[float] = OMIT,
- bark_history_prompt: typing.Optional[str] = OMIT,
- elevenlabs_voice_name: typing.Optional[str] = OMIT,
- elevenlabs_api_key: typing.Optional[str] = OMIT,
- elevenlabs_voice_id: typing.Optional[str] = OMIT,
- elevenlabs_model: typing.Optional[str] = OMIT,
- elevenlabs_stability: typing.Optional[float] = OMIT,
- elevenlabs_similarity_boost: typing.Optional[float] = OMIT,
- elevenlabs_style: typing.Optional[float] = OMIT,
- elevenlabs_speaker_boost: typing.Optional[bool] = OMIT,
- azure_voice_name: typing.Optional[str] = OMIT,
- openai_voice_name: typing.Optional[VideoBotsPageRequestOpenaiVoiceName] = OMIT,
- openai_tts_model: typing.Optional[VideoBotsPageRequestOpenaiTtsModel] = OMIT,
- input_face: typing.Optional[str] = OMIT,
- face_padding_top: typing.Optional[int] = OMIT,
- face_padding_bottom: typing.Optional[int] = OMIT,
- face_padding_left: typing.Optional[int] = OMIT,
- face_padding_right: typing.Optional[int] = OMIT,
- sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
+ functions: typing.Optional[typing.List[CopilotCompletionRequestFunctionsItem]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
+ input_prompt: typing.Optional[str] = None,
+ input_audio: typing.Optional[str] = None,
+ input_images: typing.Optional[typing.List[core.File]] = None,
+ input_documents: typing.Optional[typing.List[core.File]] = None,
+ doc_extract_url: typing.Optional[str] = None,
+ messages: typing.Optional[typing.List[ConversationEntry]] = None,
+ bot_script: typing.Optional[str] = None,
+ selected_model: typing.Optional[LargeLanguageModels] = None,
+ document_model: typing.Optional[str] = None,
+ task_instructions: typing.Optional[str] = None,
+ query_instructions: typing.Optional[str] = None,
+ keyword_instructions: typing.Optional[str] = None,
+ documents: typing.Optional[typing.List[core.File]] = None,
+ max_references: typing.Optional[int] = None,
+ max_context_words: typing.Optional[int] = None,
+ scroll_jump: typing.Optional[int] = None,
+ embedding_model: typing.Optional[CopilotCompletionRequestEmbeddingModel] = None,
+ dense_weight: typing.Optional[float] = None,
+ citation_style: typing.Optional[CopilotCompletionRequestCitationStyle] = None,
+ use_url_shortener: typing.Optional[bool] = None,
+ asr_model: typing.Optional[CopilotCompletionRequestAsrModel] = None,
+ asr_language: typing.Optional[str] = None,
+ translation_model: typing.Optional[CopilotCompletionRequestTranslationModel] = None,
+ user_language: typing.Optional[str] = None,
+ input_glossary_document: typing.Optional[core.File] = None,
+ output_glossary_document: typing.Optional[core.File] = None,
+ lipsync_model: typing.Optional[CopilotCompletionRequestLipsyncModel] = None,
+ tools: typing.Optional[typing.List[LlmTools]] = None,
+ avoid_repetition: typing.Optional[bool] = None,
+ num_outputs: typing.Optional[int] = None,
+ quality: typing.Optional[float] = None,
+ max_tokens: typing.Optional[int] = None,
+ sampling_temperature: typing.Optional[float] = None,
+ response_format_type: typing.Optional[CopilotCompletionRequestResponseFormatType] = None,
+ tts_provider: typing.Optional[CopilotCompletionRequestTtsProvider] = None,
+ uberduck_voice_name: typing.Optional[str] = None,
+ uberduck_speaking_rate: typing.Optional[float] = None,
+ google_voice_name: typing.Optional[str] = None,
+ google_speaking_rate: typing.Optional[float] = None,
+ google_pitch: typing.Optional[float] = None,
+ bark_history_prompt: typing.Optional[str] = None,
+ elevenlabs_voice_name: typing.Optional[str] = None,
+ elevenlabs_api_key: typing.Optional[str] = None,
+ elevenlabs_voice_id: typing.Optional[str] = None,
+ elevenlabs_model: typing.Optional[str] = None,
+ elevenlabs_stability: typing.Optional[float] = None,
+ elevenlabs_similarity_boost: typing.Optional[float] = None,
+ elevenlabs_style: typing.Optional[float] = None,
+ elevenlabs_speaker_boost: typing.Optional[bool] = None,
+ azure_voice_name: typing.Optional[str] = None,
+ openai_voice_name: typing.Optional[CopilotCompletionRequestOpenaiVoiceName] = None,
+ openai_tts_model: typing.Optional[CopilotCompletionRequestOpenaiTtsModel] = None,
+ input_face: typing.Optional[core.File] = None,
+ face_padding_top: typing.Optional[int] = None,
+ face_padding_bottom: typing.Optional[int] = None,
+ face_padding_left: typing.Optional[int] = None,
+ face_padding_right: typing.Optional[int] = None,
+ sadtalker_settings: typing.Optional[CopilotCompletionRequestSadtalkerSettings] = None,
+ settings: typing.Optional[RunSettings] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> VideoBotsPageOutput:
"""
@@ -109,7 +110,7 @@ def completion(
----------
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
+ functions : typing.Optional[typing.List[CopilotCompletionRequestFunctionsItem]]
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
@@ -118,14 +119,16 @@ def completion(
input_audio : typing.Optional[str]
- input_images : typing.Optional[typing.Sequence[str]]
+ input_images : typing.Optional[typing.List[core.File]]
+ See core.File for more documentation
- input_documents : typing.Optional[typing.Sequence[str]]
+ input_documents : typing.Optional[typing.List[core.File]]
+ See core.File for more documentation
doc_extract_url : typing.Optional[str]
Select a workflow to extract text from documents and images.
- messages : typing.Optional[typing.Sequence[ConversationEntry]]
+ messages : typing.Optional[typing.List[ConversationEntry]]
bot_script : typing.Optional[str]
@@ -140,7 +143,8 @@ def completion(
keyword_instructions : typing.Optional[str]
- documents : typing.Optional[typing.Sequence[str]]
+ documents : typing.Optional[typing.List[core.File]]
+ See core.File for more documentation
max_references : typing.Optional[int]
@@ -148,7 +152,7 @@ def completion(
scroll_jump : typing.Optional[int]
- embedding_model : typing.Optional[VideoBotsPageRequestEmbeddingModel]
+ embedding_model : typing.Optional[CopilotCompletionRequestEmbeddingModel]
dense_weight : typing.Optional[float]
@@ -156,34 +160,30 @@ def completion(
Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
- citation_style : typing.Optional[VideoBotsPageRequestCitationStyle]
+ citation_style : typing.Optional[CopilotCompletionRequestCitationStyle]
use_url_shortener : typing.Optional[bool]
- asr_model : typing.Optional[VideoBotsPageRequestAsrModel]
+ asr_model : typing.Optional[CopilotCompletionRequestAsrModel]
Choose a model to transcribe incoming audio messages to text.
asr_language : typing.Optional[str]
Choose a language to transcribe incoming audio messages to text.
- translation_model : typing.Optional[VideoBotsPageRequestTranslationModel]
+ translation_model : typing.Optional[CopilotCompletionRequestTranslationModel]
user_language : typing.Optional[str]
Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages.
- input_glossary_document : typing.Optional[str]
+ input_glossary_document : typing.Optional[core.File]
+ See core.File for more documentation
- Translation Glossary for User Langauge -> LLM Language (English)
+ output_glossary_document : typing.Optional[core.File]
+ See core.File for more documentation
+ lipsync_model : typing.Optional[CopilotCompletionRequestLipsyncModel]
- output_glossary_document : typing.Optional[str]
-
- Translation Glossary for LLM Language (English) -> User Langauge
-
-
- lipsync_model : typing.Optional[VideoBotsPageRequestLipsyncModel]
-
- tools : typing.Optional[typing.Sequence[LlmTools]]
+ tools : typing.Optional[typing.List[LlmTools]]
Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling).
avoid_repetition : typing.Optional[bool]
@@ -196,9 +196,9 @@ def completion(
sampling_temperature : typing.Optional[float]
- response_format_type : typing.Optional[VideoBotsPageRequestResponseFormatType]
+ response_format_type : typing.Optional[CopilotCompletionRequestResponseFormatType]
- tts_provider : typing.Optional[VideoBotsPageRequestTtsProvider]
+ tts_provider : typing.Optional[CopilotCompletionRequestTtsProvider]
uberduck_voice_name : typing.Optional[str]
@@ -231,11 +231,12 @@ def completion(
azure_voice_name : typing.Optional[str]
- openai_voice_name : typing.Optional[VideoBotsPageRequestOpenaiVoiceName]
+ openai_voice_name : typing.Optional[CopilotCompletionRequestOpenaiVoiceName]
- openai_tts_model : typing.Optional[VideoBotsPageRequestOpenaiTtsModel]
+ openai_tts_model : typing.Optional[CopilotCompletionRequestOpenaiTtsModel]
- input_face : typing.Optional[str]
+ input_face : typing.Optional[core.File]
+ See core.File for more documentation
face_padding_top : typing.Optional[int]
@@ -245,7 +246,7 @@ def completion(
face_padding_right : typing.Optional[int]
- sadtalker_settings : typing.Optional[SadTalkerSettings]
+ sadtalker_settings : typing.Optional[CopilotCompletionRequestSadtalkerSettings]
settings : typing.Optional[RunSettings]
@@ -272,13 +273,11 @@ def completion(
params={
"example_id": example_id,
},
- json={
+ data={
"functions": functions,
"variables": variables,
"input_prompt": input_prompt,
"input_audio": input_audio,
- "input_images": input_images,
- "input_documents": input_documents,
"doc_extract_url": doc_extract_url,
"messages": messages,
"bot_script": bot_script,
@@ -287,7 +286,6 @@ def completion(
"task_instructions": task_instructions,
"query_instructions": query_instructions,
"keyword_instructions": keyword_instructions,
- "documents": documents,
"max_references": max_references,
"max_context_words": max_context_words,
"scroll_jump": scroll_jump,
@@ -299,8 +297,6 @@ def completion(
"asr_language": asr_language,
"translation_model": translation_model,
"user_language": user_language,
- "input_glossary_document": input_glossary_document,
- "output_glossary_document": output_glossary_document,
"lipsync_model": lipsync_model,
"tools": tools,
"avoid_repetition": avoid_repetition,
@@ -327,7 +323,6 @@ def completion(
"azure_voice_name": azure_voice_name,
"openai_voice_name": openai_voice_name,
"openai_tts_model": openai_tts_model,
- "input_face": input_face,
"face_padding_top": face_padding_top,
"face_padding_bottom": face_padding_bottom,
"face_padding_left": face_padding_left,
@@ -335,6 +330,14 @@ def completion(
"sadtalker_settings": sadtalker_settings,
"settings": settings,
},
+ files={
+ "input_images": input_images,
+ "input_documents": input_documents,
+ "documents": documents,
+ "input_glossary_document": input_glossary_document,
+ "output_glossary_document": output_glossary_document,
+ "input_face": input_face,
+ },
request_options=request_options,
omit=OMIT,
)
@@ -391,67 +394,67 @@ async def completion(
self,
*,
example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- input_prompt: typing.Optional[str] = OMIT,
- input_audio: typing.Optional[str] = OMIT,
- input_images: typing.Optional[typing.Sequence[str]] = OMIT,
- input_documents: typing.Optional[typing.Sequence[str]] = OMIT,
- doc_extract_url: typing.Optional[str] = OMIT,
- messages: typing.Optional[typing.Sequence[ConversationEntry]] = OMIT,
- bot_script: typing.Optional[str] = OMIT,
- selected_model: typing.Optional[LargeLanguageModels] = OMIT,
- document_model: typing.Optional[str] = OMIT,
- task_instructions: typing.Optional[str] = OMIT,
- query_instructions: typing.Optional[str] = OMIT,
- keyword_instructions: typing.Optional[str] = OMIT,
- documents: typing.Optional[typing.Sequence[str]] = OMIT,
- max_references: typing.Optional[int] = OMIT,
- max_context_words: typing.Optional[int] = OMIT,
- scroll_jump: typing.Optional[int] = OMIT,
- embedding_model: typing.Optional[VideoBotsPageRequestEmbeddingModel] = OMIT,
- dense_weight: typing.Optional[float] = OMIT,
- citation_style: typing.Optional[VideoBotsPageRequestCitationStyle] = OMIT,
- use_url_shortener: typing.Optional[bool] = OMIT,
- asr_model: typing.Optional[VideoBotsPageRequestAsrModel] = OMIT,
- asr_language: typing.Optional[str] = OMIT,
- translation_model: typing.Optional[VideoBotsPageRequestTranslationModel] = OMIT,
- user_language: typing.Optional[str] = OMIT,
- input_glossary_document: typing.Optional[str] = OMIT,
- output_glossary_document: typing.Optional[str] = OMIT,
- lipsync_model: typing.Optional[VideoBotsPageRequestLipsyncModel] = OMIT,
- tools: typing.Optional[typing.Sequence[LlmTools]] = OMIT,
- avoid_repetition: typing.Optional[bool] = OMIT,
- num_outputs: typing.Optional[int] = OMIT,
- quality: typing.Optional[float] = OMIT,
- max_tokens: typing.Optional[int] = OMIT,
- sampling_temperature: typing.Optional[float] = OMIT,
- response_format_type: typing.Optional[VideoBotsPageRequestResponseFormatType] = OMIT,
- tts_provider: typing.Optional[VideoBotsPageRequestTtsProvider] = OMIT,
- uberduck_voice_name: typing.Optional[str] = OMIT,
- uberduck_speaking_rate: typing.Optional[float] = OMIT,
- google_voice_name: typing.Optional[str] = OMIT,
- google_speaking_rate: typing.Optional[float] = OMIT,
- google_pitch: typing.Optional[float] = OMIT,
- bark_history_prompt: typing.Optional[str] = OMIT,
- elevenlabs_voice_name: typing.Optional[str] = OMIT,
- elevenlabs_api_key: typing.Optional[str] = OMIT,
- elevenlabs_voice_id: typing.Optional[str] = OMIT,
- elevenlabs_model: typing.Optional[str] = OMIT,
- elevenlabs_stability: typing.Optional[float] = OMIT,
- elevenlabs_similarity_boost: typing.Optional[float] = OMIT,
- elevenlabs_style: typing.Optional[float] = OMIT,
- elevenlabs_speaker_boost: typing.Optional[bool] = OMIT,
- azure_voice_name: typing.Optional[str] = OMIT,
- openai_voice_name: typing.Optional[VideoBotsPageRequestOpenaiVoiceName] = OMIT,
- openai_tts_model: typing.Optional[VideoBotsPageRequestOpenaiTtsModel] = OMIT,
- input_face: typing.Optional[str] = OMIT,
- face_padding_top: typing.Optional[int] = OMIT,
- face_padding_bottom: typing.Optional[int] = OMIT,
- face_padding_left: typing.Optional[int] = OMIT,
- face_padding_right: typing.Optional[int] = OMIT,
- sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT,
- settings: typing.Optional[RunSettings] = OMIT,
+ functions: typing.Optional[typing.List[CopilotCompletionRequestFunctionsItem]] = None,
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
+ input_prompt: typing.Optional[str] = None,
+ input_audio: typing.Optional[str] = None,
+ input_images: typing.Optional[typing.List[core.File]] = None,
+ input_documents: typing.Optional[typing.List[core.File]] = None,
+ doc_extract_url: typing.Optional[str] = None,
+ messages: typing.Optional[typing.List[ConversationEntry]] = None,
+ bot_script: typing.Optional[str] = None,
+ selected_model: typing.Optional[LargeLanguageModels] = None,
+ document_model: typing.Optional[str] = None,
+ task_instructions: typing.Optional[str] = None,
+ query_instructions: typing.Optional[str] = None,
+ keyword_instructions: typing.Optional[str] = None,
+ documents: typing.Optional[typing.List[core.File]] = None,
+ max_references: typing.Optional[int] = None,
+ max_context_words: typing.Optional[int] = None,
+ scroll_jump: typing.Optional[int] = None,
+ embedding_model: typing.Optional[CopilotCompletionRequestEmbeddingModel] = None,
+ dense_weight: typing.Optional[float] = None,
+ citation_style: typing.Optional[CopilotCompletionRequestCitationStyle] = None,
+ use_url_shortener: typing.Optional[bool] = None,
+ asr_model: typing.Optional[CopilotCompletionRequestAsrModel] = None,
+ asr_language: typing.Optional[str] = None,
+ translation_model: typing.Optional[CopilotCompletionRequestTranslationModel] = None,
+ user_language: typing.Optional[str] = None,
+ input_glossary_document: typing.Optional[core.File] = None,
+ output_glossary_document: typing.Optional[core.File] = None,
+ lipsync_model: typing.Optional[CopilotCompletionRequestLipsyncModel] = None,
+ tools: typing.Optional[typing.List[LlmTools]] = None,
+ avoid_repetition: typing.Optional[bool] = None,
+ num_outputs: typing.Optional[int] = None,
+ quality: typing.Optional[float] = None,
+ max_tokens: typing.Optional[int] = None,
+ sampling_temperature: typing.Optional[float] = None,
+ response_format_type: typing.Optional[CopilotCompletionRequestResponseFormatType] = None,
+ tts_provider: typing.Optional[CopilotCompletionRequestTtsProvider] = None,
+ uberduck_voice_name: typing.Optional[str] = None,
+ uberduck_speaking_rate: typing.Optional[float] = None,
+ google_voice_name: typing.Optional[str] = None,
+ google_speaking_rate: typing.Optional[float] = None,
+ google_pitch: typing.Optional[float] = None,
+ bark_history_prompt: typing.Optional[str] = None,
+ elevenlabs_voice_name: typing.Optional[str] = None,
+ elevenlabs_api_key: typing.Optional[str] = None,
+ elevenlabs_voice_id: typing.Optional[str] = None,
+ elevenlabs_model: typing.Optional[str] = None,
+ elevenlabs_stability: typing.Optional[float] = None,
+ elevenlabs_similarity_boost: typing.Optional[float] = None,
+ elevenlabs_style: typing.Optional[float] = None,
+ elevenlabs_speaker_boost: typing.Optional[bool] = None,
+ azure_voice_name: typing.Optional[str] = None,
+ openai_voice_name: typing.Optional[CopilotCompletionRequestOpenaiVoiceName] = None,
+ openai_tts_model: typing.Optional[CopilotCompletionRequestOpenaiTtsModel] = None,
+ input_face: typing.Optional[core.File] = None,
+ face_padding_top: typing.Optional[int] = None,
+ face_padding_bottom: typing.Optional[int] = None,
+ face_padding_left: typing.Optional[int] = None,
+ face_padding_right: typing.Optional[int] = None,
+ sadtalker_settings: typing.Optional[CopilotCompletionRequestSadtalkerSettings] = None,
+ settings: typing.Optional[RunSettings] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> VideoBotsPageOutput:
"""
@@ -459,7 +462,7 @@ async def completion(
----------
example_id : typing.Optional[str]
- functions : typing.Optional[typing.Sequence[RecipeFunction]]
+ functions : typing.Optional[typing.List[CopilotCompletionRequestFunctionsItem]]
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
@@ -468,14 +471,16 @@ async def completion(
input_audio : typing.Optional[str]
- input_images : typing.Optional[typing.Sequence[str]]
+ input_images : typing.Optional[typing.List[core.File]]
+ See core.File for more documentation
- input_documents : typing.Optional[typing.Sequence[str]]
+ input_documents : typing.Optional[typing.List[core.File]]
+ See core.File for more documentation
doc_extract_url : typing.Optional[str]
Select a workflow to extract text from documents and images.
- messages : typing.Optional[typing.Sequence[ConversationEntry]]
+ messages : typing.Optional[typing.List[ConversationEntry]]
bot_script : typing.Optional[str]
@@ -490,7 +495,8 @@ async def completion(
keyword_instructions : typing.Optional[str]
- documents : typing.Optional[typing.Sequence[str]]
+ documents : typing.Optional[typing.List[core.File]]
+ See core.File for more documentation
max_references : typing.Optional[int]
@@ -498,7 +504,7 @@ async def completion(
scroll_jump : typing.Optional[int]
- embedding_model : typing.Optional[VideoBotsPageRequestEmbeddingModel]
+ embedding_model : typing.Optional[CopilotCompletionRequestEmbeddingModel]
dense_weight : typing.Optional[float]
@@ -506,34 +512,30 @@ async def completion(
Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
- citation_style : typing.Optional[VideoBotsPageRequestCitationStyle]
+ citation_style : typing.Optional[CopilotCompletionRequestCitationStyle]
use_url_shortener : typing.Optional[bool]
- asr_model : typing.Optional[VideoBotsPageRequestAsrModel]
+ asr_model : typing.Optional[CopilotCompletionRequestAsrModel]
Choose a model to transcribe incoming audio messages to text.
asr_language : typing.Optional[str]
Choose a language to transcribe incoming audio messages to text.
- translation_model : typing.Optional[VideoBotsPageRequestTranslationModel]
+ translation_model : typing.Optional[CopilotCompletionRequestTranslationModel]
user_language : typing.Optional[str]
Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages.
- input_glossary_document : typing.Optional[str]
+ input_glossary_document : typing.Optional[core.File]
+ See core.File for more documentation
- Translation Glossary for User Langauge -> LLM Language (English)
+ output_glossary_document : typing.Optional[core.File]
+ See core.File for more documentation
+ lipsync_model : typing.Optional[CopilotCompletionRequestLipsyncModel]
- output_glossary_document : typing.Optional[str]
-
- Translation Glossary for LLM Language (English) -> User Langauge
-
-
- lipsync_model : typing.Optional[VideoBotsPageRequestLipsyncModel]
-
- tools : typing.Optional[typing.Sequence[LlmTools]]
+ tools : typing.Optional[typing.List[LlmTools]]
Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling).
avoid_repetition : typing.Optional[bool]
@@ -546,9 +548,9 @@ async def completion(
sampling_temperature : typing.Optional[float]
- response_format_type : typing.Optional[VideoBotsPageRequestResponseFormatType]
+ response_format_type : typing.Optional[CopilotCompletionRequestResponseFormatType]
- tts_provider : typing.Optional[VideoBotsPageRequestTtsProvider]
+ tts_provider : typing.Optional[CopilotCompletionRequestTtsProvider]
uberduck_voice_name : typing.Optional[str]
@@ -581,11 +583,12 @@ async def completion(
azure_voice_name : typing.Optional[str]
- openai_voice_name : typing.Optional[VideoBotsPageRequestOpenaiVoiceName]
+ openai_voice_name : typing.Optional[CopilotCompletionRequestOpenaiVoiceName]
- openai_tts_model : typing.Optional[VideoBotsPageRequestOpenaiTtsModel]
+ openai_tts_model : typing.Optional[CopilotCompletionRequestOpenaiTtsModel]
- input_face : typing.Optional[str]
+ input_face : typing.Optional[core.File]
+ See core.File for more documentation
face_padding_top : typing.Optional[int]
@@ -595,7 +598,7 @@ async def completion(
face_padding_right : typing.Optional[int]
- sadtalker_settings : typing.Optional[SadTalkerSettings]
+ sadtalker_settings : typing.Optional[CopilotCompletionRequestSadtalkerSettings]
settings : typing.Optional[RunSettings]
@@ -630,13 +633,11 @@ async def main() -> None:
params={
"example_id": example_id,
},
- json={
+ data={
"functions": functions,
"variables": variables,
"input_prompt": input_prompt,
"input_audio": input_audio,
- "input_images": input_images,
- "input_documents": input_documents,
"doc_extract_url": doc_extract_url,
"messages": messages,
"bot_script": bot_script,
@@ -645,7 +646,6 @@ async def main() -> None:
"task_instructions": task_instructions,
"query_instructions": query_instructions,
"keyword_instructions": keyword_instructions,
- "documents": documents,
"max_references": max_references,
"max_context_words": max_context_words,
"scroll_jump": scroll_jump,
@@ -657,8 +657,6 @@ async def main() -> None:
"asr_language": asr_language,
"translation_model": translation_model,
"user_language": user_language,
- "input_glossary_document": input_glossary_document,
- "output_glossary_document": output_glossary_document,
"lipsync_model": lipsync_model,
"tools": tools,
"avoid_repetition": avoid_repetition,
@@ -685,7 +683,6 @@ async def main() -> None:
"azure_voice_name": azure_voice_name,
"openai_voice_name": openai_voice_name,
"openai_tts_model": openai_tts_model,
- "input_face": input_face,
"face_padding_top": face_padding_top,
"face_padding_bottom": face_padding_bottom,
"face_padding_left": face_padding_left,
@@ -693,6 +690,14 @@ async def main() -> None:
"sadtalker_settings": sadtalker_settings,
"settings": settings,
},
+ files={
+ "input_images": input_images,
+ "input_documents": input_documents,
+ "documents": documents,
+ "input_glossary_document": input_glossary_document,
+ "output_glossary_document": output_glossary_document,
+ "input_face": input_face,
+ },
request_options=request_options,
omit=OMIT,
)
diff --git a/src/gooey/copilot/types/__init__.py b/src/gooey/copilot/types/__init__.py
index 8de9ee0..1cdf619 100644
--- a/src/gooey/copilot/types/__init__.py
+++ b/src/gooey/copilot/types/__init__.py
@@ -1,23 +1,33 @@
# This file was auto-generated by Fern from our API Definition.
-from .video_bots_page_request_asr_model import VideoBotsPageRequestAsrModel
-from .video_bots_page_request_citation_style import VideoBotsPageRequestCitationStyle
-from .video_bots_page_request_embedding_model import VideoBotsPageRequestEmbeddingModel
-from .video_bots_page_request_lipsync_model import VideoBotsPageRequestLipsyncModel
-from .video_bots_page_request_openai_tts_model import VideoBotsPageRequestOpenaiTtsModel
-from .video_bots_page_request_openai_voice_name import VideoBotsPageRequestOpenaiVoiceName
-from .video_bots_page_request_response_format_type import VideoBotsPageRequestResponseFormatType
-from .video_bots_page_request_translation_model import VideoBotsPageRequestTranslationModel
-from .video_bots_page_request_tts_provider import VideoBotsPageRequestTtsProvider
+from .copilot_completion_request_asr_model import CopilotCompletionRequestAsrModel
+from .copilot_completion_request_citation_style import CopilotCompletionRequestCitationStyle
+from .copilot_completion_request_embedding_model import CopilotCompletionRequestEmbeddingModel
+from .copilot_completion_request_functions_item import CopilotCompletionRequestFunctionsItem
+from .copilot_completion_request_functions_item_trigger import CopilotCompletionRequestFunctionsItemTrigger
+from .copilot_completion_request_lipsync_model import CopilotCompletionRequestLipsyncModel
+from .copilot_completion_request_openai_tts_model import CopilotCompletionRequestOpenaiTtsModel
+from .copilot_completion_request_openai_voice_name import CopilotCompletionRequestOpenaiVoiceName
+from .copilot_completion_request_response_format_type import CopilotCompletionRequestResponseFormatType
+from .copilot_completion_request_sadtalker_settings import CopilotCompletionRequestSadtalkerSettings
+from .copilot_completion_request_sadtalker_settings_preprocess import (
+ CopilotCompletionRequestSadtalkerSettingsPreprocess,
+)
+from .copilot_completion_request_translation_model import CopilotCompletionRequestTranslationModel
+from .copilot_completion_request_tts_provider import CopilotCompletionRequestTtsProvider
__all__ = [
- "VideoBotsPageRequestAsrModel",
- "VideoBotsPageRequestCitationStyle",
- "VideoBotsPageRequestEmbeddingModel",
- "VideoBotsPageRequestLipsyncModel",
- "VideoBotsPageRequestOpenaiTtsModel",
- "VideoBotsPageRequestOpenaiVoiceName",
- "VideoBotsPageRequestResponseFormatType",
- "VideoBotsPageRequestTranslationModel",
- "VideoBotsPageRequestTtsProvider",
+ "CopilotCompletionRequestAsrModel",
+ "CopilotCompletionRequestCitationStyle",
+ "CopilotCompletionRequestEmbeddingModel",
+ "CopilotCompletionRequestFunctionsItem",
+ "CopilotCompletionRequestFunctionsItemTrigger",
+ "CopilotCompletionRequestLipsyncModel",
+ "CopilotCompletionRequestOpenaiTtsModel",
+ "CopilotCompletionRequestOpenaiVoiceName",
+ "CopilotCompletionRequestResponseFormatType",
+ "CopilotCompletionRequestSadtalkerSettings",
+ "CopilotCompletionRequestSadtalkerSettingsPreprocess",
+ "CopilotCompletionRequestTranslationModel",
+ "CopilotCompletionRequestTtsProvider",
]
diff --git a/src/gooey/copilot/types/copilot_completion_request_asr_model.py b/src/gooey/copilot/types/copilot_completion_request_asr_model.py
new file mode 100644
index 0000000..65ae0f5
--- /dev/null
+++ b/src/gooey/copilot/types/copilot_completion_request_asr_model.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+CopilotCompletionRequestAsrModel = typing.Union[
+ typing.Literal[
+ "whisper_large_v2",
+ "whisper_large_v3",
+ "whisper_hindi_large_v2",
+ "whisper_telugu_large_v2",
+ "nemo_english",
+ "nemo_hindi",
+ "vakyansh_bhojpuri",
+ "gcp_v1",
+ "usm",
+ "deepgram",
+ "azure",
+ "seamless_m4t_v2",
+ "mms_1b_all",
+ "seamless_m4t",
+ ],
+ typing.Any,
+]
diff --git a/src/gooey/copilot/types/copilot_completion_request_citation_style.py b/src/gooey/copilot/types/copilot_completion_request_citation_style.py
new file mode 100644
index 0000000..1bb273a
--- /dev/null
+++ b/src/gooey/copilot/types/copilot_completion_request_citation_style.py
@@ -0,0 +1,25 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+CopilotCompletionRequestCitationStyle = typing.Union[
+ typing.Literal[
+ "number",
+ "title",
+ "url",
+ "symbol",
+ "markdown",
+ "html",
+ "slack_mrkdwn",
+ "plaintext",
+ "number_markdown",
+ "number_html",
+ "number_slack_mrkdwn",
+ "number_plaintext",
+ "symbol_markdown",
+ "symbol_html",
+ "symbol_slack_mrkdwn",
+ "symbol_plaintext",
+ ],
+ typing.Any,
+]
diff --git a/src/gooey/copilot/types/copilot_completion_request_embedding_model.py b/src/gooey/copilot/types/copilot_completion_request_embedding_model.py
new file mode 100644
index 0000000..4655801
--- /dev/null
+++ b/src/gooey/copilot/types/copilot_completion_request_embedding_model.py
@@ -0,0 +1,18 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+CopilotCompletionRequestEmbeddingModel = typing.Union[
+ typing.Literal[
+ "openai_3_large",
+ "openai_3_small",
+ "openai_ada_2",
+ "e5_large_v2",
+ "e5_base_v2",
+ "multilingual_e5_base",
+ "multilingual_e5_large",
+ "gte_large",
+ "gte_base",
+ ],
+ typing.Any,
+]
diff --git a/src/gooey/copilot/types/copilot_completion_request_functions_item.py b/src/gooey/copilot/types/copilot_completion_request_functions_item.py
new file mode 100644
index 0000000..c9654f1
--- /dev/null
+++ b/src/gooey/copilot/types/copilot_completion_request_functions_item.py
@@ -0,0 +1,24 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ...core.pydantic_utilities import UniversalBaseModel
+from .copilot_completion_request_functions_item_trigger import CopilotCompletionRequestFunctionsItemTrigger
+import pydantic
+from ...core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+
+
+class CopilotCompletionRequestFunctionsItem(UniversalBaseModel):
+ url: str
+ trigger: CopilotCompletionRequestFunctionsItemTrigger = pydantic.Field()
+ """
+ When to run this function. `pre` runs before the recipe, `post` runs after the recipe.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/copilot/types/copilot_completion_request_functions_item_trigger.py b/src/gooey/copilot/types/copilot_completion_request_functions_item_trigger.py
new file mode 100644
index 0000000..cf3e214
--- /dev/null
+++ b/src/gooey/copilot/types/copilot_completion_request_functions_item_trigger.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+CopilotCompletionRequestFunctionsItemTrigger = typing.Union[typing.Literal["pre", "post"], typing.Any]
diff --git a/src/gooey/copilot/types/copilot_completion_request_lipsync_model.py b/src/gooey/copilot/types/copilot_completion_request_lipsync_model.py
new file mode 100644
index 0000000..865bc4b
--- /dev/null
+++ b/src/gooey/copilot/types/copilot_completion_request_lipsync_model.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+CopilotCompletionRequestLipsyncModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any]
diff --git a/src/gooey/copilot/types/copilot_completion_request_openai_tts_model.py b/src/gooey/copilot/types/copilot_completion_request_openai_tts_model.py
new file mode 100644
index 0000000..4f4a35b
--- /dev/null
+++ b/src/gooey/copilot/types/copilot_completion_request_openai_tts_model.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+CopilotCompletionRequestOpenaiTtsModel = typing.Union[typing.Literal["tts_1", "tts_1_hd"], typing.Any]
diff --git a/src/gooey/copilot/types/copilot_completion_request_openai_voice_name.py b/src/gooey/copilot/types/copilot_completion_request_openai_voice_name.py
new file mode 100644
index 0000000..f60a6b3
--- /dev/null
+++ b/src/gooey/copilot/types/copilot_completion_request_openai_voice_name.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+CopilotCompletionRequestOpenaiVoiceName = typing.Union[
+ typing.Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"], typing.Any
+]
diff --git a/src/gooey/copilot/types/copilot_completion_request_response_format_type.py b/src/gooey/copilot/types/copilot_completion_request_response_format_type.py
new file mode 100644
index 0000000..3c9dbb0
--- /dev/null
+++ b/src/gooey/copilot/types/copilot_completion_request_response_format_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+CopilotCompletionRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any]
diff --git a/src/gooey/copilot/types/copilot_completion_request_sadtalker_settings.py b/src/gooey/copilot/types/copilot_completion_request_sadtalker_settings.py
new file mode 100644
index 0000000..12ae458
--- /dev/null
+++ b/src/gooey/copilot/types/copilot_completion_request_sadtalker_settings.py
@@ -0,0 +1,42 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ...core.pydantic_utilities import UniversalBaseModel
+import typing
+from .copilot_completion_request_sadtalker_settings_preprocess import (
+ CopilotCompletionRequestSadtalkerSettingsPreprocess,
+)
+import pydantic
+from ...core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class CopilotCompletionRequestSadtalkerSettings(UniversalBaseModel):
+ still: typing.Optional[bool] = None
+ preprocess: typing.Optional[CopilotCompletionRequestSadtalkerSettingsPreprocess] = pydantic.Field(default=None)
+ """
+ SadTalker only generates 512x512 output. 'crop' handles this by cropping the input to 512x512. 'resize' scales down the input to fit 512x512 and scales it back up after lipsyncing (does not work well for full person images, better for portraits). 'full' processes the cropped region and pastes it back into the original input. 'extcrop' and 'extfull' are similar to 'crop' and 'full' but with extended cropping.
+ """
+
+ pose_style: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Random seed 0-45 inclusive that affects how the pose is animated.
+ """
+
+ expression_scale: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Scale the amount of expression motion. 1.0 is normal, 0.5 is very reduced, and 2.0 is quite a lot.
+ """
+
+ ref_eyeblink: typing.Optional[str] = None
+ ref_pose: typing.Optional[str] = None
+ input_yaw: typing.Optional[typing.List[int]] = None
+ input_pitch: typing.Optional[typing.List[int]] = None
+ input_roll: typing.Optional[typing.List[int]] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/copilot/types/copilot_completion_request_sadtalker_settings_preprocess.py b/src/gooey/copilot/types/copilot_completion_request_sadtalker_settings_preprocess.py
new file mode 100644
index 0000000..88add2e
--- /dev/null
+++ b/src/gooey/copilot/types/copilot_completion_request_sadtalker_settings_preprocess.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+CopilotCompletionRequestSadtalkerSettingsPreprocess = typing.Union[
+ typing.Literal["crop", "extcrop", "resize", "full", "extfull"], typing.Any
+]
diff --git a/src/gooey/copilot/types/copilot_completion_request_translation_model.py b/src/gooey/copilot/types/copilot_completion_request_translation_model.py
new file mode 100644
index 0000000..10b0b5a
--- /dev/null
+++ b/src/gooey/copilot/types/copilot_completion_request_translation_model.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+CopilotCompletionRequestTranslationModel = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any]
diff --git a/src/gooey/copilot/types/copilot_completion_request_tts_provider.py b/src/gooey/copilot/types/copilot_completion_request_tts_provider.py
new file mode 100644
index 0000000..4dec4b0
--- /dev/null
+++ b/src/gooey/copilot/types/copilot_completion_request_tts_provider.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+CopilotCompletionRequestTtsProvider = typing.Union[
+ typing.Literal["GOOGLE_TTS", "ELEVEN_LABS", "UBERDUCK", "BARK", "AZURE_TTS", "OPEN_AI"], typing.Any
+]
diff --git a/src/gooey/core/client_wrapper.py b/src/gooey/core/client_wrapper.py
index f34b80e..3eb7ee8 100644
--- a/src/gooey/core/client_wrapper.py
+++ b/src/gooey/core/client_wrapper.py
@@ -22,7 +22,7 @@ def get_headers(self) -> typing.Dict[str, str]:
headers: typing.Dict[str, str] = {
"X-Fern-Language": "Python",
"X-Fern-SDK-Name": "gooeyai",
- "X-Fern-SDK-Version": "0.0.1-beta19",
+ "X-Fern-SDK-Version": "0.0.1-beta20",
}
headers["Authorization"] = f"Bearer {self._get_api_key()}"
return headers
diff --git a/src/gooey/types/__init__.py b/src/gooey/types/__init__.py
index ca0369d..9087b38 100644
--- a/src/gooey/types/__init__.py
+++ b/src/gooey/types/__init__.py
@@ -9,6 +9,7 @@
from .asr_output_json import AsrOutputJson
from .asr_page_output import AsrPageOutput
from .asr_page_output_output_text_item import AsrPageOutputOutputTextItem
+from .asr_page_request import AsrPageRequest
from .asr_page_request_output_format import AsrPageRequestOutputFormat
from .asr_page_request_selected_model import AsrPageRequestSelectedModel
from .asr_page_request_translation_model import AsrPageRequestTranslationModel
@@ -21,6 +22,7 @@
from .bulk_eval_page_request_response_format_type import BulkEvalPageRequestResponseFormatType
from .bulk_eval_page_status_response import BulkEvalPageStatusResponse
from .bulk_runner_page_output import BulkRunnerPageOutput
+from .bulk_runner_page_request import BulkRunnerPageRequest
from .bulk_runner_page_status_response import BulkRunnerPageStatusResponse
from .button_pressed import ButtonPressed
from .called_function_response import CalledFunctionResponse
@@ -38,6 +40,7 @@
from .compare_text2img_page_request_selected_models_item import CompareText2ImgPageRequestSelectedModelsItem
from .compare_text2img_page_status_response import CompareText2ImgPageStatusResponse
from .compare_upscaler_page_output import CompareUpscalerPageOutput
+from .compare_upscaler_page_request import CompareUpscalerPageRequest
from .compare_upscaler_page_request_selected_models_item import CompareUpscalerPageRequestSelectedModelsItem
from .compare_upscaler_page_status_response import CompareUpscalerPageStatusResponse
from .console_logs import ConsoleLogs
@@ -66,6 +69,7 @@
from .deforum_sd_page_request_selected_model import DeforumSdPageRequestSelectedModel
from .deforum_sd_page_status_response import DeforumSdPageStatusResponse
from .doc_extract_page_output import DocExtractPageOutput
+from .doc_extract_page_request import DocExtractPageRequest
from .doc_extract_page_request_response_format_type import DocExtractPageRequestResponseFormatType
from .doc_extract_page_request_selected_asr_model import DocExtractPageRequestSelectedAsrModel
from .doc_extract_page_status_response import DocExtractPageStatusResponse
@@ -76,9 +80,12 @@
from .doc_search_page_request_response_format_type import DocSearchPageRequestResponseFormatType
from .doc_search_page_status_response import DocSearchPageStatusResponse
from .doc_summary_page_output import DocSummaryPageOutput
+from .doc_summary_page_request import DocSummaryPageRequest
from .doc_summary_page_request_response_format_type import DocSummaryPageRequestResponseFormatType
from .doc_summary_page_request_selected_asr_model import DocSummaryPageRequestSelectedAsrModel
from .doc_summary_page_status_response import DocSummaryPageStatusResponse
+from .doc_summary_request_response_format_type import DocSummaryRequestResponseFormatType
+from .doc_summary_request_selected_asr_model import DocSummaryRequestSelectedAsrModel
from .email_face_inpainting_page_output import EmailFaceInpaintingPageOutput
from .email_face_inpainting_page_request_selected_model import EmailFaceInpaintingPageRequestSelectedModel
from .email_face_inpainting_page_status_response import EmailFaceInpaintingPageStatusResponse
@@ -87,6 +94,7 @@
from .embeddings_page_status_response import EmbeddingsPageStatusResponse
from .eval_prompt import EvalPrompt
from .face_inpainting_page_output import FaceInpaintingPageOutput
+from .face_inpainting_page_request import FaceInpaintingPageRequest
from .face_inpainting_page_request_selected_model import FaceInpaintingPageRequestSelectedModel
from .face_inpainting_page_status_response import FaceInpaintingPageStatusResponse
from .final_response import FinalResponse
@@ -103,11 +111,13 @@
from .google_image_gen_page_status_response import GoogleImageGenPageStatusResponse
from .http_validation_error import HttpValidationError
from .image_segmentation_page_output import ImageSegmentationPageOutput
+from .image_segmentation_page_request import ImageSegmentationPageRequest
from .image_segmentation_page_request_selected_model import ImageSegmentationPageRequestSelectedModel
from .image_segmentation_page_status_response import ImageSegmentationPageStatusResponse
from .image_url import ImageUrl
from .image_url_detail import ImageUrlDetail
from .img2img_page_output import Img2ImgPageOutput
+from .img2img_page_request import Img2ImgPageRequest
from .img2img_page_request_selected_controlnet_model import Img2ImgPageRequestSelectedControlnetModel
from .img2img_page_request_selected_controlnet_model_item import Img2ImgPageRequestSelectedControlnetModelItem
from .img2img_page_request_selected_model import Img2ImgPageRequestSelectedModel
@@ -117,22 +127,33 @@
from .letter_writer_page_request import LetterWriterPageRequest
from .letter_writer_page_status_response import LetterWriterPageStatusResponse
from .lipsync_page_output import LipsyncPageOutput
+from .lipsync_page_request import LipsyncPageRequest
from .lipsync_page_request_selected_model import LipsyncPageRequestSelectedModel
from .lipsync_page_status_response import LipsyncPageStatusResponse
+from .lipsync_request_selected_model import LipsyncRequestSelectedModel
from .lipsync_tts_page_output import LipsyncTtsPageOutput
+from .lipsync_tts_page_request import LipsyncTtsPageRequest
from .lipsync_tts_page_request_openai_tts_model import LipsyncTtsPageRequestOpenaiTtsModel
from .lipsync_tts_page_request_openai_voice_name import LipsyncTtsPageRequestOpenaiVoiceName
from .lipsync_tts_page_request_selected_model import LipsyncTtsPageRequestSelectedModel
from .lipsync_tts_page_request_tts_provider import LipsyncTtsPageRequestTtsProvider
from .lipsync_tts_page_status_response import LipsyncTtsPageStatusResponse
+from .lipsync_tts_request_openai_tts_model import LipsyncTtsRequestOpenaiTtsModel
+from .lipsync_tts_request_openai_voice_name import LipsyncTtsRequestOpenaiVoiceName
+from .lipsync_tts_request_selected_model import LipsyncTtsRequestSelectedModel
+from .lipsync_tts_request_tts_provider import LipsyncTtsRequestTtsProvider
from .llm_tools import LlmTools
from .message_part import MessagePart
from .object_inpainting_page_output import ObjectInpaintingPageOutput
+from .object_inpainting_page_request import ObjectInpaintingPageRequest
from .object_inpainting_page_request_selected_model import ObjectInpaintingPageRequestSelectedModel
from .object_inpainting_page_status_response import ObjectInpaintingPageStatusResponse
+from .portrait_request_selected_model import PortraitRequestSelectedModel
+from .product_image_request_selected_model import ProductImageRequestSelectedModel
from .prompt_tree_node import PromptTreeNode
from .prompt_tree_node_prompt import PromptTreeNodePrompt
from .qr_code_generator_page_output import QrCodeGeneratorPageOutput
+from .qr_code_generator_page_request import QrCodeGeneratorPageRequest
from .qr_code_generator_page_request_image_prompt_controlnet_models_item import (
QrCodeGeneratorPageRequestImagePromptControlnetModelsItem,
)
@@ -142,6 +163,10 @@
)
from .qr_code_generator_page_request_selected_model import QrCodeGeneratorPageRequestSelectedModel
from .qr_code_generator_page_status_response import QrCodeGeneratorPageStatusResponse
+from .qr_code_request_image_prompt_controlnet_models_item import QrCodeRequestImagePromptControlnetModelsItem
+from .qr_code_request_scheduler import QrCodeRequestScheduler
+from .qr_code_request_selected_controlnet_model_item import QrCodeRequestSelectedControlnetModelItem
+from .qr_code_request_selected_model import QrCodeRequestSelectedModel
from .recipe_function import RecipeFunction
from .recipe_function_trigger import RecipeFunctionTrigger
from .recipe_run_state import RecipeRunState
@@ -157,6 +182,10 @@
from .related_qn_a_page_request_embedding_model import RelatedQnAPageRequestEmbeddingModel
from .related_qn_a_page_request_response_format_type import RelatedQnAPageRequestResponseFormatType
from .related_qn_a_page_status_response import RelatedQnAPageStatusResponse
+from .remix_image_request_selected_controlnet_model import RemixImageRequestSelectedControlnetModel
+from .remix_image_request_selected_controlnet_model_item import RemixImageRequestSelectedControlnetModelItem
+from .remix_image_request_selected_model import RemixImageRequestSelectedModel
+from .remove_background_request_selected_model import RemoveBackgroundRequestSelectedModel
from .reply_button import ReplyButton
from .response_model import ResponseModel
from .response_model_final_keyword_query import ResponseModelFinalKeywordQuery
@@ -178,7 +207,12 @@
from .social_lookup_email_page_output import SocialLookupEmailPageOutput
from .social_lookup_email_page_request_response_format_type import SocialLookupEmailPageRequestResponseFormatType
from .social_lookup_email_page_status_response import SocialLookupEmailPageStatusResponse
+from .speech_recognition_request_output_format import SpeechRecognitionRequestOutputFormat
+from .speech_recognition_request_selected_model import SpeechRecognitionRequestSelectedModel
+from .speech_recognition_request_translation_model import SpeechRecognitionRequestTranslationModel
from .stream_error import StreamError
+from .synthesize_data_request_response_format_type import SynthesizeDataRequestResponseFormatType
+from .synthesize_data_request_selected_asr_model import SynthesizeDataRequestSelectedAsrModel
from .text2audio_page_output import Text2AudioPageOutput
from .text2audio_page_status_response import Text2AudioPageStatusResponse
from .text_to_speech_page_output import TextToSpeechPageOutput
@@ -187,15 +221,32 @@
from .text_to_speech_page_request_tts_provider import TextToSpeechPageRequestTtsProvider
from .text_to_speech_page_status_response import TextToSpeechPageStatusResponse
from .training_data_model import TrainingDataModel
+from .translate_request_selected_model import TranslateRequestSelectedModel
from .translation_page_output import TranslationPageOutput
+from .translation_page_request import TranslationPageRequest
from .translation_page_request_selected_model import TranslationPageRequestSelectedModel
from .translation_page_status_response import TranslationPageStatusResponse
+from .upscale_request_selected_models_item import UpscaleRequestSelectedModelsItem
from .validation_error import ValidationError
from .validation_error_loc_item import ValidationErrorLocItem
from .vcard import Vcard
from .video_bots_page_output import VideoBotsPageOutput
from .video_bots_page_output_final_keyword_query import VideoBotsPageOutputFinalKeywordQuery
from .video_bots_page_output_final_prompt import VideoBotsPageOutputFinalPrompt
+from .video_bots_page_request import VideoBotsPageRequest
+from .video_bots_page_request_asr_model import VideoBotsPageRequestAsrModel
+from .video_bots_page_request_citation_style import VideoBotsPageRequestCitationStyle
+from .video_bots_page_request_embedding_model import VideoBotsPageRequestEmbeddingModel
+from .video_bots_page_request_functions_item import VideoBotsPageRequestFunctionsItem
+from .video_bots_page_request_functions_item_trigger import VideoBotsPageRequestFunctionsItemTrigger
+from .video_bots_page_request_lipsync_model import VideoBotsPageRequestLipsyncModel
+from .video_bots_page_request_openai_tts_model import VideoBotsPageRequestOpenaiTtsModel
+from .video_bots_page_request_openai_voice_name import VideoBotsPageRequestOpenaiVoiceName
+from .video_bots_page_request_response_format_type import VideoBotsPageRequestResponseFormatType
+from .video_bots_page_request_sadtalker_settings import VideoBotsPageRequestSadtalkerSettings
+from .video_bots_page_request_sadtalker_settings_preprocess import VideoBotsPageRequestSadtalkerSettingsPreprocess
+from .video_bots_page_request_translation_model import VideoBotsPageRequestTranslationModel
+from .video_bots_page_request_tts_provider import VideoBotsPageRequestTtsProvider
from .video_bots_page_status_response import VideoBotsPageStatusResponse
__all__ = [
@@ -208,6 +259,7 @@
"AsrOutputJson",
"AsrPageOutput",
"AsrPageOutputOutputTextItem",
+ "AsrPageRequest",
"AsrPageRequestOutputFormat",
"AsrPageRequestSelectedModel",
"AsrPageRequestTranslationModel",
@@ -220,6 +272,7 @@
"BulkEvalPageRequestResponseFormatType",
"BulkEvalPageStatusResponse",
"BulkRunnerPageOutput",
+ "BulkRunnerPageRequest",
"BulkRunnerPageStatusResponse",
"ButtonPressed",
"CalledFunctionResponse",
@@ -237,6 +290,7 @@
"CompareText2ImgPageRequestSelectedModelsItem",
"CompareText2ImgPageStatusResponse",
"CompareUpscalerPageOutput",
+ "CompareUpscalerPageRequest",
"CompareUpscalerPageRequestSelectedModelsItem",
"CompareUpscalerPageStatusResponse",
"ConsoleLogs",
@@ -263,6 +317,7 @@
"DeforumSdPageRequestSelectedModel",
"DeforumSdPageStatusResponse",
"DocExtractPageOutput",
+ "DocExtractPageRequest",
"DocExtractPageRequestResponseFormatType",
"DocExtractPageRequestSelectedAsrModel",
"DocExtractPageStatusResponse",
@@ -273,9 +328,12 @@
"DocSearchPageRequestResponseFormatType",
"DocSearchPageStatusResponse",
"DocSummaryPageOutput",
+ "DocSummaryPageRequest",
"DocSummaryPageRequestResponseFormatType",
"DocSummaryPageRequestSelectedAsrModel",
"DocSummaryPageStatusResponse",
+ "DocSummaryRequestResponseFormatType",
+ "DocSummaryRequestSelectedAsrModel",
"EmailFaceInpaintingPageOutput",
"EmailFaceInpaintingPageRequestSelectedModel",
"EmailFaceInpaintingPageStatusResponse",
@@ -284,6 +342,7 @@
"EmbeddingsPageStatusResponse",
"EvalPrompt",
"FaceInpaintingPageOutput",
+ "FaceInpaintingPageRequest",
"FaceInpaintingPageRequestSelectedModel",
"FaceInpaintingPageStatusResponse",
"FinalResponse",
@@ -300,11 +359,13 @@
"GoogleImageGenPageStatusResponse",
"HttpValidationError",
"ImageSegmentationPageOutput",
+ "ImageSegmentationPageRequest",
"ImageSegmentationPageRequestSelectedModel",
"ImageSegmentationPageStatusResponse",
"ImageUrl",
"ImageUrlDetail",
"Img2ImgPageOutput",
+ "Img2ImgPageRequest",
"Img2ImgPageRequestSelectedControlnetModel",
"Img2ImgPageRequestSelectedControlnetModelItem",
"Img2ImgPageRequestSelectedModel",
@@ -314,27 +375,42 @@
"LetterWriterPageRequest",
"LetterWriterPageStatusResponse",
"LipsyncPageOutput",
+ "LipsyncPageRequest",
"LipsyncPageRequestSelectedModel",
"LipsyncPageStatusResponse",
+ "LipsyncRequestSelectedModel",
"LipsyncTtsPageOutput",
+ "LipsyncTtsPageRequest",
"LipsyncTtsPageRequestOpenaiTtsModel",
"LipsyncTtsPageRequestOpenaiVoiceName",
"LipsyncTtsPageRequestSelectedModel",
"LipsyncTtsPageRequestTtsProvider",
"LipsyncTtsPageStatusResponse",
+ "LipsyncTtsRequestOpenaiTtsModel",
+ "LipsyncTtsRequestOpenaiVoiceName",
+ "LipsyncTtsRequestSelectedModel",
+ "LipsyncTtsRequestTtsProvider",
"LlmTools",
"MessagePart",
"ObjectInpaintingPageOutput",
+ "ObjectInpaintingPageRequest",
"ObjectInpaintingPageRequestSelectedModel",
"ObjectInpaintingPageStatusResponse",
+ "PortraitRequestSelectedModel",
+ "ProductImageRequestSelectedModel",
"PromptTreeNode",
"PromptTreeNodePrompt",
"QrCodeGeneratorPageOutput",
+ "QrCodeGeneratorPageRequest",
"QrCodeGeneratorPageRequestImagePromptControlnetModelsItem",
"QrCodeGeneratorPageRequestScheduler",
"QrCodeGeneratorPageRequestSelectedControlnetModelItem",
"QrCodeGeneratorPageRequestSelectedModel",
"QrCodeGeneratorPageStatusResponse",
+ "QrCodeRequestImagePromptControlnetModelsItem",
+ "QrCodeRequestScheduler",
+ "QrCodeRequestSelectedControlnetModelItem",
+ "QrCodeRequestSelectedModel",
"RecipeFunction",
"RecipeFunctionTrigger",
"RecipeRunState",
@@ -350,6 +426,10 @@
"RelatedQnAPageRequestEmbeddingModel",
"RelatedQnAPageRequestResponseFormatType",
"RelatedQnAPageStatusResponse",
+ "RemixImageRequestSelectedControlnetModel",
+ "RemixImageRequestSelectedControlnetModelItem",
+ "RemixImageRequestSelectedModel",
+ "RemoveBackgroundRequestSelectedModel",
"ReplyButton",
"ResponseModel",
"ResponseModelFinalKeywordQuery",
@@ -371,7 +451,12 @@
"SocialLookupEmailPageOutput",
"SocialLookupEmailPageRequestResponseFormatType",
"SocialLookupEmailPageStatusResponse",
+ "SpeechRecognitionRequestOutputFormat",
+ "SpeechRecognitionRequestSelectedModel",
+ "SpeechRecognitionRequestTranslationModel",
"StreamError",
+ "SynthesizeDataRequestResponseFormatType",
+ "SynthesizeDataRequestSelectedAsrModel",
"Text2AudioPageOutput",
"Text2AudioPageStatusResponse",
"TextToSpeechPageOutput",
@@ -380,14 +465,31 @@
"TextToSpeechPageRequestTtsProvider",
"TextToSpeechPageStatusResponse",
"TrainingDataModel",
+ "TranslateRequestSelectedModel",
"TranslationPageOutput",
+ "TranslationPageRequest",
"TranslationPageRequestSelectedModel",
"TranslationPageStatusResponse",
+ "UpscaleRequestSelectedModelsItem",
"ValidationError",
"ValidationErrorLocItem",
"Vcard",
"VideoBotsPageOutput",
"VideoBotsPageOutputFinalKeywordQuery",
"VideoBotsPageOutputFinalPrompt",
+ "VideoBotsPageRequest",
+ "VideoBotsPageRequestAsrModel",
+ "VideoBotsPageRequestCitationStyle",
+ "VideoBotsPageRequestEmbeddingModel",
+ "VideoBotsPageRequestFunctionsItem",
+ "VideoBotsPageRequestFunctionsItemTrigger",
+ "VideoBotsPageRequestLipsyncModel",
+ "VideoBotsPageRequestOpenaiTtsModel",
+ "VideoBotsPageRequestOpenaiVoiceName",
+ "VideoBotsPageRequestResponseFormatType",
+ "VideoBotsPageRequestSadtalkerSettings",
+ "VideoBotsPageRequestSadtalkerSettingsPreprocess",
+ "VideoBotsPageRequestTranslationModel",
+ "VideoBotsPageRequestTtsProvider",
"VideoBotsPageStatusResponse",
]
diff --git a/src/gooey/types/asr_page_request.py b/src/gooey/types/asr_page_request.py
new file mode 100644
index 0000000..1d35181
--- /dev/null
+++ b/src/gooey/types/asr_page_request.py
@@ -0,0 +1,43 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.pydantic_utilities import UniversalBaseModel
+import typing
+from .recipe_function import RecipeFunction
+import pydantic
+from .asr_page_request_selected_model import AsrPageRequestSelectedModel
+from .asr_page_request_translation_model import AsrPageRequestTranslationModel
+from .asr_page_request_output_format import AsrPageRequestOutputFormat
+from .run_settings import RunSettings
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class AsrPageRequest(UniversalBaseModel):
+ functions: typing.Optional[typing.List[RecipeFunction]] = None
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Variables to be used as Jinja prompt templates and in functions as arguments
+ """
+
+ documents: typing.List[str]
+ selected_model: typing.Optional[AsrPageRequestSelectedModel] = None
+ language: typing.Optional[str] = None
+ translation_model: typing.Optional[AsrPageRequestTranslationModel] = None
+ output_format: typing.Optional[AsrPageRequestOutputFormat] = None
+ google_translate_target: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ use `translation_model` & `translation_target` instead.
+ """
+
+ translation_source: typing.Optional[str] = None
+ translation_target: typing.Optional[str] = None
+ glossary_document: typing.Optional[str] = None
+ settings: typing.Optional[RunSettings] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/bulk_runner_page_request.py b/src/gooey/types/bulk_runner_page_request.py
new file mode 100644
index 0000000..a4129d9
--- /dev/null
+++ b/src/gooey/types/bulk_runner_page_request.py
@@ -0,0 +1,55 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.pydantic_utilities import UniversalBaseModel
+import typing
+from .recipe_function import RecipeFunction
+import pydantic
+from .run_settings import RunSettings
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class BulkRunnerPageRequest(UniversalBaseModel):
+ functions: typing.Optional[typing.List[RecipeFunction]] = None
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Variables to be used as Jinja prompt templates and in functions as arguments
+ """
+
+ documents: typing.List[str] = pydantic.Field()
+ """
+ Upload or link to a CSV or google sheet that contains your sample input data.
+ For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs.
+ Remember to includes header names in your CSV too.
+ """
+
+ run_urls: typing.List[str] = pydantic.Field()
+ """
+ Provide one or more Gooey.AI workflow runs.
+ You can add multiple runs from the same recipe (e.g. two versions of your copilot) and we'll run the inputs over both of them.
+ """
+
+ input_columns: typing.Dict[str, str] = pydantic.Field()
+ """
+ For each input field in the Gooey.AI workflow, specify the column in your input data that corresponds to it.
+ """
+
+ output_columns: typing.Dict[str, str] = pydantic.Field()
+ """
+ For each output field in the Gooey.AI workflow, specify the column name that you'd like to use for it in the output data.
+ """
+
+ eval_urls: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
+ """
+ _(optional)_ Add one or more Gooey.AI Evaluator Workflows to evaluate the results of your runs.
+ """
+
+ settings: typing.Optional[RunSettings] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/compare_upscaler_page_request.py b/src/gooey/types/compare_upscaler_page_request.py
new file mode 100644
index 0000000..8cfb4e7
--- /dev/null
+++ b/src/gooey/types/compare_upscaler_page_request.py
@@ -0,0 +1,37 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.pydantic_utilities import UniversalBaseModel
+import typing
+from .recipe_function import RecipeFunction
+import pydantic
+from .compare_upscaler_page_request_selected_models_item import CompareUpscalerPageRequestSelectedModelsItem
+from .run_settings import RunSettings
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class CompareUpscalerPageRequest(UniversalBaseModel):
+ functions: typing.Optional[typing.List[RecipeFunction]] = None
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Variables to be used as Jinja prompt templates and in functions as arguments
+ """
+
+ input_image: typing.Optional[str] = None
+ input_video: typing.Optional[str] = None
+ scale: int = pydantic.Field()
+ """
+ The final upsampling scale of the image
+ """
+
+ selected_models: typing.Optional[typing.List[CompareUpscalerPageRequestSelectedModelsItem]] = None
+ selected_bg_model: typing.Optional[typing.Literal["real_esrgan_x2"]] = None
+ settings: typing.Optional[RunSettings] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/doc_extract_page_request.py b/src/gooey/types/doc_extract_page_request.py
new file mode 100644
index 0000000..9690c6c
--- /dev/null
+++ b/src/gooey/types/doc_extract_page_request.py
@@ -0,0 +1,43 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.pydantic_utilities import UniversalBaseModel
+import typing
+from .recipe_function import RecipeFunction
+import pydantic
+from .doc_extract_page_request_selected_asr_model import DocExtractPageRequestSelectedAsrModel
+from .large_language_models import LargeLanguageModels
+from .doc_extract_page_request_response_format_type import DocExtractPageRequestResponseFormatType
+from .run_settings import RunSettings
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class DocExtractPageRequest(UniversalBaseModel):
+ functions: typing.Optional[typing.List[RecipeFunction]] = None
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Variables to be used as Jinja prompt templates and in functions as arguments
+ """
+
+ documents: typing.List[str]
+ sheet_url: typing.Optional[str] = None
+ selected_asr_model: typing.Optional[DocExtractPageRequestSelectedAsrModel] = None
+ google_translate_target: typing.Optional[str] = None
+ glossary_document: typing.Optional[str] = None
+ task_instructions: typing.Optional[str] = None
+ selected_model: typing.Optional[LargeLanguageModels] = None
+ avoid_repetition: typing.Optional[bool] = None
+ num_outputs: typing.Optional[int] = None
+ quality: typing.Optional[float] = None
+ max_tokens: typing.Optional[int] = None
+ sampling_temperature: typing.Optional[float] = None
+ response_format_type: typing.Optional[DocExtractPageRequestResponseFormatType] = None
+ settings: typing.Optional[RunSettings] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/doc_summary_page_request.py b/src/gooey/types/doc_summary_page_request.py
new file mode 100644
index 0000000..466ddc1
--- /dev/null
+++ b/src/gooey/types/doc_summary_page_request.py
@@ -0,0 +1,43 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.pydantic_utilities import UniversalBaseModel
+import typing
+from .recipe_function import RecipeFunction
+import pydantic
+from .large_language_models import LargeLanguageModels
+from .doc_summary_page_request_selected_asr_model import DocSummaryPageRequestSelectedAsrModel
+from .doc_summary_page_request_response_format_type import DocSummaryPageRequestResponseFormatType
+from .run_settings import RunSettings
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class DocSummaryPageRequest(UniversalBaseModel):
+ functions: typing.Optional[typing.List[RecipeFunction]] = None
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Variables to be used as Jinja prompt templates and in functions as arguments
+ """
+
+ documents: typing.List[str]
+ task_instructions: typing.Optional[str] = None
+ merge_instructions: typing.Optional[str] = None
+ selected_model: typing.Optional[LargeLanguageModels] = None
+ chain_type: typing.Optional[typing.Literal["map_reduce"]] = None
+ selected_asr_model: typing.Optional[DocSummaryPageRequestSelectedAsrModel] = None
+ google_translate_target: typing.Optional[str] = None
+ avoid_repetition: typing.Optional[bool] = None
+ num_outputs: typing.Optional[int] = None
+ quality: typing.Optional[float] = None
+ max_tokens: typing.Optional[int] = None
+ sampling_temperature: typing.Optional[float] = None
+ response_format_type: typing.Optional[DocSummaryPageRequestResponseFormatType] = None
+ settings: typing.Optional[RunSettings] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/doc_summary_request_response_format_type.py b/src/gooey/types/doc_summary_request_response_format_type.py
new file mode 100644
index 0000000..8fabf9b
--- /dev/null
+++ b/src/gooey/types/doc_summary_request_response_format_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+DocSummaryRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any]
diff --git a/src/gooey/types/doc_summary_request_selected_asr_model.py b/src/gooey/types/doc_summary_request_selected_asr_model.py
new file mode 100644
index 0000000..8b8a338
--- /dev/null
+++ b/src/gooey/types/doc_summary_request_selected_asr_model.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+DocSummaryRequestSelectedAsrModel = typing.Union[
+ typing.Literal[
+ "whisper_large_v2",
+ "whisper_large_v3",
+ "whisper_hindi_large_v2",
+ "whisper_telugu_large_v2",
+ "nemo_english",
+ "nemo_hindi",
+ "vakyansh_bhojpuri",
+ "gcp_v1",
+ "usm",
+ "deepgram",
+ "azure",
+ "seamless_m4t_v2",
+ "mms_1b_all",
+ "seamless_m4t",
+ ],
+ typing.Any,
+]
diff --git a/src/gooey/types/face_inpainting_page_request.py b/src/gooey/types/face_inpainting_page_request.py
new file mode 100644
index 0000000..a653205
--- /dev/null
+++ b/src/gooey/types/face_inpainting_page_request.py
@@ -0,0 +1,42 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.pydantic_utilities import UniversalBaseModel
+import typing
+from .recipe_function import RecipeFunction
+import pydantic
+from .face_inpainting_page_request_selected_model import FaceInpaintingPageRequestSelectedModel
+from .run_settings import RunSettings
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class FaceInpaintingPageRequest(UniversalBaseModel):
+ functions: typing.Optional[typing.List[RecipeFunction]] = None
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Variables to be used as Jinja prompt templates and in functions as arguments
+ """
+
+ input_image: str
+ text_prompt: str
+ face_scale: typing.Optional[float] = None
+ face_pos_x: typing.Optional[float] = None
+ face_pos_y: typing.Optional[float] = None
+ selected_model: typing.Optional[FaceInpaintingPageRequestSelectedModel] = None
+ negative_prompt: typing.Optional[str] = None
+ num_outputs: typing.Optional[int] = None
+ quality: typing.Optional[int] = None
+ upscale_factor: typing.Optional[float] = None
+ output_width: typing.Optional[int] = None
+ output_height: typing.Optional[int] = None
+ guidance_scale: typing.Optional[float] = None
+ seed: typing.Optional[int] = None
+ settings: typing.Optional[RunSettings] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/image_segmentation_page_request.py b/src/gooey/types/image_segmentation_page_request.py
new file mode 100644
index 0000000..a2ea60d
--- /dev/null
+++ b/src/gooey/types/image_segmentation_page_request.py
@@ -0,0 +1,36 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.pydantic_utilities import UniversalBaseModel
+import typing
+from .recipe_function import RecipeFunction
+import pydantic
+from .image_segmentation_page_request_selected_model import ImageSegmentationPageRequestSelectedModel
+from .run_settings import RunSettings
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class ImageSegmentationPageRequest(UniversalBaseModel):
+ functions: typing.Optional[typing.List[RecipeFunction]] = None
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Variables to be used as Jinja prompt templates and in functions as arguments
+ """
+
+ input_image: str
+ selected_model: typing.Optional[ImageSegmentationPageRequestSelectedModel] = None
+ mask_threshold: typing.Optional[float] = None
+ rect_persepective_transform: typing.Optional[bool] = None
+ reflection_opacity: typing.Optional[float] = None
+ obj_scale: typing.Optional[float] = None
+ obj_pos_x: typing.Optional[float] = None
+ obj_pos_y: typing.Optional[float] = None
+ settings: typing.Optional[RunSettings] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/img2img_page_request.py b/src/gooey/types/img2img_page_request.py
new file mode 100644
index 0000000..f3cfd2f
--- /dev/null
+++ b/src/gooey/types/img2img_page_request.py
@@ -0,0 +1,43 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.pydantic_utilities import UniversalBaseModel
+import typing
+from .recipe_function import RecipeFunction
+import pydantic
+from .img2img_page_request_selected_model import Img2ImgPageRequestSelectedModel
+from .img2img_page_request_selected_controlnet_model import Img2ImgPageRequestSelectedControlnetModel
+from .run_settings import RunSettings
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class Img2ImgPageRequest(UniversalBaseModel):
+ functions: typing.Optional[typing.List[RecipeFunction]] = None
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Variables to be used as Jinja prompt templates and in functions as arguments
+ """
+
+ input_image: str
+ text_prompt: typing.Optional[str] = None
+ selected_model: typing.Optional[Img2ImgPageRequestSelectedModel] = None
+ selected_controlnet_model: typing.Optional[Img2ImgPageRequestSelectedControlnetModel] = None
+ negative_prompt: typing.Optional[str] = None
+ num_outputs: typing.Optional[int] = None
+ quality: typing.Optional[int] = None
+ output_width: typing.Optional[int] = None
+ output_height: typing.Optional[int] = None
+ guidance_scale: typing.Optional[float] = None
+ prompt_strength: typing.Optional[float] = None
+ controlnet_conditioning_scale: typing.Optional[typing.List[float]] = None
+ seed: typing.Optional[int] = None
+ image_guidance_scale: typing.Optional[float] = None
+ settings: typing.Optional[RunSettings] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/lipsync_page_request.py b/src/gooey/types/lipsync_page_request.py
new file mode 100644
index 0000000..2914a1e
--- /dev/null
+++ b/src/gooey/types/lipsync_page_request.py
@@ -0,0 +1,37 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.pydantic_utilities import UniversalBaseModel
+import typing
+from .recipe_function import RecipeFunction
+import pydantic
+from .sad_talker_settings import SadTalkerSettings
+from .lipsync_page_request_selected_model import LipsyncPageRequestSelectedModel
+from .run_settings import RunSettings
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class LipsyncPageRequest(UniversalBaseModel):
+ functions: typing.Optional[typing.List[RecipeFunction]] = None
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Variables to be used as Jinja prompt templates and in functions as arguments
+ """
+
+ input_face: typing.Optional[str] = None
+ face_padding_top: typing.Optional[int] = None
+ face_padding_bottom: typing.Optional[int] = None
+ face_padding_left: typing.Optional[int] = None
+ face_padding_right: typing.Optional[int] = None
+ sadtalker_settings: typing.Optional[SadTalkerSettings] = None
+ selected_model: typing.Optional[LipsyncPageRequestSelectedModel] = None
+ input_audio: typing.Optional[str] = None
+ settings: typing.Optional[RunSettings] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/lipsync_request_selected_model.py b/src/gooey/types/lipsync_request_selected_model.py
new file mode 100644
index 0000000..c5614b4
--- /dev/null
+++ b/src/gooey/types/lipsync_request_selected_model.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+LipsyncRequestSelectedModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any]
diff --git a/src/gooey/types/lipsync_tts_page_request.py b/src/gooey/types/lipsync_tts_page_request.py
new file mode 100644
index 0000000..f4f5293
--- /dev/null
+++ b/src/gooey/types/lipsync_tts_page_request.py
@@ -0,0 +1,62 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.pydantic_utilities import UniversalBaseModel
+import typing
+from .recipe_function import RecipeFunction
+import pydantic
+from .lipsync_tts_page_request_tts_provider import LipsyncTtsPageRequestTtsProvider
+from .lipsync_tts_page_request_openai_voice_name import LipsyncTtsPageRequestOpenaiVoiceName
+from .lipsync_tts_page_request_openai_tts_model import LipsyncTtsPageRequestOpenaiTtsModel
+from .sad_talker_settings import SadTalkerSettings
+from .lipsync_tts_page_request_selected_model import LipsyncTtsPageRequestSelectedModel
+from .run_settings import RunSettings
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class LipsyncTtsPageRequest(UniversalBaseModel):
+ functions: typing.Optional[typing.List[RecipeFunction]] = None
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Variables to be used as Jinja prompt templates and in functions as arguments
+ """
+
+ text_prompt: str
+ tts_provider: typing.Optional[LipsyncTtsPageRequestTtsProvider] = None
+ uberduck_voice_name: typing.Optional[str] = None
+ uberduck_speaking_rate: typing.Optional[float] = None
+ google_voice_name: typing.Optional[str] = None
+ google_speaking_rate: typing.Optional[float] = None
+ google_pitch: typing.Optional[float] = None
+ bark_history_prompt: typing.Optional[str] = None
+ elevenlabs_voice_name: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Use `elevenlabs_voice_id` instead
+ """
+
+ elevenlabs_api_key: typing.Optional[str] = None
+ elevenlabs_voice_id: typing.Optional[str] = None
+ elevenlabs_model: typing.Optional[str] = None
+ elevenlabs_stability: typing.Optional[float] = None
+ elevenlabs_similarity_boost: typing.Optional[float] = None
+ elevenlabs_style: typing.Optional[float] = None
+ elevenlabs_speaker_boost: typing.Optional[bool] = None
+ azure_voice_name: typing.Optional[str] = None
+ openai_voice_name: typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName] = None
+ openai_tts_model: typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel] = None
+ input_face: typing.Optional[str] = None
+ face_padding_top: typing.Optional[int] = None
+ face_padding_bottom: typing.Optional[int] = None
+ face_padding_left: typing.Optional[int] = None
+ face_padding_right: typing.Optional[int] = None
+ sadtalker_settings: typing.Optional[SadTalkerSettings] = None
+ selected_model: typing.Optional[LipsyncTtsPageRequestSelectedModel] = None
+ settings: typing.Optional[RunSettings] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/lipsync_tts_request_openai_tts_model.py b/src/gooey/types/lipsync_tts_request_openai_tts_model.py
new file mode 100644
index 0000000..510dcfb
--- /dev/null
+++ b/src/gooey/types/lipsync_tts_request_openai_tts_model.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+LipsyncTtsRequestOpenaiTtsModel = typing.Union[typing.Literal["tts_1", "tts_1_hd"], typing.Any]
diff --git a/src/gooey/types/lipsync_tts_request_openai_voice_name.py b/src/gooey/types/lipsync_tts_request_openai_voice_name.py
new file mode 100644
index 0000000..7ea601b
--- /dev/null
+++ b/src/gooey/types/lipsync_tts_request_openai_voice_name.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+LipsyncTtsRequestOpenaiVoiceName = typing.Union[
+ typing.Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"], typing.Any
+]
diff --git a/src/gooey/types/lipsync_tts_request_selected_model.py b/src/gooey/types/lipsync_tts_request_selected_model.py
new file mode 100644
index 0000000..9ece5a9
--- /dev/null
+++ b/src/gooey/types/lipsync_tts_request_selected_model.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+LipsyncTtsRequestSelectedModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any]
diff --git a/src/gooey/types/lipsync_tts_request_tts_provider.py b/src/gooey/types/lipsync_tts_request_tts_provider.py
new file mode 100644
index 0000000..1a23fe3
--- /dev/null
+++ b/src/gooey/types/lipsync_tts_request_tts_provider.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+LipsyncTtsRequestTtsProvider = typing.Union[
+ typing.Literal["GOOGLE_TTS", "ELEVEN_LABS", "UBERDUCK", "BARK", "AZURE_TTS", "OPEN_AI"], typing.Any
+]
diff --git a/src/gooey/types/object_inpainting_page_request.py b/src/gooey/types/object_inpainting_page_request.py
new file mode 100644
index 0000000..50b5b72
--- /dev/null
+++ b/src/gooey/types/object_inpainting_page_request.py
@@ -0,0 +1,43 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.pydantic_utilities import UniversalBaseModel
+import typing
+from .recipe_function import RecipeFunction
+import pydantic
+from .object_inpainting_page_request_selected_model import ObjectInpaintingPageRequestSelectedModel
+from .run_settings import RunSettings
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class ObjectInpaintingPageRequest(UniversalBaseModel):
+ functions: typing.Optional[typing.List[RecipeFunction]] = None
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Variables to be used as Jinja prompt templates and in functions as arguments
+ """
+
+ input_image: str
+ text_prompt: str
+ obj_scale: typing.Optional[float] = None
+ obj_pos_x: typing.Optional[float] = None
+ obj_pos_y: typing.Optional[float] = None
+ mask_threshold: typing.Optional[float] = None
+ selected_model: typing.Optional[ObjectInpaintingPageRequestSelectedModel] = None
+ negative_prompt: typing.Optional[str] = None
+ num_outputs: typing.Optional[int] = None
+ quality: typing.Optional[int] = None
+ output_width: typing.Optional[int] = None
+ output_height: typing.Optional[int] = None
+ guidance_scale: typing.Optional[float] = None
+ sd2upscaling: typing.Optional[bool] = pydantic.Field(alias="sd_2_upscaling", default=None)
+ seed: typing.Optional[int] = None
+ settings: typing.Optional[RunSettings] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/portrait_request_selected_model.py b/src/gooey/types/portrait_request_selected_model.py
new file mode 100644
index 0000000..6c4a5ce
--- /dev/null
+++ b/src/gooey/types/portrait_request_selected_model.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+PortraitRequestSelectedModel = typing.Union[typing.Literal["sd_2", "runway_ml", "dall_e", "jack_qiao"], typing.Any]
diff --git a/src/gooey/types/product_image_request_selected_model.py b/src/gooey/types/product_image_request_selected_model.py
new file mode 100644
index 0000000..f1ce039
--- /dev/null
+++ b/src/gooey/types/product_image_request_selected_model.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ProductImageRequestSelectedModel = typing.Union[typing.Literal["sd_2", "runway_ml", "dall_e", "jack_qiao"], typing.Any]
diff --git a/src/gooey/types/qr_code_generator_page_request.py b/src/gooey/types/qr_code_generator_page_request.py
new file mode 100644
index 0000000..68f3730
--- /dev/null
+++ b/src/gooey/types/qr_code_generator_page_request.py
@@ -0,0 +1,66 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.pydantic_utilities import UniversalBaseModel
+import typing
+from .recipe_function import RecipeFunction
+import pydantic
+from .vcard import Vcard
+from .qr_code_generator_page_request_image_prompt_controlnet_models_item import (
+ QrCodeGeneratorPageRequestImagePromptControlnetModelsItem,
+)
+from .qr_code_generator_page_request_selected_model import QrCodeGeneratorPageRequestSelectedModel
+from .qr_code_generator_page_request_selected_controlnet_model_item import (
+ QrCodeGeneratorPageRequestSelectedControlnetModelItem,
+)
+from .qr_code_generator_page_request_scheduler import QrCodeGeneratorPageRequestScheduler
+from .run_settings import RunSettings
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class QrCodeGeneratorPageRequest(UniversalBaseModel):
+ functions: typing.Optional[typing.List[RecipeFunction]] = None
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Variables to be used as Jinja prompt templates and in functions as arguments
+ """
+
+ qr_code_data: typing.Optional[str] = None
+ qr_code_input_image: typing.Optional[str] = None
+ qr_code_vcard: typing.Optional[Vcard] = None
+ qr_code_file: typing.Optional[str] = None
+ use_url_shortener: typing.Optional[bool] = None
+ text_prompt: str
+ negative_prompt: typing.Optional[str] = None
+ image_prompt: typing.Optional[str] = None
+ image_prompt_controlnet_models: typing.Optional[
+ typing.List[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem]
+ ] = None
+ image_prompt_strength: typing.Optional[float] = None
+ image_prompt_scale: typing.Optional[float] = None
+ image_prompt_pos_x: typing.Optional[float] = None
+ image_prompt_pos_y: typing.Optional[float] = None
+ selected_model: typing.Optional[QrCodeGeneratorPageRequestSelectedModel] = None
+ selected_controlnet_model: typing.Optional[typing.List[QrCodeGeneratorPageRequestSelectedControlnetModelItem]] = (
+ None
+ )
+ output_width: typing.Optional[int] = None
+ output_height: typing.Optional[int] = None
+ guidance_scale: typing.Optional[float] = None
+ controlnet_conditioning_scale: typing.Optional[typing.List[float]] = None
+ num_outputs: typing.Optional[int] = None
+ quality: typing.Optional[int] = None
+ scheduler: typing.Optional[QrCodeGeneratorPageRequestScheduler] = None
+ seed: typing.Optional[int] = None
+ obj_scale: typing.Optional[float] = None
+ obj_pos_x: typing.Optional[float] = None
+ obj_pos_y: typing.Optional[float] = None
+ settings: typing.Optional[RunSettings] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/qr_code_request_image_prompt_controlnet_models_item.py b/src/gooey/types/qr_code_request_image_prompt_controlnet_models_item.py
new file mode 100644
index 0000000..3be2ab6
--- /dev/null
+++ b/src/gooey/types/qr_code_request_image_prompt_controlnet_models_item.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+QrCodeRequestImagePromptControlnetModelsItem = typing.Union[
+ typing.Literal[
+ "sd_controlnet_canny",
+ "sd_controlnet_depth",
+ "sd_controlnet_hed",
+ "sd_controlnet_mlsd",
+ "sd_controlnet_normal",
+ "sd_controlnet_openpose",
+ "sd_controlnet_scribble",
+ "sd_controlnet_seg",
+ "sd_controlnet_tile",
+ "sd_controlnet_brightness",
+ "control_v1p_sd15_qrcode_monster_v2",
+ ],
+ typing.Any,
+]
diff --git a/src/gooey/types/qr_code_request_scheduler.py b/src/gooey/types/qr_code_request_scheduler.py
new file mode 100644
index 0000000..890b204
--- /dev/null
+++ b/src/gooey/types/qr_code_request_scheduler.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+QrCodeRequestScheduler = typing.Union[
+ typing.Literal[
+ "singlestep_dpm_solver",
+ "multistep_dpm_solver",
+ "dpm_sde",
+ "dpm_discrete",
+ "dpm_discrete_ancestral",
+ "unipc",
+ "lms_discrete",
+ "heun",
+ "euler",
+ "euler_ancestral",
+ "pndm",
+ "ddpm",
+ "ddim",
+ "deis",
+ ],
+ typing.Any,
+]
diff --git a/src/gooey/types/qr_code_request_selected_controlnet_model_item.py b/src/gooey/types/qr_code_request_selected_controlnet_model_item.py
new file mode 100644
index 0000000..c5cdc8d
--- /dev/null
+++ b/src/gooey/types/qr_code_request_selected_controlnet_model_item.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+QrCodeRequestSelectedControlnetModelItem = typing.Union[
+ typing.Literal[
+ "sd_controlnet_canny",
+ "sd_controlnet_depth",
+ "sd_controlnet_hed",
+ "sd_controlnet_mlsd",
+ "sd_controlnet_normal",
+ "sd_controlnet_openpose",
+ "sd_controlnet_scribble",
+ "sd_controlnet_seg",
+ "sd_controlnet_tile",
+ "sd_controlnet_brightness",
+ "control_v1p_sd15_qrcode_monster_v2",
+ ],
+ typing.Any,
+]
diff --git a/src/gooey/types/qr_code_request_selected_model.py b/src/gooey/types/qr_code_request_selected_model.py
new file mode 100644
index 0000000..7ea963c
--- /dev/null
+++ b/src/gooey/types/qr_code_request_selected_model.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+QrCodeRequestSelectedModel = typing.Union[
+ typing.Literal[
+ "dream_shaper",
+ "dreamlike_2",
+ "sd_2",
+ "sd_1_5",
+ "dall_e",
+ "dall_e_3",
+ "openjourney_2",
+ "openjourney",
+ "analog_diffusion",
+ "protogen_5_3",
+ "jack_qiao",
+ "rodent_diffusion_1_5",
+ "deepfloyd_if",
+ ],
+ typing.Any,
+]
diff --git a/src/gooey/types/recipe_function.py b/src/gooey/types/recipe_function.py
index ed79772..08bea99 100644
--- a/src/gooey/types/recipe_function.py
+++ b/src/gooey/types/recipe_function.py
@@ -1,18 +1,14 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.pydantic_utilities import UniversalBaseModel
-import pydantic
from .recipe_function_trigger import RecipeFunctionTrigger
+import pydantic
from ..core.pydantic_utilities import IS_PYDANTIC_V2
import typing
class RecipeFunction(UniversalBaseModel):
- url: str = pydantic.Field()
- """
- The URL of the [function](https://gooey.ai/functions) to call.
- """
-
+ url: str
trigger: RecipeFunctionTrigger = pydantic.Field()
"""
When to run this function. `pre` runs before the recipe, `post` runs after the recipe.
diff --git a/src/gooey/types/remix_image_request_selected_controlnet_model.py b/src/gooey/types/remix_image_request_selected_controlnet_model.py
new file mode 100644
index 0000000..eea207f
--- /dev/null
+++ b/src/gooey/types/remix_image_request_selected_controlnet_model.py
@@ -0,0 +1,19 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .remix_image_request_selected_controlnet_model_item import RemixImageRequestSelectedControlnetModelItem
+
+RemixImageRequestSelectedControlnetModel = typing.Union[
+ typing.List[RemixImageRequestSelectedControlnetModelItem],
+ typing.Literal["sd_controlnet_canny"],
+ typing.Literal["sd_controlnet_depth"],
+ typing.Literal["sd_controlnet_hed"],
+ typing.Literal["sd_controlnet_mlsd"],
+ typing.Literal["sd_controlnet_normal"],
+ typing.Literal["sd_controlnet_openpose"],
+ typing.Literal["sd_controlnet_scribble"],
+ typing.Literal["sd_controlnet_seg"],
+ typing.Literal["sd_controlnet_tile"],
+ typing.Literal["sd_controlnet_brightness"],
+ typing.Literal["control_v1p_sd15_qrcode_monster_v2"],
+]
diff --git a/src/gooey/types/remix_image_request_selected_controlnet_model_item.py b/src/gooey/types/remix_image_request_selected_controlnet_model_item.py
new file mode 100644
index 0000000..b4f3ff0
--- /dev/null
+++ b/src/gooey/types/remix_image_request_selected_controlnet_model_item.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+RemixImageRequestSelectedControlnetModelItem = typing.Union[
+ typing.Literal[
+ "sd_controlnet_canny",
+ "sd_controlnet_depth",
+ "sd_controlnet_hed",
+ "sd_controlnet_mlsd",
+ "sd_controlnet_normal",
+ "sd_controlnet_openpose",
+ "sd_controlnet_scribble",
+ "sd_controlnet_seg",
+ "sd_controlnet_tile",
+ "sd_controlnet_brightness",
+ "control_v1p_sd15_qrcode_monster_v2",
+ ],
+ typing.Any,
+]
diff --git a/src/gooey/types/remix_image_request_selected_model.py b/src/gooey/types/remix_image_request_selected_model.py
new file mode 100644
index 0000000..245d6b0
--- /dev/null
+++ b/src/gooey/types/remix_image_request_selected_model.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+RemixImageRequestSelectedModel = typing.Union[
+ typing.Literal[
+ "dream_shaper",
+ "dreamlike_2",
+ "sd_2",
+ "sd_1_5",
+ "dall_e",
+ "instruct_pix2pix",
+ "openjourney_2",
+ "openjourney",
+ "analog_diffusion",
+ "protogen_5_3",
+ "jack_qiao",
+ "rodent_diffusion_1_5",
+ ],
+ typing.Any,
+]
diff --git a/src/gooey/types/remove_background_request_selected_model.py b/src/gooey/types/remove_background_request_selected_model.py
new file mode 100644
index 0000000..c84f0e7
--- /dev/null
+++ b/src/gooey/types/remove_background_request_selected_model.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+RemoveBackgroundRequestSelectedModel = typing.Union[typing.Literal["dis", "u2net"], typing.Any]
diff --git a/src/gooey/types/sad_talker_settings.py b/src/gooey/types/sad_talker_settings.py
index 85464e7..c9200b4 100644
--- a/src/gooey/types/sad_talker_settings.py
+++ b/src/gooey/types/sad_talker_settings.py
@@ -24,16 +24,8 @@ class SadTalkerSettings(UniversalBaseModel):
Scale the amount of expression motion. 1.0 is normal, 0.5 is very reduced, and 2.0 is quite a lot.
"""
- ref_eyeblink: typing.Optional[str] = pydantic.Field(default=None)
- """
- Optional reference video for eyeblinks to make the eyebrow movement more natural.
- """
-
- ref_pose: typing.Optional[str] = pydantic.Field(default=None)
- """
- Optional reference video to pose the head.
- """
-
+ ref_eyeblink: typing.Optional[str] = None
+ ref_pose: typing.Optional[str] = None
input_yaw: typing.Optional[typing.List[int]] = None
input_pitch: typing.Optional[typing.List[int]] = None
input_roll: typing.Optional[typing.List[int]] = None
diff --git a/src/gooey/types/speech_recognition_request_output_format.py b/src/gooey/types/speech_recognition_request_output_format.py
new file mode 100644
index 0000000..4d2cf2b
--- /dev/null
+++ b/src/gooey/types/speech_recognition_request_output_format.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+SpeechRecognitionRequestOutputFormat = typing.Union[typing.Literal["text", "json", "srt", "vtt"], typing.Any]
diff --git a/src/gooey/types/speech_recognition_request_selected_model.py b/src/gooey/types/speech_recognition_request_selected_model.py
new file mode 100644
index 0000000..9d2d28f
--- /dev/null
+++ b/src/gooey/types/speech_recognition_request_selected_model.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+SpeechRecognitionRequestSelectedModel = typing.Union[
+ typing.Literal[
+ "whisper_large_v2",
+ "whisper_large_v3",
+ "whisper_hindi_large_v2",
+ "whisper_telugu_large_v2",
+ "nemo_english",
+ "nemo_hindi",
+ "vakyansh_bhojpuri",
+ "gcp_v1",
+ "usm",
+ "deepgram",
+ "azure",
+ "seamless_m4t_v2",
+ "mms_1b_all",
+ "seamless_m4t",
+ ],
+ typing.Any,
+]
diff --git a/src/gooey/types/speech_recognition_request_translation_model.py b/src/gooey/types/speech_recognition_request_translation_model.py
new file mode 100644
index 0000000..886ab92
--- /dev/null
+++ b/src/gooey/types/speech_recognition_request_translation_model.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+SpeechRecognitionRequestTranslationModel = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any]
diff --git a/src/gooey/types/synthesize_data_request_response_format_type.py b/src/gooey/types/synthesize_data_request_response_format_type.py
new file mode 100644
index 0000000..3ab37a9
--- /dev/null
+++ b/src/gooey/types/synthesize_data_request_response_format_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+SynthesizeDataRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any]
diff --git a/src/gooey/types/synthesize_data_request_selected_asr_model.py b/src/gooey/types/synthesize_data_request_selected_asr_model.py
new file mode 100644
index 0000000..6c1bc21
--- /dev/null
+++ b/src/gooey/types/synthesize_data_request_selected_asr_model.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+SynthesizeDataRequestSelectedAsrModel = typing.Union[
+ typing.Literal[
+ "whisper_large_v2",
+ "whisper_large_v3",
+ "whisper_hindi_large_v2",
+ "whisper_telugu_large_v2",
+ "nemo_english",
+ "nemo_hindi",
+ "vakyansh_bhojpuri",
+ "gcp_v1",
+ "usm",
+ "deepgram",
+ "azure",
+ "seamless_m4t_v2",
+ "mms_1b_all",
+ "seamless_m4t",
+ ],
+ typing.Any,
+]
diff --git a/src/gooey/types/translate_request_selected_model.py b/src/gooey/types/translate_request_selected_model.py
new file mode 100644
index 0000000..b774b56
--- /dev/null
+++ b/src/gooey/types/translate_request_selected_model.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+TranslateRequestSelectedModel = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any]
diff --git a/src/gooey/types/translation_page_request.py b/src/gooey/types/translation_page_request.py
new file mode 100644
index 0000000..9c033a6
--- /dev/null
+++ b/src/gooey/types/translation_page_request.py
@@ -0,0 +1,33 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.pydantic_utilities import UniversalBaseModel
+import typing
+from .recipe_function import RecipeFunction
+import pydantic
+from .translation_page_request_selected_model import TranslationPageRequestSelectedModel
+from .run_settings import RunSettings
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class TranslationPageRequest(UniversalBaseModel):
+ functions: typing.Optional[typing.List[RecipeFunction]] = None
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Variables to be used as Jinja prompt templates and in functions as arguments
+ """
+
+ texts: typing.Optional[typing.List[str]] = None
+ selected_model: typing.Optional[TranslationPageRequestSelectedModel] = None
+ translation_source: typing.Optional[str] = None
+ translation_target: typing.Optional[str] = None
+ glossary_document: typing.Optional[str] = None
+ settings: typing.Optional[RunSettings] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/types/upscale_request_selected_models_item.py b/src/gooey/types/upscale_request_selected_models_item.py
new file mode 100644
index 0000000..1a8362e
--- /dev/null
+++ b/src/gooey/types/upscale_request_selected_models_item.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+UpscaleRequestSelectedModelsItem = typing.Union[
+ typing.Literal["gfpgan_1_4", "real_esrgan_x2", "sd_x4", "real_esrgan", "gfpgan"], typing.Any
+]
diff --git a/src/gooey/types/video_bots_page_request.py b/src/gooey/types/video_bots_page_request.py
new file mode 100644
index 0000000..6fb8b5e
--- /dev/null
+++ b/src/gooey/types/video_bots_page_request.py
@@ -0,0 +1,131 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.pydantic_utilities import UniversalBaseModel
+import typing
+from .video_bots_page_request_functions_item import VideoBotsPageRequestFunctionsItem
+import pydantic
+from .conversation_entry import ConversationEntry
+from .large_language_models import LargeLanguageModels
+from .video_bots_page_request_embedding_model import VideoBotsPageRequestEmbeddingModel
+from .video_bots_page_request_citation_style import VideoBotsPageRequestCitationStyle
+from .video_bots_page_request_asr_model import VideoBotsPageRequestAsrModel
+from .video_bots_page_request_translation_model import VideoBotsPageRequestTranslationModel
+from .video_bots_page_request_lipsync_model import VideoBotsPageRequestLipsyncModel
+from .llm_tools import LlmTools
+from .video_bots_page_request_response_format_type import VideoBotsPageRequestResponseFormatType
+from .video_bots_page_request_tts_provider import VideoBotsPageRequestTtsProvider
+from .video_bots_page_request_openai_voice_name import VideoBotsPageRequestOpenaiVoiceName
+from .video_bots_page_request_openai_tts_model import VideoBotsPageRequestOpenaiTtsModel
+from .video_bots_page_request_sadtalker_settings import VideoBotsPageRequestSadtalkerSettings
+from .run_settings import RunSettings
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class VideoBotsPageRequest(UniversalBaseModel):
+ functions: typing.Optional[typing.List[VideoBotsPageRequestFunctionsItem]] = None
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Variables to be used as Jinja prompt templates and in functions as arguments
+ """
+
+ input_prompt: typing.Optional[str] = None
+ input_audio: typing.Optional[str] = None
+ input_images: typing.Optional[typing.List[str]] = None
+ input_documents: typing.Optional[typing.List[str]] = None
+ doc_extract_url: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Select a workflow to extract text from documents and images.
+ """
+
+ messages: typing.Optional[typing.List[ConversationEntry]] = None
+ bot_script: typing.Optional[str] = None
+ selected_model: typing.Optional[LargeLanguageModels] = None
+ document_model: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ When your copilot users upload a photo or pdf, what kind of document are they mostly likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api))
+ """
+
+ task_instructions: typing.Optional[str] = None
+ query_instructions: typing.Optional[str] = None
+ keyword_instructions: typing.Optional[str] = None
+ documents: typing.Optional[typing.List[str]] = None
+ max_references: typing.Optional[int] = None
+ max_context_words: typing.Optional[int] = None
+ scroll_jump: typing.Optional[int] = None
+ embedding_model: typing.Optional[VideoBotsPageRequestEmbeddingModel] = None
+ dense_weight: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
+ Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
+ """
+
+ citation_style: typing.Optional[VideoBotsPageRequestCitationStyle] = None
+ use_url_shortener: typing.Optional[bool] = None
+ asr_model: typing.Optional[VideoBotsPageRequestAsrModel] = pydantic.Field(default=None)
+ """
+ Choose a model to transcribe incoming audio messages to text.
+ """
+
+ asr_language: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Choose a language to transcribe incoming audio messages to text.
+ """
+
+ translation_model: typing.Optional[VideoBotsPageRequestTranslationModel] = None
+ user_language: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages.
+ """
+
+ input_glossary_document: typing.Optional[str] = None
+ output_glossary_document: typing.Optional[str] = None
+ lipsync_model: typing.Optional[VideoBotsPageRequestLipsyncModel] = None
+ tools: typing.Optional[typing.List[LlmTools]] = pydantic.Field(default=None)
+ """
+ Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling).
+ """
+
+ avoid_repetition: typing.Optional[bool] = None
+ num_outputs: typing.Optional[int] = None
+ quality: typing.Optional[float] = None
+ max_tokens: typing.Optional[int] = None
+ sampling_temperature: typing.Optional[float] = None
+ response_format_type: typing.Optional[VideoBotsPageRequestResponseFormatType] = None
+ tts_provider: typing.Optional[VideoBotsPageRequestTtsProvider] = None
+ uberduck_voice_name: typing.Optional[str] = None
+ uberduck_speaking_rate: typing.Optional[float] = None
+ google_voice_name: typing.Optional[str] = None
+ google_speaking_rate: typing.Optional[float] = None
+ google_pitch: typing.Optional[float] = None
+ bark_history_prompt: typing.Optional[str] = None
+ elevenlabs_voice_name: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Use `elevenlabs_voice_id` instead
+ """
+
+ elevenlabs_api_key: typing.Optional[str] = None
+ elevenlabs_voice_id: typing.Optional[str] = None
+ elevenlabs_model: typing.Optional[str] = None
+ elevenlabs_stability: typing.Optional[float] = None
+ elevenlabs_similarity_boost: typing.Optional[float] = None
+ elevenlabs_style: typing.Optional[float] = None
+ elevenlabs_speaker_boost: typing.Optional[bool] = None
+ azure_voice_name: typing.Optional[str] = None
+ openai_voice_name: typing.Optional[VideoBotsPageRequestOpenaiVoiceName] = None
+ openai_tts_model: typing.Optional[VideoBotsPageRequestOpenaiTtsModel] = None
+ input_face: typing.Optional[str] = None
+ face_padding_top: typing.Optional[int] = None
+ face_padding_bottom: typing.Optional[int] = None
+ face_padding_left: typing.Optional[int] = None
+ face_padding_right: typing.Optional[int] = None
+ sadtalker_settings: typing.Optional[VideoBotsPageRequestSadtalkerSettings] = None
+ settings: typing.Optional[RunSettings] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/gooey/copilot/types/video_bots_page_request_asr_model.py b/src/gooey/types/video_bots_page_request_asr_model.py
similarity index 100%
rename from src/gooey/copilot/types/video_bots_page_request_asr_model.py
rename to src/gooey/types/video_bots_page_request_asr_model.py
diff --git a/src/gooey/copilot/types/video_bots_page_request_citation_style.py b/src/gooey/types/video_bots_page_request_citation_style.py
similarity index 100%
rename from src/gooey/copilot/types/video_bots_page_request_citation_style.py
rename to src/gooey/types/video_bots_page_request_citation_style.py
diff --git a/src/gooey/copilot/types/video_bots_page_request_embedding_model.py b/src/gooey/types/video_bots_page_request_embedding_model.py
similarity index 100%
rename from src/gooey/copilot/types/video_bots_page_request_embedding_model.py
rename to src/gooey/types/video_bots_page_request_embedding_model.py
diff --git a/src/gooey/types/video_bots_page_request_functions_item.py b/src/gooey/types/video_bots_page_request_functions_item.py
new file mode 100644
index 0000000..5803c05
--- /dev/null
+++ b/src/gooey/types/video_bots_page_request_functions_item.py
@@ -0,0 +1,24 @@
# This file was auto-generated by Fern from our API Definition.

from ..core.pydantic_utilities import UniversalBaseModel
from .video_bots_page_request_functions_item_trigger import VideoBotsPageRequestFunctionsItemTrigger
import pydantic
from ..core.pydantic_utilities import IS_PYDANTIC_V2
import typing


class VideoBotsPageRequestFunctionsItem(UniversalBaseModel):
    """A function hook attached to a video-bots (Copilot) request: the URL of the
    function to run, plus when to run it relative to the recipe."""

    url: str  # location of the function to run — presumably an HTTP endpoint; confirm against API docs
    trigger: VideoBotsPageRequestFunctionsItemTrigger = pydantic.Field()
    """
    When to run this function. `pre` runs before the recipe, `post` runs after the recipe.
    """

    if IS_PYDANTIC_V2:
        # Pydantic v2: tolerate unknown extra fields and make instances immutable.
        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
    else:

        class Config:
            # Pydantic v1 equivalent of the v2 config in the branch above.
            frozen = True
            smart_union = True
            extra = pydantic.Extra.allow
diff --git a/src/gooey/types/video_bots_page_request_functions_item_trigger.py b/src/gooey/types/video_bots_page_request_functions_item_trigger.py
new file mode 100644
index 0000000..b3c2078
--- /dev/null
+++ b/src/gooey/types/video_bots_page_request_functions_item_trigger.py
@@ -0,0 +1,5 @@
# This file was auto-generated by Fern from our API Definition.

import typing

# When a request function runs relative to the recipe: "pre" (before) or
# "post" (after). The trailing `typing.Any` keeps the alias forward-compatible
# with values the server may add later.
VideoBotsPageRequestFunctionsItemTrigger = typing.Union[
    typing.Literal[
        "pre",
        "post",
    ],
    typing.Any,
]
diff --git a/src/gooey/copilot/types/video_bots_page_request_lipsync_model.py b/src/gooey/types/video_bots_page_request_lipsync_model.py
similarity index 100%
rename from src/gooey/copilot/types/video_bots_page_request_lipsync_model.py
rename to src/gooey/types/video_bots_page_request_lipsync_model.py
diff --git a/src/gooey/copilot/types/video_bots_page_request_openai_tts_model.py b/src/gooey/types/video_bots_page_request_openai_tts_model.py
similarity index 100%
rename from src/gooey/copilot/types/video_bots_page_request_openai_tts_model.py
rename to src/gooey/types/video_bots_page_request_openai_tts_model.py
diff --git a/src/gooey/copilot/types/video_bots_page_request_openai_voice_name.py b/src/gooey/types/video_bots_page_request_openai_voice_name.py
similarity index 100%
rename from src/gooey/copilot/types/video_bots_page_request_openai_voice_name.py
rename to src/gooey/types/video_bots_page_request_openai_voice_name.py
diff --git a/src/gooey/copilot/types/video_bots_page_request_response_format_type.py b/src/gooey/types/video_bots_page_request_response_format_type.py
similarity index 100%
rename from src/gooey/copilot/types/video_bots_page_request_response_format_type.py
rename to src/gooey/types/video_bots_page_request_response_format_type.py
diff --git a/src/gooey/types/video_bots_page_request_sadtalker_settings.py b/src/gooey/types/video_bots_page_request_sadtalker_settings.py
new file mode 100644
index 0000000..6749388
--- /dev/null
+++ b/src/gooey/types/video_bots_page_request_sadtalker_settings.py
@@ -0,0 +1,40 @@
# This file was auto-generated by Fern from our API Definition.

from ..core.pydantic_utilities import UniversalBaseModel
import typing
from .video_bots_page_request_sadtalker_settings_preprocess import VideoBotsPageRequestSadtalkerSettingsPreprocess
import pydantic
from ..core.pydantic_utilities import IS_PYDANTIC_V2


class VideoBotsPageRequestSadtalkerSettings(UniversalBaseModel):
    """Tuning options for the SadTalker lipsync/animation step of a video-bots
    (Copilot) request. Every field is optional; ``None`` leaves the server-side
    default in effect."""

    still: typing.Optional[bool] = None  # NOTE(review): presumably SadTalker "still mode" (reduced head motion) — confirm
    preprocess: typing.Optional[VideoBotsPageRequestSadtalkerSettingsPreprocess] = pydantic.Field(default=None)
    """
    SadTalker only generates 512x512 output. 'crop' handles this by cropping the input to 512x512. 'resize' scales down the input to fit 512x512 and scales it back up after lipsyncing (does not work well for full person images, better for portraits). 'full' processes the cropped region and pastes it back into the original input. 'extcrop' and 'extfull' are similar to 'crop' and 'full' but with extended cropping.
    """

    pose_style: typing.Optional[int] = pydantic.Field(default=None)
    """
    Random seed 0-45 inclusive that affects how the pose is animated.
    """

    expression_scale: typing.Optional[float] = pydantic.Field(default=None)
    """
    Scale the amount of expression motion. 1.0 is normal, 0.5 is very reduced, and 2.0 is quite a lot.
    """

    ref_eyeblink: typing.Optional[str] = None  # assumes a URL/path to a reference video for eye blinks — TODO confirm
    ref_pose: typing.Optional[str] = None  # assumes a URL/path to a reference video for head pose — TODO confirm
    input_yaw: typing.Optional[typing.List[int]] = None  # per-frame yaw values; exact units/semantics are defined server-side
    input_pitch: typing.Optional[typing.List[int]] = None  # per-frame pitch values; see input_yaw
    input_roll: typing.Optional[typing.List[int]] = None  # per-frame roll values; see input_yaw

    if IS_PYDANTIC_V2:
        # Pydantic v2: tolerate unknown extra fields and make instances immutable.
        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
    else:

        class Config:
            # Pydantic v1 equivalent of the v2 config in the branch above.
            frozen = True
            smart_union = True
            extra = pydantic.Extra.allow
diff --git a/src/gooey/types/video_bots_page_request_sadtalker_settings_preprocess.py b/src/gooey/types/video_bots_page_request_sadtalker_settings_preprocess.py
new file mode 100644
index 0000000..4a625ac
--- /dev/null
+++ b/src/gooey/types/video_bots_page_request_sadtalker_settings_preprocess.py
@@ -0,0 +1,7 @@
# This file was auto-generated by Fern from our API Definition.

import typing

# How SadTalker fits arbitrary input into its fixed 512x512 working size.
# The trailing `typing.Any` keeps the alias forward-compatible with values
# the server may add later.
VideoBotsPageRequestSadtalkerSettingsPreprocess = typing.Union[
    typing.Literal[
        "crop",
        "extcrop",
        "resize",
        "full",
        "extfull",
    ],
    typing.Any,
]
diff --git a/src/gooey/copilot/types/video_bots_page_request_translation_model.py b/src/gooey/types/video_bots_page_request_translation_model.py
similarity index 100%
rename from src/gooey/copilot/types/video_bots_page_request_translation_model.py
rename to src/gooey/types/video_bots_page_request_translation_model.py
diff --git a/src/gooey/copilot/types/video_bots_page_request_tts_provider.py b/src/gooey/types/video_bots_page_request_tts_provider.py
similarity index 100%
rename from src/gooey/copilot/types/video_bots_page_request_tts_provider.py
rename to src/gooey/types/video_bots_page_request_tts_provider.py