From e0a330756368f66a8b7986f6d4ea7fcbbf76b145 Mon Sep 17 00:00:00 2001 From: takatost Date: Fri, 20 Sep 2024 19:47:25 +0800 Subject: [PATCH 001/235] fix(workflow): "Max submit count reached" error occurred when executing workflow as tool in iteration (#8595) --- api/core/workflow/graph_engine/graph_engine.py | 10 +++++++--- api/core/workflow/nodes/iteration/iteration_node.py | 1 + 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/api/core/workflow/graph_engine/graph_engine.py b/api/core/workflow/graph_engine/graph_engine.py index 57e4f716fdda4..8342dbd13d2a5 100644 --- a/api/core/workflow/graph_engine/graph_engine.py +++ b/api/core/workflow/graph_engine/graph_engine.py @@ -180,16 +180,20 @@ def run(self) -> Generator[GraphEngineEvent, None, None]: # trigger graph run success event yield GraphRunSucceededEvent(outputs=self.graph_runtime_state.outputs) + self._release_thread() except GraphRunFailedError as e: yield GraphRunFailedEvent(error=e.error) + self._release_thread() return except Exception as e: logger.exception("Unknown Error when graph running") yield GraphRunFailedEvent(error=str(e)) + self._release_thread() raise e - finally: - if self.is_main_thread_pool and self.thread_pool_id in GraphEngine.workflow_thread_pool_mapping: - del GraphEngine.workflow_thread_pool_mapping[self.thread_pool_id] + + def _release_thread(self): + if self.is_main_thread_pool and self.thread_pool_id in GraphEngine.workflow_thread_pool_mapping: + del GraphEngine.workflow_thread_pool_mapping[self.thread_pool_id] def _run( self, diff --git a/api/core/workflow/nodes/iteration/iteration_node.py b/api/core/workflow/nodes/iteration/iteration_node.py index 6f20745dafc8d..01bb4e9076e5a 100644 --- a/api/core/workflow/nodes/iteration/iteration_node.py +++ b/api/core/workflow/nodes/iteration/iteration_node.py @@ -89,6 +89,7 @@ def _run(self) -> Generator[RunEvent | InNodeEvent, None, None]: variable_pool=variable_pool, max_execution_steps=dify_config.WORKFLOW_MAX_EXECUTION_STEPS, 
max_execution_time=dify_config.WORKFLOW_MAX_EXECUTION_TIME, + thread_pool_id=self.thread_pool_id, ) start_at = datetime.now(timezone.utc).replace(tzinfo=None) From d63a5a1c3ce526df43984899b6c2234a4d2aaf3e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=9D=9E=E6=B3=95=E6=93=8D=E4=BD=9C?= Date: Sat, 21 Sep 2024 17:30:30 +0800 Subject: [PATCH 002/235] fix: a helper link error (#8508) --- .../app/configuration/dataset-config/settings-modal/index.tsx | 2 +- web/app/components/datasets/create/step-two/index.tsx | 2 +- .../components/datasets/hit-testing/modify-retrieval-modal.tsx | 2 +- web/app/components/datasets/settings/form/index.tsx | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/web/app/components/app/configuration/dataset-config/settings-modal/index.tsx b/web/app/components/app/configuration/dataset-config/settings-modal/index.tsx index 65858ce8cfd36..e538c347d95c9 100644 --- a/web/app/components/app/configuration/dataset-config/settings-modal/index.tsx +++ b/web/app/components/app/configuration/dataset-config/settings-modal/index.tsx @@ -263,7 +263,7 @@ const SettingsModal: FC = ({
{t('datasetSettings.form.retrievalSetting.title')}
- {t('datasetSettings.form.retrievalSetting.learnMore')} + {t('datasetSettings.form.retrievalSetting.learnMore')} {t('datasetSettings.form.retrievalSetting.description')}
diff --git a/web/app/components/datasets/create/step-two/index.tsx b/web/app/components/datasets/create/step-two/index.tsx index 94614918dbe40..f4fc58ee2a2d8 100644 --- a/web/app/components/datasets/create/step-two/index.tsx +++ b/web/app/components/datasets/create/step-two/index.tsx @@ -820,7 +820,7 @@ const StepTwo = ({
{t('datasetSettings.form.retrievalSetting.title')}
- {t('datasetSettings.form.retrievalSetting.learnMore')} + {t('datasetSettings.form.retrievalSetting.learnMore')} {t('datasetSettings.form.retrievalSetting.longDescription')}
diff --git a/web/app/components/datasets/hit-testing/modify-retrieval-modal.tsx b/web/app/components/datasets/hit-testing/modify-retrieval-modal.tsx index 999f1cdf0d2b0..1fc5b68d671eb 100644 --- a/web/app/components/datasets/hit-testing/modify-retrieval-modal.tsx +++ b/web/app/components/datasets/hit-testing/modify-retrieval-modal.tsx @@ -77,7 +77,7 @@ const ModifyRetrievalModal: FC = ({
{t('datasetSettings.form.retrievalSetting.title')}
diff --git a/web/app/components/datasets/settings/form/index.tsx b/web/app/components/datasets/settings/form/index.tsx index 0f6bdd0a59e0d..15b8abc242e1a 100644 --- a/web/app/components/datasets/settings/form/index.tsx +++ b/web/app/components/datasets/settings/form/index.tsx @@ -245,7 +245,7 @@ const Form = () => {
{t('datasetSettings.form.retrievalSetting.title')}
From 483ead55d5d0e337f2b82a8dab2cdf501f390caa Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Sat, 21 Sep 2024 17:30:43 +0800 Subject: [PATCH 003/235] chore: translate i18n files (#8557) Co-authored-by: iamjoel <2120155+iamjoel@users.noreply.github.com> Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com> --- web/i18n/de-DE/dataset-creation.ts | 1 + web/i18n/es-ES/dataset-creation.ts | 1 + web/i18n/fa-IR/dataset-creation.ts | 1 + web/i18n/fr-FR/dataset-creation.ts | 1 + web/i18n/hi-IN/dataset-creation.ts | 1 + web/i18n/it-IT/dataset-creation.ts | 1 + web/i18n/ja-JP/dataset-creation.ts | 1 + web/i18n/ko-KR/dataset-creation.ts | 1 + web/i18n/pl-PL/dataset-creation.ts | 1 + web/i18n/pt-BR/dataset-creation.ts | 1 + web/i18n/ro-RO/dataset-creation.ts | 1 + web/i18n/ru-RU/dataset-creation.ts | 1 + web/i18n/tr-TR/dataset-creation.ts | 1 + web/i18n/uk-UA/dataset-creation.ts | 1 + web/i18n/vi-VN/dataset-creation.ts | 1 + web/i18n/zh-Hant/dataset-creation.ts | 1 + 16 files changed, 16 insertions(+) diff --git a/web/i18n/de-DE/dataset-creation.ts b/web/i18n/de-DE/dataset-creation.ts index 8b27395049e12..5251cc94e846a 100644 --- a/web/i18n/de-DE/dataset-creation.ts +++ b/web/i18n/de-DE/dataset-creation.ts @@ -133,6 +133,7 @@ const translation = { datasetSettingLink: 'Wissenseinstellungen.', websiteSource: 'Preprocess-Website', webpageUnit: 'Seiten', + separatorTip: 'Ein Trennzeichen ist das Zeichen, das zum Trennen von Text verwendet wird. \\n\\n und \\n sind häufig verwendete Trennzeichen zum Trennen von Absätzen und Zeilen. In Kombination mit Kommas (\\n\\n,\\n) werden Absätze nach Zeilen segmentiert, wenn die maximale Blocklänge überschritten wird. Sie können auch spezielle, von Ihnen selbst definierte Trennzeichen verwenden (z. B. 
***).', }, stepThree: { creationTitle: '🎉 Wissen erstellt', diff --git a/web/i18n/es-ES/dataset-creation.ts b/web/i18n/es-ES/dataset-creation.ts index 132c9cbb9b849..e093632bc4fc2 100644 --- a/web/i18n/es-ES/dataset-creation.ts +++ b/web/i18n/es-ES/dataset-creation.ts @@ -138,6 +138,7 @@ const translation = { indexSettingTip: 'Para cambiar el método de índice, por favor ve a la ', retrievalSettingTip: 'Para cambiar el método de índice, por favor ve a la ', datasetSettingLink: 'configuración del conocimiento.', + separatorTip: 'Un delimitador es el carácter que se utiliza para separar el texto. \\n\\n y \\n son delimitadores comúnmente utilizados para separar párrafos y líneas. Combinado con comas (\\n\\n,\\n), los párrafos se segmentarán por líneas cuando excedan la longitud máxima del fragmento. También puede utilizar delimitadores especiales definidos por usted mismo (por ejemplo, ***).', }, stepThree: { creationTitle: '🎉 Conocimiento creado', diff --git a/web/i18n/fa-IR/dataset-creation.ts b/web/i18n/fa-IR/dataset-creation.ts index e6e6ad5bfb4d4..40dae4b02d826 100644 --- a/web/i18n/fa-IR/dataset-creation.ts +++ b/web/i18n/fa-IR/dataset-creation.ts @@ -138,6 +138,7 @@ const translation = { indexSettingTip: 'برای تغییر روش شاخص، لطفاً به', retrievalSettingTip: 'برای تغییر روش شاخص، لطفاً به', datasetSettingLink: 'تنظیمات دانش بروید.', + separatorTip: 'جداکننده نویسه ای است که برای جداسازی متن استفاده می شود. \\n\\n و \\n معمولا برای جداسازی پاراگراف ها و خطوط استفاده می شوند. همراه با کاما (\\n\\n,\\n)، پاراگراف ها زمانی که از حداکثر طول تکه فراتر می روند، با خطوط تقسیم بندی می شوند. 
همچنین می توانید از جداکننده های خاصی که توسط خودتان تعریف شده اند استفاده کنید (مثلا ***).', }, stepThree: { creationTitle: ' دانش ایجاد شد', diff --git a/web/i18n/fr-FR/dataset-creation.ts b/web/i18n/fr-FR/dataset-creation.ts index c08a3e5731bb3..38066885d0c6e 100644 --- a/web/i18n/fr-FR/dataset-creation.ts +++ b/web/i18n/fr-FR/dataset-creation.ts @@ -133,6 +133,7 @@ const translation = { datasetSettingLink: 'Paramètres de connaissance.', webpageUnit: 'Pages', websiteSource: 'Site web de prétraitement', + separatorTip: 'Un délimiteur est le caractère utilisé pour séparer le texte. \\n\\n et \\n sont des délimiteurs couramment utilisés pour séparer les paragraphes et les lignes. Combiné à des virgules (\\n\\n,\\n), les paragraphes seront segmentés par des lignes lorsqu’ils dépasseront la longueur maximale des morceaux. Vous pouvez également utiliser des délimiteurs spéciaux définis par vous-même (par exemple ***).', }, stepThree: { creationTitle: '🎉 Connaissance créée', diff --git a/web/i18n/hi-IN/dataset-creation.ts b/web/i18n/hi-IN/dataset-creation.ts index 0fa71acf4aa32..19c396081a37e 100644 --- a/web/i18n/hi-IN/dataset-creation.ts +++ b/web/i18n/hi-IN/dataset-creation.ts @@ -155,6 +155,7 @@ const translation = { indexSettingTip: 'इंडेक्स विधि बदलने के लिए, कृपया जाएं ', retrievalSettingTip: 'इंडेक्स विधि बदलने के लिए, कृपया जाएं ', datasetSettingLink: 'ज्ञान सेटिंग्स।', + separatorTip: 'एक सीमांकक पाठ को अलग करने के लिए उपयोग किया जाने वाला वर्ण है। \\n\\n और \\n आमतौर पर पैराग्राफ और लाइनों को अलग करने के लिए उपयोग किए जाने वाले सीमांकक हैं। अल्पविराम (\\n\\n,\\n) के साथ संयुक्त, अधिकतम खंड लंबाई से अधिक होने पर अनुच्छेदों को पंक्तियों द्वारा खंडित किया जाएगा। आप स्वयं द्वारा परिभाषित विशेष सीमांकक का भी उपयोग कर सकते हैं (उदा. 
***).', }, stepThree: { creationTitle: '🎉 ज्ञान बनाया गया', diff --git a/web/i18n/it-IT/dataset-creation.ts b/web/i18n/it-IT/dataset-creation.ts index 1629776bf3c09..46889b80a60c8 100644 --- a/web/i18n/it-IT/dataset-creation.ts +++ b/web/i18n/it-IT/dataset-creation.ts @@ -158,6 +158,7 @@ const translation = { indexSettingTip: 'Per cambiare il metodo di indicizzazione, vai alle ', retrievalSettingTip: 'Per cambiare il metodo di indicizzazione, vai alle ', datasetSettingLink: 'impostazioni della Conoscenza.', + separatorTip: 'Un delimitatore è il carattere utilizzato per separare il testo. \\n\\n e \\n sono delimitatori comunemente usati per separare paragrafi e righe. In combinazione con le virgole (\\n\\n,\\n), i paragrafi verranno segmentati per righe quando superano la lunghezza massima del blocco. È inoltre possibile utilizzare delimitatori speciali definiti dall\'utente (ad es. ***).', }, stepThree: { creationTitle: '🎉 Conoscenza creata', diff --git a/web/i18n/ja-JP/dataset-creation.ts b/web/i18n/ja-JP/dataset-creation.ts index e6d204840a50e..d11a0c94e9bba 100644 --- a/web/i18n/ja-JP/dataset-creation.ts +++ b/web/i18n/ja-JP/dataset-creation.ts @@ -138,6 +138,7 @@ const translation = { indexSettingTip: 'インデックス方法を変更するには、', retrievalSettingTip: '検索方法を変更するには、', datasetSettingLink: 'ナレッジ設定', + separatorTip: '区切り文字は、テキストを区切るために使用される文字です。\\n\\n と \\n は、段落と行を区切るために一般的に使用される区切り記号です。カンマ (\\n\\n,\\n) と組み合わせると、最大チャンク長を超えると、段落は行で区切られます。自分で定義した特別な区切り文字を使用することもできます(例:***)。', }, stepThree: { creationTitle: '🎉 ナレッジが作成されました', diff --git a/web/i18n/ko-KR/dataset-creation.ts b/web/i18n/ko-KR/dataset-creation.ts index e8851acd2fd51..6a4126d8dbe52 100644 --- a/web/i18n/ko-KR/dataset-creation.ts +++ b/web/i18n/ko-KR/dataset-creation.ts @@ -133,6 +133,7 @@ const translation = { datasetSettingLink: '지식 설정', webpageUnit: '페이지', websiteSource: '웹 사이트 전처리', + separatorTip: '구분 기호는 텍스트를 구분하는 데 사용되는 문자입니다. \\n\\n 및 \\n은 단락과 줄을 구분하는 데 일반적으로 사용되는 구분 기호입니다. 
쉼표(\\n\\n,\\n)와 함께 사용하면 최대 청크 길이를 초과할 경우 단락이 줄로 분할됩니다. 직접 정의한 특수 구분 기호(예: ***)를 사용할 수도 있습니다.', }, stepThree: { creationTitle: '🎉 지식이 생성되었습니다', diff --git a/web/i18n/pl-PL/dataset-creation.ts b/web/i18n/pl-PL/dataset-creation.ts index 64e50c6b33af0..f5b36e62eec26 100644 --- a/web/i18n/pl-PL/dataset-creation.ts +++ b/web/i18n/pl-PL/dataset-creation.ts @@ -146,6 +146,7 @@ const translation = { datasetSettingLink: 'ustawień Wiedzy.', webpageUnit: 'Stron', websiteSource: 'Witryna internetowa przetwarzania wstępnego', + separatorTip: 'Ogranicznik to znak używany do oddzielania tekstu. \\n\\n i \\n są powszechnie używanymi ogranicznikami do oddzielania akapitów i wierszy. W połączeniu z przecinkami (\\n\\n,\\n), akapity będą segmentowane wierszami po przekroczeniu maksymalnej długości fragmentu. Możesz również skorzystać ze zdefiniowanych przez siebie specjalnych ograniczników (np. ***).', }, stepThree: { creationTitle: '🎉 Utworzono Wiedzę', diff --git a/web/i18n/pt-BR/dataset-creation.ts b/web/i18n/pt-BR/dataset-creation.ts index 4ab78a50c7706..511f0d5bcbd37 100644 --- a/web/i18n/pt-BR/dataset-creation.ts +++ b/web/i18n/pt-BR/dataset-creation.ts @@ -133,6 +133,7 @@ const translation = { datasetSettingLink: 'configurações do Conhecimento.', websiteSource: 'Site de pré-processamento', webpageUnit: 'Páginas', + separatorTip: 'Um delimitador é o caractere usado para separar o texto. \\n\\n e \\n são delimitadores comumente usados para separar parágrafos e linhas. Combinado com vírgulas (\\n\\n,\\n), os parágrafos serão segmentados por linhas ao exceder o comprimento máximo do bloco. 
Você também pode usar delimitadores especiais definidos por você (por exemplo, ***).', }, stepThree: { creationTitle: '🎉 Conhecimento criado', diff --git a/web/i18n/ro-RO/dataset-creation.ts b/web/i18n/ro-RO/dataset-creation.ts index efe3bb246cc58..4ea0b0475827d 100644 --- a/web/i18n/ro-RO/dataset-creation.ts +++ b/web/i18n/ro-RO/dataset-creation.ts @@ -133,6 +133,7 @@ const translation = { datasetSettingLink: 'setările Cunoștinței.', webpageUnit: 'Pagini', websiteSource: 'Site-ul web de preprocesare', + separatorTip: 'Un delimitator este caracterul folosit pentru a separa textul. \\n\\n și \\n sunt delimitatori utilizați în mod obișnuit pentru separarea paragrafelor și liniilor. Combinate cu virgule (\\n\\n,\\n), paragrafele vor fi segmentate pe linii atunci când depășesc lungimea maximă a bucății. De asemenea, puteți utiliza delimitatori speciali definiți de dumneavoastră (de exemplu, ***).', }, stepThree: { creationTitle: '🎉 Cunoștință creată', diff --git a/web/i18n/ru-RU/dataset-creation.ts b/web/i18n/ru-RU/dataset-creation.ts index c4dce774d818d..c97daeeeceac7 100644 --- a/web/i18n/ru-RU/dataset-creation.ts +++ b/web/i18n/ru-RU/dataset-creation.ts @@ -138,6 +138,7 @@ const translation = { indexSettingTip: 'Чтобы изменить метод индексации, пожалуйста, перейдите в ', retrievalSettingTip: 'Чтобы изменить метод индексации, пожалуйста, перейдите в ', datasetSettingLink: 'настройки базы знаний.', + separatorTip: 'Разделитель — это символ, используемый для разделения текста. \\n\\n и \\n — это часто используемые разделители для разделения абзацев и строк. В сочетании с запятыми (\\n\\n,\\n) абзацы будут сегментированы по строкам, если максимальная длина блока превышает их. 
Вы также можете использовать специальные разделители, определенные вами (например, ***).', }, stepThree: { creationTitle: '🎉 База знаний создана', diff --git a/web/i18n/tr-TR/dataset-creation.ts b/web/i18n/tr-TR/dataset-creation.ts index b26608c39ffeb..c29e3045b8cf1 100644 --- a/web/i18n/tr-TR/dataset-creation.ts +++ b/web/i18n/tr-TR/dataset-creation.ts @@ -138,6 +138,7 @@ const translation = { indexSettingTip: 'Dizin yöntemini değiştirmek için, lütfen', retrievalSettingTip: 'Dizin yöntemini değiştirmek için, lütfen', datasetSettingLink: 'Bilgi ayarlarına gidin.', + separatorTip: 'Sınırlayıcı, metni ayırmak için kullanılan karakterdir. \\n\\n ve \\n, paragrafları ve satırları ayırmak için yaygın olarak kullanılan sınırlayıcılardır. Virgüllerle (\\n\\n,\\n) birleştirildiğinde, paragraflar maksimum öbek uzunluğunu aştığında satırlarla bölünür. Kendiniz tarafından tanımlanan özel sınırlayıcıları da kullanabilirsiniz (örn.', }, stepThree: { creationTitle: '🎉 Bilgi oluşturuldu', diff --git a/web/i18n/uk-UA/dataset-creation.ts b/web/i18n/uk-UA/dataset-creation.ts index e4a38f41f4664..5b2c9503cf1c4 100644 --- a/web/i18n/uk-UA/dataset-creation.ts +++ b/web/i18n/uk-UA/dataset-creation.ts @@ -133,6 +133,7 @@ const translation = { datasetSettingLink: 'Налаштування знань.', webpageUnit: 'Сторінок', websiteSource: 'Веб-сайт попередньої обробки', + separatorTip: 'Роздільник – це символ, який використовується для поділу тексту. \\n\\n та \\n є часто використовуваними роздільниками для відокремлення абзаців та рядків. У поєднанні з комами (\\n\\n,\\n) абзаци будуть розділені лініями, якщо вони перевищують максимальну довжину фрагмента. 
Ви також можете використовувати спеціальні роздільники, визначені вами (наприклад, ***).', }, stepThree: { creationTitle: '🎉 Знання створено', diff --git a/web/i18n/vi-VN/dataset-creation.ts b/web/i18n/vi-VN/dataset-creation.ts index da69020287334..af49575b906c4 100644 --- a/web/i18n/vi-VN/dataset-creation.ts +++ b/web/i18n/vi-VN/dataset-creation.ts @@ -133,6 +133,7 @@ const translation = { datasetSettingLink: 'cài đặt Kiến thức.', websiteSource: 'Trang web tiền xử lý', webpageUnit: 'Trang', + separatorTip: 'Dấu phân cách là ký tự được sử dụng để phân tách văn bản. \\n\\n và \\n là dấu phân cách thường được sử dụng để tách các đoạn văn và dòng. Kết hợp với dấu phẩy (\\n\\n,\\n), các đoạn văn sẽ được phân đoạn theo các dòng khi vượt quá độ dài đoạn tối đa. Bạn cũng có thể sử dụng dấu phân cách đặc biệt do chính bạn xác định (ví dụ: ***).', }, stepThree: { creationTitle: '🎉 Kiến thức đã được tạo', diff --git a/web/i18n/zh-Hant/dataset-creation.ts b/web/i18n/zh-Hant/dataset-creation.ts index fd810d41c16bb..73a57db6a06c4 100644 --- a/web/i18n/zh-Hant/dataset-creation.ts +++ b/web/i18n/zh-Hant/dataset-creation.ts @@ -133,6 +133,7 @@ const translation = { datasetSettingLink: '知識庫設定。', websiteSource: '預處理網站', webpageUnit: '頁面', + separatorTip: '分隔符是用於分隔文字的字元。\\n\\n 和 \\n 是分隔段落和行的常用分隔符。與逗號 (\\n\\n,\\n) 組合使用時,當超過最大區塊長度時,段落將按行分段。您也可以使用自定義的特殊分隔符(例如 ***)。', }, stepThree: { creationTitle: '🎉 知識庫已建立', From e75c33a56166c0825faa1d07d955086a4358d125 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=96=B9=E7=A8=8B?= <1787003204@qq.com> Date: Sat, 21 Sep 2024 17:30:58 +0800 Subject: [PATCH 004/235] Enhance Readme Documentation to Clarify the Importance of Celery Service (#8558) --- api/README.md | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/api/README.md b/api/README.md index 70ca2e86a85a7..bab33f9293a31 100644 --- a/api/README.md +++ b/api/README.md @@ -65,14 +65,12 @@ 8. Start Dify [web](../web) service. 9. 
Setup your application by visiting `http://localhost:3000`... -10. If you need to debug local async processing, please start the worker service. +10. If you need to handle and debug the async tasks (e.g. dataset importing and documents indexing), please start the worker service. ```bash poetry run python -m celery -A app.celery worker -P gevent -c 1 --loglevel INFO -Q dataset,generation,mail,ops_trace,app_deletion ``` - The started celery app handles the async tasks, e.g. dataset importing and documents indexing. - ## Testing 1. Install dependencies for both the backend and the test environment From b3cb97f0adff4b670d95bc5146d15c12382e1ead Mon Sep 17 00:00:00 2001 From: WalterMitty Date: Sat, 21 Sep 2024 17:31:49 +0800 Subject: [PATCH 005/235] docs: Update ssrf_proxy related doc link in docker-compose file (#8516) --- docker-legacy/docker-compose.middleware.yaml | 2 +- docker-legacy/docker-compose.yaml | 2 +- docker/docker-compose.middleware.yaml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docker-legacy/docker-compose.middleware.yaml b/docker-legacy/docker-compose.middleware.yaml index fadbb3e6084c2..da54fe33fdd1a 100644 --- a/docker-legacy/docker-compose.middleware.yaml +++ b/docker-legacy/docker-compose.middleware.yaml @@ -73,7 +73,7 @@ services: # ssrf_proxy server # for more information, please refer to - # https://docs.dify.ai/learn-more/faq/self-host-faq#id-18.-why-is-ssrf_proxy-needed + # https://docs.dify.ai/learn-more/faq/install-faq#id-18.-why-is-ssrf_proxy-needed ssrf_proxy: image: ubuntu/squid:latest restart: always diff --git a/docker-legacy/docker-compose.yaml b/docker-legacy/docker-compose.yaml index 513915a1f1591..1636bb6a214bd 100644 --- a/docker-legacy/docker-compose.yaml +++ b/docker-legacy/docker-compose.yaml @@ -500,7 +500,7 @@ services: # ssrf_proxy server # for more information, please refer to - # https://docs.dify.ai/learn-more/faq/self-host-faq#id-18.-why-is-ssrf_proxy-needed + # 
https://docs.dify.ai/learn-more/faq/install-faq#id-18.-why-is-ssrf_proxy-needed ssrf_proxy: image: ubuntu/squid:latest restart: always diff --git a/docker/docker-compose.middleware.yaml b/docker/docker-compose.middleware.yaml index 251c62fee103c..d7900def73f1e 100644 --- a/docker/docker-compose.middleware.yaml +++ b/docker/docker-compose.middleware.yaml @@ -63,7 +63,7 @@ services: # ssrf_proxy server # for more information, please refer to - # https://docs.dify.ai/learn-more/faq/self-host-faq#id-18.-why-is-ssrf_proxy-needed + # https://docs.dify.ai/learn-more/faq/install-faq#id-18.-why-is-ssrf_proxy-needed ssrf_proxy: image: ubuntu/squid:latest restart: always From 5541248264fdf69392917a25924786a1b834e91e Mon Sep 17 00:00:00 2001 From: Hongbin <61153998+BingGeX@users.noreply.github.com> Date: Sat, 21 Sep 2024 17:33:15 +0800 Subject: [PATCH 006/235] =?UTF-8?q?Update=20the=20PerfXCloud=20provider=20?= =?UTF-8?q?model=20list=EF=BC=8CUpdate=20PerfXCloudProvider=20validate=5Fp?= =?UTF-8?q?rovider=5Fcredentials=20method.=20(#8587)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: xhb <466010723@qq.com> --- .../perfxcloud/llm/Llama3-Chinese_v2.yaml | 1 + .../Meta-Llama-3-70B-Instruct-GPTQ-Int4.yaml | 1 + .../llm/Meta-Llama-3-8B-Instruct.yaml | 1 + ...Meta-Llama-3.1-405B-Instruct-AWQ-INT4.yaml | 1 + .../llm/Qwen1.5-72B-Chat-GPTQ-Int4.yaml | 1 + .../perfxcloud/llm/Qwen1.5-7B.yaml | 1 + .../llm/Qwen2-72B-Instruct-AWQ-int4.yaml | 61 ++++++++++++++++++ .../llm/Qwen2-72B-Instruct-GPTQ-Int4.yaml | 1 + .../perfxcloud/llm/Qwen2-7B-Instruct.yaml | 63 +++++++++++++++++++ .../perfxcloud/llm/Qwen2-7B.yaml | 1 + .../perfxcloud/llm/Qwen2.5-72B-Instruct.yaml | 61 ++++++++++++++++++ .../perfxcloud/llm/Qwen2.5-7B-Instruct.yaml | 61 ++++++++++++++++++ .../llm/Reflection-Llama-3.1-70B.yaml | 61 ++++++++++++++++++ .../perfxcloud/llm/Yi-1_5-9B-Chat-16K.yaml | 61 ++++++++++++++++++ .../perfxcloud/llm/Yi-Coder-1.5B-Chat.yaml | 61 
++++++++++++++++++ .../perfxcloud/llm/Yi-Coder-9B-Chat.yaml | 61 ++++++++++++++++++ .../perfxcloud/llm/_position.yaml | 19 ++++-- .../perfxcloud/llm/chatglm3-6b.yaml | 1 + .../perfxcloud/llm/deepseek-v2-chat.yaml | 1 + .../perfxcloud/llm/deepseek-v2-lite-chat.yaml | 1 + .../model_providers/perfxcloud/perfxcloud.py | 20 +----- .../text_embedding/gte-Qwen2-7B-instruct.yaml | 4 ++ 22 files changed, 520 insertions(+), 24 deletions(-) create mode 100644 api/core/model_runtime/model_providers/perfxcloud/llm/Qwen2-72B-Instruct-AWQ-int4.yaml create mode 100644 api/core/model_runtime/model_providers/perfxcloud/llm/Qwen2-7B-Instruct.yaml create mode 100644 api/core/model_runtime/model_providers/perfxcloud/llm/Qwen2.5-72B-Instruct.yaml create mode 100644 api/core/model_runtime/model_providers/perfxcloud/llm/Qwen2.5-7B-Instruct.yaml create mode 100644 api/core/model_runtime/model_providers/perfxcloud/llm/Reflection-Llama-3.1-70B.yaml create mode 100644 api/core/model_runtime/model_providers/perfxcloud/llm/Yi-1_5-9B-Chat-16K.yaml create mode 100644 api/core/model_runtime/model_providers/perfxcloud/llm/Yi-Coder-1.5B-Chat.yaml create mode 100644 api/core/model_runtime/model_providers/perfxcloud/llm/Yi-Coder-9B-Chat.yaml create mode 100644 api/core/model_runtime/model_providers/perfxcloud/text_embedding/gte-Qwen2-7B-instruct.yaml diff --git a/api/core/model_runtime/model_providers/perfxcloud/llm/Llama3-Chinese_v2.yaml b/api/core/model_runtime/model_providers/perfxcloud/llm/Llama3-Chinese_v2.yaml index 87712874b9f5c..bf91468fcf56b 100644 --- a/api/core/model_runtime/model_providers/perfxcloud/llm/Llama3-Chinese_v2.yaml +++ b/api/core/model_runtime/model_providers/perfxcloud/llm/Llama3-Chinese_v2.yaml @@ -59,3 +59,4 @@ pricing: output: "0.000" unit: "0.000" currency: RMB +deprecated: true diff --git a/api/core/model_runtime/model_providers/perfxcloud/llm/Meta-Llama-3-70B-Instruct-GPTQ-Int4.yaml 
b/api/core/model_runtime/model_providers/perfxcloud/llm/Meta-Llama-3-70B-Instruct-GPTQ-Int4.yaml index f16f3de60b8d3..781b837e8ecee 100644 --- a/api/core/model_runtime/model_providers/perfxcloud/llm/Meta-Llama-3-70B-Instruct-GPTQ-Int4.yaml +++ b/api/core/model_runtime/model_providers/perfxcloud/llm/Meta-Llama-3-70B-Instruct-GPTQ-Int4.yaml @@ -59,3 +59,4 @@ pricing: output: "0.000" unit: "0.000" currency: RMB +deprecated: true diff --git a/api/core/model_runtime/model_providers/perfxcloud/llm/Meta-Llama-3-8B-Instruct.yaml b/api/core/model_runtime/model_providers/perfxcloud/llm/Meta-Llama-3-8B-Instruct.yaml index 21267c240bc0e..67210e9020fe6 100644 --- a/api/core/model_runtime/model_providers/perfxcloud/llm/Meta-Llama-3-8B-Instruct.yaml +++ b/api/core/model_runtime/model_providers/perfxcloud/llm/Meta-Llama-3-8B-Instruct.yaml @@ -59,3 +59,4 @@ pricing: output: "0.000" unit: "0.000" currency: RMB +deprecated: true diff --git a/api/core/model_runtime/model_providers/perfxcloud/llm/Meta-Llama-3.1-405B-Instruct-AWQ-INT4.yaml b/api/core/model_runtime/model_providers/perfxcloud/llm/Meta-Llama-3.1-405B-Instruct-AWQ-INT4.yaml index 80c7ec40f22cb..482632ff0673d 100644 --- a/api/core/model_runtime/model_providers/perfxcloud/llm/Meta-Llama-3.1-405B-Instruct-AWQ-INT4.yaml +++ b/api/core/model_runtime/model_providers/perfxcloud/llm/Meta-Llama-3.1-405B-Instruct-AWQ-INT4.yaml @@ -59,3 +59,4 @@ pricing: output: "0.000" unit: "0.000" currency: RMB +deprecated: true diff --git a/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen1.5-72B-Chat-GPTQ-Int4.yaml b/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen1.5-72B-Chat-GPTQ-Int4.yaml index 841dd97f353b0..ddb6fd977c5a5 100644 --- a/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen1.5-72B-Chat-GPTQ-Int4.yaml +++ b/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen1.5-72B-Chat-GPTQ-Int4.yaml @@ -59,3 +59,4 @@ pricing: output: "0.000" unit: "0.000" currency: RMB +deprecated: true diff --git 
a/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen1.5-7B.yaml b/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen1.5-7B.yaml index 33d5d12b223d2..024c79dbcfd76 100644 --- a/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen1.5-7B.yaml +++ b/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen1.5-7B.yaml @@ -59,3 +59,4 @@ pricing: output: "0.000" unit: "0.000" currency: RMB +deprecated: true diff --git a/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen2-72B-Instruct-AWQ-int4.yaml b/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen2-72B-Instruct-AWQ-int4.yaml new file mode 100644 index 0000000000000..94f661f40d5c9 --- /dev/null +++ b/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen2-72B-Instruct-AWQ-int4.yaml @@ -0,0 +1,61 @@ +model: Qwen2-72B-Instruct-AWQ-int4 +label: + en_US: Qwen2-72B-Instruct-AWQ-int4 +model_type: llm +features: + - agent-thought +model_properties: + mode: chat + context_size: 32768 +parameter_rules: + - name: temperature + use_template: temperature + type: float + default: 0.5 + min: 0.0 + max: 2.0 + help: + zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 + en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. 
+ - name: max_tokens + use_template: max_tokens + type: int + default: 600 + min: 1 + max: 1248 + help: + zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 + en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. + - name: top_p + use_template: top_p + type: float + default: 0.8 + min: 0.1 + max: 0.9 + help: + zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 + en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. + - name: top_k + type: int + min: 0 + max: 99 + label: + zh_Hans: 取样数量 + en_US: Top k + help: + zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 + en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. + - name: repetition_penalty + required: false + type: float + default: 1.1 + label: + en_US: Repetition penalty + help: + zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 + en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. 
+pricing: + input: "0.000" + output: "0.000" + unit: "0.000" + currency: RMB diff --git a/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen2-72B-Instruct-GPTQ-Int4.yaml b/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen2-72B-Instruct-GPTQ-Int4.yaml index 62255cc7d2e56..a06f8d5ab18f4 100644 --- a/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen2-72B-Instruct-GPTQ-Int4.yaml +++ b/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen2-72B-Instruct-GPTQ-Int4.yaml @@ -61,3 +61,4 @@ pricing: output: "0.000" unit: "0.000" currency: RMB +deprecated: true diff --git a/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen2-7B-Instruct.yaml b/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen2-7B-Instruct.yaml new file mode 100644 index 0000000000000..4369411399e72 --- /dev/null +++ b/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen2-7B-Instruct.yaml @@ -0,0 +1,63 @@ +model: Qwen2-7B-Instruct +label: + en_US: Qwen2-7B-Instruct +model_type: llm +features: + - multi-tool-call + - agent-thought + - stream-tool-call +model_properties: + mode: completion + context_size: 32768 +parameter_rules: + - name: temperature + use_template: temperature + type: float + default: 0.3 + min: 0.0 + max: 2.0 + help: + zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 + en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. 
, the generated results are more certain. + - name: max_tokens + use_template: max_tokens + type: int + default: 600 + min: 1 + max: 2000 + help: + zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 + en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. + - name: top_p + use_template: top_p + type: float + default: 0.8 + min: 0.1 + max: 0.9 + help: + zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 + en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. + - name: top_k + type: int + min: 0 + max: 99 + label: + zh_Hans: 取样数量 + en_US: Top k + help: + zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 + en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. + - name: repetition_penalty + required: false + type: float + default: 1.1 + label: + en_US: Repetition penalty + help: + zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 + en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. 
+pricing: + input: "0.000" + output: "0.000" + unit: "0.000" + currency: RMB diff --git a/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen2-7B.yaml b/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen2-7B.yaml index 2f3f1f0225d71..d549ecd227dfb 100644 --- a/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen2-7B.yaml +++ b/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen2-7B.yaml @@ -61,3 +61,4 @@ pricing: output: "0.000" unit: "0.000" currency: RMB +deprecated: true diff --git a/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen2.5-72B-Instruct.yaml b/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen2.5-72B-Instruct.yaml new file mode 100644 index 0000000000000..15cbf01f1f66d --- /dev/null +++ b/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen2.5-72B-Instruct.yaml @@ -0,0 +1,61 @@ +model: Qwen2.5-72B-Instruct +label: + en_US: Qwen2.5-72B-Instruct +model_type: llm +features: + - agent-thought +model_properties: + mode: chat + context_size: 30720 +parameter_rules: + - name: temperature + use_template: temperature + type: float + default: 0.5 + min: 0.0 + max: 2.0 + help: + zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 + en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. 
+ - name: max_tokens + use_template: max_tokens + type: int + default: 600 + min: 1 + max: 1248 + help: + zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 + en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. + - name: top_p + use_template: top_p + type: float + default: 0.8 + min: 0.1 + max: 0.9 + help: + zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 + en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. + - name: top_k + type: int + min: 0 + max: 99 + label: + zh_Hans: 取样数量 + en_US: Top k + help: + zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 + en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. + - name: repetition_penalty + required: false + type: float + default: 1.1 + label: + en_US: Repetition penalty + help: + zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 + en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. 
+pricing: + input: "0.000" + output: "0.000" + unit: "0.000" + currency: RMB diff --git a/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen2.5-7B-Instruct.yaml b/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen2.5-7B-Instruct.yaml new file mode 100644 index 0000000000000..dadc8f8f3275e --- /dev/null +++ b/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen2.5-7B-Instruct.yaml @@ -0,0 +1,61 @@ +model: Qwen2.5-7B-Instruct +label: + en_US: Qwen2.5-7B-Instruct +model_type: llm +features: + - agent-thought +model_properties: + mode: chat + context_size: 8192 +parameter_rules: + - name: temperature + use_template: temperature + type: float + default: 0.5 + min: 0.0 + max: 2.0 + help: + zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 + en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. + - name: max_tokens + use_template: max_tokens + type: int + default: 600 + min: 1 + max: 1248 + help: + zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 + en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. 
+ - name: top_p + use_template: top_p + type: float + default: 0.8 + min: 0.1 + max: 0.9 + help: + zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 + en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. + - name: top_k + type: int + min: 0 + max: 99 + label: + zh_Hans: 取样数量 + en_US: Top k + help: + zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 + en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. + - name: repetition_penalty + required: false + type: float + default: 1.1 + label: + en_US: Repetition penalty + help: + zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 + en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. 
+pricing: + input: "0.000" + output: "0.000" + unit: "0.000" + currency: RMB diff --git a/api/core/model_runtime/model_providers/perfxcloud/llm/Reflection-Llama-3.1-70B.yaml b/api/core/model_runtime/model_providers/perfxcloud/llm/Reflection-Llama-3.1-70B.yaml new file mode 100644 index 0000000000000..649be20b48abe --- /dev/null +++ b/api/core/model_runtime/model_providers/perfxcloud/llm/Reflection-Llama-3.1-70B.yaml @@ -0,0 +1,61 @@ +model: Reflection-Llama-3.1-70B +label: + en_US: Reflection-Llama-3.1-70B +model_type: llm +features: + - agent-thought +model_properties: + mode: chat + context_size: 10240 +parameter_rules: + - name: temperature + use_template: temperature + type: float + default: 0.5 + min: 0.0 + max: 2.0 + help: + zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 + en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. + - name: max_tokens + use_template: max_tokens + type: int + default: 600 + min: 1 + max: 1248 + help: + zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 + en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. 
+ - name: top_p + use_template: top_p + type: float + default: 0.8 + min: 0.1 + max: 0.9 + help: + zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 + en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. + - name: top_k + type: int + min: 0 + max: 99 + label: + zh_Hans: 取样数量 + en_US: Top k + help: + zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 + en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. + - name: repetition_penalty + required: false + type: float + default: 1.1 + label: + en_US: Repetition penalty + help: + zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 + en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. 
+pricing: + input: "0.000" + output: "0.000" + unit: "0.000" + currency: RMB diff --git a/api/core/model_runtime/model_providers/perfxcloud/llm/Yi-1_5-9B-Chat-16K.yaml b/api/core/model_runtime/model_providers/perfxcloud/llm/Yi-1_5-9B-Chat-16K.yaml new file mode 100644 index 0000000000000..92eae6804f61f --- /dev/null +++ b/api/core/model_runtime/model_providers/perfxcloud/llm/Yi-1_5-9B-Chat-16K.yaml @@ -0,0 +1,61 @@ +model: Yi-1_5-9B-Chat-16K +label: + en_US: Yi-1_5-9B-Chat-16K +model_type: llm +features: + - agent-thought +model_properties: + mode: chat + context_size: 16384 +parameter_rules: + - name: temperature + use_template: temperature + type: float + default: 0.5 + min: 0.0 + max: 2.0 + help: + zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 + en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. + - name: max_tokens + use_template: max_tokens + type: int + default: 600 + min: 1 + max: 1248 + help: + zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 + en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. 
+ - name: top_p + use_template: top_p + type: float + default: 0.8 + min: 0.1 + max: 0.9 + help: + zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 + en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. + - name: top_k + type: int + min: 0 + max: 99 + label: + zh_Hans: 取样数量 + en_US: Top k + help: + zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 + en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. + - name: repetition_penalty + required: false + type: float + default: 1.1 + label: + en_US: Repetition penalty + help: + zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 + en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. 
+pricing: + input: "0.000" + output: "0.000" + unit: "0.000" + currency: RMB diff --git a/api/core/model_runtime/model_providers/perfxcloud/llm/Yi-Coder-1.5B-Chat.yaml b/api/core/model_runtime/model_providers/perfxcloud/llm/Yi-Coder-1.5B-Chat.yaml new file mode 100644 index 0000000000000..0e21ce148c39b --- /dev/null +++ b/api/core/model_runtime/model_providers/perfxcloud/llm/Yi-Coder-1.5B-Chat.yaml @@ -0,0 +1,61 @@ +model: Yi-Coder-1.5B-Chat +label: + en_US: Yi-Coder-1.5B-Chat +model_type: llm +features: + - agent-thought +model_properties: + mode: chat + context_size: 20480 +parameter_rules: + - name: temperature + use_template: temperature + type: float + default: 0.5 + min: 0.0 + max: 2.0 + help: + zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 + en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. + - name: max_tokens + use_template: max_tokens + type: int + default: 600 + min: 1 + max: 1248 + help: + zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 + en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. 
+ - name: top_p + use_template: top_p + type: float + default: 0.8 + min: 0.1 + max: 0.9 + help: + zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 + en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. + - name: top_k + type: int + min: 0 + max: 99 + label: + zh_Hans: 取样数量 + en_US: Top k + help: + zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 + en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. + - name: repetition_penalty + required: false + type: float + default: 1.1 + label: + en_US: Repetition penalty + help: + zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 + en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. 
+pricing: + input: "0.000" + output: "0.000" + unit: "0.000" + currency: RMB diff --git a/api/core/model_runtime/model_providers/perfxcloud/llm/Yi-Coder-9B-Chat.yaml b/api/core/model_runtime/model_providers/perfxcloud/llm/Yi-Coder-9B-Chat.yaml new file mode 100644 index 0000000000000..23b0841ce4ed6 --- /dev/null +++ b/api/core/model_runtime/model_providers/perfxcloud/llm/Yi-Coder-9B-Chat.yaml @@ -0,0 +1,61 @@ +model: Yi-Coder-9B-Chat +label: + en_US: Yi-Coder-9B-Chat +model_type: llm +features: + - agent-thought +model_properties: + mode: chat + context_size: 20480 +parameter_rules: + - name: temperature + use_template: temperature + type: float + default: 0.5 + min: 0.0 + max: 2.0 + help: + zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 + en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. + - name: max_tokens + use_template: max_tokens + type: int + default: 600 + min: 1 + max: 1248 + help: + zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 + en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. 
+ - name: top_p + use_template: top_p + type: float + default: 0.8 + min: 0.1 + max: 0.9 + help: + zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 + en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. + - name: top_k + type: int + min: 0 + max: 99 + label: + zh_Hans: 取样数量 + en_US: Top k + help: + zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 + en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. + - name: repetition_penalty + required: false + type: float + default: 1.1 + label: + en_US: Repetition penalty + help: + zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 + en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. 
+pricing: + input: "0.000" + output: "0.000" + unit: "0.000" + currency: RMB diff --git a/api/core/model_runtime/model_providers/perfxcloud/llm/_position.yaml b/api/core/model_runtime/model_providers/perfxcloud/llm/_position.yaml index 2c9eac0e49a4d..37bf400f1e347 100644 --- a/api/core/model_runtime/model_providers/perfxcloud/llm/_position.yaml +++ b/api/core/model_runtime/model_providers/perfxcloud/llm/_position.yaml @@ -1,15 +1,24 @@ -- Meta-Llama-3.1-405B-Instruct-AWQ-INT4 +- Qwen2.5-72B-Instruct +- Qwen2.5-7B-Instruct +- Yi-Coder-1.5B-Chat +- Yi-Coder-9B-Chat +- Qwen2-72B-Instruct-AWQ-int4 +- Yi-1_5-9B-Chat-16K +- Qwen2-7B-Instruct +- Reflection-Llama-3.1-70B +- Qwen2-72B-Instruct - Meta-Llama-3.1-8B-Instruct + +- Meta-Llama-3.1-405B-Instruct-AWQ-INT4 - Meta-Llama-3-70B-Instruct-GPTQ-Int4 +- chatglm3-6b - Meta-Llama-3-8B-Instruct +- Llama3-Chinese_v2 +- deepseek-v2-lite-chat - Qwen2-72B-Instruct-GPTQ-Int4 -- Qwen2-72B-Instruct - Qwen2-7B - Qwen-14B-Chat-Int4 - Qwen1.5-72B-Chat-GPTQ-Int4 - Qwen1.5-7B - Qwen1.5-110B-Chat-GPTQ-Int4 - deepseek-v2-chat -- deepseek-v2-lite-chat -- Llama3-Chinese_v2 -- chatglm3-6b diff --git a/api/core/model_runtime/model_providers/perfxcloud/llm/chatglm3-6b.yaml b/api/core/model_runtime/model_providers/perfxcloud/llm/chatglm3-6b.yaml index f9c26b7f9002c..75d80f784a71f 100644 --- a/api/core/model_runtime/model_providers/perfxcloud/llm/chatglm3-6b.yaml +++ b/api/core/model_runtime/model_providers/perfxcloud/llm/chatglm3-6b.yaml @@ -59,3 +59,4 @@ pricing: output: "0.000" unit: "0.000" currency: RMB +deprecated: true diff --git a/api/core/model_runtime/model_providers/perfxcloud/llm/deepseek-v2-chat.yaml b/api/core/model_runtime/model_providers/perfxcloud/llm/deepseek-v2-chat.yaml index 078922ef95b08..fa9a7b7175e9d 100644 --- a/api/core/model_runtime/model_providers/perfxcloud/llm/deepseek-v2-chat.yaml +++ b/api/core/model_runtime/model_providers/perfxcloud/llm/deepseek-v2-chat.yaml @@ -59,3 +59,4 @@ pricing: output: "0.000" unit: 
"0.000" currency: RMB +deprecated: true diff --git a/api/core/model_runtime/model_providers/perfxcloud/llm/deepseek-v2-lite-chat.yaml b/api/core/model_runtime/model_providers/perfxcloud/llm/deepseek-v2-lite-chat.yaml index 4ff3af7b51776..75a26d25051ae 100644 --- a/api/core/model_runtime/model_providers/perfxcloud/llm/deepseek-v2-lite-chat.yaml +++ b/api/core/model_runtime/model_providers/perfxcloud/llm/deepseek-v2-lite-chat.yaml @@ -59,3 +59,4 @@ pricing: output: "0.000" unit: "0.000" currency: RMB +deprecated: true diff --git a/api/core/model_runtime/model_providers/perfxcloud/perfxcloud.py b/api/core/model_runtime/model_providers/perfxcloud/perfxcloud.py index 450d22fb75943..9a4ead031d018 100644 --- a/api/core/model_runtime/model_providers/perfxcloud/perfxcloud.py +++ b/api/core/model_runtime/model_providers/perfxcloud/perfxcloud.py @@ -1,7 +1,5 @@ import logging -from core.model_runtime.entities.model_entities import ModelType -from core.model_runtime.errors.validate import CredentialsValidateFailedError from core.model_runtime.model_providers.__base.model_provider import ModelProvider logger = logging.getLogger(__name__) @@ -9,20 +7,4 @@ class PerfXCloudProvider(ModelProvider): def validate_provider_credentials(self, credentials: dict) -> None: - """ - Validate provider credentials - if validate failed, raise exception - - :param credentials: provider credentials, credentials form defined in `provider_credential_schema`. 
- """ - try: - model_instance = self.get_model_instance(ModelType.LLM) - - # Use `Qwen2_72B_Chat_GPTQ_Int4` model for validate, - # no matter what model you pass in, text completion model or chat model - model_instance.validate_credentials(model="Qwen2-72B-Instruct-GPTQ-Int4", credentials=credentials) - except CredentialsValidateFailedError as ex: - raise ex - except Exception as ex: - logger.exception(f"{self.get_provider_schema().provider} credentials validate failed") - raise ex + pass diff --git a/api/core/model_runtime/model_providers/perfxcloud/text_embedding/gte-Qwen2-7B-instruct.yaml b/api/core/model_runtime/model_providers/perfxcloud/text_embedding/gte-Qwen2-7B-instruct.yaml new file mode 100644 index 0000000000000..03db0d8bce850 --- /dev/null +++ b/api/core/model_runtime/model_providers/perfxcloud/text_embedding/gte-Qwen2-7B-instruct.yaml @@ -0,0 +1,4 @@ +model: gte-Qwen2-7B-instruct +model_type: text-embedding +model_properties: + context_size: 2048 From 5ddb601e4345a5099668618d5e0e7e6c3dd56c79 Mon Sep 17 00:00:00 2001 From: AAEE86 <33052466+AAEE86@users.noreply.github.com> Date: Sat, 21 Sep 2024 18:08:07 +0800 Subject: [PATCH 007/235] add MixtralAI Model (#8517) --- .../mistralai/llm/_position.yaml | 5 ++ .../mistralai/llm/codestral-latest.yaml | 51 +++++++++++++++++++ .../mistralai/llm/mistral-embed.yaml | 51 +++++++++++++++++++ .../mistralai/llm/open-codestral-mamba.yaml | 51 +++++++++++++++++++ .../mistralai/llm/open-mistral-nemo.yaml | 51 +++++++++++++++++++ .../mistralai/llm/pixtral-12b-2409.yaml | 51 +++++++++++++++++++ 6 files changed, 260 insertions(+) create mode 100644 api/core/model_runtime/model_providers/mistralai/llm/codestral-latest.yaml create mode 100644 api/core/model_runtime/model_providers/mistralai/llm/mistral-embed.yaml create mode 100644 api/core/model_runtime/model_providers/mistralai/llm/open-codestral-mamba.yaml create mode 100644 api/core/model_runtime/model_providers/mistralai/llm/open-mistral-nemo.yaml create mode 100644 
api/core/model_runtime/model_providers/mistralai/llm/pixtral-12b-2409.yaml diff --git a/api/core/model_runtime/model_providers/mistralai/llm/_position.yaml b/api/core/model_runtime/model_providers/mistralai/llm/_position.yaml index 751003d71e888..bdb06b7fff637 100644 --- a/api/core/model_runtime/model_providers/mistralai/llm/_position.yaml +++ b/api/core/model_runtime/model_providers/mistralai/llm/_position.yaml @@ -1,3 +1,8 @@ +- pixtral-12b-2409 +- codestral-latest +- mistral-embed +- open-mistral-nemo +- open-codestral-mamba - open-mistral-7b - open-mixtral-8x7b - open-mixtral-8x22b diff --git a/api/core/model_runtime/model_providers/mistralai/llm/codestral-latest.yaml b/api/core/model_runtime/model_providers/mistralai/llm/codestral-latest.yaml new file mode 100644 index 0000000000000..5f1260233fe97 --- /dev/null +++ b/api/core/model_runtime/model_providers/mistralai/llm/codestral-latest.yaml @@ -0,0 +1,51 @@ +model: codestral-latest +label: + zh_Hans: codestral-latest + en_US: codestral-latest +model_type: llm +features: + - agent-thought +model_properties: + mode: chat + context_size: 32000 +parameter_rules: + - name: temperature + use_template: temperature + default: 0.7 + min: 0 + max: 1 + - name: top_p + use_template: top_p + default: 1 + min: 0 + max: 1 + - name: max_tokens + use_template: max_tokens + default: 1024 + min: 1 + max: 4096 + - name: safe_prompt + default: false + type: boolean + help: + en_US: Whether to inject a safety prompt before all conversations. + zh_Hans: 是否开启提示词审查 + label: + en_US: SafePrompt + zh_Hans: 提示词审查 + - name: random_seed + type: int + help: + en_US: The seed to use for random sampling. If set, different calls will generate deterministic results. 
+ zh_Hans: 当开启随机数种子以后,你可以通过指定一个固定的种子来使得回答结果更加稳定 + label: + en_US: RandomSeed + zh_Hans: 随机数种子 + default: 0 + min: 0 + max: 2147483647 +pricing: + input: '0.008' + output: '0.024' + unit: '0.001' + currency: USD diff --git a/api/core/model_runtime/model_providers/mistralai/llm/mistral-embed.yaml b/api/core/model_runtime/model_providers/mistralai/llm/mistral-embed.yaml new file mode 100644 index 0000000000000..d759103d08a94 --- /dev/null +++ b/api/core/model_runtime/model_providers/mistralai/llm/mistral-embed.yaml @@ -0,0 +1,51 @@ +model: mistral-embed +label: + zh_Hans: mistral-embed + en_US: mistral-embed +model_type: llm +features: + - agent-thought +model_properties: + mode: chat + context_size: 8192 +parameter_rules: + - name: temperature + use_template: temperature + default: 0.7 + min: 0 + max: 1 + - name: top_p + use_template: top_p + default: 1 + min: 0 + max: 1 + - name: max_tokens + use_template: max_tokens + default: 1024 + min: 1 + max: 1024 + - name: safe_prompt + default: false + type: boolean + help: + en_US: Whether to inject a safety prompt before all conversations. + zh_Hans: 是否开启提示词审查 + label: + en_US: SafePrompt + zh_Hans: 提示词审查 + - name: random_seed + type: int + help: + en_US: The seed to use for random sampling. If set, different calls will generate deterministic results. 
+ zh_Hans: 当开启随机数种子以后,你可以通过指定一个固定的种子来使得回答结果更加稳定 + label: + en_US: RandomSeed + zh_Hans: 随机数种子 + default: 0 + min: 0 + max: 2147483647 +pricing: + input: '0.008' + output: '0.024' + unit: '0.001' + currency: USD diff --git a/api/core/model_runtime/model_providers/mistralai/llm/open-codestral-mamba.yaml b/api/core/model_runtime/model_providers/mistralai/llm/open-codestral-mamba.yaml new file mode 100644 index 0000000000000..d7ffb9ea02084 --- /dev/null +++ b/api/core/model_runtime/model_providers/mistralai/llm/open-codestral-mamba.yaml @@ -0,0 +1,51 @@ +model: open-codestral-mamba +label: + zh_Hans: open-codestral-mamba + en_US: open-codestral-mamba +model_type: llm +features: + - agent-thought +model_properties: + mode: chat + context_size: 256000 +parameter_rules: + - name: temperature + use_template: temperature + default: 0.7 + min: 0 + max: 1 + - name: top_p + use_template: top_p + default: 1 + min: 0 + max: 1 + - name: max_tokens + use_template: max_tokens + default: 1024 + min: 1 + max: 16384 + - name: safe_prompt + default: false + type: boolean + help: + en_US: Whether to inject a safety prompt before all conversations. + zh_Hans: 是否开启提示词审查 + label: + en_US: SafePrompt + zh_Hans: 提示词审查 + - name: random_seed + type: int + help: + en_US: The seed to use for random sampling. If set, different calls will generate deterministic results. 
+ zh_Hans: 当开启随机数种子以后,你可以通过指定一个固定的种子来使得回答结果更加稳定 + label: + en_US: RandomSeed + zh_Hans: 随机数种子 + default: 0 + min: 0 + max: 2147483647 +pricing: + input: '0.008' + output: '0.024' + unit: '0.001' + currency: USD diff --git a/api/core/model_runtime/model_providers/mistralai/llm/open-mistral-nemo.yaml b/api/core/model_runtime/model_providers/mistralai/llm/open-mistral-nemo.yaml new file mode 100644 index 0000000000000..dcda4fbce7e82 --- /dev/null +++ b/api/core/model_runtime/model_providers/mistralai/llm/open-mistral-nemo.yaml @@ -0,0 +1,51 @@ +model: open-mistral-nemo +label: + zh_Hans: open-mistral-nemo + en_US: open-mistral-nemo +model_type: llm +features: + - agent-thought +model_properties: + mode: chat + context_size: 128000 +parameter_rules: + - name: temperature + use_template: temperature + default: 0.7 + min: 0 + max: 1 + - name: top_p + use_template: top_p + default: 1 + min: 0 + max: 1 + - name: max_tokens + use_template: max_tokens + default: 1024 + min: 1 + max: 8192 + - name: safe_prompt + default: false + type: boolean + help: + en_US: Whether to inject a safety prompt before all conversations. + zh_Hans: 是否开启提示词审查 + label: + en_US: SafePrompt + zh_Hans: 提示词审查 + - name: random_seed + type: int + help: + en_US: The seed to use for random sampling. If set, different calls will generate deterministic results. 
+ zh_Hans: 当开启随机数种子以后,你可以通过指定一个固定的种子来使得回答结果更加稳定 + label: + en_US: RandomSeed + zh_Hans: 随机数种子 + default: 0 + min: 0 + max: 2147483647 +pricing: + input: '0.008' + output: '0.024' + unit: '0.001' + currency: USD diff --git a/api/core/model_runtime/model_providers/mistralai/llm/pixtral-12b-2409.yaml b/api/core/model_runtime/model_providers/mistralai/llm/pixtral-12b-2409.yaml new file mode 100644 index 0000000000000..0b002b49cac8e --- /dev/null +++ b/api/core/model_runtime/model_providers/mistralai/llm/pixtral-12b-2409.yaml @@ -0,0 +1,51 @@ +model: pixtral-12b-2409 +label: + zh_Hans: pixtral-12b-2409 + en_US: pixtral-12b-2409 +model_type: llm +features: + - agent-thought +model_properties: + mode: chat + context_size: 128000 +parameter_rules: + - name: temperature + use_template: temperature + default: 0.7 + min: 0 + max: 1 + - name: top_p + use_template: top_p + default: 1 + min: 0 + max: 1 + - name: max_tokens + use_template: max_tokens + default: 1024 + min: 1 + max: 8192 + - name: safe_prompt + default: false + type: boolean + help: + en_US: Whether to inject a safety prompt before all conversations. + zh_Hans: 是否开启提示词审查 + label: + en_US: SafePrompt + zh_Hans: 提示词审查 + - name: random_seed + type: int + help: + en_US: The seed to use for random sampling. If set, different calls will generate deterministic results. 
+ zh_Hans: 当开启随机数种子以后,你可以通过指定一个固定的种子来使得回答结果更加稳定 + label: + en_US: RandomSeed + zh_Hans: 随机数种子 + default: 0 + min: 0 + max: 2147483647 +pricing: + input: '0.008' + output: '0.024' + unit: '0.001' + currency: USD From 8219f9e0909a1b7ef6b3f887133fc328f62bcff6 Mon Sep 17 00:00:00 2001 From: Nam Vu Date: Sat, 21 Sep 2024 19:49:01 +0700 Subject: [PATCH 008/235] fix: api/core/ops/ops_trace_manager.py (#8501) --- api/core/ops/ops_trace_manager.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/api/core/ops/ops_trace_manager.py b/api/core/ops/ops_trace_manager.py index 6f17bade9748e..876790f5b8889 100644 --- a/api/core/ops/ops_trace_manager.py +++ b/api/core/ops/ops_trace_manager.py @@ -176,11 +176,18 @@ def get_ops_trace_instance( return None app: App = db.session.query(App).filter(App.id == app_id).first() + + if app is None: + return None + app_ops_trace_config = json.loads(app.tracing) if app.tracing else None - if app_ops_trace_config is not None: - tracing_provider = app_ops_trace_config.get("tracing_provider") - else: + if app_ops_trace_config is None: + return None + + tracing_provider = app_ops_trace_config.get("tracing_provider") + + if tracing_provider is None or tracing_provider not in provider_config_map: return None # decrypt_token From 1a8dcae10e4e0f7411cce3e48171dde1a79db951 Mon Sep 17 00:00:00 2001 From: AAEE86 <33052466+AAEE86@users.noreply.github.com> Date: Sat, 21 Sep 2024 22:52:10 +0800 Subject: [PATCH 009/235] add Qwen custom add model interface (#8565) --- .../model_providers/tongyi/llm/llm.py | 71 ++++++++++++++++++- .../model_providers/tongyi/tongyi.yaml | 18 +++++ 2 files changed, 88 insertions(+), 1 deletion(-) diff --git a/api/core/model_runtime/model_providers/tongyi/llm/llm.py b/api/core/model_runtime/model_providers/tongyi/llm/llm.py index 1d4eba6668c0c..f90c7f075fe86 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/llm.py +++ b/api/core/model_runtime/model_providers/tongyi/llm/llm.py @@ -30,7 
+30,15 @@ ToolPromptMessage, UserPromptMessage, ) -from core.model_runtime.entities.model_entities import ModelFeature +from core.model_runtime.entities.model_entities import ( + AIModelEntity, + FetchFrom, + I18nObject, + ModelFeature, + ModelType, + ParameterRule, + ParameterType, +) from core.model_runtime.errors.invoke import ( InvokeAuthorizationError, InvokeBadRequestError, @@ -520,3 +528,64 @@ def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]] UnsupportedHTTPMethod, ], } + + def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None: + """ + Architecture for defining customizable models + + :param model: model name + :param credentials: model credentials + :return: AIModelEntity or None + """ + rules = [ + ParameterRule( + name="temperature", + type=ParameterType.FLOAT, + use_template="temperature", + label=I18nObject(zh_Hans="温度", en_US="Temperature"), + ), + ParameterRule( + name="top_p", + type=ParameterType.FLOAT, + use_template="top_p", + label=I18nObject(zh_Hans="Top P", en_US="Top P"), + ), + ParameterRule( + name="top_k", + type=ParameterType.INT, + min=0, + max=99, + label=I18nObject(zh_Hans="top_k", en_US="top_k"), + ), + ParameterRule( + name="max_tokens", + type=ParameterType.INT, + min=1, + max=128000, + default=1024, + label=I18nObject(zh_Hans="最大生成长度", en_US="Max Tokens"), + ), + ParameterRule( + name="seed", + type=ParameterType.INT, + default=1234, + label=I18nObject(zh_Hans="随机种子", en_US="Random Seed"), + ), + ParameterRule( + name="repetition_penalty", + type=ParameterType.FLOAT, + default=1.1, + label=I18nObject(zh_Hans="重复惩罚", en_US="Repetition Penalty"), + ), + ] + + entity = AIModelEntity( + model=model, + label=I18nObject(en_US=model), + fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, + model_type=ModelType.LLM, + model_properties={}, + parameter_rules=rules, + ) + + return entity diff --git a/api/core/model_runtime/model_providers/tongyi/tongyi.yaml 
b/api/core/model_runtime/model_providers/tongyi/tongyi.yaml index de2c289c947c3..fabe6d90e6b20 100644 --- a/api/core/model_runtime/model_providers/tongyi/tongyi.yaml +++ b/api/core/model_runtime/model_providers/tongyi/tongyi.yaml @@ -20,6 +20,7 @@ supported_model_types: - text-embedding configurate_methods: - predefined-model + - customizable-model provider_credential_schema: credential_form_schemas: - variable: dashscope_api_key @@ -30,3 +31,20 @@ provider_credential_schema: placeholder: zh_Hans: 在此输入您的 API Key en_US: Enter your API Key +model_credential_schema: + model: + label: + en_US: Model Name + zh_Hans: 模型名称 + placeholder: + en_US: Enter full model name + zh_Hans: 输入模型全称 + credential_form_schemas: + - variable: dashscope_api_key + required: true + label: + en_US: API Key + type: secret-input + placeholder: + zh_Hans: 在此输入您的 API Key + en_US: Enter your API Key From 831c5a93af83058da71f56c2df1d634ee768e001 Mon Sep 17 00:00:00 2001 From: zhuhao <37029601+hwzhuhao@users.noreply.github.com> Date: Sat, 21 Sep 2024 22:56:37 +0800 Subject: [PATCH 010/235] refactor(ops): Optimize the iteration for filter_none_values and use logging.error to record logs when an exception occurs (#8461) --- api/core/ops/entities/trace_entity.py | 3 +-- api/core/ops/ops_trace_manager.py | 4 ++-- api/core/ops/utils.py | 7 +++++-- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/api/core/ops/entities/trace_entity.py b/api/core/ops/entities/trace_entity.py index f27a0af6e0de1..db6ce9d9c38ad 100644 --- a/api/core/ops/entities/trace_entity.py +++ b/api/core/ops/entities/trace_entity.py @@ -21,8 +21,7 @@ def ensure_type(cls, v): return None if isinstance(v, str | dict | list): return v - else: - return "" + return "" class WorkflowTraceInfo(BaseTraceInfo): diff --git a/api/core/ops/ops_trace_manager.py b/api/core/ops/ops_trace_manager.py index 876790f5b8889..0200f4a32df16 100644 --- a/api/core/ops/ops_trace_manager.py +++ b/api/core/ops/ops_trace_manager.py @@ -708,7 +708,7 @@ 
def add_trace_task(self, trace_task: TraceTask): trace_task.app_id = self.app_id trace_manager_queue.put(trace_task) except Exception as e: - logging.debug(f"Error adding trace task: {e}") + logging.error(f"Error adding trace task: {e}") finally: self.start_timer() @@ -727,7 +727,7 @@ def run(self): if tasks: self.send_to_celery(tasks) except Exception as e: - logging.debug(f"Error processing trace tasks: {e}") + logging.error(f"Error processing trace tasks: {e}") def start_timer(self): global trace_manager_timer diff --git a/api/core/ops/utils.py b/api/core/ops/utils.py index 498685b3426d3..3cd3fb57565a5 100644 --- a/api/core/ops/utils.py +++ b/api/core/ops/utils.py @@ -6,12 +6,15 @@ def filter_none_values(data: dict): + new_data = {} for key, value in data.items(): if value is None: continue if isinstance(value, datetime): - data[key] = value.isoformat() - return {key: value for key, value in data.items() if value is not None} + new_data[key] = value.isoformat() + else: + new_data[key] = value + return new_data def get_message_data(message_id): From b32a7713e024d63d327303b3749d9380fbaa592e Mon Sep 17 00:00:00 2001 From: Joe <79627742+ZhouhaoJiang@users.noreply.github.com> Date: Sat, 21 Sep 2024 23:59:50 +0800 Subject: [PATCH 011/235] feat: update pyproject.toml (#8368) --- api/poetry.lock | 100 ++++++++++++++++++++++++--------------------- api/pyproject.toml | 2 +- 2 files changed, 54 insertions(+), 48 deletions(-) diff --git a/api/poetry.lock b/api/poetry.lock index 28c688cc9cde4..8d4d680031a7c 100644 --- a/api/poetry.lock +++ b/api/poetry.lock @@ -616,13 +616,13 @@ files = [ [[package]] name = "azure-core" -version = "1.30.2" +version = "1.31.0" description = "Microsoft Azure Core Library for Python" optional = false python-versions = ">=3.8" files = [ - {file = "azure-core-1.30.2.tar.gz", hash = "sha256:a14dc210efcd608821aa472d9fb8e8d035d29b68993819147bc290a8ac224472"}, - {file = "azure_core-1.30.2-py3-none-any.whl", hash = 
"sha256:cf019c1ca832e96274ae85abd3d9f752397194d9fea3b41487290562ac8abe4a"}, + {file = "azure_core-1.31.0-py3-none-any.whl", hash = "sha256:22954de3777e0250029360ef31d80448ef1be13b80a459bff80ba7073379e2cd"}, + {file = "azure_core-1.31.0.tar.gz", hash = "sha256:656a0dd61e1869b1506b7c6a3b31d62f15984b1a573d6326f6aa2f3e4123284b"}, ] [package.dependencies] @@ -828,13 +828,13 @@ crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] [[package]] name = "botocore" -version = "1.35.17" +version = "1.35.19" description = "Low-level, data-driven core of boto 3." optional = false python-versions = ">=3.8" files = [ - {file = "botocore-1.35.17-py3-none-any.whl", hash = "sha256:a93f773ca93139529b5d36730b382dbee63ab4c7f26129aa5c84835255ca999d"}, - {file = "botocore-1.35.17.tar.gz", hash = "sha256:0d35d03ea647b5d464c7f77bdab6fb23ae5d49752b13cf97ab84444518c7b1bd"}, + {file = "botocore-1.35.19-py3-none-any.whl", hash = "sha256:c83f7f0cacfe7c19b109b363ebfa8736e570d24922f16ed371681f58ebab44a9"}, + {file = "botocore-1.35.19.tar.gz", hash = "sha256:42d6d8db7250cbd7899f786f9861e02cab17dc238f64d6acb976098ed9809625"}, ] [package.dependencies] @@ -2429,13 +2429,13 @@ test = ["pytest (>=6)"] [[package]] name = "fastapi" -version = "0.114.1" +version = "0.114.2" description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production" optional = false python-versions = ">=3.8" files = [ - {file = "fastapi-0.114.1-py3-none-any.whl", hash = "sha256:5d4746f6e4b7dff0b4f6b6c6d5445645285f662fe75886e99af7ee2d6b58bb3e"}, - {file = "fastapi-0.114.1.tar.gz", hash = "sha256:1d7bbbeabbaae0acb0c22f0ab0b040f642d3093ca3645f8c876b6f91391861d8"}, + {file = "fastapi-0.114.2-py3-none-any.whl", hash = "sha256:44474a22913057b1acb973ab90f4b671ba5200482e7622816d79105dcece1ac5"}, + {file = "fastapi-0.114.2.tar.gz", hash = "sha256:0adb148b62edb09e8c6eeefa3ea934e8f276dabc038c5a82989ea6346050c3da"}, ] [package.dependencies] @@ -3990,15 +3990,18 @@ files = [ [[package]] name = "idna" -version = "3.8" 
+version = "3.9" description = "Internationalized Domain Names in Applications (IDNA)" optional = false python-versions = ">=3.6" files = [ - {file = "idna-3.8-py3-none-any.whl", hash = "sha256:050b4e5baadcd44d760cedbd2b8e639f2ff89bbc7a5730fcc662954303377aac"}, - {file = "idna-3.8.tar.gz", hash = "sha256:d838c2c0ed6fced7693d5e8ab8e734d5f8fda53a039c0164afb0b82e771e3603"}, + {file = "idna-3.9-py3-none-any.whl", hash = "sha256:69297d5da0cc9281c77efffb4e730254dd45943f45bbfb461de5991713989b1e"}, + {file = "idna-3.9.tar.gz", hash = "sha256:e5c5dafde284f26e9e0f28f6ea2d6400abd5ca099864a67f576f3981c6476124"}, ] +[package.extras] +all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"] + [[package]] name = "importlib-metadata" version = "6.11.0" @@ -4393,13 +4396,13 @@ six = "*" [[package]] name = "langfuse" -version = "2.48.0" +version = "2.48.1" description = "A client library for accessing langfuse" optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "langfuse-2.48.0-py3-none-any.whl", hash = "sha256:475b047e461f8a45e3c7d81b6a87e0b9e389c489d465b838aa69cbdd16eeacce"}, - {file = "langfuse-2.48.0.tar.gz", hash = "sha256:46e7e6e6e97fe03115a9f95d7f29b3fcd1848a9d1bb34608ebb42a3931919e45"}, + {file = "langfuse-2.48.1-py3-none-any.whl", hash = "sha256:8661070b6d94ba1d7da92c054f3110b6ecf4489d6e8204a4080f934f3f49ebf2"}, + {file = "langfuse-2.48.1.tar.gz", hash = "sha256:b8117d90babec6be1bc3303b42e0b71848531eae44118e6e0123d03e7961d0fc"}, ] [package.dependencies] @@ -4418,13 +4421,13 @@ openai = ["openai (>=0.27.8)"] [[package]] name = "langsmith" -version = "0.1.118" +version = "0.1.120" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." 
optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "langsmith-0.1.118-py3-none-any.whl", hash = "sha256:f017127b3efb037da5e46ff4f8583e8192e7955191737240c327f3eadc144d7c"}, - {file = "langsmith-0.1.118.tar.gz", hash = "sha256:ff1ca06c92c6081250244ebbce5d0bb347b9d898d2e9b60a13b11f0f0720f09f"}, + {file = "langsmith-0.1.120-py3-none-any.whl", hash = "sha256:54d2785e301646c0988e0a69ebe4d976488c87b41928b358cb153b6ddd8db62b"}, + {file = "langsmith-0.1.120.tar.gz", hash = "sha256:25499ca187b41bd89d784b272b97a8d76f60e0e21bdf20336e8a2aa6a9b23ac9"}, ] [package.dependencies] @@ -6232,13 +6235,13 @@ xmp = ["defusedxml"] [[package]] name = "platformdirs" -version = "4.3.2" +version = "4.3.3" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." optional = false python-versions = ">=3.8" files = [ - {file = "platformdirs-4.3.2-py3-none-any.whl", hash = "sha256:eb1c8582560b34ed4ba105009a4badf7f6f85768b30126f351328507b2beb617"}, - {file = "platformdirs-4.3.2.tar.gz", hash = "sha256:9e5e27a08aa095dd127b9f2e764d74254f482fef22b0970773bfba79d091ab8c"}, + {file = "platformdirs-4.3.3-py3-none-any.whl", hash = "sha256:50a5450e2e84f44539718293cbb1da0a0885c9d14adf21b77bae4e66fc99d9b5"}, + {file = "platformdirs-4.3.3.tar.gz", hash = "sha256:d4e0b7d8ec176b341fb03cb11ca12d0276faa8c485f9cd218f613840463fc2c0"}, ] [package.extras] @@ -6248,13 +6251,13 @@ type = ["mypy (>=1.11.2)"] [[package]] name = "plotly" -version = "5.24.0" +version = "5.24.1" description = "An open-source, interactive data visualization library for Python" optional = false python-versions = ">=3.8" files = [ - {file = "plotly-5.24.0-py3-none-any.whl", hash = "sha256:0e54efe52c8cef899f7daa41be9ed97dfb6be622613a2a8f56a86a0634b2b67e"}, - {file = "plotly-5.24.0.tar.gz", hash = "sha256:eae9f4f54448682442c92c1e97148e3ad0c52f0cf86306e1b76daba24add554a"}, + {file = "plotly-5.24.1-py3-none-any.whl", hash = 
"sha256:f67073a1e637eb0dc3e46324d9d51e2fe76e9727c892dde64ddf1e1b51f29089"}, + {file = "plotly-5.24.1.tar.gz", hash = "sha256:dbc8ac8339d248a4bcc36e08a5659bacfe1b079390b8953533f4eb22169b4bae"}, ] [package.dependencies] @@ -7025,15 +7028,18 @@ files = [ [[package]] name = "pyreadline3" -version = "3.4.3" +version = "3.5.2" description = "A python implementation of GNU readline." optional = false -python-versions = "*" +python-versions = ">=3.8" files = [ - {file = "pyreadline3-3.4.3-py3-none-any.whl", hash = "sha256:f832c5898f4f9a0f81d48a8c499b39d0179de1a465ea3def1a7e7231840b4ed6"}, - {file = "pyreadline3-3.4.3.tar.gz", hash = "sha256:ebab0baca37f50e2faa1dd99a6da1c75de60e0d68a3b229c134bbd12786250e2"}, + {file = "pyreadline3-3.5.2-py3-none-any.whl", hash = "sha256:a87d56791e2965b2b187e2ea33dcf664600842c997c0623c95cf8ef07db83de9"}, + {file = "pyreadline3-3.5.2.tar.gz", hash = "sha256:ba82292e52c5a3bb256b291af0c40b457c1e8699cac9a873abbcaac8aef3a1bb"}, ] +[package.extras] +dev = ["build", "flake8", "pytest", "twine"] + [[package]] name = "pytest" version = "8.3.3" @@ -8778,13 +8784,13 @@ test = ["pytest", "tornado (>=4.5)", "typeguard"] [[package]] name = "tencentcloud-sdk-python-common" -version = "3.0.1230" +version = "3.0.1231" description = "Tencent Cloud Common SDK for Python" optional = false python-versions = "*" files = [ - {file = "tencentcloud-sdk-python-common-3.0.1230.tar.gz", hash = "sha256:1e0f3bab80026fcb0083820869239b3f8cf30beb8e00e12c213bdecc75eb7577"}, - {file = "tencentcloud_sdk_python_common-3.0.1230-py2.py3-none-any.whl", hash = "sha256:03616c79685c154c689536a9c823d52b855cf49eada70679826a92aff5afd596"}, + {file = "tencentcloud-sdk-python-common-3.0.1231.tar.gz", hash = "sha256:22aa281ca2eac511e1615b2953da7c4a0bec87cf93a05a7a15dbb61b23a215ee"}, + {file = "tencentcloud_sdk_python_common-3.0.1231-py2.py3-none-any.whl", hash = "sha256:bd0f7c4df4b156ec35c8731afa1f498043c7e1cd5d2feb595ee441fdb45a061e"}, ] [package.dependencies] @@ -8792,17 +8798,17 @@ 
requests = ">=2.16.0" [[package]] name = "tencentcloud-sdk-python-hunyuan" -version = "3.0.1230" +version = "3.0.1231" description = "Tencent Cloud Hunyuan SDK for Python" optional = false python-versions = "*" files = [ - {file = "tencentcloud-sdk-python-hunyuan-3.0.1230.tar.gz", hash = "sha256:900d15cb9dc2217b1282d985898ec7ecf97859351c86c6f7efc74685f08a5f85"}, - {file = "tencentcloud_sdk_python_hunyuan-3.0.1230-py2.py3-none-any.whl", hash = "sha256:604dab0d4d66ea942f23d7980c76b5f0f6af3d68a8374e619331a4dd2910991e"}, + {file = "tencentcloud-sdk-python-hunyuan-3.0.1231.tar.gz", hash = "sha256:6da12f418f14305b3a6b7bb29b6d95bf4038a6b66b81c0e03b8dafc4f6df99ca"}, + {file = "tencentcloud_sdk_python_hunyuan-3.0.1231-py2.py3-none-any.whl", hash = "sha256:21ba28f69c34c15e20900be3f2c06376fcaf7e58265f939833c55631f2348792"}, ] [package.dependencies] -tencentcloud-sdk-python-common = "3.0.1230" +tencentcloud-sdk-python-common = "3.0.1231" [[package]] name = "threadpoolctl" @@ -9205,13 +9211,13 @@ typing-extensions = ">=3.7.4.3" [[package]] name = "types-requests" -version = "2.32.0.20240907" +version = "2.32.0.20240914" description = "Typing stubs for requests" optional = false python-versions = ">=3.8" files = [ - {file = "types-requests-2.32.0.20240907.tar.gz", hash = "sha256:ff33935f061b5e81ec87997e91050f7b4af4f82027a7a7a9d9aaea04a963fdf8"}, - {file = "types_requests-2.32.0.20240907-py3-none-any.whl", hash = "sha256:1d1e79faeaf9d42def77f3c304893dea17a97cae98168ac69f3cb465516ee8da"}, + {file = "types-requests-2.32.0.20240914.tar.gz", hash = "sha256:2850e178db3919d9bf809e434eef65ba49d0e7e33ac92d588f4a5e295fffd405"}, + {file = "types_requests-2.32.0.20240914-py3-none-any.whl", hash = "sha256:59c2f673eb55f32a99b2894faf6020e1a9f4a402ad0f192bfee0b64469054310"}, ] [package.dependencies] @@ -9454,13 +9460,13 @@ files = [ [[package]] name = "urllib3" -version = "2.2.2" +version = "2.2.3" description = "HTTP library with thread-safe connection pooling, file post, and more." 
optional = false python-versions = ">=3.8" files = [ - {file = "urllib3-2.2.2-py3-none-any.whl", hash = "sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472"}, - {file = "urllib3-2.2.2.tar.gz", hash = "sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168"}, + {file = "urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac"}, + {file = "urllib3-2.2.3.tar.gz", hash = "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9"}, ] [package.extras] @@ -9614,12 +9620,12 @@ files = [ [[package]] name = "volcengine-python-sdk" -version = "1.0.100" +version = "1.0.101" description = "Volcengine SDK for Python" optional = false python-versions = "*" files = [ - {file = "volcengine-python-sdk-1.0.100.tar.gz", hash = "sha256:cdc194fe3ce51adda6892d2ca1c43edba3300699321dc6c69119c59fc3b28932"}, + {file = "volcengine-python-sdk-1.0.101.tar.gz", hash = "sha256:1b76e71a6dcf3d5be1b2c058e7d281359e6cca2cc920ffe2567d3115beea1d02"}, ] [package.dependencies] @@ -10215,13 +10221,13 @@ requests = "*" [[package]] name = "zipp" -version = "3.20.1" +version = "3.20.2" description = "Backport of pathlib-compatible object wrapper for zip files" optional = false python-versions = ">=3.8" files = [ - {file = "zipp-3.20.1-py3-none-any.whl", hash = "sha256:9960cd8967c8f85a56f920d5d507274e74f9ff813a0ab8889a5b5be2daf44064"}, - {file = "zipp-3.20.1.tar.gz", hash = "sha256:c22b14cc4763c5a5b04134207736c107db42e9d3ef2d9779d465f5f1bcba572b"}, + {file = "zipp-3.20.2-py3-none-any.whl", hash = "sha256:a817ac80d6cf4b23bf7f2828b7cabf326f15a001bea8b1f9b49631780ba28350"}, + {file = "zipp-3.20.2.tar.gz", hash = "sha256:bc9eb26f4506fda01b81bcde0ca78103b6e62f991b381fec825435c836edbc29"}, ] [package.extras] @@ -10416,4 +10422,4 @@ cffi = ["cffi (>=1.11)"] [metadata] lock-version = "2.0" python-versions = ">=3.10,<3.13" -content-hash = "9173a56b2efea12804c980511e1465fba43c7a3d83b1ad284ee149851ed67fc5" 
+content-hash = "18924ae12a00bde4438a46168bc167ed69613ab1ab0c387f193cd47ac24379b2" diff --git a/api/pyproject.toml b/api/pyproject.toml index 8c10f1dad9795..829c39a12bf91 100644 --- a/api/pyproject.toml +++ b/api/pyproject.toml @@ -168,7 +168,7 @@ gunicorn = "~22.0.0" httpx = { version = "~0.27.0", extras = ["socks"] } huggingface-hub = "~0.16.4" jieba = "0.42.1" -langfuse = "^2.36.1" +langfuse = "^2.48.0" langsmith = "^0.1.77" mailchimp-transactional = "~1.0.50" markdown = "~3.5.1" From 8c51d06222d5ada18bab11c4777b7e7feb644fbd Mon Sep 17 00:00:00 2001 From: Hash Brown Date: Sun, 22 Sep 2024 03:15:11 +0800 Subject: [PATCH 012/235] feat: regenerate in `Chat`, `agent` and `Chatflow` app (#7661) --- api/constants/__init__.py | 1 + api/controllers/console/app/completion.py | 1 + api/controllers/console/app/message.py | 2 - api/controllers/console/app/workflow.py | 2 + api/controllers/console/explore/completion.py | 1 + api/controllers/console/explore/message.py | 2 +- api/controllers/service_api/app/message.py | 1 + api/controllers/web/completion.py | 1 + api/controllers/web/message.py | 3 +- api/core/agent/base_agent_runner.py | 5 +- .../app/apps/advanced_chat/app_generator.py | 1 + api/core/app/apps/agent_chat/app_generator.py | 1 + api/core/app/apps/chat/app_generator.py | 1 + .../app/apps/message_based_app_generator.py | 1 + api/core/app/entities/app_invoke_entities.py | 3 + api/core/memory/token_buffer_memory.py | 21 ++- .../prompt/utils/extract_thread_messages.py | 22 +++ api/fields/conversation_fields.py | 1 + api/fields/message_fields.py | 1 + ...bb251_add_parent_message_id_to_messages.py | 36 +++++ api/models/model.py | 1 + api/services/message_service.py | 4 +- .../prompt/test_extract_thread_messages.py | 91 +++++++++++ .../debug-with-multiple-model/chat-item.tsx | 4 +- .../debug/debug-with-single-model/index.tsx | 26 ++- web/app/components/app/log/list.tsx | 149 ++++++++++-------- .../chat/chat-with-history/chat-wrapper.tsx | 25 ++- 
.../base/chat/chat-with-history/hooks.tsx | 35 +--- .../base/chat/chat/answer/index.tsx | 3 + .../base/chat/chat/answer/operation.tsx | 13 +- web/app/components/base/chat/chat/context.tsx | 3 + web/app/components/base/chat/chat/hooks.ts | 3 +- web/app/components/base/chat/chat/index.tsx | 5 + web/app/components/base/chat/chat/type.ts | 1 + web/app/components/base/chat/constants.ts | 1 + .../chat/embedded-chatbot/chat-wrapper.tsx | 25 ++- .../base/chat/embedded-chatbot/hooks.tsx | 38 +---- web/app/components/base/chat/types.ts | 4 +- web/app/components/base/chat/utils.ts | 57 ++++++- .../assets/vender/line/general/refresh.svg | 1 + .../src/vender/line/general/Refresh.json | 23 +++ .../icons/src/vender/line/general/Refresh.tsx | 16 ++ .../icons/src/vender/line/general/index.ts | 1 + .../components/base/regenerate-btn/index.tsx | 31 ++++ .../workflow/panel/chat-record/index.tsx | 79 ++++++---- .../panel/debug-and-preview/chat-wrapper.tsx | 26 ++- .../workflow/panel/debug-and-preview/hooks.ts | 2 + web/hooks/use-app-favicon.ts | 6 +- web/i18n/en-US/app-api.ts | 1 + web/i18n/zh-Hans/app-api.ts | 1 + web/models/log.ts | 1 + 51 files changed, 604 insertions(+), 179 deletions(-) create mode 100644 api/core/prompt/utils/extract_thread_messages.py create mode 100644 api/migrations/versions/2024_09_11_1012-d57ba9ebb251_add_parent_message_id_to_messages.py create mode 100644 api/tests/unit_tests/core/prompt/test_extract_thread_messages.py create mode 100644 web/app/components/base/icons/assets/vender/line/general/refresh.svg create mode 100644 web/app/components/base/icons/src/vender/line/general/Refresh.json create mode 100644 web/app/components/base/icons/src/vender/line/general/Refresh.tsx create mode 100644 web/app/components/base/regenerate-btn/index.tsx diff --git a/api/constants/__init__.py b/api/constants/__init__.py index e22c3268ef428..75eaf81638cdf 100644 --- a/api/constants/__init__.py +++ b/api/constants/__init__.py @@ -1 +1,2 @@ HIDDEN_VALUE = "[__HIDDEN__]" 
+UUID_NIL = "00000000-0000-0000-0000-000000000000" diff --git a/api/controllers/console/app/completion.py b/api/controllers/console/app/completion.py index 53de51c24d798..d3296d3dff44a 100644 --- a/api/controllers/console/app/completion.py +++ b/api/controllers/console/app/completion.py @@ -109,6 +109,7 @@ def post(self, app_model): parser.add_argument("files", type=list, required=False, location="json") parser.add_argument("model_config", type=dict, required=True, location="json") parser.add_argument("conversation_id", type=uuid_value, location="json") + parser.add_argument("parent_message_id", type=uuid_value, required=False, location="json") parser.add_argument("response_mode", type=str, choices=["blocking", "streaming"], location="json") parser.add_argument("retriever_from", type=str, required=False, default="dev", location="json") args = parser.parse_args() diff --git a/api/controllers/console/app/message.py b/api/controllers/console/app/message.py index fe06201982374..2fba3e0af02be 100644 --- a/api/controllers/console/app/message.py +++ b/api/controllers/console/app/message.py @@ -105,8 +105,6 @@ def get(self, app_model): if rest_count > 0: has_more = True - history_messages = list(reversed(history_messages)) - return InfiniteScrollPagination(data=history_messages, limit=args["limit"], has_more=has_more) diff --git a/api/controllers/console/app/workflow.py b/api/controllers/console/app/workflow.py index b488deb89d90c..0a693b84e27ec 100644 --- a/api/controllers/console/app/workflow.py +++ b/api/controllers/console/app/workflow.py @@ -166,6 +166,8 @@ def post(self, app_model: App): parser.add_argument("query", type=str, required=True, location="json", default="") parser.add_argument("files", type=list, location="json") parser.add_argument("conversation_id", type=uuid_value, location="json") + parser.add_argument("parent_message_id", type=uuid_value, required=False, location="json") + args = parser.parse_args() try: diff --git 
a/api/controllers/console/explore/completion.py b/api/controllers/console/explore/completion.py index f4646920982bd..125bc1af8c41e 100644 --- a/api/controllers/console/explore/completion.py +++ b/api/controllers/console/explore/completion.py @@ -100,6 +100,7 @@ def post(self, installed_app): parser.add_argument("query", type=str, required=True, location="json") parser.add_argument("files", type=list, required=False, location="json") parser.add_argument("conversation_id", type=uuid_value, location="json") + parser.add_argument("parent_message_id", type=uuid_value, required=False, location="json") parser.add_argument("retriever_from", type=str, required=False, default="explore_app", location="json") args = parser.parse_args() diff --git a/api/controllers/console/explore/message.py b/api/controllers/console/explore/message.py index 0e0238556cf9a..3d221ff30a659 100644 --- a/api/controllers/console/explore/message.py +++ b/api/controllers/console/explore/message.py @@ -51,7 +51,7 @@ def get(self, installed_app): try: return MessageService.pagination_by_first_id( - app_model, current_user, args["conversation_id"], args["first_id"], args["limit"] + app_model, current_user, args["conversation_id"], args["first_id"], args["limit"], "desc" ) except services.errors.conversation.ConversationNotExistsError: raise NotFound("Conversation Not Exists.") diff --git a/api/controllers/service_api/app/message.py b/api/controllers/service_api/app/message.py index e54e6f4903d57..a70ee89b5e2bf 100644 --- a/api/controllers/service_api/app/message.py +++ b/api/controllers/service_api/app/message.py @@ -54,6 +54,7 @@ class MessageListApi(Resource): message_fields = { "id": fields.String, "conversation_id": fields.String, + "parent_message_id": fields.String, "inputs": fields.Raw, "query": fields.String, "answer": fields.String(attribute="re_sign_file_url_answer"), diff --git a/api/controllers/web/completion.py b/api/controllers/web/completion.py index 115492b7966c0..45b890dfc4899 100644 --- 
a/api/controllers/web/completion.py +++ b/api/controllers/web/completion.py @@ -96,6 +96,7 @@ def post(self, app_model, end_user): parser.add_argument("files", type=list, required=False, location="json") parser.add_argument("response_mode", type=str, choices=["blocking", "streaming"], location="json") parser.add_argument("conversation_id", type=uuid_value, location="json") + parser.add_argument("parent_message_id", type=uuid_value, required=False, location="json") parser.add_argument("retriever_from", type=str, required=False, default="web_app", location="json") args = parser.parse_args() diff --git a/api/controllers/web/message.py b/api/controllers/web/message.py index 0d4047f4efbaf..2d2a5866c8038 100644 --- a/api/controllers/web/message.py +++ b/api/controllers/web/message.py @@ -57,6 +57,7 @@ class MessageListApi(WebApiResource): message_fields = { "id": fields.String, "conversation_id": fields.String, + "parent_message_id": fields.String, "inputs": fields.Raw, "query": fields.String, "answer": fields.String(attribute="re_sign_file_url_answer"), @@ -89,7 +90,7 @@ def get(self, app_model, end_user): try: return MessageService.pagination_by_first_id( - app_model, end_user, args["conversation_id"], args["first_id"], args["limit"] + app_model, end_user, args["conversation_id"], args["first_id"], args["limit"], "desc" ) except services.errors.conversation.ConversationNotExistsError: raise NotFound("Conversation Not Exists.") diff --git a/api/core/agent/base_agent_runner.py b/api/core/agent/base_agent_runner.py index d09a9956a4a59..5295f97bdbb86 100644 --- a/api/core/agent/base_agent_runner.py +++ b/api/core/agent/base_agent_runner.py @@ -32,6 +32,7 @@ from core.model_runtime.entities.model_entities import ModelFeature from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel from core.model_runtime.utils.encoders import jsonable_encoder +from core.prompt.utils.extract_thread_messages import extract_thread_messages from 
core.tools.entities.tool_entities import ( ToolParameter, ToolRuntimeVariablePool, @@ -441,10 +442,12 @@ def organize_agent_history(self, prompt_messages: list[PromptMessage]) -> list[P .filter( Message.conversation_id == self.message.conversation_id, ) - .order_by(Message.created_at.asc()) + .order_by(Message.created_at.desc()) .all() ) + messages = list(reversed(extract_thread_messages(messages))) + for message in messages: if message.id == self.message.id: continue diff --git a/api/core/app/apps/advanced_chat/app_generator.py b/api/core/app/apps/advanced_chat/app_generator.py index 88e1256ed54e4..445ef6d0ab19f 100644 --- a/api/core/app/apps/advanced_chat/app_generator.py +++ b/api/core/app/apps/advanced_chat/app_generator.py @@ -121,6 +121,7 @@ def generate( inputs=conversation.inputs if conversation else self._get_cleaned_inputs(inputs, app_config), query=query, files=file_objs, + parent_message_id=args.get("parent_message_id"), user_id=user.id, stream=stream, invoke_from=invoke_from, diff --git a/api/core/app/apps/agent_chat/app_generator.py b/api/core/app/apps/agent_chat/app_generator.py index abf8a332ab27d..99abccf4f9840 100644 --- a/api/core/app/apps/agent_chat/app_generator.py +++ b/api/core/app/apps/agent_chat/app_generator.py @@ -127,6 +127,7 @@ def generate( inputs=conversation.inputs if conversation else self._get_cleaned_inputs(inputs, app_config), query=query, files=file_objs, + parent_message_id=args.get("parent_message_id"), user_id=user.id, stream=stream, invoke_from=invoke_from, diff --git a/api/core/app/apps/chat/app_generator.py b/api/core/app/apps/chat/app_generator.py index 032556ec4c1ce..9ef1366a0f668 100644 --- a/api/core/app/apps/chat/app_generator.py +++ b/api/core/app/apps/chat/app_generator.py @@ -128,6 +128,7 @@ def generate( inputs=conversation.inputs if conversation else self._get_cleaned_inputs(inputs, app_config), query=query, files=file_objs, + parent_message_id=args.get("parent_message_id"), user_id=user.id, stream=stream, 
invoke_from=invoke_from, diff --git a/api/core/app/apps/message_based_app_generator.py b/api/core/app/apps/message_based_app_generator.py index c4db95cbd0c4a..65b759acf5376 100644 --- a/api/core/app/apps/message_based_app_generator.py +++ b/api/core/app/apps/message_based_app_generator.py @@ -218,6 +218,7 @@ def _init_generate_records( answer_tokens=0, answer_unit_price=0, answer_price_unit=0, + parent_message_id=getattr(application_generate_entity, "parent_message_id", None), provider_response_latency=0, total_price=0, currency="USD", diff --git a/api/core/app/entities/app_invoke_entities.py b/api/core/app/entities/app_invoke_entities.py index ab8d4e374e26f..87ca51ef1b1db 100644 --- a/api/core/app/entities/app_invoke_entities.py +++ b/api/core/app/entities/app_invoke_entities.py @@ -122,6 +122,7 @@ class ChatAppGenerateEntity(EasyUIBasedAppGenerateEntity): """ conversation_id: Optional[str] = None + parent_message_id: Optional[str] = None class CompletionAppGenerateEntity(EasyUIBasedAppGenerateEntity): @@ -138,6 +139,7 @@ class AgentChatAppGenerateEntity(EasyUIBasedAppGenerateEntity): """ conversation_id: Optional[str] = None + parent_message_id: Optional[str] = None class AdvancedChatAppGenerateEntity(AppGenerateEntity): @@ -149,6 +151,7 @@ class AdvancedChatAppGenerateEntity(AppGenerateEntity): app_config: WorkflowUIBasedAppConfig conversation_id: Optional[str] = None + parent_message_id: Optional[str] = None query: str class SingleIterationRunEntity(BaseModel): diff --git a/api/core/memory/token_buffer_memory.py b/api/core/memory/token_buffer_memory.py index d3185c3b11aec..60b36c50f00d3 100644 --- a/api/core/memory/token_buffer_memory.py +++ b/api/core/memory/token_buffer_memory.py @@ -11,6 +11,7 @@ TextPromptMessageContent, UserPromptMessage, ) +from core.prompt.utils.extract_thread_messages import extract_thread_messages from extensions.ext_database import db from models.model import AppMode, Conversation, Message, MessageFile from models.workflow import 
WorkflowRun @@ -33,8 +34,17 @@ def get_history_prompt_messages( # fetch limited messages, and return reversed query = ( - db.session.query(Message.id, Message.query, Message.answer, Message.created_at, Message.workflow_run_id) - .filter(Message.conversation_id == self.conversation.id, Message.answer != "") + db.session.query( + Message.id, + Message.query, + Message.answer, + Message.created_at, + Message.workflow_run_id, + Message.parent_message_id, + ) + .filter( + Message.conversation_id == self.conversation.id, + ) .order_by(Message.created_at.desc()) ) @@ -45,7 +55,12 @@ def get_history_prompt_messages( messages = query.limit(message_limit).all() - messages = list(reversed(messages)) + # instead of all messages from the conversation, we only need to extract messages + # that belong to the thread of last message + thread_messages = extract_thread_messages(messages) + thread_messages.pop(0) + messages = list(reversed(thread_messages)) + message_file_parser = MessageFileParser(tenant_id=app_record.tenant_id, app_id=app_record.id) prompt_messages = [] for message in messages: diff --git a/api/core/prompt/utils/extract_thread_messages.py b/api/core/prompt/utils/extract_thread_messages.py new file mode 100644 index 0000000000000..e8b626499fce1 --- /dev/null +++ b/api/core/prompt/utils/extract_thread_messages.py @@ -0,0 +1,22 @@ +from constants import UUID_NIL + + +def extract_thread_messages(messages: list[dict]) -> list[dict]: + thread_messages = [] + next_message = None + + for message in messages: + if not message.parent_message_id: + # If the message is regenerated and does not have a parent message, it is the start of a new thread + thread_messages.append(message) + break + + if not next_message: + thread_messages.append(message) + next_message = message.parent_message_id + else: + if next_message in {message.id, UUID_NIL}: + thread_messages.append(message) + next_message = message.parent_message_id + + return thread_messages diff --git 
a/api/fields/conversation_fields.py b/api/fields/conversation_fields.py index 9207314fc2132..3dcd88d1de31d 100644 --- a/api/fields/conversation_fields.py +++ b/api/fields/conversation_fields.py @@ -75,6 +75,7 @@ def format(self, value): "metadata": fields.Raw(attribute="message_metadata_dict"), "status": fields.String, "error": fields.String, + "parent_message_id": fields.String, } feedback_stat_fields = {"like": fields.Integer, "dislike": fields.Integer} diff --git a/api/fields/message_fields.py b/api/fields/message_fields.py index 3d2df87afb9b1..c938097131f7a 100644 --- a/api/fields/message_fields.py +++ b/api/fields/message_fields.py @@ -62,6 +62,7 @@ message_fields = { "id": fields.String, "conversation_id": fields.String, + "parent_message_id": fields.String, "inputs": fields.Raw, "query": fields.String, "answer": fields.String(attribute="re_sign_file_url_answer"), diff --git a/api/migrations/versions/2024_09_11_1012-d57ba9ebb251_add_parent_message_id_to_messages.py b/api/migrations/versions/2024_09_11_1012-d57ba9ebb251_add_parent_message_id_to_messages.py new file mode 100644 index 0000000000000..fd957eeafb2b6 --- /dev/null +++ b/api/migrations/versions/2024_09_11_1012-d57ba9ebb251_add_parent_message_id_to_messages.py @@ -0,0 +1,36 @@ +"""add parent_message_id to messages + +Revision ID: d57ba9ebb251 +Revises: 675b5321501b +Create Date: 2024-09-11 10:12:45.826265 + +""" +import sqlalchemy as sa +from alembic import op + +import models as models + +# revision identifiers, used by Alembic. +revision = 'd57ba9ebb251' +down_revision = '675b5321501b' +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + with op.batch_alter_table('messages', schema=None) as batch_op: + batch_op.add_column(sa.Column('parent_message_id', models.types.StringUUID(), nullable=True)) + + # Set parent_message_id for existing messages to uuid_nil() to distinguish them from new messages with actual parent IDs or NULLs + op.execute('UPDATE messages SET parent_message_id = uuid_nil() WHERE parent_message_id IS NULL') + + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + with op.batch_alter_table('messages', schema=None) as batch_op: + batch_op.drop_column('parent_message_id') + + # ### end Alembic commands ### diff --git a/api/models/model.py b/api/models/model.py index ae0bc3210b646..53940a5a16cc8 100644 --- a/api/models/model.py +++ b/api/models/model.py @@ -710,6 +710,7 @@ class Message(db.Model): answer_tokens = db.Column(db.Integer, nullable=False, server_default=db.text("0")) answer_unit_price = db.Column(db.Numeric(10, 4), nullable=False) answer_price_unit = db.Column(db.Numeric(10, 7), nullable=False, server_default=db.text("0.001")) + parent_message_id = db.Column(StringUUID, nullable=True) provider_response_latency = db.Column(db.Float, nullable=False, server_default=db.text("0")) total_price = db.Column(db.Numeric(10, 7)) currency = db.Column(db.String(255), nullable=False) diff --git a/api/services/message_service.py b/api/services/message_service.py index ecb121c36e4cc..f432a77c80e51 100644 --- a/api/services/message_service.py +++ b/api/services/message_service.py @@ -34,6 +34,7 @@ def pagination_by_first_id( conversation_id: str, first_id: Optional[str], limit: int, + order: str = "asc", ) -> InfiniteScrollPagination: if not user: return InfiniteScrollPagination(data=[], limit=limit, has_more=False) @@ -91,7 +92,8 @@ def pagination_by_first_id( if rest_count > 0: has_more = True - history_messages = list(reversed(history_messages)) + if order == "asc": + history_messages = 
list(reversed(history_messages)) return InfiniteScrollPagination(data=history_messages, limit=limit, has_more=has_more) diff --git a/api/tests/unit_tests/core/prompt/test_extract_thread_messages.py b/api/tests/unit_tests/core/prompt/test_extract_thread_messages.py new file mode 100644 index 0000000000000..ba3c1eb5e032a --- /dev/null +++ b/api/tests/unit_tests/core/prompt/test_extract_thread_messages.py @@ -0,0 +1,91 @@ +from uuid import uuid4 + +from constants import UUID_NIL +from core.prompt.utils.extract_thread_messages import extract_thread_messages + + +class TestMessage: + def __init__(self, id, parent_message_id): + self.id = id + self.parent_message_id = parent_message_id + + def __getitem__(self, item): + return getattr(self, item) + + +def test_extract_thread_messages_single_message(): + messages = [TestMessage(str(uuid4()), UUID_NIL)] + result = extract_thread_messages(messages) + assert len(result) == 1 + assert result[0] == messages[0] + + +def test_extract_thread_messages_linear_thread(): + id1, id2, id3, id4, id5 = str(uuid4()), str(uuid4()), str(uuid4()), str(uuid4()), str(uuid4()) + messages = [ + TestMessage(id5, id4), + TestMessage(id4, id3), + TestMessage(id3, id2), + TestMessage(id2, id1), + TestMessage(id1, UUID_NIL), + ] + result = extract_thread_messages(messages) + assert len(result) == 5 + assert [msg["id"] for msg in result] == [id5, id4, id3, id2, id1] + + +def test_extract_thread_messages_branched_thread(): + id1, id2, id3, id4 = str(uuid4()), str(uuid4()), str(uuid4()), str(uuid4()) + messages = [ + TestMessage(id4, id2), + TestMessage(id3, id2), + TestMessage(id2, id1), + TestMessage(id1, UUID_NIL), + ] + result = extract_thread_messages(messages) + assert len(result) == 3 + assert [msg["id"] for msg in result] == [id4, id2, id1] + + +def test_extract_thread_messages_empty_list(): + messages = [] + result = extract_thread_messages(messages) + assert len(result) == 0 + + +def test_extract_thread_messages_partially_loaded(): + id0, id1, 
id2, id3 = str(uuid4()), str(uuid4()), str(uuid4()), str(uuid4()) + messages = [ + TestMessage(id3, id2), + TestMessage(id2, id1), + TestMessage(id1, id0), + ] + result = extract_thread_messages(messages) + assert len(result) == 3 + assert [msg["id"] for msg in result] == [id3, id2, id1] + + +def test_extract_thread_messages_legacy_messages(): + id1, id2, id3 = str(uuid4()), str(uuid4()), str(uuid4()) + messages = [ + TestMessage(id3, UUID_NIL), + TestMessage(id2, UUID_NIL), + TestMessage(id1, UUID_NIL), + ] + result = extract_thread_messages(messages) + assert len(result) == 3 + assert [msg["id"] for msg in result] == [id3, id2, id1] + + +def test_extract_thread_messages_mixed_with_legacy_messages(): + id1, id2, id3, id4, id5 = str(uuid4()), str(uuid4()), str(uuid4()), str(uuid4()), str(uuid4()) + messages = [ + TestMessage(id5, id4), + TestMessage(id4, id2), + TestMessage(id3, id2), + TestMessage(id2, UUID_NIL), + TestMessage(id1, UUID_NIL), + ] + result = extract_thread_messages(messages) + assert len(result) == 4 + assert [msg["id"] for msg in result] == [id5, id4, id2, id1] diff --git a/web/app/components/app/configuration/debug/debug-with-multiple-model/chat-item.tsx b/web/app/components/app/configuration/debug/debug-with-multiple-model/chat-item.tsx index 80dfb5c534ea5..1c70f4fe77a9d 100644 --- a/web/app/components/app/configuration/debug/debug-with-multiple-model/chat-item.tsx +++ b/web/app/components/app/configuration/debug/debug-with-multiple-model/chat-item.tsx @@ -46,6 +46,7 @@ const ChatItem: FC = ({ const config = useConfigFromDebugContext() const { chatList, + chatListRef, isResponding, handleSend, suggestedQuestions, @@ -80,6 +81,7 @@ const ChatItem: FC = ({ query: message, inputs, model_config: configData, + parent_message_id: chatListRef.current.at(-1)?.id || null, } if (visionConfig.enabled && files?.length && supportVision) @@ -93,7 +95,7 @@ const ChatItem: FC = ({ onGetSuggestedQuestions: (responseItemId, getAbortController) => 
fetchSuggestedQuestions(appId, responseItemId, getAbortController), }, ) - }, [appId, config, handleSend, inputs, modelAndParameter, textGenerationModelList, visionConfig.enabled]) + }, [appId, config, handleSend, inputs, modelAndParameter, textGenerationModelList, visionConfig.enabled, chatListRef]) const { eventEmitter } = useEventEmitterContextContext() eventEmitter?.useSubscription((v: any) => { diff --git a/web/app/components/app/configuration/debug/debug-with-single-model/index.tsx b/web/app/components/app/configuration/debug/debug-with-single-model/index.tsx index d93ad00659e47..80e7c98a8f0c9 100644 --- a/web/app/components/app/configuration/debug/debug-with-single-model/index.tsx +++ b/web/app/components/app/configuration/debug/debug-with-single-model/index.tsx @@ -12,7 +12,7 @@ import { import Chat from '@/app/components/base/chat/chat' import { useChat } from '@/app/components/base/chat/chat/hooks' import { useDebugConfigurationContext } from '@/context/debug-configuration' -import type { OnSend } from '@/app/components/base/chat/types' +import type { ChatItem, OnSend } from '@/app/components/base/chat/types' import { useProviderContext } from '@/context/provider-context' import { fetchConversationMessages, @@ -45,10 +45,12 @@ const DebugWithSingleModel = forwardRef { + const doSend: OnSend = useCallback((message, files, last_answer) => { if (checkCanSend && !checkCanSend()) return const currentProvider = textGenerationModelList.find(item => item.provider === modelConfig.provider) @@ -85,6 +87,7 @@ const DebugWithSingleModel = forwardRef fetchSuggestedQuestions(appId, responseItemId, getAbortController), }, ) - }, [appId, checkCanSend, completionParams, config, handleSend, inputs, modelConfig, textGenerationModelList, visionConfig.enabled]) + }, [chatListRef, appId, checkCanSend, completionParams, config, handleSend, inputs, modelConfig, textGenerationModelList, visionConfig.enabled]) + + const doRegenerate = useCallback((chatItem: ChatItem) => { + const 
index = chatList.findIndex(item => item.id === chatItem.id) + if (index === -1) + return + + const prevMessages = chatList.slice(0, index) + const question = prevMessages.pop() + const lastAnswer = prevMessages.at(-1) + + if (!question) + return + + handleUpdateChatList(prevMessages) + doSend(question.content, question.message_files, (!lastAnswer || lastAnswer.isOpeningStatement) ? undefined : lastAnswer) + }, [chatList, handleUpdateChatList, doSend]) const allToolIcons = useMemo(() => { const icons: Record = {} @@ -123,6 +142,7 @@ const DebugWithSingleModel = forwardRef} diff --git a/web/app/components/app/log/list.tsx b/web/app/components/app/log/list.tsx index caec10c4f714b..149e877fa4ac2 100644 --- a/web/app/components/app/log/list.tsx +++ b/web/app/components/app/log/list.tsx @@ -16,6 +16,7 @@ import timezone from 'dayjs/plugin/timezone' import { createContext, useContext } from 'use-context-selector' import { useShallow } from 'zustand/react/shallow' import { useTranslation } from 'react-i18next' +import { UUID_NIL } from '../../base/chat/constants' import s from './style.module.css' import VarPanel from './var-panel' import cn from '@/utils/classnames' @@ -81,72 +82,92 @@ const PARAM_MAP = { frequency_penalty: 'Frequency Penalty', } -// Format interface data for easy display -const getFormattedChatList = (messages: ChatMessage[], conversationId: string, timezone: string, format: string) => { - const newChatList: IChatItem[] = [] - messages.forEach((item: ChatMessage) => { - newChatList.push({ - id: `question-${item.id}`, - content: item.inputs.query || item.inputs.default_input || item.query, // text generation: item.inputs.query; chat: item.query - isAnswer: false, - message_files: item.message_files?.filter((file: any) => file.belongs_to === 'user') || [], - }) - newChatList.push({ - id: item.id, - content: item.answer, - agent_thoughts: addFileInfos(item.agent_thoughts ? 
sortAgentSorts(item.agent_thoughts) : item.agent_thoughts, item.message_files), - feedback: item.feedbacks.find(item => item.from_source === 'user'), // user feedback - adminFeedback: item.feedbacks.find(item => item.from_source === 'admin'), // admin feedback - feedbackDisabled: false, - isAnswer: true, - message_files: item.message_files?.filter((file: any) => file.belongs_to === 'assistant') || [], - log: [ - ...item.message, - ...(item.message[item.message.length - 1]?.role !== 'assistant' - ? [ - { - role: 'assistant', - text: item.answer, - files: item.message_files?.filter((file: any) => file.belongs_to === 'assistant') || [], - }, - ] - : []), - ], - workflow_run_id: item.workflow_run_id, - conversationId, - input: { - inputs: item.inputs, - query: item.query, - }, - more: { - time: dayjs.unix(item.created_at).tz(timezone).format(format), - tokens: item.answer_tokens + item.message_tokens, - latency: item.provider_response_latency.toFixed(2), - }, - citation: item.metadata?.retriever_resources, - annotation: (() => { - if (item.annotation_hit_history) { - return { - id: item.annotation_hit_history.annotation_id, - authorName: item.annotation_hit_history.annotation_create_account?.name || 'N/A', - created_at: item.annotation_hit_history.created_at, - } +function appendQAToChatList(newChatList: IChatItem[], item: any, conversationId: string, timezone: string, format: string) { + newChatList.push({ + id: item.id, + content: item.answer, + agent_thoughts: addFileInfos(item.agent_thoughts ? 
sortAgentSorts(item.agent_thoughts) : item.agent_thoughts, item.message_files), + feedback: item.feedbacks.find((item: any) => item.from_source === 'user'), // user feedback + adminFeedback: item.feedbacks.find((item: any) => item.from_source === 'admin'), // admin feedback + feedbackDisabled: false, + isAnswer: true, + message_files: item.message_files?.filter((file: any) => file.belongs_to === 'assistant') || [], + log: [ + ...item.message, + ...(item.message[item.message.length - 1]?.role !== 'assistant' + ? [ + { + role: 'assistant', + text: item.answer, + files: item.message_files?.filter((file: any) => file.belongs_to === 'assistant') || [], + }, + ] + : []), + ], + workflow_run_id: item.workflow_run_id, + conversationId, + input: { + inputs: item.inputs, + query: item.query, + }, + more: { + time: dayjs.unix(item.created_at).tz(timezone).format(format), + tokens: item.answer_tokens + item.message_tokens, + latency: item.provider_response_latency.toFixed(2), + }, + citation: item.metadata?.retriever_resources, + annotation: (() => { + if (item.annotation_hit_history) { + return { + id: item.annotation_hit_history.annotation_id, + authorName: item.annotation_hit_history.annotation_create_account?.name || 'N/A', + created_at: item.annotation_hit_history.created_at, } + } - if (item.annotation) { - return { - id: item.annotation.id, - authorName: item.annotation.account.name, - logAnnotation: item.annotation, - created_at: 0, - } + if (item.annotation) { + return { + id: item.annotation.id, + authorName: item.annotation.account.name, + logAnnotation: item.annotation, + created_at: 0, } + } - return undefined - })(), - }) + return undefined + })(), + parentMessageId: `question-${item.id}`, }) - return newChatList + newChatList.push({ + id: `question-${item.id}`, + content: item.inputs.query || item.inputs.default_input || item.query, // text generation: item.inputs.query; chat: item.query + isAnswer: false, + message_files: item.message_files?.filter((file: any) 
=> file.belongs_to === 'user') || [], + parentMessageId: item.parent_message_id || undefined, + }) +} + +const getFormattedChatList = (messages: ChatMessage[], conversationId: string, timezone: string, format: string) => { + const newChatList: IChatItem[] = [] + let nextMessageId = null + for (const item of messages) { + if (!item.parent_message_id) { + appendQAToChatList(newChatList, item, conversationId, timezone, format) + break + } + + if (!nextMessageId) { + appendQAToChatList(newChatList, item, conversationId, timezone, format) + nextMessageId = item.parent_message_id + } + else { + if (item.id === nextMessageId || nextMessageId === UUID_NIL) { + appendQAToChatList(newChatList, item, conversationId, timezone, format) + nextMessageId = item.parent_message_id + } + } + } + return newChatList.reverse() } // const displayedParams = CompletionParams.slice(0, -2) @@ -171,6 +192,7 @@ function DetailPanel([]) + const fetchedMessages = useRef([]) const [hasMore, setHasMore] = useState(true) const [varValues, setVarValues] = useState>({}) const fetchData = async () => { @@ -192,7 +214,8 @@ function DetailPanel - : items.length < 8 + : (items.length < 8 && !hasMore) ?
{ }, [appParams, currentConversationItem?.introduction, currentConversationId]) const { chatList, + chatListRef, + handleUpdateChatList, handleSend, handleStop, isResponding, @@ -63,11 +66,12 @@ const ChatWrapper = () => { currentChatInstanceRef.current.handleStop = handleStop }, []) - const doSend: OnSend = useCallback((message, files) => { + const doSend: OnSend = useCallback((message, files, last_answer) => { const data: any = { query: message, inputs: currentConversationId ? currentConversationItem?.inputs : newConversationInputs, conversation_id: currentConversationId, + parent_message_id: last_answer?.id || chatListRef.current.at(-1)?.id || null, } if (appConfig?.file_upload?.image.enabled && files?.length) @@ -83,6 +87,7 @@ const ChatWrapper = () => { }, ) }, [ + chatListRef, appConfig, currentConversationId, currentConversationItem, @@ -92,6 +97,23 @@ const ChatWrapper = () => { isInstalledApp, appId, ]) + + const doRegenerate = useCallback((chatItem: ChatItem) => { + const index = chatList.findIndex(item => item.id === chatItem.id) + if (index === -1) + return + + const prevMessages = chatList.slice(0, index) + const question = prevMessages.pop() + const lastAnswer = prevMessages.at(-1) + + if (!question) + return + + handleUpdateChatList(prevMessages) + doSend(question.content, question.message_files, (!lastAnswer || lastAnswer.isOpeningStatement) ? 
undefined : lastAnswer) + }, [chatList, handleUpdateChatList, doSend]) + const chatNode = useMemo(() => { if (inputsForms.length) { return ( @@ -148,6 +170,7 @@ const ChatWrapper = () => { chatFooterClassName='pb-4' chatFooterInnerClassName={`mx-auto w-full max-w-full ${isMobile && 'px-4'}`} onSend={doSend} + onRegenerate={doRegenerate} onStopResponding={handleStop} chatNode={chatNode} allToolIcons={appMeta?.tool_icons || {}} diff --git a/web/app/components/base/chat/chat-with-history/hooks.tsx b/web/app/components/base/chat/chat-with-history/hooks.tsx index 1e05cc39ef5dc..b9ebc42ec8889 100644 --- a/web/app/components/base/chat/chat-with-history/hooks.tsx +++ b/web/app/components/base/chat/chat-with-history/hooks.tsx @@ -12,10 +12,10 @@ import produce from 'immer' import type { Callback, ChatConfig, - ChatItem, Feedback, } from '../types' import { CONVERSATION_ID_INFO } from '../constants' +import { getPrevChatList } from '../utils' import { delConversation, fetchAppInfo, @@ -34,7 +34,6 @@ import type { AppData, ConversationItem, } from '@/models/share' -import { addFileInfos, sortAgentSorts } from '@/app/components/tools/utils' import { useToastContext } from '@/app/components/base/toast' import { changeLanguage } from '@/i18n/i18next-config' import { useAppFavicon } from '@/hooks/use-app-favicon' @@ -108,32 +107,12 @@ export const useChatWithHistory = (installedAppInfo?: InstalledApp) => { const { data: appConversationData, isLoading: appConversationDataLoading, mutate: mutateAppConversationData } = useSWR(['appConversationData', isInstalledApp, appId, false], () => fetchConversations(isInstalledApp, appId, undefined, false, 100)) const { data: appChatListData, isLoading: appChatListDataLoading } = useSWR(chatShouldReloadKey ? 
['appChatList', chatShouldReloadKey, isInstalledApp, appId] : null, () => fetchChatList(chatShouldReloadKey, isInstalledApp, appId)) - const appPrevChatList = useMemo(() => { - const data = appChatListData?.data || [] - const chatList: ChatItem[] = [] - - if (currentConversationId && data.length) { - data.forEach((item: any) => { - chatList.push({ - id: `question-${item.id}`, - content: item.query, - isAnswer: false, - message_files: item.message_files?.filter((file: any) => file.belongs_to === 'user') || [], - }) - chatList.push({ - id: item.id, - content: item.answer, - agent_thoughts: addFileInfos(item.agent_thoughts ? sortAgentSorts(item.agent_thoughts) : item.agent_thoughts, item.message_files), - feedback: item.feedback, - isAnswer: true, - citation: item.retriever_resources, - message_files: item.message_files?.filter((file: any) => file.belongs_to === 'assistant') || [], - }) - }) - } - - return chatList - }, [appChatListData, currentConversationId]) + const appPrevChatList = useMemo( + () => (currentConversationId && appChatListData?.data.length) + ? 
getPrevChatList(appChatListData.data) + : [], + [appChatListData, currentConversationId], + ) const [showNewConversationItemInList, setShowNewConversationItemInList] = useState(false) diff --git a/web/app/components/base/chat/chat/answer/index.tsx b/web/app/components/base/chat/chat/answer/index.tsx index 5fe2a7bad5107..705cd73ddf18d 100644 --- a/web/app/components/base/chat/chat/answer/index.tsx +++ b/web/app/components/base/chat/chat/answer/index.tsx @@ -35,6 +35,7 @@ type AnswerProps = { chatAnswerContainerInner?: string hideProcessDetail?: boolean appData?: AppData + noChatInput?: boolean } const Answer: FC = ({ item, @@ -48,6 +49,7 @@ const Answer: FC = ({ chatAnswerContainerInner, hideProcessDetail, appData, + noChatInput, }) => { const { t } = useTranslation() const { @@ -110,6 +112,7 @@ const Answer: FC = ({ question={question} index={index} showPromptLog={showPromptLog} + noChatInput={noChatInput} /> ) } diff --git a/web/app/components/base/chat/chat/answer/operation.tsx b/web/app/components/base/chat/chat/answer/operation.tsx index 08267bb09cd5c..5e5fc3b204641 100644 --- a/web/app/components/base/chat/chat/answer/operation.tsx +++ b/web/app/components/base/chat/chat/answer/operation.tsx @@ -7,6 +7,7 @@ import { import { useTranslation } from 'react-i18next' import type { ChatItem } from '../../types' import { useChatContext } from '../context' +import RegenerateBtn from '@/app/components/base/regenerate-btn' import cn from '@/utils/classnames' import CopyBtn from '@/app/components/base/copy-btn' import { MessageFast } from '@/app/components/base/icons/src/vender/solid/communication' @@ -28,6 +29,7 @@ type OperationProps = { maxSize: number contentWidth: number hasWorkflowProcess: boolean + noChatInput?: boolean } const Operation: FC = ({ item, @@ -37,6 +39,7 @@ const Operation: FC = ({ maxSize, contentWidth, hasWorkflowProcess, + noChatInput, }) => { const { t } = useTranslation() const { @@ -45,6 +48,7 @@ const Operation: FC = ({ onAnnotationEdited, 
onAnnotationRemoved, onFeedback, + onRegenerate, } = useChatContext() const [isShowReplyModal, setIsShowReplyModal] = useState(false) const { @@ -159,12 +163,13 @@ const Operation: FC = ({
) } + { + !isOpeningStatement && !noChatInput && onRegenerate?.(item)} /> + } { config?.supportFeedback && !localFeedback?.rating && onFeedback && !isOpeningStatement && ( -
- +
+
handleFeedback('like')} diff --git a/web/app/components/base/chat/chat/context.tsx b/web/app/components/base/chat/chat/context.tsx index ba6f67189e1cc..c47b7501762e1 100644 --- a/web/app/components/base/chat/chat/context.tsx +++ b/web/app/components/base/chat/chat/context.tsx @@ -12,6 +12,7 @@ export type ChatContextValue = Pick void noChatInput?: boolean onSend?: OnSend + onRegenerate?: OnRegenerate chatContainerClassName?: string chatContainerInnerClassName?: string chatFooterClassName?: string @@ -67,6 +69,7 @@ const Chat: FC = ({ appData, config, onSend, + onRegenerate, chatList, isResponding, noStopResponding, @@ -186,6 +189,7 @@ const Chat: FC = ({ answerIcon={answerIcon} allToolIcons={allToolIcons} onSend={onSend} + onRegenerate={onRegenerate} onAnnotationAdded={onAnnotationAdded} onAnnotationEdited={onAnnotationEdited} onAnnotationRemoved={onAnnotationRemoved} @@ -219,6 +223,7 @@ const Chat: FC = ({ showPromptLog={showPromptLog} chatAnswerContainerInner={chatAnswerContainerInner} hideProcessDetail={hideProcessDetail} + noChatInput={noChatInput} /> ) } diff --git a/web/app/components/base/chat/chat/type.ts b/web/app/components/base/chat/chat/type.ts index b2cb18011c692..dd26a4179d4fb 100644 --- a/web/app/components/base/chat/chat/type.ts +++ b/web/app/components/base/chat/chat/type.ts @@ -95,6 +95,7 @@ export type IChatItem = { // for agent log conversationId?: string input?: any + parentMessageId?: string } export type Metadata = { diff --git a/web/app/components/base/chat/constants.ts b/web/app/components/base/chat/constants.ts index 8249be7375d2e..309f0f04a716b 100644 --- a/web/app/components/base/chat/constants.ts +++ b/web/app/components/base/chat/constants.ts @@ -1 +1,2 @@ export const CONVERSATION_ID_INFO = 'conversationIdInfo' +export const UUID_NIL = '00000000-0000-0000-0000-000000000000' diff --git a/web/app/components/base/chat/embedded-chatbot/chat-wrapper.tsx b/web/app/components/base/chat/embedded-chatbot/chat-wrapper.tsx index 
48ee4110584ae..8cb546fd52a0e 100644 --- a/web/app/components/base/chat/embedded-chatbot/chat-wrapper.tsx +++ b/web/app/components/base/chat/embedded-chatbot/chat-wrapper.tsx @@ -2,6 +2,7 @@ import { useCallback, useEffect, useMemo } from 'react' import Chat from '../chat' import type { ChatConfig, + ChatItem, OnSend, } from '../types' import { useChat } from '../chat/hooks' @@ -45,11 +46,13 @@ const ChatWrapper = () => { } as ChatConfig }, [appParams, currentConversationItem?.introduction, currentConversationId]) const { + chatListRef, chatList, handleSend, handleStop, isResponding, suggestedQuestions, + handleUpdateChatList, } = useChat( appConfig, { @@ -65,11 +68,12 @@ const ChatWrapper = () => { currentChatInstanceRef.current.handleStop = handleStop }, []) - const doSend: OnSend = useCallback((message, files) => { + const doSend: OnSend = useCallback((message, files, last_answer) => { const data: any = { query: message, inputs: currentConversationId ? currentConversationItem?.inputs : newConversationInputs, conversation_id: currentConversationId, + parent_message_id: last_answer?.id || chatListRef.current.at(-1)?.id || null, } if (appConfig?.file_upload?.image.enabled && files?.length) @@ -85,6 +89,7 @@ const ChatWrapper = () => { }, ) }, [ + chatListRef, appConfig, currentConversationId, currentConversationItem, @@ -94,6 +99,23 @@ const ChatWrapper = () => { isInstalledApp, appId, ]) + + const doRegenerate = useCallback((chatItem: ChatItem) => { + const index = chatList.findIndex(item => item.id === chatItem.id) + if (index === -1) + return + + const prevMessages = chatList.slice(0, index) + const question = prevMessages.pop() + const lastAnswer = prevMessages.at(-1) + + if (!question) + return + + handleUpdateChatList(prevMessages) + doSend(question.content, question.message_files, (!lastAnswer || lastAnswer.isOpeningStatement) ? 
undefined : lastAnswer) + }, [chatList, handleUpdateChatList, doSend]) + const chatNode = useMemo(() => { if (inputsForms.length) { return ( @@ -136,6 +158,7 @@ const ChatWrapper = () => { chatFooterClassName='pb-4' chatFooterInnerClassName={cn('mx-auto w-full max-w-full tablet:px-4', isMobile && 'px-4')} onSend={doSend} + onRegenerate={doRegenerate} onStopResponding={handleStop} chatNode={chatNode} allToolIcons={appMeta?.tool_icons || {}} diff --git a/web/app/components/base/chat/embedded-chatbot/hooks.tsx b/web/app/components/base/chat/embedded-chatbot/hooks.tsx index 39d25f57d194e..fd89efcbff3ab 100644 --- a/web/app/components/base/chat/embedded-chatbot/hooks.tsx +++ b/web/app/components/base/chat/embedded-chatbot/hooks.tsx @@ -11,10 +11,10 @@ import { useLocalStorageState } from 'ahooks' import produce from 'immer' import type { ChatConfig, - ChatItem, Feedback, } from '../types' import { CONVERSATION_ID_INFO } from '../constants' +import { getPrevChatList, getProcessedInputsFromUrlParams } from '../utils' import { fetchAppInfo, fetchAppMeta, @@ -28,10 +28,8 @@ import type { // AppData, ConversationItem, } from '@/models/share' -import { addFileInfos, sortAgentSorts } from '@/app/components/tools/utils' import { useToastContext } from '@/app/components/base/toast' import { changeLanguage } from '@/i18n/i18next-config' -import { getProcessedInputsFromUrlParams } from '@/app/components/base/chat/utils' export const useEmbeddedChatbot = () => { const isInstalledApp = false @@ -75,32 +73,12 @@ export const useEmbeddedChatbot = () => { const { data: appConversationData, isLoading: appConversationDataLoading, mutate: mutateAppConversationData } = useSWR(['appConversationData', isInstalledApp, appId, false], () => fetchConversations(isInstalledApp, appId, undefined, false, 100)) const { data: appChatListData, isLoading: appChatListDataLoading } = useSWR(chatShouldReloadKey ? 
['appChatList', chatShouldReloadKey, isInstalledApp, appId] : null, () => fetchChatList(chatShouldReloadKey, isInstalledApp, appId)) - const appPrevChatList = useMemo(() => { - const data = appChatListData?.data || [] - const chatList: ChatItem[] = [] - - if (currentConversationId && data.length) { - data.forEach((item: any) => { - chatList.push({ - id: `question-${item.id}`, - content: item.query, - isAnswer: false, - message_files: item.message_files?.filter((file: any) => file.belongs_to === 'user') || [], - }) - chatList.push({ - id: item.id, - content: item.answer, - agent_thoughts: addFileInfos(item.agent_thoughts ? sortAgentSorts(item.agent_thoughts) : item.agent_thoughts, item.message_files), - feedback: item.feedback, - isAnswer: true, - citation: item.retriever_resources, - message_files: item.message_files?.filter((file: any) => file.belongs_to === 'assistant') || [], - }) - }) - } - - return chatList - }, [appChatListData, currentConversationId]) + const appPrevChatList = useMemo( + () => (currentConversationId && appChatListData?.data.length) + ? 
getPrevChatList(appChatListData.data) + : [], + [appChatListData, currentConversationId], + ) const [showNewConversationItemInList, setShowNewConversationItemInList] = useState(false) @@ -155,7 +133,7 @@ export const useEmbeddedChatbot = () => { type: 'text-input', } }) - }, [appParams]) + }, [initInputs, appParams]) useEffect(() => { // init inputs from url params diff --git a/web/app/components/base/chat/types.ts b/web/app/components/base/chat/types.ts index 21277fec57099..489dbb44cf67e 100644 --- a/web/app/components/base/chat/types.ts +++ b/web/app/components/base/chat/types.ts @@ -63,7 +63,9 @@ export type ChatItem = IChatItem & { conversationId?: string } -export type OnSend = (message: string, files?: VisionFile[]) => void +export type OnSend = (message: string, files?: VisionFile[], last_answer?: ChatItem) => void + +export type OnRegenerate = (chatItem: ChatItem) => void export type Callback = { onSuccess: () => void diff --git a/web/app/components/base/chat/utils.ts b/web/app/components/base/chat/utils.ts index 3fe5050cc7b34..e851c4c4633fb 100644 --- a/web/app/components/base/chat/utils.ts +++ b/web/app/components/base/chat/utils.ts @@ -1,7 +1,11 @@ +import { addFileInfos, sortAgentSorts } from '../../tools/utils' +import { UUID_NIL } from './constants' +import type { ChatItem } from './types' + async function decodeBase64AndDecompress(base64String: string) { const binaryString = atob(base64String) const compressedUint8Array = Uint8Array.from(binaryString, char => char.charCodeAt(0)) - const decompressedStream = new Response(compressedUint8Array).body.pipeThrough(new DecompressionStream('gzip')) + const decompressedStream = new Response(compressedUint8Array).body?.pipeThrough(new DecompressionStream('gzip')) const decompressedArrayBuffer = await new Response(decompressedStream).arrayBuffer() return new TextDecoder().decode(decompressedArrayBuffer) } @@ -15,6 +19,57 @@ function getProcessedInputsFromUrlParams(): Record { return inputs } +function 
appendQAToChatList(chatList: ChatItem[], item: any) { + // we append answer first and then question since will reverse the whole chatList later + chatList.push({ + id: item.id, + content: item.answer, + agent_thoughts: addFileInfos(item.agent_thoughts ? sortAgentSorts(item.agent_thoughts) : item.agent_thoughts, item.message_files), + feedback: item.feedback, + isAnswer: true, + citation: item.retriever_resources, + message_files: item.message_files?.filter((file: any) => file.belongs_to === 'assistant') || [], + }) + chatList.push({ + id: `question-${item.id}`, + content: item.query, + isAnswer: false, + message_files: item.message_files?.filter((file: any) => file.belongs_to === 'user') || [], + }) +} + +/** + * Computes the latest thread messages from all messages of the conversation. + * Same logic as backend codebase `api/core/prompt/utils/extract_thread_messages.py` + * + * @param fetchedMessages - The history chat list data from the backend, sorted by created_at in descending order. This includes all flattened history messages of the conversation. + * @returns An array of ChatItems representing the latest thread. 
+ */ +function getPrevChatList(fetchedMessages: any[]) { + const ret: ChatItem[] = [] + let nextMessageId = null + + for (const item of fetchedMessages) { + if (!item.parent_message_id) { + appendQAToChatList(ret, item) + break + } + + if (!nextMessageId) { + appendQAToChatList(ret, item) + nextMessageId = item.parent_message_id + } + else { + if (item.id === nextMessageId || nextMessageId === UUID_NIL) { + appendQAToChatList(ret, item) + nextMessageId = item.parent_message_id + } + } + } + return ret.reverse() +} + export { getProcessedInputsFromUrlParams, + getPrevChatList, } diff --git a/web/app/components/base/icons/assets/vender/line/general/refresh.svg b/web/app/components/base/icons/assets/vender/line/general/refresh.svg new file mode 100644 index 0000000000000..05cf98682734a --- /dev/null +++ b/web/app/components/base/icons/assets/vender/line/general/refresh.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/web/app/components/base/icons/src/vender/line/general/Refresh.json b/web/app/components/base/icons/src/vender/line/general/Refresh.json new file mode 100644 index 0000000000000..128dcb7d4d713 --- /dev/null +++ b/web/app/components/base/icons/src/vender/line/general/Refresh.json @@ -0,0 +1,23 @@ +{ + "icon": { + "type": "element", + "isRootNode": true, + "name": "svg", + "attributes": { + "xmlns": "http://www.w3.org/2000/svg", + "viewBox": "0 0 24 24", + "fill": "currentColor" + }, + "children": [ + { + "type": "element", + "name": "path", + "attributes": { + "d": "M5.46257 4.43262C7.21556 2.91688 9.5007 2 12 2C17.5228 2 22 6.47715 22 12C22 14.1361 21.3302 16.1158 20.1892 17.7406L17 12H20C20 7.58172 16.4183 4 12 4C9.84982 4 7.89777 4.84827 6.46023 6.22842L5.46257 4.43262ZM18.5374 19.5674C16.7844 21.0831 14.4993 22 12 22C6.47715 22 2 17.5228 2 12C2 9.86386 2.66979 7.88416 3.8108 6.25944L7 12H4C4 16.4183 7.58172 20 12 20C14.1502 20 16.1022 19.1517 17.5398 17.7716L18.5374 19.5674Z" + }, + "children": [] + } + ] + }, + "name": "Refresh" +} \ No 
newline at end of file diff --git a/web/app/components/base/icons/src/vender/line/general/Refresh.tsx b/web/app/components/base/icons/src/vender/line/general/Refresh.tsx new file mode 100644 index 0000000000000..96641f1c4243b --- /dev/null +++ b/web/app/components/base/icons/src/vender/line/general/Refresh.tsx @@ -0,0 +1,16 @@ +// GENERATE BY script +// DON NOT EDIT IT MANUALLY + +import * as React from 'react' +import data from './Refresh.json' +import IconBase from '@/app/components/base/icons/IconBase' +import type { IconBaseProps, IconData } from '@/app/components/base/icons/IconBase' + +const Icon = React.forwardRef, Omit>(( + props, + ref, +) => ) + +Icon.displayName = 'Refresh' + +export default Icon diff --git a/web/app/components/base/icons/src/vender/line/general/index.ts b/web/app/components/base/icons/src/vender/line/general/index.ts index c1af2e49948a4..b5c7a7bbc1d9a 100644 --- a/web/app/components/base/icons/src/vender/line/general/index.ts +++ b/web/app/components/base/icons/src/vender/line/general/index.ts @@ -18,6 +18,7 @@ export { default as Menu01 } from './Menu01' export { default as Pin01 } from './Pin01' export { default as Pin02 } from './Pin02' export { default as Plus02 } from './Plus02' +export { default as Refresh } from './Refresh' export { default as Settings01 } from './Settings01' export { default as Settings04 } from './Settings04' export { default as Target04 } from './Target04' diff --git a/web/app/components/base/regenerate-btn/index.tsx b/web/app/components/base/regenerate-btn/index.tsx new file mode 100644 index 0000000000000..aaf0206df609d --- /dev/null +++ b/web/app/components/base/regenerate-btn/index.tsx @@ -0,0 +1,31 @@ +'use client' +import { t } from 'i18next' +import { Refresh } from '../icons/src/vender/line/general' +import Tooltip from '@/app/components/base/tooltip' + +type Props = { + className?: string + onClick?: () => void +} + +const RegenerateBtn = ({ className, onClick }: Props) => { + return ( +
+ +
onClick?.()} + style={{ + boxShadow: '0px 4px 8px -2px rgba(16, 24, 40, 0.1), 0px 2px 4px -2px rgba(16, 24, 40, 0.06)', + }} + > + +
+
+
+ ) +} + +export default RegenerateBtn diff --git a/web/app/components/workflow/panel/chat-record/index.tsx b/web/app/components/workflow/panel/chat-record/index.tsx index afd20b7358670..1bcfd6474d7c8 100644 --- a/web/app/components/workflow/panel/chat-record/index.tsx +++ b/web/app/components/workflow/panel/chat-record/index.tsx @@ -2,7 +2,6 @@ import { memo, useCallback, useEffect, - useMemo, useState, } from 'react' import { RiCloseLine } from '@remixicon/react' @@ -17,50 +16,70 @@ import type { ChatItem } from '@/app/components/base/chat/types' import { fetchConversationMessages } from '@/service/debug' import { useStore as useAppStore } from '@/app/components/app/store' import Loading from '@/app/components/base/loading' +import { UUID_NIL } from '@/app/components/base/chat/constants' + +function appendQAToChatList(newChatList: ChatItem[], item: any) { + newChatList.push({ + id: item.id, + content: item.answer, + feedback: item.feedback, + isAnswer: true, + citation: item.metadata?.retriever_resources, + message_files: item.message_files?.filter((file: any) => file.belongs_to === 'assistant') || [], + workflow_run_id: item.workflow_run_id, + }) + newChatList.push({ + id: `question-${item.id}`, + content: item.query, + isAnswer: false, + message_files: item.message_files?.filter((file: any) => file.belongs_to === 'user') || [], + }) +} + +function getFormattedChatList(messages: any[]) { + const newChatList: ChatItem[] = [] + let nextMessageId = null + for (const item of messages) { + if (!item.parent_message_id) { + appendQAToChatList(newChatList, item) + break + } + + if (!nextMessageId) { + appendQAToChatList(newChatList, item) + nextMessageId = item.parent_message_id + } + else { + if (item.id === nextMessageId || nextMessageId === UUID_NIL) { + appendQAToChatList(newChatList, item) + nextMessageId = item.parent_message_id + } + } + } + return newChatList.reverse() +} const ChatRecord = () => { const [fetched, setFetched] = useState(false) - const [chatList, 
setChatList] = useState([]) + const [chatList, setChatList] = useState([]) const appDetail = useAppStore(s => s.appDetail) const workflowStore = useWorkflowStore() const { handleLoadBackupDraft } = useWorkflowRun() const historyWorkflowData = useStore(s => s.historyWorkflowData) const currentConversationID = historyWorkflowData?.conversation_id - const chatMessageList = useMemo(() => { - const res: ChatItem[] = [] - if (chatList.length) { - chatList.forEach((item: any) => { - res.push({ - id: `question-${item.id}`, - content: item.query, - isAnswer: false, - message_files: item.message_files?.filter((file: any) => file.belongs_to === 'user') || [], - }) - res.push({ - id: item.id, - content: item.answer, - feedback: item.feedback, - isAnswer: true, - citation: item.metadata?.retriever_resources, - message_files: item.message_files?.filter((file: any) => file.belongs_to === 'assistant') || [], - workflow_run_id: item.workflow_run_id, - }) - }) - } - return res - }, [chatList]) - const handleFetchConversationMessages = useCallback(async () => { if (appDetail && currentConversationID) { try { setFetched(false) const res = await fetchConversationMessages(appDetail.id, currentConversationID) - setFetched(true) - setChatList((res as any).data) + setChatList(getFormattedChatList((res as any).data)) } catch (e) { - + console.error(e) + } + finally { + setFetched(true) } } }, [appDetail, currentConversationID]) @@ -101,7 +120,7 @@ const ChatRecord = () => { config={{ supportCitationHitInfo: true, } as any} - chatList={chatMessageList} + chatList={chatList} chatContainerClassName='px-4' chatContainerInnerClassName='pt-6 w-full max-w-full mx-auto' chatFooterClassName='px-4 rounded-b-2xl' diff --git a/web/app/components/workflow/panel/debug-and-preview/chat-wrapper.tsx b/web/app/components/workflow/panel/debug-and-preview/chat-wrapper.tsx index a7dd607e221ea..107a5dc698f74 100644 --- a/web/app/components/workflow/panel/debug-and-preview/chat-wrapper.tsx +++ 
b/web/app/components/workflow/panel/debug-and-preview/chat-wrapper.tsx @@ -18,7 +18,7 @@ import ConversationVariableModal from './conversation-variable-modal' import { useChat } from './hooks' import type { ChatWrapperRefType } from './index' import Chat from '@/app/components/base/chat/chat' -import type { OnSend } from '@/app/components/base/chat/types' +import type { ChatItem, OnSend } from '@/app/components/base/chat/types' import { useFeaturesStore } from '@/app/components/base/features/hooks' import { fetchSuggestedQuestions, @@ -58,6 +58,8 @@ const ChatWrapper = forwardRef(({ showConv const { conversationId, chatList, + chatListRef, + handleUpdateChatList, handleStop, isResponding, suggestedQuestions, @@ -73,19 +75,36 @@ const ChatWrapper = forwardRef(({ showConv taskId => stopChatMessageResponding(appDetail!.id, taskId), ) - const doSend = useCallback((query, files) => { + const doSend = useCallback((query, files, last_answer) => { handleSend( { query, files, inputs: workflowStore.getState().inputs, conversation_id: conversationId, + parent_message_id: last_answer?.id || chatListRef.current.at(-1)?.id || null, }, { onGetSuggestedQuestions: (messageId, getAbortController) => fetchSuggestedQuestions(appDetail!.id, messageId, getAbortController), }, ) - }, [conversationId, handleSend, workflowStore, appDetail]) + }, [chatListRef, conversationId, handleSend, workflowStore, appDetail]) + + const doRegenerate = useCallback((chatItem: ChatItem) => { + const index = chatList.findIndex(item => item.id === chatItem.id) + if (index === -1) + return + + const prevMessages = chatList.slice(0, index) + const question = prevMessages.pop() + const lastAnswer = prevMessages.at(-1) + + if (!question) + return + + handleUpdateChatList(prevMessages) + doSend(question.content, question.message_files, (!lastAnswer || lastAnswer.isOpeningStatement) ? 
undefined : lastAnswer) + }, [chatList, handleUpdateChatList, doSend]) useImperativeHandle(ref, () => { return { @@ -107,6 +126,7 @@ const ChatWrapper = forwardRef(({ showConv chatFooterClassName='px-4 rounded-bl-2xl' chatFooterInnerClassName='pb-4 w-full max-w-full mx-auto' onSend={doSend} + onRegenerate={doRegenerate} onStopResponding={handleStop} chatNode={( <> diff --git a/web/app/components/workflow/panel/debug-and-preview/hooks.ts b/web/app/components/workflow/panel/debug-and-preview/hooks.ts index 51a018bcb15b4..cad76a4490c85 100644 --- a/web/app/components/workflow/panel/debug-and-preview/hooks.ts +++ b/web/app/components/workflow/panel/debug-and-preview/hooks.ts @@ -387,6 +387,8 @@ export const useChat = ( return { conversationId: conversationId.current, chatList, + chatListRef, + handleUpdateChatList, handleSend, handleStop, handleRestart, diff --git a/web/hooks/use-app-favicon.ts b/web/hooks/use-app-favicon.ts index 86eadc1b3d086..1ff743928faae 100644 --- a/web/hooks/use-app-favicon.ts +++ b/web/hooks/use-app-favicon.ts @@ -5,10 +5,10 @@ import type { AppIconType } from '@/types/app' type UseAppFaviconOptions = { enable?: boolean - icon_type?: AppIconType + icon_type?: AppIconType | null icon?: string - icon_background?: string - icon_url?: string + icon_background?: string | null + icon_url?: string | null } export function useAppFavicon(options: UseAppFaviconOptions) { diff --git a/web/i18n/en-US/app-api.ts b/web/i18n/en-US/app-api.ts index 631faeee9a5e7..355ff30602736 100644 --- a/web/i18n/en-US/app-api.ts +++ b/web/i18n/en-US/app-api.ts @@ -6,6 +6,7 @@ const translation = { ok: 'In Service', copy: 'Copy', copied: 'Copied', + regenerate: 'Regenerate', play: 'Play', pause: 'Pause', playing: 'Playing', diff --git a/web/i18n/zh-Hans/app-api.ts b/web/i18n/zh-Hans/app-api.ts index 6b9048b66efb9..a0defdab62517 100644 --- a/web/i18n/zh-Hans/app-api.ts +++ b/web/i18n/zh-Hans/app-api.ts @@ -6,6 +6,7 @@ const translation = { ok: '运行中', copy: '复制', copied: 
'已复制', + regenerate: '重新生成', play: '播放', pause: '暂停', playing: '播放中', diff --git a/web/models/log.ts b/web/models/log.ts index 8da1c4cf4e826..dc557bfe2142b 100644 --- a/web/models/log.ts +++ b/web/models/log.ts @@ -106,6 +106,7 @@ export type MessageContent = { metadata: Metadata agent_thoughts: any[] // TODO workflow_run_id: string + parent_message_id: string | null } export type CompletionConversationGeneralDetail = { From a587f0d3f14cde1c36ae5206c8eac5b87ec84738 Mon Sep 17 00:00:00 2001 From: Shota Totsuka <153569547+totsukash@users.noreply.github.com> Date: Sun, 22 Sep 2024 10:04:00 +0900 Subject: [PATCH 013/235] docs: Add Japanese documentation for tools (#8469) --- api/core/tools/README.md | 4 +- api/core/tools/README_CN.md | 4 +- api/core/tools/README_JP.md | 31 ++ api/core/tools/docs/en_US/tool_scale_out.md | 2 +- .../{zh_Hans => }/images/index/image-1.png | Bin .../{zh_Hans => }/images/index/image-2.png | Bin .../docs/{zh_Hans => }/images/index/image.png | Bin .../tools/docs/ja_JP/advanced_scale_out.md | 283 ++++++++++++++++++ api/core/tools/docs/ja_JP/tool_scale_out.md | 240 +++++++++++++++ api/core/tools/docs/zh_Hans/tool_scale_out.md | 2 +- 10 files changed, 560 insertions(+), 6 deletions(-) create mode 100644 api/core/tools/README_JP.md rename api/core/tools/docs/{zh_Hans => }/images/index/image-1.png (100%) rename api/core/tools/docs/{zh_Hans => }/images/index/image-2.png (100%) rename api/core/tools/docs/{zh_Hans => }/images/index/image.png (100%) create mode 100644 api/core/tools/docs/ja_JP/advanced_scale_out.md create mode 100644 api/core/tools/docs/ja_JP/tool_scale_out.md diff --git a/api/core/tools/README.md b/api/core/tools/README.md index c7ee81422efbd..b5d0a30d348a9 100644 --- a/api/core/tools/README.md +++ b/api/core/tools/README.md @@ -9,10 +9,10 @@ The tools provided for Agents and Workflows are currently divided into two categ - `Api-Based Tools` leverage third-party APIs for implementation. 
You don't need to code to integrate these -- simply provide interface definitions in formats like `OpenAPI` , `Swagger`, or the `OpenAI-plugin` on the front-end. ### Built-in Tool Providers -![Alt text](docs/zh_Hans/images/index/image.png) +![Alt text](docs/images/index/image.png) ### API Tool Providers -![Alt text](docs/zh_Hans/images/index/image-1.png) +![Alt text](docs/images/index/image-1.png) ## Tool Integration diff --git a/api/core/tools/README_CN.md b/api/core/tools/README_CN.md index fda5d0630ca8c..7e18441131fd7 100644 --- a/api/core/tools/README_CN.md +++ b/api/core/tools/README_CN.md @@ -12,10 +12,10 @@ - `Api-Based Tools` 基于API的工具,即通过调用第三方API实现的工具,`Api-Based Tool`不需要再额外定义,只需提供`OpenAPI` `Swagger` `OpenAI plugin`等接口文档即可。 ### 内置工具供应商 -![Alt text](docs/zh_Hans/images/index/image.png) +![Alt text](docs/images/index/image.png) ### API工具供应商 -![Alt text](docs/zh_Hans/images/index/image-1.png) +![Alt text](docs/images/index/image-1.png) ## 工具接入 为了实现更灵活更强大的功能,Tools提供了一系列的接口,帮助开发者快速构建想要的工具,本文作为开发者的入门指南,将会以[快速接入](./docs/zh_Hans/tool_scale_out.md)和[高级接入](./docs/zh_Hans/advanced_scale_out.md)两部分介绍如何接入工具。 diff --git a/api/core/tools/README_JP.md b/api/core/tools/README_JP.md new file mode 100644 index 0000000000000..39d0bf1762ad0 --- /dev/null +++ b/api/core/tools/README_JP.md @@ -0,0 +1,31 @@ +# Tools + +このモジュールは、Difyのエージェントアシスタントやワークフローで使用される組み込みツールを実装しています。このモジュールでは、フロントエンドのロジックを変更することなく、独自のツールを定義し表示することができます。この分離により、Difyの機能を容易に水平方向にスケールアウトできます。 + +## 機能紹介 + +エージェントとワークフロー向けに提供されるツールは、現在2つのカテゴリーに分類されています。 + +- `Built-in Tools`はDify内部で実装され、エージェントとワークフローで使用するためにハードコードされています。 +- `Api-Based Tools`はサードパーティのAPIを利用して実装されています。これらを統合するためのコーディングは不要で、フロントエンドで + `OpenAPI`, `Swagger`または`OpenAI-plugin`などの形式でインターフェース定義を提供するだけです。 + +### 組み込みツールプロバイダー + +![Alt text](docs/images/index/image.png) + +### APIツールプロバイダー + +![Alt text](docs/images/index/image-1.png) + +## ツールの統合 + +開発者が柔軟で強力なツールを構築できるよう、2つのガイドを提供しています。 + +### [クイック統合 👈🏻](./docs/ja_JP/tool_scale_out.md) + 
+クイック統合は、Google検索ツールの例を通じて、ツール統合の基本をすばやく理解できるようにすることを目的としています。 + +### [高度な統合 👈🏻](./docs/ja_JP/advanced_scale_out.md) + +高度な統合では、モジュールインターフェースについてより深く掘り下げ、画像生成、複数ツールの組み合わせ、異なるツール間でのパラメーター、画像、ファイルのフロー管理など、より複雑な機能の実装方法を説明します。 \ No newline at end of file diff --git a/api/core/tools/docs/en_US/tool_scale_out.md b/api/core/tools/docs/en_US/tool_scale_out.md index 121b7a5a76d22..1deaf04a47539 100644 --- a/api/core/tools/docs/en_US/tool_scale_out.md +++ b/api/core/tools/docs/en_US/tool_scale_out.md @@ -245,4 +245,4 @@ After the above steps are completed, we can see this tool on the frontend, and i Of course, because google_search needs a credential, before using it, you also need to input your credentials on the frontend. -![Alt text](../zh_Hans/images/index/image-2.png) +![Alt text](../images/index/image-2.png) diff --git a/api/core/tools/docs/zh_Hans/images/index/image-1.png b/api/core/tools/docs/images/index/image-1.png similarity index 100% rename from api/core/tools/docs/zh_Hans/images/index/image-1.png rename to api/core/tools/docs/images/index/image-1.png diff --git a/api/core/tools/docs/zh_Hans/images/index/image-2.png b/api/core/tools/docs/images/index/image-2.png similarity index 100% rename from api/core/tools/docs/zh_Hans/images/index/image-2.png rename to api/core/tools/docs/images/index/image-2.png diff --git a/api/core/tools/docs/zh_Hans/images/index/image.png b/api/core/tools/docs/images/index/image.png similarity index 100% rename from api/core/tools/docs/zh_Hans/images/index/image.png rename to api/core/tools/docs/images/index/image.png diff --git a/api/core/tools/docs/ja_JP/advanced_scale_out.md b/api/core/tools/docs/ja_JP/advanced_scale_out.md new file mode 100644 index 0000000000000..96f843354f91b --- /dev/null +++ b/api/core/tools/docs/ja_JP/advanced_scale_out.md @@ -0,0 +1,283 @@ +# 高度なツール統合 + +このガイドを始める前に、Difyのツール統合プロセスの基本を理解していることを確認してください。簡単な概要については[クイック統合](./tool_scale_out.md)をご覧ください。 + +## ツールインターフェース + 
+より複雑なツールを迅速に構築するのを支援するため、`Tool`クラスに一連のヘルパーメソッドを定義しています。 + +### メッセージの返却 + +Difyは`テキスト`、`リンク`、`画像`、`ファイルBLOB`、`JSON`などの様々なメッセージタイプをサポートしています。以下のインターフェースを通じて、異なるタイプのメッセージをLLMとユーザーに返すことができます。 + +注意:以下のインターフェースの一部のパラメータについては、後のセクションで説明します。 + +#### 画像URL +画像のURLを渡すだけで、Difyが自動的に画像をダウンロードしてユーザーに返します。 + +```python + def create_image_message(self, image: str, save_as: str = '') -> ToolInvokeMessage: + """ + create an image message + + :param image: the url of the image + :param save_as: save as + :return: the image message + """ +``` + +#### リンク +リンクを返す必要がある場合は、以下のインターフェースを使用できます。 + +```python + def create_link_message(self, link: str, save_as: str = '') -> ToolInvokeMessage: + """ + create a link message + + :param link: the url of the link + :param save_as: save as + :return: the link message + """ +``` + +#### テキスト +テキストメッセージを返す必要がある場合は、以下のインターフェースを使用できます。 + +```python + def create_text_message(self, text: str, save_as: str = '') -> ToolInvokeMessage: + """ + create a text message + + :param text: the text of the message + :param save_as: save as + :return: the text message + """ +``` + +#### ファイルBLOB +画像、音声、動画、PPT、Word、Excelなどのファイルの生データを返す必要がある場合は、以下のインターフェースを使用できます。 + +- `blob` ファイルの生データ(bytes型) +- `meta` ファイルのメタデータ。ファイルの種類が分かっている場合は、`mime_type`を渡すことをお勧めします。そうでない場合、Difyはデフォルトタイプとして`octet/stream`を使用します。 + +```python + def create_blob_message(self, blob: bytes, meta: dict = None, save_as: str = '') -> ToolInvokeMessage: + """ + create a blob message + + :param blob: the blob + :param meta: meta + :param save_as: save as + :return: the blob message + """ +``` + +#### JSON +フォーマットされたJSONを返す必要がある場合は、以下のインターフェースを使用できます。これは通常、ワークフロー内のノード間のデータ伝送に使用されますが、エージェントモードでは、ほとんどの大規模言語モデルもJSONを読み取り、理解することができます。 + +- `object` Pythonの辞書オブジェクトで、自動的にJSONにシリアライズされます。 + +```python + def create_json_message(self, object: dict) -> ToolInvokeMessage: + """ + create a json message + """ +``` + +### ショートカットツール + +大規模モデルアプリケーションでは、以下の2つの一般的なニーズがあります: +- 
まず長いテキストを事前に要約し、その要約内容をLLMに渡すことで、元のテキストが長すぎてLLMが処理できない問題を防ぐ +- ツールが取得したコンテンツがリンクである場合、Webページ情報をクロールしてからLLMに返す必要がある + +開発者がこれら2つのニーズを迅速に実装できるよう、以下の2つのショートカットツールを提供しています。 + +#### テキスト要約ツール + +このツールはuser_idと要約するテキストを入力として受け取り、要約されたテキストを返します。Difyは現在のワークスペースのデフォルトモデルを使用して長文を要約します。 + +```python + def summary(self, user_id: str, content: str) -> str: + """ + summary the content + + :param user_id: the user id + :param content: the content + :return: the summary + """ +``` + +#### Webページクローリングツール + +このツールはクロールするWebページのリンクとユーザーエージェント(空でも可)を入力として受け取り、そのWebページの情報を含む文字列を返します。`user_agent`はオプションのパラメータで、ツールを識別するために使用できます。渡さない場合、Difyはデフォルトの`user_agent`を使用します。 + +```python + def get_url(self, url: str, user_agent: str = None) -> str: + """ + get url from the crawled result + """ +``` + +### 変数プール + +`Tool`内に変数プールを導入し、ツールの実行中に生成された変数やファイルなどを保存します。これらの変数は、ツールの実行中に他のツールが使用することができます。 + +次に、`DallE3`と`Vectorizer.AI`を例に、変数プールの使用方法を紹介します。 + +- `DallE3`は画像生成ツールで、テキストに基づいて画像を生成できます。ここでは、`DallE3`にカフェのロゴを生成させます。 +- `Vectorizer.AI`はベクター画像変換ツールで、画像をベクター画像に変換できるため、画像を無限に拡大しても品質が損なわれません。ここでは、`DallE3`が生成したPNGアイコンをベクター画像に変換し、デザイナーが実際に使用できるようにします。 + +#### DallE3 +まず、DallE3を使用します。画像を作成した後、その画像を変数プールに保存します。コードは以下の通りです: + +```python +from typing import Any, Dict, List, Union +from core.tools.entities.tool_entities import ToolInvokeMessage +from core.tools.tool.builtin_tool import BuiltinTool + +from base64 import b64decode + +from openai import OpenAI + +class DallE3Tool(BuiltinTool): + def _invoke(self, + user_id: str, + tool_parameters: Dict[str, Any], + ) -> Union[ToolInvokeMessage, List[ToolInvokeMessage]]: + """ + invoke tools + """ + client = OpenAI( + api_key=self.runtime.credentials['openai_api_key'], + ) + + # prompt + prompt = tool_parameters.get('prompt', '') + if not prompt: + return self.create_text_message('Please input prompt') + + # call openapi dalle3 + response = client.images.generate( + prompt=prompt, model='dall-e-3', + size='1024x1024', n=1, style='vivid', quality='standard', + 
response_format='b64_json' + ) + + result = [] + for image in response.data: + # Save all images to the variable pool through the save_as parameter. The variable name is self.VARIABLE_KEY.IMAGE.value. If new images are generated later, they will overwrite the previous images. + result.append(self.create_blob_message(blob=b64decode(image.b64_json), + meta={ 'mime_type': 'image/png' }, + save_as=self.VARIABLE_KEY.IMAGE.value)) + + return result +``` + +ここでは画像の変数名として`self.VARIABLE_KEY.IMAGE.value`を使用していることに注意してください。開発者のツールが互いに連携できるよう、この`KEY`を定義しました。自由に使用することも、この`KEY`を使用しないこともできます。カスタムのKEYを渡すこともできます。 + +#### Vectorizer.AI +次に、Vectorizer.AIを使用して、DallE3が生成したPNGアイコンをベクター画像に変換します。ここで定義した関数を見てみましょう。コードは以下の通りです: + +```python +from core.tools.tool.builtin_tool import BuiltinTool +from core.tools.entities.tool_entities import ToolInvokeMessage, ToolParameter +from core.tools.errors import ToolProviderCredentialValidationError + +from typing import Any, Dict, List, Union +from httpx import post +from base64 import b64decode + +class VectorizerTool(BuiltinTool): + def _invoke(self, user_id: str, tool_parameters: Dict[str, Any]) + -> Union[ToolInvokeMessage, List[ToolInvokeMessage]]: + """ + Tool invocation, the image variable name needs to be passed in from here, so that we can get the image from the variable pool + """ + + + def get_runtime_parameters(self) -> List[ToolParameter]: + """ + Override the tool parameter list, we can dynamically generate the parameter list based on the actual situation in the current variable pool, so that the LLM can generate the form based on the parameter list + """ + + + def is_tool_available(self) -> bool: + """ + Whether the current tool is available, if there is no image in the current variable pool, then we don't need to display this tool, just return False here + """ +``` + +次に、これら3つの関数を実装します: + +```python +from core.tools.tool.builtin_tool import BuiltinTool +from core.tools.entities.tool_entities import ToolInvokeMessage, ToolParameter 
+from core.tools.errors import ToolProviderCredentialValidationError + +from typing import Any, Dict, List, Union +from httpx import post +from base64 import b64decode + +class VectorizerTool(BuiltinTool): + def _invoke(self, user_id: str, tool_parameters: Dict[str, Any]) + -> Union[ToolInvokeMessage, List[ToolInvokeMessage]]: + """ + invoke tools + """ + api_key_name = self.runtime.credentials.get('api_key_name', None) + api_key_value = self.runtime.credentials.get('api_key_value', None) + + if not api_key_name or not api_key_value: + raise ToolProviderCredentialValidationError('Please input api key name and value') + + # Get image_id, the definition of image_id can be found in get_runtime_parameters + image_id = tool_parameters.get('image_id', '') + if not image_id: + return self.create_text_message('Please input image id') + + # Get the image generated by DallE from the variable pool + image_binary = self.get_variable_file(self.VARIABLE_KEY.IMAGE) + if not image_binary: + return self.create_text_message('Image not found, please request user to generate image firstly.') + + # Generate vector image + response = post( + 'https://vectorizer.ai/api/v1/vectorize', + files={ 'image': image_binary }, + data={ 'mode': 'test' }, + auth=(api_key_name, api_key_value), + timeout=30 + ) + + if response.status_code != 200: + raise Exception(response.text) + + return [ + self.create_text_message('the vectorized svg is saved as an image.'), + self.create_blob_message(blob=response.content, + meta={'mime_type': 'image/svg+xml'}) + ] + + def get_runtime_parameters(self) -> List[ToolParameter]: + """ + override the runtime parameters + """ + # Here, we override the tool parameter list, define the image_id, and set its option list to all images in the current variable pool. The configuration here is consistent with the configuration in yaml. 
+ return [ + ToolParameter.get_simple_instance( + name='image_id', + llm_description=f'the image id that you want to vectorize, \ + and the image id should be specified in \ + {[i.name for i in self.list_default_image_variables()]}', + type=ToolParameter.ToolParameterType.SELECT, + required=True, + options=[i.name for i in self.list_default_image_variables()] + ) + ] + + def is_tool_available(self) -> bool: + # Only when there are images in the variable pool, the LLM needs to use this tool + return len(self.list_default_image_variables()) > 0 +``` + +ここで注目すべきは、実際には`image_id`を使用していないことです。このツールを呼び出す際には、デフォルトの変数プールに必ず画像があると仮定し、直接`image_binary = self.get_variable_file(self.VARIABLE_KEY.IMAGE)`を使用して画像を取得しています。モデルの能力が弱い場合、開発者にもこの方法を推奨します。これにより、エラー許容度を効果的に向上させ、モデルが誤ったパラメータを渡すのを防ぐことができます。 \ No newline at end of file diff --git a/api/core/tools/docs/ja_JP/tool_scale_out.md b/api/core/tools/docs/ja_JP/tool_scale_out.md new file mode 100644 index 0000000000000..a721023d00bdd --- /dev/null +++ b/api/core/tools/docs/ja_JP/tool_scale_out.md @@ -0,0 +1,240 @@ +# ツールの迅速な統合 + +ここでは、GoogleSearchを例にツールを迅速に統合する方法を紹介します。 + +## 1. 
ツールプロバイダーのyamlを準備する + +### 概要 + +このyamlファイルには、プロバイダー名、アイコン、作者などの詳細情報が含まれ、フロントエンドでの柔軟な表示を可能にします。 + +### 例 + +`core/tools/provider/builtin`の下に`google`モジュール(フォルダ)を作成し、`google.yaml`を作成します。名前はモジュール名と一致している必要があります。 + +以降、このツールに関するすべての操作はこのモジュール内で行います。 + +```yaml +identity: # ツールプロバイダーの基本情報 + author: Dify # 作者 + name: google # 名前(一意、他のプロバイダーと重複不可) + label: # フロントエンド表示用のラベル + en_US: Google # 英語ラベル + zh_Hans: Google # 中国語ラベル + description: # フロントエンド表示用の説明 + en_US: Google # 英語説明 + zh_Hans: Google # 中国語説明 + icon: icon.svg # アイコン(現在のモジュールの_assetsフォルダに配置) + tags: # タグ(フロントエンド表示用) + - search +``` + +- `identity`フィールドは必須で、ツールプロバイダーの基本情報(作者、名前、ラベル、説明、アイコンなど)が含まれます。 + - アイコンは現在のモジュールの`_assets`フォルダに配置する必要があります。[こちら](../../provider/builtin/google/_assets/icon.svg)を参照してください。 + - タグはフロントエンドでの表示に使用され、ユーザーがこのツールプロバイダーを素早く見つけるのに役立ちます。現在サポートされているすべてのタグは以下の通りです: + ```python + class ToolLabelEnum(Enum): + SEARCH = 'search' + IMAGE = 'image' + VIDEOS = 'videos' + WEATHER = 'weather' + FINANCE = 'finance' + DESIGN = 'design' + TRAVEL = 'travel' + SOCIAL = 'social' + NEWS = 'news' + MEDICAL = 'medical' + PRODUCTIVITY = 'productivity' + EDUCATION = 'education' + BUSINESS = 'business' + ENTERTAINMENT = 'entertainment' + UTILITIES = 'utilities' + OTHER = 'other' + ``` + +## 2. 
プロバイダーの認証情報を準備する + +GoogleはSerpApiが提供するAPIを使用するサードパーティツールであり、SerpApiを使用するにはAPI Keyが必要です。つまり、このツールを使用するには認証情報が必要です。一方、`wikipedia`のようなツールでは認証情報フィールドを記入する必要はありません。[こちら](../../provider/builtin/wikipedia/wikipedia.yaml)を参照してください。 + +認証情報フィールドを設定すると、以下のようになります: + +```yaml +identity: + author: Dify + name: google + label: + en_US: Google + zh_Hans: Google + description: + en_US: Google + zh_Hans: Google + icon: icon.svg +credentials_for_provider: # 認証情報フィールド + serpapi_api_key: # 認証情報フィールド名 + type: secret-input # 認証情報フィールドタイプ + required: true # 必須かどうか + label: # 認証情報フィールドラベル + en_US: SerpApi API key # 英語ラベル + zh_Hans: SerpApi API key # 中国語ラベル + placeholder: # 認証情報フィールドプレースホルダー + en_US: Please input your SerpApi API key # 英語プレースホルダー + zh_Hans: 请输入你的 SerpApi API key # 中国語プレースホルダー + help: # 認証情報フィールドヘルプテキスト + en_US: Get your SerpApi API key from SerpApi # 英語ヘルプテキスト + zh_Hans: 从 SerpApi 获取您的 SerpApi API key # 中国語ヘルプテキスト + url: https://serpapi.com/manage-api-key # 認証情報フィールドヘルプリンク +``` + +- `type`:認証情報フィールドタイプ。現在、`secret-input`、`text-input`、`select`の3種類をサポートしており、それぞれパスワード入力ボックス、テキスト入力ボックス、ドロップダウンボックスに対応します。`secret-input`の場合、フロントエンドで入力内容が隠され、バックエンドで入力内容が暗号化されます。 + +## 3. ツールのyamlを準備する + +1つのプロバイダーの下に複数のツールを持つことができ、各ツールにはyamlファイルが必要です。このファイルにはツールの基本情報、パラメータ、出力などが含まれます。 + +引き続きGoogleSearchを例に、`google`モジュールの下に`tools`モジュールを作成し、`tools/google_search.yaml`を作成します。内容は以下の通りです: + +```yaml +identity: # ツールの基本情報 + name: google_search # ツール名(一意、他のツールと重複不可) + author: Dify # 作者 + label: # フロントエンド表示用のラベル + en_US: GoogleSearch # 英語ラベル + zh_Hans: 谷歌搜索 # 中国語ラベル +description: # フロントエンド表示用の説明 + human: # フロントエンド表示用の紹介(多言語対応) + en_US: A tool for performing a Google SERP search and extracting snippets and webpages. Input should be a search query. + zh_Hans: 一个用于执行 Google SERP 搜索并提取片段和网页的工具。输入应该是一个搜索查询。 + llm: A tool for performing a Google SERP search and extracting snippets and webpages. Input should be a search query. 
# LLMに渡す紹介文。LLMがこのツールをより理解できるよう、できるだけ詳細な情報を記述することをお勧めします。 +parameters: # パラメータリスト + - name: query # パラメータ名 + type: string # パラメータタイプ + required: true # 必須かどうか + label: # パラメータラベル + en_US: Query string # 英語ラベル + zh_Hans: 查询语句 # 中国語ラベル + human_description: # フロントエンド表示用の紹介(多言語対応) + en_US: used for searching + zh_Hans: 用于搜索网页内容 + llm_description: key words for searching # LLMに渡す紹介文。LLMがこのパラメータをより理解できるよう、できるだけ詳細な情報を記述することをお勧めします。 + form: llm # フォームタイプ。llmはこのパラメータがAgentによって推論される必要があることを意味し、フロントエンドではこのパラメータは表示されません。 + - name: result_type + type: select # パラメータタイプ + required: true + options: # ドロップダウンボックスのオプション + - value: text + label: + en_US: text + zh_Hans: 文本 + - value: link + label: + en_US: link + zh_Hans: 链接 + default: link + label: + en_US: Result type + zh_Hans: 结果类型 + human_description: + en_US: used for selecting the result type, text or link + zh_Hans: 用于选择结果类型,使用文本还是链接进行展示 + form: form # フォームタイプ。formはこのパラメータが対話開始前にフロントエンドでユーザーによって入力される必要があることを意味します。 +``` + +- `identity`フィールドは必須で、ツールの基本情報(名前、作者、ラベル、説明など)が含まれます。 +- `parameters` パラメータリスト + - `name`(必須)パラメータ名。一意で、他のパラメータと重複しないようにしてください。 + - `type`(必須)パラメータタイプ。現在、`string`、`number`、`boolean`、`select`、`secret-input`の5種類をサポートしており、それぞれ文字列、数値、ブール値、ドロップダウンボックス、暗号化入力ボックスに対応します。機密情報には`secret-input`タイプの使用をお勧めします。 + - `label`(必須)パラメータラベル。フロントエンド表示用です。 + - `form`(必須)フォームタイプ。現在、`llm`と`form`の2種類をサポートしています。 + - エージェントアプリケーションでは、`llm`はこのパラメータがLLM自身によって推論されることを示し、`form`はこのツールを使用するために事前に設定できるパラメータであることを示します。 + - ワークフローアプリケーションでは、`llm`と`form`の両方がフロントエンドで入力する必要がありますが、`llm`のパラメータはツールノードの入力変数として使用されます。 + - `required` パラメータが必須かどうかを示します。 + - `llm`モードでは、パラメータが必須の場合、Agentはこのパラメータを推論する必要があります。 + - `form`モードでは、パラメータが必須の場合、ユーザーは対話開始前にフロントエンドでこのパラメータを入力する必要があります。 + - `options` パラメータオプション + - `llm`モードでは、DifyはすべてのオプションをLLMに渡し、LLMはこれらのオプションに基づいて推論できます。 + - `form`モードで、`type`が`select`の場合、フロントエンドはこれらのオプションを表示します。 + - `default` デフォルト値 + - `min` 最小値。パラメータタイプが`number`の場合に設定できます。 + - `max` 最大値。パラメータタイプが`number`の場合に設定できます。 + - `human_description` 
フロントエンド表示用の紹介。多言語対応です。 + - `placeholder` 入力ボックスのプロンプトテキスト。フォームタイプが`form`で、パラメータタイプが`string`、`number`、`secret-input`の場合に設定できます。多言語対応です。 + - `llm_description` LLMに渡す紹介文。LLMがこのパラメータをより理解できるよう、できるだけ詳細な情報を記述することをお勧めします。 + +## 4. ツールコードを準備する + +ツールの設定が完了したら、ツールのロジックを実装するコードを作成します。 + +`google/tools`モジュールの下に`google_search.py`を作成し、内容は以下の通りです: + +```python +from core.tools.tool.builtin_tool import BuiltinTool +from core.tools.entities.tool_entities import ToolInvokeMessage + +from typing import Any, Dict, List, Union + +class GoogleSearchTool(BuiltinTool): + def _invoke(self, + user_id: str, + tool_parameters: Dict[str, Any], + ) -> Union[ToolInvokeMessage, List[ToolInvokeMessage]]: + """ + ツールを呼び出す + """ + query = tool_parameters['query'] + result_type = tool_parameters['result_type'] + api_key = self.runtime.credentials['serpapi_api_key'] + result = SerpAPI(api_key).run(query, result_type=result_type) + + if result_type == 'text': + return self.create_text_message(text=result) + return self.create_link_message(link=result) +``` + +### パラメータ +ツールの全体的なロジックは`_invoke`メソッドにあります。このメソッドは2つのパラメータ(`user_id`とtool_parameters`)を受け取り、それぞれユーザーIDとツールパラメータを表します。 + +### 戻り値 +ツールの戻り値として、1つのメッセージまたは複数のメッセージを選択できます。ここでは1つのメッセージを返しています。`create_text_message`と`create_link_message`を使用して、テキストメッセージまたはリンクメッセージを作成できます。複数のメッセージを返す場合は、リストを構築できます(例:`[self.create_text_message('msg1'), self.create_text_message('msg2')]`)。 + +## 5. 
プロバイダーコードを準備する + +最後に、プロバイダーモジュールの下にプロバイダークラスを作成し、プロバイダーの認証情報検証ロジックを実装する必要があります。認証情報の検証が失敗した場合、`ToolProviderCredentialValidationError`例外が発生します。 + +`google`モジュールの下に`google.py`を作成し、内容は以下の通りです: + +```python +from core.tools.provider.builtin_tool_provider import BuiltinToolProviderController +from core.tools.errors import ToolProviderCredentialValidationError + +from core.tools.provider.builtin.google.tools.google_search import GoogleSearchTool + +from typing import Any, Dict + +class GoogleProvider(BuiltinToolProviderController): + def _validate_credentials(self, credentials: Dict[str, Any]) -> None: + try: + # 1. ここでGoogleSearchTool()を使ってGoogleSearchToolをインスタンス化する必要があります。これによりGoogleSearchToolのyaml設定が自動的に読み込まれますが、この時点では認証情報は含まれていません + # 2. 次に、fork_tool_runtimeメソッドを使用して、現在の認証情報をGoogleSearchToolに渡す必要があります + # 3. 最後に、invokeを呼び出します。パラメータはGoogleSearchToolのyamlで設定されたパラメータルールに従って渡す必要があります + GoogleSearchTool().fork_tool_runtime( + meta={ + "credentials": credentials, + } + ).invoke( + user_id='', + tool_parameters={ + "query": "test", + "result_type": "link" + }, + ) + except Exception as e: + raise ToolProviderCredentialValidationError(str(e)) +``` + +## 完了 + +以上のステップが完了すると、このツールをフロントエンドで確認し、Agentで使用することができるようになります。 + +もちろん、google_searchには認証情報が必要なため、使用する前にフロントエンドで認証情報を入力する必要があります。 + +![Alt text](../images/index/image-2.png) \ No newline at end of file diff --git a/api/core/tools/docs/zh_Hans/tool_scale_out.md b/api/core/tools/docs/zh_Hans/tool_scale_out.md index 06a8d9a4f9a9d..ec61e4677bae7 100644 --- a/api/core/tools/docs/zh_Hans/tool_scale_out.md +++ b/api/core/tools/docs/zh_Hans/tool_scale_out.md @@ -234,4 +234,4 @@ class GoogleProvider(BuiltinToolProviderController): 当然,因为google_search需要一个凭据,在使用之前,还需要在前端配置它的凭据。 -![Alt text](images/index/image-2.png) +![Alt text](../images/index/image-2.png) From c8b9bdebfe88e26a13abf36330420b77c3106b16 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=91=86=E8=90=8C=E9=97=B7=E6=B2=B9=E7=93=B6?= <253605712@qq.com> Date: Sun, 22 Sep 2024 
10:08:35 +0800 Subject: [PATCH 014/235] feat:use xinference tts stream mode (#8616) --- .../model_providers/xinference/llm/llm.py | 3 +-- .../model_providers/xinference/tts/tts.py | 12 ++++++------ api/poetry.lock | 8 ++++---- api/pyproject.toml | 2 +- .../model_runtime/__mock/xinference.py | 5 +---- 5 files changed, 13 insertions(+), 17 deletions(-) diff --git a/api/core/model_runtime/model_providers/xinference/llm/llm.py b/api/core/model_runtime/model_providers/xinference/llm/llm.py index 4fadda5df5d85..286640079b02a 100644 --- a/api/core/model_runtime/model_providers/xinference/llm/llm.py +++ b/api/core/model_runtime/model_providers/xinference/llm/llm.py @@ -19,7 +19,6 @@ from openai.types.completion import Completion from xinference_client.client.restful.restful_client import ( Client, - RESTfulChatglmCppChatModelHandle, RESTfulChatModelHandle, RESTfulGenerateModelHandle, ) @@ -491,7 +490,7 @@ def _generate( if tools and len(tools) > 0: generate_config["tools"] = [{"type": "function", "function": helper.dump_model(tool)} for tool in tools] vision = credentials.get("support_vision", False) - if isinstance(xinference_model, RESTfulChatModelHandle | RESTfulChatglmCppChatModelHandle): + if isinstance(xinference_model, RESTfulChatModelHandle): resp = client.chat.completions.create( model=credentials["model_uid"], messages=[self._convert_prompt_message_to_dict(message) for message in prompt_messages], diff --git a/api/core/model_runtime/model_providers/xinference/tts/tts.py b/api/core/model_runtime/model_providers/xinference/tts/tts.py index 10538b5788a3c..81dbe397d2f10 100644 --- a/api/core/model_runtime/model_providers/xinference/tts/tts.py +++ b/api/core/model_runtime/model_providers/xinference/tts/tts.py @@ -208,21 +208,21 @@ def _tts_invoke_streaming(self, model: str, credentials: dict, content_text: str executor = concurrent.futures.ThreadPoolExecutor(max_workers=min(3, len(sentences))) futures = [ executor.submit( - handle.speech, input=sentences[i], 
voice=voice, response_format="mp3", speed=1.0, stream=False + handle.speech, input=sentences[i], voice=voice, response_format="mp3", speed=1.0, stream=True ) for i in range(len(sentences)) ] for future in futures: response = future.result() - for i in range(0, len(response), 1024): - yield response[i : i + 1024] + for chunk in response: + yield chunk else: response = handle.speech( - input=content_text.strip(), voice=voice, response_format="mp3", speed=1.0, stream=False + input=content_text.strip(), voice=voice, response_format="mp3", speed=1.0, stream=True ) - for i in range(0, len(response), 1024): - yield response[i : i + 1024] + for chunk in response: + yield chunk except Exception as ex: raise InvokeBadRequestError(str(ex)) diff --git a/api/poetry.lock b/api/poetry.lock index 8d4d680031a7c..e532394a24e74 100644 --- a/api/poetry.lock +++ b/api/poetry.lock @@ -10014,13 +10014,13 @@ h11 = ">=0.9.0,<1" [[package]] name = "xinference-client" -version = "0.13.3" +version = "0.15.2" description = "Client for Xinference" optional = false python-versions = "*" files = [ - {file = "xinference-client-0.13.3.tar.gz", hash = "sha256:822b722100affdff049c27760be7d62ac92de58c87a40d3361066df446ba648f"}, - {file = "xinference_client-0.13.3-py3-none-any.whl", hash = "sha256:f0eff3858b1ebcef2129726f82b09259c177e11db466a7ca23def3d4849c419f"}, + {file = "xinference-client-0.15.2.tar.gz", hash = "sha256:5c2259bb133148d1cc9bd2b8ec6eb8b5bbeba7f11d6252959f4e6cd79baa53ed"}, + {file = "xinference_client-0.15.2-py3-none-any.whl", hash = "sha256:b6275adab695e75e75a33e21e0ad212488fc2d5a4d0f693d544c0e78469abbe3"}, ] [package.dependencies] @@ -10422,4 +10422,4 @@ cffi = ["cffi (>=1.11)"] [metadata] lock-version = "2.0" python-versions = ">=3.10,<3.13" -content-hash = "18924ae12a00bde4438a46168bc167ed69613ab1ab0c387f193cd47ac24379b2" +content-hash = "85aa4be7defee8fe6622cf95ba03e81895121502ebf6d666d6ce376ff019fac7" diff --git a/api/pyproject.toml b/api/pyproject.toml index 
829c39a12bf91..1f483fc49f032 100644 --- a/api/pyproject.toml +++ b/api/pyproject.toml @@ -203,7 +203,7 @@ transformers = "~4.35.0" unstructured = { version = "~0.10.27", extras = ["docx", "epub", "md", "msg", "ppt", "pptx"] } websocket-client = "~1.7.0" werkzeug = "~3.0.1" -xinference-client = "0.13.3" +xinference-client = "0.15.2" yarl = "~1.9.4" zhipuai = "1.0.7" # Before adding new dependency, consider place it in alphabet order (a-z) and suitable group. diff --git a/api/tests/integration_tests/model_runtime/__mock/xinference.py b/api/tests/integration_tests/model_runtime/__mock/xinference.py index 299523f4f5b7c..8deb50635f3d5 100644 --- a/api/tests/integration_tests/model_runtime/__mock/xinference.py +++ b/api/tests/integration_tests/model_runtime/__mock/xinference.py @@ -9,7 +9,6 @@ from requests.sessions import Session from xinference_client.client.restful.restful_client import ( Client, - RESTfulChatglmCppChatModelHandle, RESTfulChatModelHandle, RESTfulEmbeddingModelHandle, RESTfulGenerateModelHandle, @@ -19,9 +18,7 @@ class MockXinferenceClass: - def get_chat_model( - self: Client, model_uid: str - ) -> Union[RESTfulChatglmCppChatModelHandle, RESTfulGenerateModelHandle, RESTfulChatModelHandle]: + def get_chat_model(self: Client, model_uid: str) -> Union[RESTfulGenerateModelHandle, RESTfulChatModelHandle]: if not re.match(r"https?:\/\/[^\s\/$.?#].[^\s]*$", self.base_url): raise RuntimeError("404 Not Found") From 0665268578599ca340ff134eec041f17364ef937 Mon Sep 17 00:00:00 2001 From: ice yao Date: Sun, 22 Sep 2024 10:13:00 +0800 Subject: [PATCH 015/235] Add Fireworks AI as new model provider (#8428) --- .../model_providers/_position.yaml | 1 + .../model_providers/fireworks/__init__.py | 0 .../fireworks/_assets/icon_l_en.svg | 3 + .../fireworks/_assets/icon_s_en.svg | 5 + .../model_providers/fireworks/_common.py | 52 ++ .../model_providers/fireworks/fireworks.py | 27 + .../model_providers/fireworks/fireworks.yaml | 29 + 
.../model_providers/fireworks/llm/__init__.py | 0 .../fireworks/llm/_position.yaml | 16 + .../fireworks/llm/firefunction-v1.yaml | 46 ++ .../fireworks/llm/firefunction-v2.yaml | 46 ++ .../fireworks/llm/gemma2-9b-it.yaml | 45 ++ .../llm/llama-v3-70b-instruct-hf.yaml | 46 ++ .../fireworks/llm/llama-v3-70b-instruct.yaml | 46 ++ .../llm/llama-v3-8b-instruct-hf.yaml | 46 ++ .../fireworks/llm/llama-v3-8b-instruct.yaml | 46 ++ .../llm/llama-v3p1-405b-instruct.yaml | 46 ++ .../llm/llama-v3p1-70b-instruct.yaml | 46 ++ .../fireworks/llm/llama-v3p1-8b-instruct.yaml | 46 ++ .../model_providers/fireworks/llm/llm.py | 610 ++++++++++++++++++ .../fireworks/llm/mixtral-8x22b-instruct.yaml | 46 ++ .../llm/mixtral-8x7b-instruct-hf.yaml | 46 ++ .../fireworks/llm/mixtral-8x7b-instruct.yaml | 46 ++ .../fireworks/llm/mythomax-l2-13b.yaml | 46 ++ .../llm/phi-3-vision-128k-instruct.yaml | 46 ++ .../fireworks/llm/yi-large.yaml | 45 ++ api/pyproject.toml | 1 + .../model_runtime/fireworks/__init__.py | 0 .../model_runtime/fireworks/test_llm.py | 186 ++++++ .../model_runtime/fireworks/test_provider.py | 17 + dev/pytest/pytest_model_runtime.sh | 4 +- 31 files changed, 1683 insertions(+), 2 deletions(-) create mode 100644 api/core/model_runtime/model_providers/fireworks/__init__.py create mode 100644 api/core/model_runtime/model_providers/fireworks/_assets/icon_l_en.svg create mode 100644 api/core/model_runtime/model_providers/fireworks/_assets/icon_s_en.svg create mode 100644 api/core/model_runtime/model_providers/fireworks/_common.py create mode 100644 api/core/model_runtime/model_providers/fireworks/fireworks.py create mode 100644 api/core/model_runtime/model_providers/fireworks/fireworks.yaml create mode 100644 api/core/model_runtime/model_providers/fireworks/llm/__init__.py create mode 100644 api/core/model_runtime/model_providers/fireworks/llm/_position.yaml create mode 100644 api/core/model_runtime/model_providers/fireworks/llm/firefunction-v1.yaml create mode 100644 
api/core/model_runtime/model_providers/fireworks/llm/firefunction-v2.yaml create mode 100644 api/core/model_runtime/model_providers/fireworks/llm/gemma2-9b-it.yaml create mode 100644 api/core/model_runtime/model_providers/fireworks/llm/llama-v3-70b-instruct-hf.yaml create mode 100644 api/core/model_runtime/model_providers/fireworks/llm/llama-v3-70b-instruct.yaml create mode 100644 api/core/model_runtime/model_providers/fireworks/llm/llama-v3-8b-instruct-hf.yaml create mode 100644 api/core/model_runtime/model_providers/fireworks/llm/llama-v3-8b-instruct.yaml create mode 100644 api/core/model_runtime/model_providers/fireworks/llm/llama-v3p1-405b-instruct.yaml create mode 100644 api/core/model_runtime/model_providers/fireworks/llm/llama-v3p1-70b-instruct.yaml create mode 100644 api/core/model_runtime/model_providers/fireworks/llm/llama-v3p1-8b-instruct.yaml create mode 100644 api/core/model_runtime/model_providers/fireworks/llm/llm.py create mode 100644 api/core/model_runtime/model_providers/fireworks/llm/mixtral-8x22b-instruct.yaml create mode 100644 api/core/model_runtime/model_providers/fireworks/llm/mixtral-8x7b-instruct-hf.yaml create mode 100644 api/core/model_runtime/model_providers/fireworks/llm/mixtral-8x7b-instruct.yaml create mode 100644 api/core/model_runtime/model_providers/fireworks/llm/mythomax-l2-13b.yaml create mode 100644 api/core/model_runtime/model_providers/fireworks/llm/phi-3-vision-128k-instruct.yaml create mode 100644 api/core/model_runtime/model_providers/fireworks/llm/yi-large.yaml create mode 100644 api/tests/integration_tests/model_runtime/fireworks/__init__.py create mode 100644 api/tests/integration_tests/model_runtime/fireworks/test_llm.py create mode 100644 api/tests/integration_tests/model_runtime/fireworks/test_provider.py diff --git a/api/core/model_runtime/model_providers/_position.yaml b/api/core/model_runtime/model_providers/_position.yaml index d10314ba039e6..1f5f64019a166 100644 --- 
a/api/core/model_runtime/model_providers/_position.yaml +++ b/api/core/model_runtime/model_providers/_position.yaml @@ -37,3 +37,4 @@ - siliconflow - perfxcloud - zhinao +- fireworks diff --git a/api/core/model_runtime/model_providers/fireworks/__init__.py b/api/core/model_runtime/model_providers/fireworks/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/api/core/model_runtime/model_providers/fireworks/_assets/icon_l_en.svg b/api/core/model_runtime/model_providers/fireworks/_assets/icon_l_en.svg new file mode 100644 index 0000000000000..582605cc422cc --- /dev/null +++ b/api/core/model_runtime/model_providers/fireworks/_assets/icon_l_en.svg @@ -0,0 +1,3 @@ + + + \ No newline at end of file diff --git a/api/core/model_runtime/model_providers/fireworks/_assets/icon_s_en.svg b/api/core/model_runtime/model_providers/fireworks/_assets/icon_s_en.svg new file mode 100644 index 0000000000000..86eeba66f9290 --- /dev/null +++ b/api/core/model_runtime/model_providers/fireworks/_assets/icon_s_en.svg @@ -0,0 +1,5 @@ + + + + + diff --git a/api/core/model_runtime/model_providers/fireworks/_common.py b/api/core/model_runtime/model_providers/fireworks/_common.py new file mode 100644 index 0000000000000..378ced3a4019b --- /dev/null +++ b/api/core/model_runtime/model_providers/fireworks/_common.py @@ -0,0 +1,52 @@ +from collections.abc import Mapping + +import openai + +from core.model_runtime.errors.invoke import ( + InvokeAuthorizationError, + InvokeBadRequestError, + InvokeConnectionError, + InvokeError, + InvokeRateLimitError, + InvokeServerUnavailableError, +) + + +class _CommonFireworks: + def _to_credential_kwargs(self, credentials: Mapping) -> dict: + """ + Transform credentials to kwargs for model instance + + :param credentials: + :return: + """ + credentials_kwargs = { + "api_key": credentials["fireworks_api_key"], + "base_url": "https://api.fireworks.ai/inference/v1", + "max_retries": 1, + } + + return credentials_kwargs + + @property + def 
_invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: + """ + Map model invoke error to unified error + The key is the error type thrown to the caller + The value is the error type thrown by the model, + which needs to be converted into a unified error type for the caller. + + :return: Invoke error mapping + """ + return { + InvokeConnectionError: [openai.APIConnectionError, openai.APITimeoutError], + InvokeServerUnavailableError: [openai.InternalServerError], + InvokeRateLimitError: [openai.RateLimitError], + InvokeAuthorizationError: [openai.AuthenticationError, openai.PermissionDeniedError], + InvokeBadRequestError: [ + openai.BadRequestError, + openai.NotFoundError, + openai.UnprocessableEntityError, + openai.APIError, + ], + } diff --git a/api/core/model_runtime/model_providers/fireworks/fireworks.py b/api/core/model_runtime/model_providers/fireworks/fireworks.py new file mode 100644 index 0000000000000..15f25badab994 --- /dev/null +++ b/api/core/model_runtime/model_providers/fireworks/fireworks.py @@ -0,0 +1,27 @@ +import logging + +from core.model_runtime.entities.model_entities import ModelType +from core.model_runtime.errors.validate import CredentialsValidateFailedError +from core.model_runtime.model_providers.__base.model_provider import ModelProvider + +logger = logging.getLogger(__name__) + + +class FireworksProvider(ModelProvider): + def validate_provider_credentials(self, credentials: dict) -> None: + """ + Validate provider credentials + if validate failed, raise exception + + :param credentials: provider credentials, credentials form defined in `provider_credential_schema`. 
+ """ + try: + model_instance = self.get_model_instance(ModelType.LLM) + model_instance.validate_credentials( + model="accounts/fireworks/models/llama-v3p1-8b-instruct", credentials=credentials + ) + except CredentialsValidateFailedError as ex: + raise ex + except Exception as ex: + logger.exception(f"{self.get_provider_schema().provider} credentials validate failed") + raise ex diff --git a/api/core/model_runtime/model_providers/fireworks/fireworks.yaml b/api/core/model_runtime/model_providers/fireworks/fireworks.yaml new file mode 100644 index 0000000000000..f886fa23b5bd8 --- /dev/null +++ b/api/core/model_runtime/model_providers/fireworks/fireworks.yaml @@ -0,0 +1,29 @@ +provider: fireworks +label: + zh_Hans: Fireworks AI + en_US: Fireworks AI +icon_small: + en_US: icon_s_en.svg +icon_large: + en_US: icon_l_en.svg +background: "#FCFDFF" +help: + title: + en_US: Get your API Key from Fireworks AI + zh_Hans: 从 Fireworks AI 获取 API Key + url: + en_US: https://fireworks.ai/account/api-keys +supported_model_types: + - llm +configurate_methods: + - predefined-model +provider_credential_schema: + credential_form_schemas: + - variable: fireworks_api_key + label: + en_US: API Key + type: secret-input + required: true + placeholder: + zh_Hans: 在此输入您的 API Key + en_US: Enter your API Key diff --git a/api/core/model_runtime/model_providers/fireworks/llm/__init__.py b/api/core/model_runtime/model_providers/fireworks/llm/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/api/core/model_runtime/model_providers/fireworks/llm/_position.yaml b/api/core/model_runtime/model_providers/fireworks/llm/_position.yaml new file mode 100644 index 0000000000000..9f7c1af68cef7 --- /dev/null +++ b/api/core/model_runtime/model_providers/fireworks/llm/_position.yaml @@ -0,0 +1,16 @@ +- llama-v3p1-405b-instruct +- llama-v3p1-70b-instruct +- llama-v3p1-8b-instruct +- llama-v3-70b-instruct +- mixtral-8x22b-instruct +- mixtral-8x7b-instruct +- firefunction-v2 +- 
firefunction-v1 +- gemma2-9b-it +- llama-v3-70b-instruct-hf +- llama-v3-8b-instruct +- llama-v3-8b-instruct-hf +- mixtral-8x7b-instruct-hf +- mythomax-l2-13b +- phi-3-vision-128k-instruct +- yi-large diff --git a/api/core/model_runtime/model_providers/fireworks/llm/firefunction-v1.yaml b/api/core/model_runtime/model_providers/fireworks/llm/firefunction-v1.yaml new file mode 100644 index 0000000000000..f6bac12832d64 --- /dev/null +++ b/api/core/model_runtime/model_providers/fireworks/llm/firefunction-v1.yaml @@ -0,0 +1,46 @@ +model: accounts/fireworks/models/firefunction-v1 +label: + zh_Hans: Firefunction V1 + en_US: Firefunction V1 +model_type: llm +features: + - agent-thought + - tool-call +model_properties: + mode: chat + context_size: 32768 +parameter_rules: + - name: temperature + use_template: temperature + - name: top_p + use_template: top_p + - name: top_k + label: + zh_Hans: 取样数量 + en_US: Top k + type: int + help: + zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 + en_US: Only sample from the top K options for each subsequent token. 
+ - name: max_tokens + use_template: max_tokens + - name: context_length_exceeded_behavior + default: None + label: + zh_Hans: 上下文长度超出行为 + en_US: Context Length Exceeded Behavior + help: + zh_Hans: 上下文长度超出行为 + en_US: Context Length Exceeded Behavior + type: string + options: + - None + - truncate + - error + - name: response_format + use_template: response_format +pricing: + input: '0.5' + output: '0.5' + unit: '0.000001' + currency: USD diff --git a/api/core/model_runtime/model_providers/fireworks/llm/firefunction-v2.yaml b/api/core/model_runtime/model_providers/fireworks/llm/firefunction-v2.yaml new file mode 100644 index 0000000000000..2979cb46d572a --- /dev/null +++ b/api/core/model_runtime/model_providers/fireworks/llm/firefunction-v2.yaml @@ -0,0 +1,46 @@ +model: accounts/fireworks/models/firefunction-v2 +label: + zh_Hans: Firefunction V2 + en_US: Firefunction V2 +model_type: llm +features: + - agent-thought + - tool-call +model_properties: + mode: chat + context_size: 8192 +parameter_rules: + - name: temperature + use_template: temperature + - name: top_p + use_template: top_p + - name: top_k + label: + zh_Hans: 取样数量 + en_US: Top k + type: int + help: + zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 + en_US: Only sample from the top K options for each subsequent token. 
+ - name: max_tokens + use_template: max_tokens + - name: context_length_exceeded_behavior + default: None + label: + zh_Hans: 上下文长度超出行为 + en_US: Context Length Exceeded Behavior + help: + zh_Hans: 上下文长度超出行为 + en_US: Context Length Exceeded Behavior + type: string + options: + - None + - truncate + - error + - name: response_format + use_template: response_format +pricing: + input: '0.9' + output: '0.9' + unit: '0.000001' + currency: USD diff --git a/api/core/model_runtime/model_providers/fireworks/llm/gemma2-9b-it.yaml b/api/core/model_runtime/model_providers/fireworks/llm/gemma2-9b-it.yaml new file mode 100644 index 0000000000000..ee41a7e2fdc3d --- /dev/null +++ b/api/core/model_runtime/model_providers/fireworks/llm/gemma2-9b-it.yaml @@ -0,0 +1,45 @@ +model: accounts/fireworks/models/gemma2-9b-it +label: + zh_Hans: Gemma2 9B Instruct + en_US: Gemma2 9B Instruct +model_type: llm +features: + - agent-thought +model_properties: + mode: chat + context_size: 8192 +parameter_rules: + - name: temperature + use_template: temperature + - name: top_p + use_template: top_p + - name: top_k + label: + zh_Hans: 取样数量 + en_US: Top k + type: int + help: + zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 + en_US: Only sample from the top K options for each subsequent token. 
+ - name: max_tokens + use_template: max_tokens + - name: context_length_exceeded_behavior + default: None + label: + zh_Hans: 上下文长度超出行为 + en_US: Context Length Exceeded Behavior + help: + zh_Hans: 上下文长度超出行为 + en_US: Context Length Exceeded Behavior + type: string + options: + - None + - truncate + - error + - name: response_format + use_template: response_format +pricing: + input: '0.2' + output: '0.2' + unit: '0.000001' + currency: USD diff --git a/api/core/model_runtime/model_providers/fireworks/llm/llama-v3-70b-instruct-hf.yaml b/api/core/model_runtime/model_providers/fireworks/llm/llama-v3-70b-instruct-hf.yaml new file mode 100644 index 0000000000000..2ae89b88165d1 --- /dev/null +++ b/api/core/model_runtime/model_providers/fireworks/llm/llama-v3-70b-instruct-hf.yaml @@ -0,0 +1,46 @@ +model: accounts/fireworks/models/llama-v3-70b-instruct-hf +label: + zh_Hans: Llama3 70B Instruct(HF version) + en_US: Llama3 70B Instruct(HF version) +model_type: llm +features: + - agent-thought + - tool-call +model_properties: + mode: chat + context_size: 8192 +parameter_rules: + - name: temperature + use_template: temperature + - name: top_p + use_template: top_p + - name: top_k + label: + zh_Hans: 取样数量 + en_US: Top k + type: int + help: + zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 + en_US: Only sample from the top K options for each subsequent token. 
+ - name: max_tokens + use_template: max_tokens + - name: context_length_exceeded_behavior + default: None + label: + zh_Hans: 上下文长度超出行为 + en_US: Context Length Exceeded Behavior + help: + zh_Hans: 上下文长度超出行为 + en_US: Context Length Exceeded Behavior + type: string + options: + - None + - truncate + - error + - name: response_format + use_template: response_format +pricing: + input: '0.9' + output: '0.9' + unit: '0.000001' + currency: USD diff --git a/api/core/model_runtime/model_providers/fireworks/llm/llama-v3-70b-instruct.yaml b/api/core/model_runtime/model_providers/fireworks/llm/llama-v3-70b-instruct.yaml new file mode 100644 index 0000000000000..7c24b08ca5cca --- /dev/null +++ b/api/core/model_runtime/model_providers/fireworks/llm/llama-v3-70b-instruct.yaml @@ -0,0 +1,46 @@ +model: accounts/fireworks/models/llama-v3-70b-instruct +label: + zh_Hans: Llama3 70B Instruct + en_US: Llama3 70B Instruct +model_type: llm +features: + - agent-thought + - tool-call +model_properties: + mode: chat + context_size: 8192 +parameter_rules: + - name: temperature + use_template: temperature + - name: top_p + use_template: top_p + - name: top_k + label: + zh_Hans: 取样数量 + en_US: Top k + type: int + help: + zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 + en_US: Only sample from the top K options for each subsequent token. 
+ - name: max_tokens + use_template: max_tokens + - name: context_length_exceeded_behavior + default: None + label: + zh_Hans: 上下文长度超出行为 + en_US: Context Length Exceeded Behavior + help: + zh_Hans: 上下文长度超出行为 + en_US: Context Length Exceeded Behavior + type: string + options: + - None + - truncate + - error + - name: response_format + use_template: response_format +pricing: + input: '0.9' + output: '0.9' + unit: '0.000001' + currency: USD diff --git a/api/core/model_runtime/model_providers/fireworks/llm/llama-v3-8b-instruct-hf.yaml b/api/core/model_runtime/model_providers/fireworks/llm/llama-v3-8b-instruct-hf.yaml new file mode 100644 index 0000000000000..83507ef3e5276 --- /dev/null +++ b/api/core/model_runtime/model_providers/fireworks/llm/llama-v3-8b-instruct-hf.yaml @@ -0,0 +1,46 @@ +model: accounts/fireworks/models/llama-v3-8b-instruct-hf +label: + zh_Hans: Llama3 8B Instruct(HF version) + en_US: Llama3 8B Instruct(HF version) +model_type: llm +features: + - agent-thought + - tool-call +model_properties: + mode: chat + context_size: 8192 +parameter_rules: + - name: temperature + use_template: temperature + - name: top_p + use_template: top_p + - name: top_k + label: + zh_Hans: 取样数量 + en_US: Top k + type: int + help: + zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 + en_US: Only sample from the top K options for each subsequent token. 
+ - name: max_tokens + use_template: max_tokens + - name: context_length_exceeded_behavior + default: None + label: + zh_Hans: 上下文长度超出行为 + en_US: Context Length Exceeded Behavior + help: + zh_Hans: 上下文长度超出行为 + en_US: Context Length Exceeded Behavior + type: string + options: + - None + - truncate + - error + - name: response_format + use_template: response_format +pricing: + input: '0.2' + output: '0.2' + unit: '0.000001' + currency: USD diff --git a/api/core/model_runtime/model_providers/fireworks/llm/llama-v3-8b-instruct.yaml b/api/core/model_runtime/model_providers/fireworks/llm/llama-v3-8b-instruct.yaml new file mode 100644 index 0000000000000..d8ac9537b80e7 --- /dev/null +++ b/api/core/model_runtime/model_providers/fireworks/llm/llama-v3-8b-instruct.yaml @@ -0,0 +1,46 @@ +model: accounts/fireworks/models/llama-v3-8b-instruct +label: + zh_Hans: Llama3 8B Instruct + en_US: Llama3 8B Instruct +model_type: llm +features: + - agent-thought + - tool-call +model_properties: + mode: chat + context_size: 8192 +parameter_rules: + - name: temperature + use_template: temperature + - name: top_p + use_template: top_p + - name: top_k + label: + zh_Hans: 取样数量 + en_US: Top k + type: int + help: + zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 + en_US: Only sample from the top K options for each subsequent token. 
+ - name: max_tokens + use_template: max_tokens + - name: context_length_exceeded_behavior + default: None + label: + zh_Hans: 上下文长度超出行为 + en_US: Context Length Exceeded Behavior + help: + zh_Hans: 上下文长度超出行为 + en_US: Context Length Exceeded Behavior + type: string + options: + - None + - truncate + - error + - name: response_format + use_template: response_format +pricing: + input: '0.2' + output: '0.2' + unit: '0.000001' + currency: USD diff --git a/api/core/model_runtime/model_providers/fireworks/llm/llama-v3p1-405b-instruct.yaml b/api/core/model_runtime/model_providers/fireworks/llm/llama-v3p1-405b-instruct.yaml new file mode 100644 index 0000000000000..c4ddb3e9246d4 --- /dev/null +++ b/api/core/model_runtime/model_providers/fireworks/llm/llama-v3p1-405b-instruct.yaml @@ -0,0 +1,46 @@ +model: accounts/fireworks/models/llama-v3p1-405b-instruct +label: + zh_Hans: Llama3.1 405B Instruct + en_US: Llama3.1 405B Instruct +model_type: llm +features: + - agent-thought + - tool-call +model_properties: + mode: chat + context_size: 131072 +parameter_rules: + - name: temperature + use_template: temperature + - name: top_p + use_template: top_p + - name: top_k + label: + zh_Hans: 取样数量 + en_US: Top k + type: int + help: + zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 + en_US: Only sample from the top K options for each subsequent token. 
+ - name: max_tokens + use_template: max_tokens + - name: context_length_exceeded_behavior + default: None + label: + zh_Hans: 上下文长度超出行为 + en_US: Context Length Exceeded Behavior + help: + zh_Hans: 上下文长度超出行为 + en_US: Context Length Exceeded Behavior + type: string + options: + - None + - truncate + - error + - name: response_format + use_template: response_format +pricing: + input: '3' + output: '3' + unit: '0.000001' + currency: USD diff --git a/api/core/model_runtime/model_providers/fireworks/llm/llama-v3p1-70b-instruct.yaml b/api/core/model_runtime/model_providers/fireworks/llm/llama-v3p1-70b-instruct.yaml new file mode 100644 index 0000000000000..62f84f87fa560 --- /dev/null +++ b/api/core/model_runtime/model_providers/fireworks/llm/llama-v3p1-70b-instruct.yaml @@ -0,0 +1,46 @@ +model: accounts/fireworks/models/llama-v3p1-70b-instruct +label: + zh_Hans: Llama3.1 70B Instruct + en_US: Llama3.1 70B Instruct +model_type: llm +features: + - agent-thought + - tool-call +model_properties: + mode: chat + context_size: 131072 +parameter_rules: + - name: temperature + use_template: temperature + - name: top_p + use_template: top_p + - name: top_k + label: + zh_Hans: 取样数量 + en_US: Top k + type: int + help: + zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 + en_US: Only sample from the top K options for each subsequent token. 
+ - name: max_tokens + use_template: max_tokens + - name: context_length_exceeded_behavior + default: None + label: + zh_Hans: 上下文长度超出行为 + en_US: Context Length Exceeded Behavior + help: + zh_Hans: 上下文长度超出行为 + en_US: Context Length Exceeded Behavior + type: string + options: + - None + - truncate + - error + - name: response_format + use_template: response_format +pricing: + input: '0.2' + output: '0.2' + unit: '0.000001' + currency: USD diff --git a/api/core/model_runtime/model_providers/fireworks/llm/llama-v3p1-8b-instruct.yaml b/api/core/model_runtime/model_providers/fireworks/llm/llama-v3p1-8b-instruct.yaml new file mode 100644 index 0000000000000..9bb99c91b65b0 --- /dev/null +++ b/api/core/model_runtime/model_providers/fireworks/llm/llama-v3p1-8b-instruct.yaml @@ -0,0 +1,46 @@ +model: accounts/fireworks/models/llama-v3p1-8b-instruct +label: + zh_Hans: Llama3.1 8B Instruct + en_US: Llama3.1 8B Instruct +model_type: llm +features: + - agent-thought + - tool-call +model_properties: + mode: chat + context_size: 131072 +parameter_rules: + - name: temperature + use_template: temperature + - name: top_p + use_template: top_p + - name: top_k + label: + zh_Hans: 取样数量 + en_US: Top k + type: int + help: + zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 + en_US: Only sample from the top K options for each subsequent token. 
+ - name: max_tokens + use_template: max_tokens + - name: context_length_exceeded_behavior + default: None + label: + zh_Hans: 上下文长度超出行为 + en_US: Context Length Exceeded Behavior + help: + zh_Hans: 上下文长度超出行为 + en_US: Context Length Exceeded Behavior + type: string + options: + - None + - truncate + - error + - name: response_format + use_template: response_format +pricing: + input: '0.2' + output: '0.2' + unit: '0.000001' + currency: USD diff --git a/api/core/model_runtime/model_providers/fireworks/llm/llm.py b/api/core/model_runtime/model_providers/fireworks/llm/llm.py new file mode 100644 index 0000000000000..2dcf1adba6451 --- /dev/null +++ b/api/core/model_runtime/model_providers/fireworks/llm/llm.py @@ -0,0 +1,610 @@ +import logging +from collections.abc import Generator +from typing import Optional, Union, cast + +from openai import OpenAI, Stream +from openai.types.chat import ChatCompletion, ChatCompletionChunk, ChatCompletionMessageToolCall +from openai.types.chat.chat_completion_chunk import ChoiceDeltaFunctionCall, ChoiceDeltaToolCall +from openai.types.chat.chat_completion_message import FunctionCall + +from core.model_runtime.callbacks.base_callback import Callback +from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta +from core.model_runtime.entities.message_entities import ( + AssistantPromptMessage, + ImagePromptMessageContent, + PromptMessage, + PromptMessageContentType, + PromptMessageTool, + SystemPromptMessage, + TextPromptMessageContent, + ToolPromptMessage, + UserPromptMessage, +) +from core.model_runtime.errors.validate import CredentialsValidateFailedError +from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel +from core.model_runtime.model_providers.fireworks._common import _CommonFireworks + +logger = logging.getLogger(__name__) + +FIREWORKS_BLOCK_MODE_PROMPT = """You should always follow the instructions and output a valid {{block}} object. 
+The structure of the {{block}} object you can found in the instructions, use {"answer": "$your_answer"} as the default structure +if you are not sure about the structure. + + +{{instructions}} + +""" # noqa: E501 + + +class FireworksLargeLanguageModel(_CommonFireworks, LargeLanguageModel): + """ + Model class for Fireworks large language model. + """ + + def _invoke( + self, + model: str, + credentials: dict, + prompt_messages: list[PromptMessage], + model_parameters: dict, + tools: Optional[list[PromptMessageTool]] = None, + stop: Optional[list[str]] = None, + stream: bool = True, + user: Optional[str] = None, + ) -> Union[LLMResult, Generator]: + """ + Invoke large language model + + :param model: model name + :param credentials: model credentials + :param prompt_messages: prompt messages + :param model_parameters: model parameters + :param tools: tools for tool calling + :param stop: stop words + :param stream: is stream response + :param user: unique user id + :return: full response or stream response chunk generator result + """ + + return self._chat_generate( + model=model, + credentials=credentials, + prompt_messages=prompt_messages, + model_parameters=model_parameters, + tools=tools, + stop=stop, + stream=stream, + user=user, + ) + + def _code_block_mode_wrapper( + self, + model: str, + credentials: dict, + prompt_messages: list[PromptMessage], + model_parameters: dict, + tools: Optional[list[PromptMessageTool]] = None, + stop: Optional[list[str]] = None, + stream: bool = True, + user: Optional[str] = None, + callbacks: Optional[list[Callback]] = None, + ) -> Union[LLMResult, Generator]: + """ + Code block mode wrapper for invoking large language model + """ + if "response_format" in model_parameters and model_parameters["response_format"] in {"JSON", "XML"}: + stop = stop or [] + self._transform_chat_json_prompts( + model=model, + credentials=credentials, + prompt_messages=prompt_messages, + model_parameters=model_parameters, + tools=tools, + stop=stop, + 
stream=stream, + user=user, + response_format=model_parameters["response_format"], + ) + model_parameters.pop("response_format") + + return self._invoke( + model=model, + credentials=credentials, + prompt_messages=prompt_messages, + model_parameters=model_parameters, + tools=tools, + stop=stop, + stream=stream, + user=user, + ) + + def _transform_chat_json_prompts( + self, + model: str, + credentials: dict, + prompt_messages: list[PromptMessage], + model_parameters: dict, + tools: list[PromptMessageTool] | None = None, + stop: list[str] | None = None, + stream: bool = True, + user: str | None = None, + response_format: str = "JSON", + ) -> None: + """ + Transform json prompts + """ + if stop is None: + stop = [] + if "```\n" not in stop: + stop.append("```\n") + if "\n```" not in stop: + stop.append("\n```") + + if len(prompt_messages) > 0 and isinstance(prompt_messages[0], SystemPromptMessage): + prompt_messages[0] = SystemPromptMessage( + content=FIREWORKS_BLOCK_MODE_PROMPT.replace("{{instructions}}", prompt_messages[0].content).replace( + "{{block}}", response_format + ) + ) + prompt_messages.append(AssistantPromptMessage(content=f"\n```{response_format}\n")) + else: + prompt_messages.insert( + 0, + SystemPromptMessage( + content=FIREWORKS_BLOCK_MODE_PROMPT.replace( + "{{instructions}}", f"Please output a valid {response_format} object." 
+ ).replace("{{block}}", response_format) + ), + ) + prompt_messages.append(AssistantPromptMessage(content=f"\n```{response_format}")) + + def get_num_tokens( + self, + model: str, + credentials: dict, + prompt_messages: list[PromptMessage], + tools: Optional[list[PromptMessageTool]] = None, + ) -> int: + """ + Get number of tokens for given prompt messages + + :param model: model name + :param credentials: model credentials + :param prompt_messages: prompt messages + :param tools: tools for tool calling + :return: + """ + return self._num_tokens_from_messages(model, prompt_messages, tools) + + def validate_credentials(self, model: str, credentials: dict) -> None: + """ + Validate model credentials + + :param model: model name + :param credentials: model credentials + :return: + """ + try: + credentials_kwargs = self._to_credential_kwargs(credentials) + client = OpenAI(**credentials_kwargs) + + client.chat.completions.create( + messages=[{"role": "user", "content": "ping"}], model=model, temperature=0, max_tokens=10, stream=False + ) + except Exception as e: + raise CredentialsValidateFailedError(str(e)) + + def _chat_generate( + self, + model: str, + credentials: dict, + prompt_messages: list[PromptMessage], + model_parameters: dict, + tools: Optional[list[PromptMessageTool]] = None, + stop: Optional[list[str]] = None, + stream: bool = True, + user: Optional[str] = None, + ) -> Union[LLMResult, Generator]: + credentials_kwargs = self._to_credential_kwargs(credentials) + client = OpenAI(**credentials_kwargs) + + extra_model_kwargs = {} + + if tools: + extra_model_kwargs["functions"] = [ + {"name": tool.name, "description": tool.description, "parameters": tool.parameters} for tool in tools + ] + + if stop: + extra_model_kwargs["stop"] = stop + + if user: + extra_model_kwargs["user"] = user + + # chat model + response = client.chat.completions.create( + messages=[self._convert_prompt_message_to_dict(m) for m in prompt_messages], + model=model, + stream=stream, + 
**model_parameters, + **extra_model_kwargs, + ) + + if stream: + return self._handle_chat_generate_stream_response(model, credentials, response, prompt_messages, tools) + return self._handle_chat_generate_response(model, credentials, response, prompt_messages, tools) + + def _handle_chat_generate_response( + self, + model: str, + credentials: dict, + response: ChatCompletion, + prompt_messages: list[PromptMessage], + tools: Optional[list[PromptMessageTool]] = None, + ) -> LLMResult: + """ + Handle llm chat response + + :param model: model name + :param credentials: credentials + :param response: response + :param prompt_messages: prompt messages + :param tools: tools for tool calling + :return: llm response + """ + assistant_message = response.choices[0].message + # assistant_message_tool_calls = assistant_message.tool_calls + assistant_message_function_call = assistant_message.function_call + + # extract tool calls from response + # tool_calls = self._extract_response_tool_calls(assistant_message_tool_calls) + function_call = self._extract_response_function_call(assistant_message_function_call) + tool_calls = [function_call] if function_call else [] + + # transform assistant message to prompt message + assistant_prompt_message = AssistantPromptMessage(content=assistant_message.content, tool_calls=tool_calls) + + # calculate num tokens + if response.usage: + # transform usage + prompt_tokens = response.usage.prompt_tokens + completion_tokens = response.usage.completion_tokens + else: + # calculate num tokens + prompt_tokens = self._num_tokens_from_messages(model, prompt_messages, tools) + completion_tokens = self._num_tokens_from_messages(model, [assistant_prompt_message]) + + # transform usage + usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens) + + # transform response + response = LLMResult( + model=response.model, + prompt_messages=prompt_messages, + message=assistant_prompt_message, + usage=usage, + 
system_fingerprint=response.system_fingerprint, + ) + + return response + + def _handle_chat_generate_stream_response( + self, + model: str, + credentials: dict, + response: Stream[ChatCompletionChunk], + prompt_messages: list[PromptMessage], + tools: Optional[list[PromptMessageTool]] = None, + ) -> Generator: + """ + Handle llm chat stream response + + :param model: model name + :param response: response + :param prompt_messages: prompt messages + :param tools: tools for tool calling + :return: llm response chunk generator + """ + full_assistant_content = "" + delta_assistant_message_function_call_storage: Optional[ChoiceDeltaFunctionCall] = None + prompt_tokens = 0 + completion_tokens = 0 + final_tool_calls = [] + final_chunk = LLMResultChunk( + model=model, + prompt_messages=prompt_messages, + delta=LLMResultChunkDelta( + index=0, + message=AssistantPromptMessage(content=""), + ), + ) + + for chunk in response: + if len(chunk.choices) == 0: + if chunk.usage: + # calculate num tokens + prompt_tokens = chunk.usage.prompt_tokens + completion_tokens = chunk.usage.completion_tokens + continue + + delta = chunk.choices[0] + has_finish_reason = delta.finish_reason is not None + + if ( + not has_finish_reason + and (delta.delta.content is None or delta.delta.content == "") + and delta.delta.function_call is None + ): + continue + + # assistant_message_tool_calls = delta.delta.tool_calls + assistant_message_function_call = delta.delta.function_call + + # extract tool calls from response + if delta_assistant_message_function_call_storage is not None: + # handle process of stream function call + if assistant_message_function_call: + # message has not ended ever + delta_assistant_message_function_call_storage.arguments += assistant_message_function_call.arguments + continue + else: + # message has ended + assistant_message_function_call = delta_assistant_message_function_call_storage + delta_assistant_message_function_call_storage = None + else: + if 
assistant_message_function_call: + # start of stream function call + delta_assistant_message_function_call_storage = assistant_message_function_call + if delta_assistant_message_function_call_storage.arguments is None: + delta_assistant_message_function_call_storage.arguments = "" + if not has_finish_reason: + continue + + # tool_calls = self._extract_response_tool_calls(assistant_message_tool_calls) + function_call = self._extract_response_function_call(assistant_message_function_call) + tool_calls = [function_call] if function_call else [] + if tool_calls: + final_tool_calls.extend(tool_calls) + + # transform assistant message to prompt message + assistant_prompt_message = AssistantPromptMessage(content=delta.delta.content or "", tool_calls=tool_calls) + + full_assistant_content += delta.delta.content or "" + + if has_finish_reason: + final_chunk = LLMResultChunk( + model=chunk.model, + prompt_messages=prompt_messages, + system_fingerprint=chunk.system_fingerprint, + delta=LLMResultChunkDelta( + index=delta.index, + message=assistant_prompt_message, + finish_reason=delta.finish_reason, + ), + ) + else: + yield LLMResultChunk( + model=chunk.model, + prompt_messages=prompt_messages, + system_fingerprint=chunk.system_fingerprint, + delta=LLMResultChunkDelta( + index=delta.index, + message=assistant_prompt_message, + ), + ) + + if not prompt_tokens: + prompt_tokens = self._num_tokens_from_messages(model, prompt_messages, tools) + + if not completion_tokens: + full_assistant_prompt_message = AssistantPromptMessage( + content=full_assistant_content, tool_calls=final_tool_calls + ) + completion_tokens = self._num_tokens_from_messages(model, [full_assistant_prompt_message]) + + # transform usage + usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens) + final_chunk.delta.usage = usage + + yield final_chunk + + def _extract_response_tool_calls( + self, response_tool_calls: list[ChatCompletionMessageToolCall | ChoiceDeltaToolCall] + ) -> 
list[AssistantPromptMessage.ToolCall]: + """ + Extract tool calls from response + + :param response_tool_calls: response tool calls + :return: list of tool calls + """ + tool_calls = [] + if response_tool_calls: + for response_tool_call in response_tool_calls: + function = AssistantPromptMessage.ToolCall.ToolCallFunction( + name=response_tool_call.function.name, arguments=response_tool_call.function.arguments + ) + + tool_call = AssistantPromptMessage.ToolCall( + id=response_tool_call.id, type=response_tool_call.type, function=function + ) + tool_calls.append(tool_call) + + return tool_calls + + def _extract_response_function_call( + self, response_function_call: FunctionCall | ChoiceDeltaFunctionCall + ) -> AssistantPromptMessage.ToolCall: + """ + Extract function call from response + + :param response_function_call: response function call + :return: tool call + """ + tool_call = None + if response_function_call: + function = AssistantPromptMessage.ToolCall.ToolCallFunction( + name=response_function_call.name, arguments=response_function_call.arguments + ) + + tool_call = AssistantPromptMessage.ToolCall( + id=response_function_call.name, type="function", function=function + ) + + return tool_call + + def _convert_prompt_message_to_dict(self, message: PromptMessage) -> dict: + """ + Convert PromptMessage to dict for Fireworks API + """ + if isinstance(message, UserPromptMessage): + message = cast(UserPromptMessage, message) + if isinstance(message.content, str): + message_dict = {"role": "user", "content": message.content} + else: + sub_messages = [] + for message_content in message.content: + if message_content.type == PromptMessageContentType.TEXT: + message_content = cast(TextPromptMessageContent, message_content) + sub_message_dict = {"type": "text", "text": message_content.data} + sub_messages.append(sub_message_dict) + elif message_content.type == PromptMessageContentType.IMAGE: + message_content = cast(ImagePromptMessageContent, message_content) + 
sub_message_dict = { + "type": "image_url", + "image_url": {"url": message_content.data, "detail": message_content.detail.value}, + } + sub_messages.append(sub_message_dict) + + message_dict = {"role": "user", "content": sub_messages} + elif isinstance(message, AssistantPromptMessage): + message = cast(AssistantPromptMessage, message) + message_dict = {"role": "assistant", "content": message.content} + if message.tool_calls: + # message_dict["tool_calls"] = [tool_call.dict() for tool_call in + # message.tool_calls] + function_call = message.tool_calls[0] + message_dict["function_call"] = { + "name": function_call.function.name, + "arguments": function_call.function.arguments, + } + elif isinstance(message, SystemPromptMessage): + message = cast(SystemPromptMessage, message) + message_dict = {"role": "system", "content": message.content} + elif isinstance(message, ToolPromptMessage): + message = cast(ToolPromptMessage, message) + # message_dict = { + # "role": "tool", + # "content": message.content, + # "tool_call_id": message.tool_call_id + # } + message_dict = {"role": "function", "content": message.content, "name": message.tool_call_id} + else: + raise ValueError(f"Got unknown type {message}") + + if message.name: + message_dict["name"] = message.name + + return message_dict + + def _num_tokens_from_messages( + self, + model: str, + messages: list[PromptMessage], + tools: Optional[list[PromptMessageTool]] = None, + credentials: dict = None, + ) -> int: + """ + Approximate num tokens with GPT2 tokenizer. 
+ """ + + tokens_per_message = 3 + tokens_per_name = 1 + + num_tokens = 0 + messages_dict = [self._convert_prompt_message_to_dict(m) for m in messages] + for message in messages_dict: + num_tokens += tokens_per_message + for key, value in message.items(): + # Cast str(value) in case the message value is not a string + # This occurs with function messages + # TODO: The current token calculation method for the image type is not implemented, + # which need to download the image and then get the resolution for calculation, + # and will increase the request delay + if isinstance(value, list): + text = "" + for item in value: + if isinstance(item, dict) and item["type"] == "text": + text += item["text"] + + value = text + + if key == "tool_calls": + for tool_call in value: + for t_key, t_value in tool_call.items(): + num_tokens += self._get_num_tokens_by_gpt2(t_key) + if t_key == "function": + for f_key, f_value in t_value.items(): + num_tokens += self._get_num_tokens_by_gpt2(f_key) + num_tokens += self._get_num_tokens_by_gpt2(f_value) + else: + num_tokens += self._get_num_tokens_by_gpt2(t_key) + num_tokens += self._get_num_tokens_by_gpt2(t_value) + else: + num_tokens += self._get_num_tokens_by_gpt2(str(value)) + + if key == "name": + num_tokens += tokens_per_name + + # every reply is primed with assistant + num_tokens += 3 + + if tools: + num_tokens += self._num_tokens_for_tools(tools) + + return num_tokens + + def _num_tokens_for_tools(self, tools: list[PromptMessageTool]) -> int: + """ + Calculate num tokens for tool calling with tiktoken package. 
+ + :param tools: tools for tool calling + :return: number of tokens + """ + num_tokens = 0 + for tool in tools: + num_tokens += self._get_num_tokens_by_gpt2("type") + num_tokens += self._get_num_tokens_by_gpt2("function") + num_tokens += self._get_num_tokens_by_gpt2("function") + + # calculate num tokens for function object + num_tokens += self._get_num_tokens_by_gpt2("name") + num_tokens += self._get_num_tokens_by_gpt2(tool.name) + num_tokens += self._get_num_tokens_by_gpt2("description") + num_tokens += self._get_num_tokens_by_gpt2(tool.description) + parameters = tool.parameters + num_tokens += self._get_num_tokens_by_gpt2("parameters") + if "title" in parameters: + num_tokens += self._get_num_tokens_by_gpt2("title") + num_tokens += self._get_num_tokens_by_gpt2(parameters.get("title")) + num_tokens += self._get_num_tokens_by_gpt2("type") + num_tokens += self._get_num_tokens_by_gpt2(parameters.get("type")) + if "properties" in parameters: + num_tokens += self._get_num_tokens_by_gpt2("properties") + for key, value in parameters.get("properties").items(): + num_tokens += self._get_num_tokens_by_gpt2(key) + for field_key, field_value in value.items(): + num_tokens += self._get_num_tokens_by_gpt2(field_key) + if field_key == "enum": + for enum_field in field_value: + num_tokens += 3 + num_tokens += self._get_num_tokens_by_gpt2(enum_field) + else: + num_tokens += self._get_num_tokens_by_gpt2(field_key) + num_tokens += self._get_num_tokens_by_gpt2(str(field_value)) + if "required" in parameters: + num_tokens += self._get_num_tokens_by_gpt2("required") + for required_field in parameters["required"]: + num_tokens += 3 + num_tokens += self._get_num_tokens_by_gpt2(required_field) + + return num_tokens diff --git a/api/core/model_runtime/model_providers/fireworks/llm/mixtral-8x22b-instruct.yaml b/api/core/model_runtime/model_providers/fireworks/llm/mixtral-8x22b-instruct.yaml new file mode 100644 index 0000000000000..87d977e26cf1b --- /dev/null +++ 
b/api/core/model_runtime/model_providers/fireworks/llm/mixtral-8x22b-instruct.yaml @@ -0,0 +1,46 @@ +model: accounts/fireworks/models/mixtral-8x22b-instruct +label: + zh_Hans: Mixtral MoE 8x22B Instruct + en_US: Mixtral MoE 8x22B Instruct +model_type: llm +features: + - agent-thought + - tool-call +model_properties: + mode: chat + context_size: 65536 +parameter_rules: + - name: temperature + use_template: temperature + - name: top_p + use_template: top_p + - name: top_k + label: + zh_Hans: 取样数量 + en_US: Top k + type: int + help: + zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 + en_US: Only sample from the top K options for each subsequent token. + - name: max_tokens + use_template: max_tokens + - name: context_length_exceeded_behavior + default: None + label: + zh_Hans: 上下文长度超出行为 + en_US: Context Length Exceeded Behavior + help: + zh_Hans: 上下文长度超出行为 + en_US: Context Length Exceeded Behavior + type: string + options: + - None + - truncate + - error + - name: response_format + use_template: response_format +pricing: + input: '1.2' + output: '1.2' + unit: '0.000001' + currency: USD diff --git a/api/core/model_runtime/model_providers/fireworks/llm/mixtral-8x7b-instruct-hf.yaml b/api/core/model_runtime/model_providers/fireworks/llm/mixtral-8x7b-instruct-hf.yaml new file mode 100644 index 0000000000000..e3d5a90858c5a --- /dev/null +++ b/api/core/model_runtime/model_providers/fireworks/llm/mixtral-8x7b-instruct-hf.yaml @@ -0,0 +1,46 @@ +model: accounts/fireworks/models/mixtral-8x7b-instruct-hf +label: + zh_Hans: Mixtral MoE 8x7B Instruct(HF version) + en_US: Mixtral MoE 8x7B Instruct(HF version) +model_type: llm +features: + - agent-thought + - tool-call +model_properties: + mode: chat + context_size: 32768 +parameter_rules: + - name: temperature + use_template: temperature + - name: top_p + use_template: top_p + - name: top_k + label: + zh_Hans: 取样数量 + en_US: Top k + type: int + help: + zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 + en_US: Only sample from the top K options for each subsequent token. 
+ - name: max_tokens + use_template: max_tokens + - name: context_length_exceeded_behavior + default: None + label: + zh_Hans: 上下文长度超出行为 + en_US: Context Length Exceeded Behavior + help: + zh_Hans: 上下文长度超出行为 + en_US: Context Length Exceeded Behavior + type: string + options: + - None + - truncate + - error + - name: response_format + use_template: response_format +pricing: + input: '0.5' + output: '0.5' + unit: '0.000001' + currency: USD diff --git a/api/core/model_runtime/model_providers/fireworks/llm/mixtral-8x7b-instruct.yaml b/api/core/model_runtime/model_providers/fireworks/llm/mixtral-8x7b-instruct.yaml new file mode 100644 index 0000000000000..45f632ceff2cf --- /dev/null +++ b/api/core/model_runtime/model_providers/fireworks/llm/mixtral-8x7b-instruct.yaml @@ -0,0 +1,46 @@ +model: accounts/fireworks/models/mixtral-8x7b-instruct +label: + zh_Hans: Mixtral MoE 8x7B Instruct + en_US: Mixtral MoE 8x7B Instruct +model_type: llm +features: + - agent-thought + - tool-call +model_properties: + mode: chat + context_size: 32768 +parameter_rules: + - name: temperature + use_template: temperature + - name: top_p + use_template: top_p + - name: top_k + label: + zh_Hans: 取样数量 + en_US: Top k + type: int + help: + zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 + en_US: Only sample from the top K options for each subsequent token. 
+ - name: max_tokens + use_template: max_tokens + - name: context_length_exceeded_behavior + default: None + label: + zh_Hans: 上下文长度超出行为 + en_US: Context Length Exceeded Behavior + help: + zh_Hans: 上下文长度超出行为 + en_US: Context Length Exceeded Behavior + type: string + options: + - None + - truncate + - error + - name: response_format + use_template: response_format +pricing: + input: '0.5' + output: '0.5' + unit: '0.000001' + currency: USD diff --git a/api/core/model_runtime/model_providers/fireworks/llm/mythomax-l2-13b.yaml b/api/core/model_runtime/model_providers/fireworks/llm/mythomax-l2-13b.yaml new file mode 100644 index 0000000000000..9c3486ba10751 --- /dev/null +++ b/api/core/model_runtime/model_providers/fireworks/llm/mythomax-l2-13b.yaml @@ -0,0 +1,46 @@ +model: accounts/fireworks/models/mythomax-l2-13b +label: + zh_Hans: MythoMax L2 13b + en_US: MythoMax L2 13b +model_type: llm +features: + - agent-thought + - tool-call +model_properties: + mode: chat + context_size: 4096 +parameter_rules: + - name: temperature + use_template: temperature + - name: top_p + use_template: top_p + - name: top_k + label: + zh_Hans: 取样数量 + en_US: Top k + type: int + help: + zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 + en_US: Only sample from the top K options for each subsequent token. 
+ - name: max_tokens + use_template: max_tokens + - name: context_length_exceeded_behavior + default: None + label: + zh_Hans: 上下文长度超出行为 + en_US: Context Length Exceeded Behavior + help: + zh_Hans: 上下文长度超出行为 + en_US: Context Length Exceeded Behavior + type: string + options: + - None + - truncate + - error + - name: response_format + use_template: response_format +pricing: + input: '0.2' + output: '0.2' + unit: '0.000001' + currency: USD diff --git a/api/core/model_runtime/model_providers/fireworks/llm/phi-3-vision-128k-instruct.yaml b/api/core/model_runtime/model_providers/fireworks/llm/phi-3-vision-128k-instruct.yaml new file mode 100644 index 0000000000000..e399f2edb1b1b --- /dev/null +++ b/api/core/model_runtime/model_providers/fireworks/llm/phi-3-vision-128k-instruct.yaml @@ -0,0 +1,46 @@ +model: accounts/fireworks/models/phi-3-vision-128k-instruct +label: + zh_Hans: Phi3.5 Vision Instruct + en_US: Phi3.5 Vision Instruct +model_type: llm +features: + - agent-thought + - vision +model_properties: + mode: chat + context_size: 8192 +parameter_rules: + - name: temperature + use_template: temperature + - name: top_p + use_template: top_p + - name: top_k + label: + zh_Hans: 取样数量 + en_US: Top k + type: int + help: + zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 + en_US: Only sample from the top K options for each subsequent token. 
+ - name: max_tokens + use_template: max_tokens + - name: context_length_exceeded_behavior + default: None + label: + zh_Hans: 上下文长度超出行为 + en_US: Context Length Exceeded Behavior + help: + zh_Hans: 上下文长度超出行为 + en_US: Context Length Exceeded Behavior + type: string + options: + - None + - truncate + - error + - name: response_format + use_template: response_format +pricing: + input: '0.2' + output: '0.2' + unit: '0.000001' + currency: USD diff --git a/api/core/model_runtime/model_providers/fireworks/llm/yi-large.yaml b/api/core/model_runtime/model_providers/fireworks/llm/yi-large.yaml new file mode 100644 index 0000000000000..bb4b6f994ec12 --- /dev/null +++ b/api/core/model_runtime/model_providers/fireworks/llm/yi-large.yaml @@ -0,0 +1,45 @@ +model: accounts/yi-01-ai/models/yi-large +label: + zh_Hans: Yi-Large + en_US: Yi-Large +model_type: llm +features: + - agent-thought +model_properties: + mode: chat + context_size: 32768 +parameter_rules: + - name: temperature + use_template: temperature + - name: top_p + use_template: top_p + - name: top_k + label: + zh_Hans: 取样数量 + en_US: Top k + type: int + help: + zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 + en_US: Only sample from the top K options for each subsequent token. 
+ - name: max_tokens + use_template: max_tokens + - name: context_length_exceeded_behavior + default: None + label: + zh_Hans: 上下文长度超出行为 + en_US: Context Length Exceeded Behavior + help: + zh_Hans: 上下文长度超出行为 + en_US: Context Length Exceeded Behavior + type: string + options: + - None + - truncate + - error + - name: response_format + use_template: response_format +pricing: + input: '3' + output: '3' + unit: '0.000001' + currency: USD diff --git a/api/pyproject.toml b/api/pyproject.toml index 1f483fc49f032..93482b032d536 100644 --- a/api/pyproject.toml +++ b/api/pyproject.toml @@ -100,6 +100,7 @@ exclude = [ [tool.pytest_env] OPENAI_API_KEY = "sk-IamNotARealKeyJustForMockTestKawaiiiiiiiiii" UPSTAGE_API_KEY = "up-aaaaaaaaaaaaaaaaaaaa" +FIREWORKS_API_KEY = "fw_aaaaaaaaaaaaaaaaaaaa" AZURE_OPENAI_API_BASE = "https://difyai-openai.openai.azure.com" AZURE_OPENAI_API_KEY = "xxxxb1707exxxxxxxxxxaaxxxxxf94" ANTHROPIC_API_KEY = "sk-ant-api11-IamNotARealKeyJustForMockTestKawaiiiiiiiiii-NotBaka-ASkksz" diff --git a/api/tests/integration_tests/model_runtime/fireworks/__init__.py b/api/tests/integration_tests/model_runtime/fireworks/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/api/tests/integration_tests/model_runtime/fireworks/test_llm.py b/api/tests/integration_tests/model_runtime/fireworks/test_llm.py new file mode 100644 index 0000000000000..699ca293a2fca --- /dev/null +++ b/api/tests/integration_tests/model_runtime/fireworks/test_llm.py @@ -0,0 +1,186 @@ +import os +from collections.abc import Generator + +import pytest + +from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta +from core.model_runtime.entities.message_entities import ( + AssistantPromptMessage, + PromptMessageTool, + SystemPromptMessage, + UserPromptMessage, +) +from core.model_runtime.entities.model_entities import AIModelEntity +from core.model_runtime.errors.validate import CredentialsValidateFailedError +from 
core.model_runtime.model_providers.fireworks.llm.llm import FireworksLargeLanguageModel + +"""FOR MOCK FIXTURES, DO NOT REMOVE""" +from tests.integration_tests.model_runtime.__mock.openai import setup_openai_mock + + +def test_predefined_models(): + model = FireworksLargeLanguageModel() + model_schemas = model.predefined_models() + + assert len(model_schemas) >= 1 + assert isinstance(model_schemas[0], AIModelEntity) + + +@pytest.mark.parametrize("setup_openai_mock", [["chat"]], indirect=True) +def test_validate_credentials_for_chat_model(setup_openai_mock): + model = FireworksLargeLanguageModel() + + with pytest.raises(CredentialsValidateFailedError): + # model name to gpt-3.5-turbo because of mocking + model.validate_credentials(model="gpt-3.5-turbo", credentials={"fireworks_api_key": "invalid_key"}) + + model.validate_credentials( + model="accounts/fireworks/models/llama-v3p1-8b-instruct", + credentials={"fireworks_api_key": os.environ.get("FIREWORKS_API_KEY")}, + ) + + +@pytest.mark.parametrize("setup_openai_mock", [["chat"]], indirect=True) +def test_invoke_chat_model(setup_openai_mock): + model = FireworksLargeLanguageModel() + + result = model.invoke( + model="accounts/fireworks/models/llama-v3p1-8b-instruct", + credentials={"fireworks_api_key": os.environ.get("FIREWORKS_API_KEY")}, + prompt_messages=[ + SystemPromptMessage( + content="You are a helpful AI assistant.", + ), + UserPromptMessage(content="Hello World!"), + ], + model_parameters={ + "temperature": 0.0, + "top_p": 1.0, + "presence_penalty": 0.0, + "frequency_penalty": 0.0, + "max_tokens": 10, + }, + stop=["How"], + stream=False, + user="foo", + ) + + assert isinstance(result, LLMResult) + assert len(result.message.content) > 0 + + +@pytest.mark.parametrize("setup_openai_mock", [["chat"]], indirect=True) +def test_invoke_chat_model_with_tools(setup_openai_mock): + model = FireworksLargeLanguageModel() + + result = model.invoke( + model="accounts/fireworks/models/llama-v3p1-8b-instruct", + 
credentials={"fireworks_api_key": os.environ.get("FIREWORKS_API_KEY")}, + prompt_messages=[ + SystemPromptMessage( + content="You are a helpful AI assistant.", + ), + UserPromptMessage( + content="what's the weather today in London?", + ), + ], + model_parameters={"temperature": 0.0, "max_tokens": 100}, + tools=[ + PromptMessageTool( + name="get_weather", + description="Determine weather in my location", + parameters={ + "type": "object", + "properties": { + "location": {"type": "string", "description": "The city and state e.g. San Francisco, CA"}, + "unit": {"type": "string", "enum": ["c", "f"]}, + }, + "required": ["location"], + }, + ), + PromptMessageTool( + name="get_stock_price", + description="Get the current stock price", + parameters={ + "type": "object", + "properties": {"symbol": {"type": "string", "description": "The stock symbol"}}, + "required": ["symbol"], + }, + ), + ], + stream=False, + user="foo", + ) + + assert isinstance(result, LLMResult) + assert isinstance(result.message, AssistantPromptMessage) + assert len(result.message.tool_calls) > 0 + + +@pytest.mark.parametrize("setup_openai_mock", [["chat"]], indirect=True) +def test_invoke_stream_chat_model(setup_openai_mock): + model = FireworksLargeLanguageModel() + + result = model.invoke( + model="accounts/fireworks/models/llama-v3p1-8b-instruct", + credentials={"fireworks_api_key": os.environ.get("FIREWORKS_API_KEY")}, + prompt_messages=[ + SystemPromptMessage( + content="You are a helpful AI assistant.", + ), + UserPromptMessage(content="Hello World!"), + ], + model_parameters={"temperature": 0.0, "max_tokens": 100}, + stream=True, + user="foo", + ) + + assert isinstance(result, Generator) + + for chunk in result: + assert isinstance(chunk, LLMResultChunk) + assert isinstance(chunk.delta, LLMResultChunkDelta) + assert isinstance(chunk.delta.message, AssistantPromptMessage) + assert len(chunk.delta.message.content) > 0 if chunk.delta.finish_reason is None else True + if chunk.delta.finish_reason 
is not None: + assert chunk.delta.usage is not None + assert chunk.delta.usage.completion_tokens > 0 + + +def test_get_num_tokens(): + model = FireworksLargeLanguageModel() + + num_tokens = model.get_num_tokens( + model="accounts/fireworks/models/llama-v3p1-8b-instruct", + credentials={"fireworks_api_key": os.environ.get("FIREWORKS_API_KEY")}, + prompt_messages=[UserPromptMessage(content="Hello World!")], + ) + + assert num_tokens == 10 + + num_tokens = model.get_num_tokens( + model="accounts/fireworks/models/llama-v3p1-8b-instruct", + credentials={"fireworks_api_key": os.environ.get("FIREWORKS_API_KEY")}, + prompt_messages=[ + SystemPromptMessage( + content="You are a helpful AI assistant.", + ), + UserPromptMessage(content="Hello World!"), + ], + tools=[ + PromptMessageTool( + name="get_weather", + description="Determine weather in my location", + parameters={ + "type": "object", + "properties": { + "location": {"type": "string", "description": "The city and state e.g. San Francisco, CA"}, + "unit": {"type": "string", "enum": ["c", "f"]}, + }, + "required": ["location"], + }, + ), + ], + ) + + assert num_tokens == 77 diff --git a/api/tests/integration_tests/model_runtime/fireworks/test_provider.py b/api/tests/integration_tests/model_runtime/fireworks/test_provider.py new file mode 100644 index 0000000000000..a68cf1a1a8fbd --- /dev/null +++ b/api/tests/integration_tests/model_runtime/fireworks/test_provider.py @@ -0,0 +1,17 @@ +import os + +import pytest + +from core.model_runtime.errors.validate import CredentialsValidateFailedError +from core.model_runtime.model_providers.fireworks.fireworks import FireworksProvider +from tests.integration_tests.model_runtime.__mock.openai import setup_openai_mock + + +@pytest.mark.parametrize("setup_openai_mock", [["chat"]], indirect=True) +def test_validate_provider_credentials(setup_openai_mock): + provider = FireworksProvider() + + with pytest.raises(CredentialsValidateFailedError): + 
provider.validate_provider_credentials(credentials={}) + + provider.validate_provider_credentials(credentials={"fireworks_api_key": os.environ.get("FIREWORKS_API_KEY")}) diff --git a/dev/pytest/pytest_model_runtime.sh b/dev/pytest/pytest_model_runtime.sh index aba13292ab831..4c1c6bf4f3ab1 100755 --- a/dev/pytest/pytest_model_runtime.sh +++ b/dev/pytest/pytest_model_runtime.sh @@ -6,5 +6,5 @@ pytest api/tests/integration_tests/model_runtime/anthropic \ api/tests/integration_tests/model_runtime/openai api/tests/integration_tests/model_runtime/chatglm \ api/tests/integration_tests/model_runtime/google api/tests/integration_tests/model_runtime/xinference \ api/tests/integration_tests/model_runtime/huggingface_hub/test_llm.py \ - api/tests/integration_tests/model_runtime/upstage - + api/tests/integration_tests/model_runtime/upstage \ + api/tests/integration_tests/model_runtime/fireworks From 740fad06c1fe9a5a24a6afdd7174807b82bbb464 Mon Sep 17 00:00:00 2001 From: Waffle <52460705+ox01024@users.noreply.github.com> Date: Sun, 22 Sep 2024 10:14:14 +0800 Subject: [PATCH 016/235] feat(tools/cogview): Updated cogview tool to support cogview-3 and the latest cogview-3-plus (#8382) --- .../zhipuai/zhipuai_sdk/__init__.py | 3 +- .../zhipuai/zhipuai_sdk/__version__.py | 2 +- .../zhipuai/zhipuai_sdk/_client.py | 29 +- .../zhipuai_sdk/api_resource/__init__.py | 35 +- .../api_resource/assistant/__init__.py | 3 + .../api_resource/assistant/assistant.py | 122 +++ .../zhipuai_sdk/api_resource/batches.py | 146 ++++ .../zhipuai_sdk/api_resource/chat/__init__.py | 5 + .../api_resource/chat/async_completions.py | 91 ++- .../zhipuai_sdk/api_resource/chat/chat.py | 15 +- .../api_resource/chat/completions.py | 80 +- .../zhipuai_sdk/api_resource/embeddings.py | 13 +- .../zhipuai/zhipuai_sdk/api_resource/files.py | 167 +++- .../api_resource/fine_tuning/__init__.py | 5 + .../api_resource/fine_tuning/fine_tuning.py | 15 +- .../api_resource/fine_tuning/jobs/__init__.py | 3 + .../fine_tuning/{ => 
jobs}/jobs.py | 68 +- .../fine_tuning/models/__init__.py | 3 + .../fine_tuning/models/fine_tuned_models.py | 41 + .../zhipuai_sdk/api_resource/images.py | 13 +- .../api_resource/knowledge/__init__.py | 3 + .../knowledge/document/__init__.py | 3 + .../knowledge/document/document.py | 217 +++++ .../api_resource/knowledge/knowledge.py | 173 ++++ .../api_resource/tools/__init__.py | 3 + .../zhipuai_sdk/api_resource/tools/tools.py | 65 ++ .../api_resource/videos/__init__.py | 7 + .../zhipuai_sdk/api_resource/videos/videos.py | 77 ++ .../zhipuai/zhipuai_sdk/core/__init__.py | 108 +++ .../zhipuai/zhipuai_sdk/core/_base_api.py | 1 + .../zhipuai/zhipuai_sdk/core/_base_compat.py | 209 +++++ .../zhipuai/zhipuai_sdk/core/_base_models.py | 671 ++++++++++++++++ .../zhipuai/zhipuai_sdk/core/_base_type.py | 99 ++- .../zhipuai/zhipuai_sdk/core/_constants.py | 12 + .../zhipuai/zhipuai_sdk/core/_errors.py | 17 +- .../zhipuai/zhipuai_sdk/core/_files.py | 78 +- .../zhipuai/zhipuai_sdk/core/_http_client.py | 756 +++++++++++++++--- .../zhipuai/zhipuai_sdk/core/_jwt_token.py | 6 +- .../core/_legacy_binary_response.py | 207 +++++ .../zhipuai_sdk/core/_legacy_response.py | 341 ++++++++ .../zhipuai/zhipuai_sdk/core/_request_opt.py | 89 ++- .../zhipuai/zhipuai_sdk/core/_response.py | 361 ++++++++- .../zhipuai/zhipuai_sdk/core/_sse_client.py | 68 +- .../zhipuai/zhipuai_sdk/core/_utils.py | 19 - .../zhipuai_sdk/core/_utils/__init__.py | 52 ++ .../zhipuai_sdk/core/_utils/_transform.py | 383 +++++++++ .../zhipuai_sdk/core/_utils/_typing.py | 122 +++ .../zhipuai/zhipuai_sdk/core/_utils/_utils.py | 409 ++++++++++ .../zhipuai/zhipuai_sdk/core/logs.py | 78 ++ .../zhipuai/zhipuai_sdk/core/pagination.py | 62 ++ .../zhipuai_sdk/types/assistant/__init__.py | 5 + .../types/assistant/assistant_completion.py | 40 + .../assistant_conversation_params.py | 7 + .../assistant/assistant_conversation_resp.py | 29 + .../assistant/assistant_create_params.py | 32 + .../types/assistant/assistant_support_resp.py | 21 + 
.../types/assistant/message/__init__.py | 3 + .../assistant/message/message_content.py | 13 + .../assistant/message/text_content_block.py | 14 + .../tools/code_interpreter_delta_block.py | 27 + .../message/tools/drawing_tool_delta_block.py | 21 + .../message/tools/function_delta_block.py | 22 + .../message/tools/retrieval_delta_black.py | 41 + .../assistant/message/tools/tools_type.py | 16 + .../message/tools/web_browser_delta_block.py | 48 ++ .../assistant/message/tools_delta_block.py | 16 + .../zhipuai/zhipuai_sdk/types/batch.py | 82 ++ .../zhipuai_sdk/types/batch_create_params.py | 37 + .../zhipuai/zhipuai_sdk/types/batch_error.py | 21 + .../zhipuai_sdk/types/batch_list_params.py | 20 + .../zhipuai_sdk/types/batch_request_counts.py | 14 + .../types/chat/async_chat_completion.py | 5 +- .../zhipuai_sdk/types/chat/chat_completion.py | 2 +- .../types/chat/chat_completion_chunk.py | 6 +- .../types/chat/code_geex/code_geex_params.py | 146 ++++ .../zhipuai/zhipuai_sdk/types/embeddings.py | 3 +- .../zhipuai_sdk/types/files/__init__.py | 5 + .../types/files/file_create_params.py | 38 + .../zhipuai_sdk/types/files/file_deleted.py | 13 + .../types/{ => files}/file_object.py | 4 +- .../zhipuai_sdk/types/files/upload_detail.py | 13 + .../types/fine_tuning/fine_tuning_job.py | 2 +- .../fine_tuning/fine_tuning_job_event.py | 2 +- .../types/fine_tuning/models/__init__.py | 1 + .../fine_tuning/models/fine_tuned_models.py | 13 + .../zhipuai/zhipuai_sdk/types/image.py | 2 +- .../zhipuai_sdk/types/knowledge/__init__.py | 8 + .../types/knowledge/document/__init__.py | 8 + .../types/knowledge/document/document.py | 51 ++ .../document/document_edit_params.py | 29 + .../document/document_list_params.py | 26 + .../knowledge/document/document_list_resp.py | 11 + .../zhipuai_sdk/types/knowledge/knowledge.py | 21 + .../knowledge/knowledge_create_params.py | 30 + .../types/knowledge/knowledge_list_params.py | 15 + .../types/knowledge/knowledge_list_resp.py | 11 + 
.../types/knowledge/knowledge_used.py | 21 + .../types/sensitive_word_check/__init__.py | 3 + .../sensitive_word_check.py | 14 + .../zhipuai_sdk/types/tools/__init__.py | 9 + .../types/tools/tools_web_search_params.py | 35 + .../zhipuai_sdk/types/tools/web_search.py | 71 ++ .../types/tools/web_search_chunk.py | 33 + .../zhipuai_sdk/types/video/__init__.py | 3 + .../types/video/video_create_params.py | 27 + .../zhipuai_sdk/types/video/video_object.py | 30 + .../builtin/cogview/tools/cogview3.py | 48 +- .../builtin/cogview/tools/cogview3.yaml | 45 +- 108 files changed, 6498 insertions(+), 390 deletions(-) create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/assistant/__init__.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/assistant/assistant.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/batches.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/fine_tuning/jobs/__init__.py rename api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/fine_tuning/{ => jobs}/jobs.py (53%) create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/fine_tuning/models/__init__.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/fine_tuning/models/fine_tuned_models.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/knowledge/__init__.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/knowledge/document/__init__.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/knowledge/document/document.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/knowledge/knowledge.py create mode 100644 
api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/tools/__init__.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/tools/tools.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/videos/__init__.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/videos/videos.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_base_compat.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_base_models.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_constants.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_legacy_binary_response.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_legacy_response.py delete mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_utils.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_utils/__init__.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_utils/_transform.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_utils/_typing.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_utils/_utils.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/logs.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/pagination.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/__init__.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/assistant_completion.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/assistant_conversation_params.py create mode 100644 
api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/assistant_conversation_resp.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/assistant_create_params.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/assistant_support_resp.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/__init__.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/message_content.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/text_content_block.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/tools/code_interpreter_delta_block.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/tools/drawing_tool_delta_block.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/tools/function_delta_block.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/tools/retrieval_delta_black.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/tools/tools_type.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/tools/web_browser_delta_block.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/tools_delta_block.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/batch.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/batch_create_params.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/batch_error.py create mode 100644 
api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/batch_list_params.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/batch_request_counts.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/chat/code_geex/code_geex_params.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/files/__init__.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/files/file_create_params.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/files/file_deleted.py rename api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/{ => files}/file_object.py (86%) create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/files/upload_detail.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/fine_tuning/models/__init__.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/fine_tuning/models/fine_tuned_models.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/__init__.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/document/__init__.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/document/document.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/document/document_edit_params.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/document/document_list_params.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/document/document_list_resp.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/knowledge.py create mode 100644 
api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/knowledge_create_params.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/knowledge_list_params.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/knowledge_list_resp.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/knowledge_used.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/sensitive_word_check/__init__.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/sensitive_word_check/sensitive_word_check.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/tools/__init__.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/tools/tools_web_search_params.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/tools/web_search.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/tools/web_search_chunk.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/video/__init__.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/video/video_create_params.py create mode 100644 api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/video/video_object.py diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/__init__.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/__init__.py index bf9b093cb3f02..fc71d64714bd9 100644 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/__init__.py +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/__init__.py @@ -1,7 +1,8 @@ from .__version__ import __version__ from ._client import ZhipuAI -from .core._errors import ( +from .core import ( APIAuthenticationError, + APIConnectionError, APIInternalError, 
APIReachLimitError, APIRequestFailedError, diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/__version__.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/__version__.py index 659f38d7ff32d..51f8c49ecb827 100644 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/__version__.py +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/__version__.py @@ -1 +1 @@ -__version__ = "v2.0.1" +__version__ = "v2.1.0" diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/_client.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/_client.py index df9e506095fab..705d371e628f0 100644 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/_client.py +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/_client.py @@ -9,15 +9,13 @@ from typing_extensions import override from . import api_resource -from .core import _jwt_token -from .core._base_type import NOT_GIVEN, NotGiven -from .core._errors import ZhipuAIError -from .core._http_client import ZHIPUAI_DEFAULT_MAX_RETRIES, HttpClient +from .core import NOT_GIVEN, ZHIPUAI_DEFAULT_MAX_RETRIES, HttpClient, NotGiven, ZhipuAIError, _jwt_token class ZhipuAI(HttpClient): - chat: api_resource.chat + chat: api_resource.chat.Chat api_key: str + _disable_token_cache: bool = True def __init__( self, @@ -28,10 +26,15 @@ def __init__( max_retries: int = ZHIPUAI_DEFAULT_MAX_RETRIES, http_client: httpx.Client | None = None, custom_headers: Mapping[str, str] | None = None, + disable_token_cache: bool = True, + _strict_response_validation: bool = False, ) -> None: if api_key is None: - raise ZhipuAIError("No api_key provided, please provide it through parameters or environment variables") + api_key = os.environ.get("ZHIPUAI_API_KEY") + if api_key is None: + raise ZhipuAIError("未提供api_key,请通过参数或环境变量提供") self.api_key = api_key + self._disable_token_cache = disable_token_cache if base_url is None: base_url = os.environ.get("ZHIPUAI_BASE_URL") 
@@ -42,21 +45,31 @@ def __init__( super().__init__( version=__version__, base_url=base_url, + max_retries=max_retries, timeout=timeout, custom_httpx_client=http_client, custom_headers=custom_headers, + _strict_response_validation=_strict_response_validation, ) self.chat = api_resource.chat.Chat(self) self.images = api_resource.images.Images(self) self.embeddings = api_resource.embeddings.Embeddings(self) self.files = api_resource.files.Files(self) self.fine_tuning = api_resource.fine_tuning.FineTuning(self) + self.batches = api_resource.Batches(self) + self.knowledge = api_resource.Knowledge(self) + self.tools = api_resource.Tools(self) + self.videos = api_resource.Videos(self) + self.assistant = api_resource.Assistant(self) @property @override - def _auth_headers(self) -> dict[str, str]: + def auth_headers(self) -> dict[str, str]: api_key = self.api_key - return {"Authorization": f"{_jwt_token.generate_token(api_key)}"} + if self._disable_token_cache: + return {"Authorization": f"Bearer {api_key}"} + else: + return {"Authorization": f"Bearer {_jwt_token.generate_token(api_key)}"} def __del__(self) -> None: if not hasattr(self, "_has_custom_http_client") or not hasattr(self, "close") or not hasattr(self, "_client"): diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/__init__.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/__init__.py index 0a90e21e48bcc..4fe0719dde3e0 100644 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/__init__.py +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/__init__.py @@ -1,5 +1,34 @@ -from .chat import chat +from .assistant import ( + Assistant, +) +from .batches import Batches +from .chat import ( + AsyncCompletions, + Chat, + Completions, +) from .embeddings import Embeddings -from .files import Files -from .fine_tuning import fine_tuning +from .files import Files, FilesWithRawResponse +from .fine_tuning import 
FineTuning from .images import Images +from .knowledge import Knowledge +from .tools import Tools +from .videos import ( + Videos, +) + +__all__ = [ + "Videos", + "AsyncCompletions", + "Chat", + "Completions", + "Images", + "Embeddings", + "Files", + "FilesWithRawResponse", + "FineTuning", + "Batches", + "Knowledge", + "Tools", + "Assistant", +] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/assistant/__init__.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/assistant/__init__.py new file mode 100644 index 0000000000000..ce619aa7f0922 --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/assistant/__init__.py @@ -0,0 +1,3 @@ +from .assistant import Assistant + +__all__ = ["Assistant"] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/assistant/assistant.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/assistant/assistant.py new file mode 100644 index 0000000000000..f772340a82c4b --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/assistant/assistant.py @@ -0,0 +1,122 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING, Optional + +import httpx + +from ...core import ( + NOT_GIVEN, + BaseAPI, + Body, + Headers, + NotGiven, + StreamResponse, + deepcopy_minimal, + make_request_options, + maybe_transform, +) +from ...types.assistant import AssistantCompletion +from ...types.assistant.assistant_conversation_resp import ConversationUsageListResp +from ...types.assistant.assistant_support_resp import AssistantSupportResp + +if TYPE_CHECKING: + from ..._client import ZhipuAI + +from ...types.assistant import assistant_conversation_params, assistant_create_params + +__all__ = ["Assistant"] + + +class Assistant(BaseAPI): + def __init__(self, client: ZhipuAI) -> None: + super().__init__(client) + + def conversation( + self, + assistant_id: str, + 
model: str, + messages: list[assistant_create_params.ConversationMessage], + *, + stream: bool = True, + conversation_id: Optional[str] = None, + attachments: Optional[list[assistant_create_params.AssistantAttachments]] = None, + metadata: dict | None = None, + request_id: str = None, + user_id: str = None, + extra_headers: Headers | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> StreamResponse[AssistantCompletion]: + body = deepcopy_minimal( + { + "assistant_id": assistant_id, + "model": model, + "messages": messages, + "stream": stream, + "conversation_id": conversation_id, + "attachments": attachments, + "metadata": metadata, + "request_id": request_id, + "user_id": user_id, + } + ) + return self._post( + "/assistant", + body=maybe_transform(body, assistant_create_params.AssistantParameters), + options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout), + cast_type=AssistantCompletion, + stream=stream or True, + stream_cls=StreamResponse[AssistantCompletion], + ) + + def query_support( + self, + *, + assistant_id_list: list[str] = None, + request_id: str = None, + user_id: str = None, + extra_headers: Headers | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AssistantSupportResp: + body = deepcopy_minimal( + { + "assistant_id_list": assistant_id_list, + "request_id": request_id, + "user_id": user_id, + } + ) + return self._post( + "/assistant/list", + body=body, + options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout), + cast_type=AssistantSupportResp, + ) + + def query_conversation_usage( + self, + assistant_id: str, + page: int = 1, + page_size: int = 10, + *, + request_id: str = None, + user_id: str = None, + extra_headers: Headers | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = 
NOT_GIVEN, + ) -> ConversationUsageListResp: + body = deepcopy_minimal( + { + "assistant_id": assistant_id, + "page": page, + "page_size": page_size, + "request_id": request_id, + "user_id": user_id, + } + ) + return self._post( + "/assistant/conversation/list", + body=maybe_transform(body, assistant_conversation_params.ConversationParameters), + options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout), + cast_type=ConversationUsageListResp, + ) diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/batches.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/batches.py new file mode 100644 index 0000000000000..ae2f2be85eb9b --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/batches.py @@ -0,0 +1,146 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING, Literal, Optional + +import httpx + +from ..core import NOT_GIVEN, BaseAPI, Body, Headers, NotGiven, make_request_options, maybe_transform +from ..core.pagination import SyncCursorPage +from ..types import batch_create_params, batch_list_params +from ..types.batch import Batch + +if TYPE_CHECKING: + from .._client import ZhipuAI + + +class Batches(BaseAPI): + def __init__(self, client: ZhipuAI) -> None: + super().__init__(client) + + def create( + self, + *, + completion_window: str | None = None, + endpoint: Literal["/v1/chat/completions", "/v1/embeddings"], + input_file_id: str, + metadata: Optional[dict[str, str]] | NotGiven = NOT_GIVEN, + auto_delete_input_file: bool = True, + extra_headers: Headers | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Batch: + return self._post( + "/batches", + body=maybe_transform( + { + "completion_window": completion_window, + "endpoint": endpoint, + "input_file_id": input_file_id, + "metadata": metadata, + "auto_delete_input_file": 
auto_delete_input_file, + }, + batch_create_params.BatchCreateParams, + ), + options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout), + cast_type=Batch, + ) + + def retrieve( + self, + batch_id: str, + *, + extra_headers: Headers | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Batch: + """ + Retrieves a batch. + + Args: + extra_headers: Send extra headers + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not batch_id: + raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}") + return self._get( + f"/batches/{batch_id}", + options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout), + cast_type=Batch, + ) + + def list( + self, + *, + after: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + extra_headers: Headers | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> SyncCursorPage[Batch]: + """List your organization's batches. + + Args: + after: A cursor for use in pagination. + + `after` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include after=obj_foo in order to + fetch the next page of the list. + + limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. 
+ + extra_headers: Send extra headers + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get_api_list( + "/batches", + page=SyncCursorPage[Batch], + options=make_request_options( + extra_headers=extra_headers, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "after": after, + "limit": limit, + }, + batch_list_params.BatchListParams, + ), + ), + model=Batch, + ) + + def cancel( + self, + batch_id: str, + *, + extra_headers: Headers | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Batch: + """ + Cancels an in-progress batch. + + Args: + batch_id: The ID of the batch to cancel. + extra_headers: Send extra headers + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + + """ + if not batch_id: + raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}") + return self._post( + f"/batches/{batch_id}/cancel", + options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout), + cast_type=Batch, + ) diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/chat/__init__.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/chat/__init__.py index e69de29bb2d1d..5cd8dc6f339a6 100644 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/chat/__init__.py +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/chat/__init__.py @@ -0,0 +1,5 @@ +from .async_completions import AsyncCompletions +from .chat import Chat +from .completions import Completions + +__all__ = ["AsyncCompletions", "Chat", "Completions"] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/chat/async_completions.py 
b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/chat/async_completions.py index 1f8011973951b..d8ecc310644d1 100644 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/chat/async_completions.py +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/chat/async_completions.py @@ -1,13 +1,25 @@ from __future__ import annotations +import logging from typing import TYPE_CHECKING, Literal, Optional, Union import httpx -from ...core._base_api import BaseAPI -from ...core._base_type import NOT_GIVEN, Headers, NotGiven -from ...core._http_client import make_user_request_input +from ...core import ( + NOT_GIVEN, + BaseAPI, + Body, + Headers, + NotGiven, + drop_prefix_image_data, + make_request_options, + maybe_transform, +) from ...types.chat.async_chat_completion import AsyncCompletion, AsyncTaskStatus +from ...types.chat.code_geex import code_geex_params +from ...types.sensitive_word_check import SensitiveWordCheckRequest + +logger = logging.getLogger(__name__) if TYPE_CHECKING: from ..._client import ZhipuAI @@ -22,6 +34,7 @@ def create( *, model: str, request_id: Optional[str] | NotGiven = NOT_GIVEN, + user_id: Optional[str] | NotGiven = NOT_GIVEN, do_sample: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, @@ -29,50 +42,74 @@ def create( seed: int | NotGiven = NOT_GIVEN, messages: Union[str, list[str], list[int], list[list[int]], None], stop: Optional[Union[str, list[str], None]] | NotGiven = NOT_GIVEN, - sensitive_word_check: Optional[object] | NotGiven = NOT_GIVEN, + sensitive_word_check: Optional[SensitiveWordCheckRequest] | NotGiven = NOT_GIVEN, tools: Optional[object] | NotGiven = NOT_GIVEN, tool_choice: str | NotGiven = NOT_GIVEN, + meta: Optional[dict[str, str]] | NotGiven = NOT_GIVEN, + extra: Optional[code_geex_params.CodeGeexExtra] | NotGiven = NOT_GIVEN, extra_headers: 
Headers | None = None, - disable_strict_validation: Optional[bool] | None = None, + extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> AsyncTaskStatus: _cast_type = AsyncTaskStatus + logger.debug(f"temperature:{temperature}, top_p:{top_p}") + if temperature is not None and temperature != NOT_GIVEN: + if temperature <= 0: + do_sample = False + temperature = 0.01 + # logger.warning("temperature:取值范围是:(0.0, 1.0) 开区间,do_sample重写为:false(参数top_p temperture不生效)") # noqa: E501 + if temperature >= 1: + temperature = 0.99 + # logger.warning("temperature:取值范围是:(0.0, 1.0) 开区间") + if top_p is not None and top_p != NOT_GIVEN: + if top_p >= 1: + top_p = 0.99 + # logger.warning("top_p:取值范围是:(0.0, 1.0) 开区间,不能等于 0 或 1") + if top_p <= 0: + top_p = 0.01 + # logger.warning("top_p:取值范围是:(0.0, 1.0) 开区间,不能等于 0 或 1") + + logger.debug(f"temperature:{temperature}, top_p:{top_p}") + if isinstance(messages, list): + for item in messages: + if item.get("content"): + item["content"] = drop_prefix_image_data(item["content"]) - if disable_strict_validation: - _cast_type = object + body = { + "model": model, + "request_id": request_id, + "user_id": user_id, + "temperature": temperature, + "top_p": top_p, + "do_sample": do_sample, + "max_tokens": max_tokens, + "seed": seed, + "messages": messages, + "stop": stop, + "sensitive_word_check": sensitive_word_check, + "tools": tools, + "tool_choice": tool_choice, + "meta": meta, + "extra": maybe_transform(extra, code_geex_params.CodeGeexExtra), + } return self._post( "/async/chat/completions", - body={ - "model": model, - "request_id": request_id, - "temperature": temperature, - "top_p": top_p, - "do_sample": do_sample, - "max_tokens": max_tokens, - "seed": seed, - "messages": messages, - "stop": stop, - "sensitive_word_check": sensitive_word_check, - "tools": tools, - "tool_choice": tool_choice, - }, - options=make_user_request_input(extra_headers=extra_headers, timeout=timeout), + body=body, + 
options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout), cast_type=_cast_type, - enable_stream=False, + stream=False, ) def retrieve_completion_result( self, id: str, extra_headers: Headers | None = None, - disable_strict_validation: Optional[bool] | None = None, + extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Union[AsyncCompletion, AsyncTaskStatus]: _cast_type = Union[AsyncCompletion, AsyncTaskStatus] - if disable_strict_validation: - _cast_type = object return self._get( path=f"/async-result/{id}", cast_type=_cast_type, - options=make_user_request_input(extra_headers=extra_headers, timeout=timeout), + options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout), ) diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/chat/chat.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/chat/chat.py index 92362fc50a725..b3cc46566c7bf 100644 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/chat/chat.py +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/chat/chat.py @@ -1,17 +1,18 @@ from typing import TYPE_CHECKING -from ...core._base_api import BaseAPI +from ...core import BaseAPI, cached_property from .async_completions import AsyncCompletions from .completions import Completions if TYPE_CHECKING: - from ..._client import ZhipuAI + pass class Chat(BaseAPI): - completions: Completions + @cached_property + def completions(self) -> Completions: + return Completions(self._client) - def __init__(self, client: "ZhipuAI") -> None: - super().__init__(client) - self.completions = Completions(client) - self.asyncCompletions = AsyncCompletions(client) + @cached_property + def asyncCompletions(self) -> AsyncCompletions: # noqa: N802 + return AsyncCompletions(self._client) diff --git 
a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/chat/completions.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/chat/completions.py index ec29f33864203..1c23473a03ae3 100644 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/chat/completions.py +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/chat/completions.py @@ -1,15 +1,28 @@ from __future__ import annotations +import logging from typing import TYPE_CHECKING, Literal, Optional, Union import httpx -from ...core._base_api import BaseAPI -from ...core._base_type import NOT_GIVEN, Headers, NotGiven -from ...core._http_client import make_user_request_input -from ...core._sse_client import StreamResponse +from ...core import ( + NOT_GIVEN, + BaseAPI, + Body, + Headers, + NotGiven, + StreamResponse, + deepcopy_minimal, + drop_prefix_image_data, + make_request_options, + maybe_transform, +) from ...types.chat.chat_completion import Completion from ...types.chat.chat_completion_chunk import ChatCompletionChunk +from ...types.chat.code_geex import code_geex_params +from ...types.sensitive_word_check import SensitiveWordCheckRequest + +logger = logging.getLogger(__name__) if TYPE_CHECKING: from ..._client import ZhipuAI @@ -24,6 +37,7 @@ def create( *, model: str, request_id: Optional[str] | NotGiven = NOT_GIVEN, + user_id: Optional[str] | NotGiven = NOT_GIVEN, do_sample: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -32,23 +46,43 @@ def create( seed: int | NotGiven = NOT_GIVEN, messages: Union[str, list[str], list[int], object, None], stop: Optional[Union[str, list[str], None]] | NotGiven = NOT_GIVEN, - sensitive_word_check: Optional[object] | NotGiven = NOT_GIVEN, + sensitive_word_check: Optional[SensitiveWordCheckRequest] | NotGiven = NOT_GIVEN, tools: 
Optional[object] | NotGiven = NOT_GIVEN, tool_choice: str | NotGiven = NOT_GIVEN, + meta: Optional[dict[str, str]] | NotGiven = NOT_GIVEN, + extra: Optional[code_geex_params.CodeGeexExtra] | NotGiven = NOT_GIVEN, extra_headers: Headers | None = None, - disable_strict_validation: Optional[bool] | None = None, + extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Completion | StreamResponse[ChatCompletionChunk]: - _cast_type = Completion - _stream_cls = StreamResponse[ChatCompletionChunk] - if disable_strict_validation: - _cast_type = object - _stream_cls = StreamResponse[object] - return self._post( - "/chat/completions", - body={ + logger.debug(f"temperature:{temperature}, top_p:{top_p}") + if temperature is not None and temperature != NOT_GIVEN: + if temperature <= 0: + do_sample = False + temperature = 0.01 + # logger.warning("temperature:取值范围是:(0.0, 1.0) 开区间,do_sample重写为:false(参数top_p temperture不生效)") # noqa: E501 + if temperature >= 1: + temperature = 0.99 + # logger.warning("temperature:取值范围是:(0.0, 1.0) 开区间") + if top_p is not None and top_p != NOT_GIVEN: + if top_p >= 1: + top_p = 0.99 + # logger.warning("top_p:取值范围是:(0.0, 1.0) 开区间,不能等于 0 或 1") + if top_p <= 0: + top_p = 0.01 + # logger.warning("top_p:取值范围是:(0.0, 1.0) 开区间,不能等于 0 或 1") + + logger.debug(f"temperature:{temperature}, top_p:{top_p}") + if isinstance(messages, list): + for item in messages: + if item.get("content"): + item["content"] = drop_prefix_image_data(item["content"]) + + body = deepcopy_minimal( + { "model": model, "request_id": request_id, + "user_id": user_id, "temperature": temperature, "top_p": top_p, "do_sample": do_sample, @@ -60,11 +94,15 @@ def create( "stream": stream, "tools": tools, "tool_choice": tool_choice, - }, - options=make_user_request_input( - extra_headers=extra_headers, - ), - cast_type=_cast_type, - enable_stream=stream or False, - stream_cls=_stream_cls, + "meta": meta, + "extra": maybe_transform(extra, 
code_geex_params.CodeGeexExtra), + } + ) + return self._post( + "/chat/completions", + body=body, + options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout), + cast_type=Completion, + stream=stream or False, + stream_cls=StreamResponse[ChatCompletionChunk], ) diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/embeddings.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/embeddings.py index 2308a204514e1..4b4baef9421ba 100644 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/embeddings.py +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/embeddings.py @@ -4,9 +4,7 @@ import httpx -from ..core._base_api import BaseAPI -from ..core._base_type import NOT_GIVEN, Headers, NotGiven -from ..core._http_client import make_user_request_input +from ..core import NOT_GIVEN, BaseAPI, Body, Headers, NotGiven, make_request_options from ..types.embeddings import EmbeddingsResponded if TYPE_CHECKING: @@ -22,10 +20,13 @@ def create( *, input: Union[str, list[str], list[int], list[list[int]]], model: Union[str], + dimensions: Union[int] | NotGiven = NOT_GIVEN, encoding_format: str | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, + request_id: Optional[str] | NotGiven = NOT_GIVEN, sensitive_word_check: Optional[object] | NotGiven = NOT_GIVEN, extra_headers: Headers | None = None, + extra_body: Body | None = None, disable_strict_validation: Optional[bool] | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> EmbeddingsResponded: @@ -37,11 +38,13 @@ def create( body={ "input": input, "model": model, + "dimensions": dimensions, "encoding_format": encoding_format, "user": user, + "request_id": request_id, "sensitive_word_check": sensitive_word_check, }, - options=make_user_request_input(extra_headers=extra_headers, timeout=timeout), + options=make_request_options(extra_headers=extra_headers, 
extra_body=extra_body, timeout=timeout), cast_type=_cast_type, - enable_stream=False, + stream=False, ) diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/files.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/files.py index f2ac74bffa843..ba9de75b7ef09 100644 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/files.py +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/files.py @@ -1,19 +1,30 @@ from __future__ import annotations -from typing import TYPE_CHECKING +from collections.abc import Mapping +from typing import TYPE_CHECKING, Literal, cast import httpx -from ..core._base_api import BaseAPI -from ..core._base_type import NOT_GIVEN, FileTypes, Headers, NotGiven -from ..core._files import is_file_content -from ..core._http_client import make_user_request_input -from ..types.file_object import FileObject, ListOfFileObject +from ..core import ( + NOT_GIVEN, + BaseAPI, + Body, + FileTypes, + Headers, + NotGiven, + _legacy_binary_response, + _legacy_response, + deepcopy_minimal, + extract_files, + make_request_options, + maybe_transform, +) +from ..types.files import FileDeleted, FileObject, ListOfFileObject, UploadDetail, file_create_params if TYPE_CHECKING: from .._client import ZhipuAI -__all__ = ["Files"] +__all__ = ["Files", "FilesWithRawResponse"] class Files(BaseAPI): @@ -23,30 +34,69 @@ def __init__(self, client: ZhipuAI) -> None: def create( self, *, - file: FileTypes, - purpose: str, + file: FileTypes = None, + upload_detail: list[UploadDetail] = None, + purpose: Literal["fine-tune", "retrieval", "batch"], + knowledge_id: str = None, + sentence_size: int = None, extra_headers: Headers | None = None, + extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> FileObject: - if not is_file_content(file): - prefix = f"Expected file input `{file!r}`" - raise RuntimeError( - f"{prefix} to be bytes, an 
io.IOBase instance, PathLike or a tuple but received {type(file)} instead." - ) from None - files = [("file", file)] - - extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} - + if not file and not upload_detail: + raise ValueError("At least one of `file` and `upload_detail` must be provided.") + body = deepcopy_minimal( + { + "file": file, + "upload_detail": upload_detail, + "purpose": purpose, + "knowledge_id": knowledge_id, + "sentence_size": sentence_size, + } + ) + files = extract_files(cast(Mapping[str, object], body), paths=[["file"]]) + if files: + # It should be noted that the actual Content-Type header that will be + # sent to the server will contain a `boundary` parameter, e.g. + # multipart/form-data; boundary=---abc-- + extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} return self._post( "/files", - body={ - "purpose": purpose, - }, + body=maybe_transform(body, file_create_params.FileCreateParams), files=files, - options=make_user_request_input(extra_headers=extra_headers, timeout=timeout), + options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout), cast_type=FileObject, ) + # def retrieve( + # self, + # file_id: str, + # *, + # extra_headers: Headers | None = None, + # extra_body: Body | None = None, + # timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + # ) -> FileObject: + # """ + # Returns information about a specific file. 
+ # + # Args: + # file_id: The ID of the file to retrieve information about + # extra_headers: Send extra headers + # + # extra_body: Add additional JSON properties to the request + # + # timeout: Override the client-level default timeout for this request, in seconds + # """ + # if not file_id: + # raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") + # return self._get( + # f"/files/{file_id}", + # options=make_request_options( + # extra_headers=extra_headers, extra_body=extra_body, timeout=timeout + # ), + # cast_type=FileObject, + # ) + def list( self, *, @@ -55,13 +105,15 @@ def list( after: str | NotGiven = NOT_GIVEN, order: str | NotGiven = NOT_GIVEN, extra_headers: Headers | None = None, + extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ListOfFileObject: return self._get( "/files", cast_type=ListOfFileObject, - options=make_user_request_input( + options=make_request_options( extra_headers=extra_headers, + extra_body=extra_body, timeout=timeout, query={ "purpose": purpose, @@ -71,3 +123,72 @@ def list( }, ), ) + + def delete( + self, + file_id: str, + *, + extra_headers: Headers | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FileDeleted: + """ + Delete a file. 
+ + Args: + file_id: The ID of the file to delete + extra_headers: Send extra headers + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not file_id: + raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") + return self._delete( + f"/files/{file_id}", + options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout), + cast_type=FileDeleted, + ) + + def content( + self, + file_id: str, + *, + extra_headers: Headers | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> _legacy_response.HttpxBinaryResponseContent: + """ + Returns the contents of the specified file. + + Args: + extra_headers: Send extra headers + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not file_id: + raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") + extra_headers = {"Accept": "application/binary", **(extra_headers or {})} + return self._get( + f"/files/{file_id}/content", + options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout), + cast_type=_legacy_binary_response.HttpxBinaryResponseContent, + ) + + +class FilesWithRawResponse: + def __init__(self, files: Files) -> None: + self._files = files + + self.create = _legacy_response.to_raw_response_wrapper( + files.create, + ) + self.list = _legacy_response.to_raw_response_wrapper( + files.list, + ) + self.content = _legacy_response.to_raw_response_wrapper( + files.content, + ) diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/fine_tuning/__init__.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/fine_tuning/__init__.py index e69de29bb2d1d..7c309b8341680 100644 --- 
a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/fine_tuning/__init__.py +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/fine_tuning/__init__.py @@ -0,0 +1,5 @@ +from .fine_tuning import FineTuning +from .jobs import Jobs +from .models import FineTunedModels + +__all__ = ["Jobs", "FineTunedModels", "FineTuning"] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/fine_tuning/fine_tuning.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/fine_tuning/fine_tuning.py index dc30bd33edfbb..8670f7de00df8 100644 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/fine_tuning/fine_tuning.py +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/fine_tuning/fine_tuning.py @@ -1,15 +1,18 @@ from typing import TYPE_CHECKING -from ...core._base_api import BaseAPI +from ...core import BaseAPI, cached_property from .jobs import Jobs +from .models import FineTunedModels if TYPE_CHECKING: - from ..._client import ZhipuAI + pass class FineTuning(BaseAPI): - jobs: Jobs + @cached_property + def jobs(self) -> Jobs: + return Jobs(self._client) - def __init__(self, client: "ZhipuAI") -> None: - super().__init__(client) - self.jobs = Jobs(client) + @cached_property + def models(self) -> FineTunedModels: + return FineTunedModels(self._client) diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/fine_tuning/jobs/__init__.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/fine_tuning/jobs/__init__.py new file mode 100644 index 0000000000000..40777a153f272 --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/fine_tuning/jobs/__init__.py @@ -0,0 +1,3 @@ +from .jobs import Jobs + +__all__ = ["Jobs"] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/fine_tuning/jobs.py 
b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/fine_tuning/jobs/jobs.py similarity index 53% rename from api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/fine_tuning/jobs.py rename to api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/fine_tuning/jobs/jobs.py index 3d2e9208a11f1..8b038cadc0640 100644 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/fine_tuning/jobs.py +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/fine_tuning/jobs/jobs.py @@ -4,13 +4,23 @@ import httpx -from ...core._base_api import BaseAPI -from ...core._base_type import NOT_GIVEN, Headers, NotGiven -from ...core._http_client import make_user_request_input -from ...types.fine_tuning import FineTuningJob, FineTuningJobEvent, ListOfFineTuningJob, job_create_params +from ....core import ( + NOT_GIVEN, + BaseAPI, + Body, + Headers, + NotGiven, + make_request_options, +) +from ....types.fine_tuning import ( + FineTuningJob, + FineTuningJobEvent, + ListOfFineTuningJob, + job_create_params, +) if TYPE_CHECKING: - from ..._client import ZhipuAI + from ...._client import ZhipuAI __all__ = ["Jobs"] @@ -29,6 +39,7 @@ def create( request_id: Optional[str] | NotGiven = NOT_GIVEN, validation_file: Optional[str] | NotGiven = NOT_GIVEN, extra_headers: Headers | None = None, + extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> FineTuningJob: return self._post( @@ -41,7 +52,7 @@ def create( "validation_file": validation_file, "request_id": request_id, }, - options=make_user_request_input(extra_headers=extra_headers, timeout=timeout), + options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout), cast_type=FineTuningJob, ) @@ -50,11 +61,12 @@ def retrieve( fine_tuning_job_id: str, *, extra_headers: Headers | None = None, + extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven 
= NOT_GIVEN, ) -> FineTuningJob: return self._get( f"/fine_tuning/jobs/{fine_tuning_job_id}", - options=make_user_request_input(extra_headers=extra_headers, timeout=timeout), + options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout), cast_type=FineTuningJob, ) @@ -64,13 +76,15 @@ def list( after: str | NotGiven = NOT_GIVEN, limit: int | NotGiven = NOT_GIVEN, extra_headers: Headers | None = None, + extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ListOfFineTuningJob: return self._get( "/fine_tuning/jobs", cast_type=ListOfFineTuningJob, - options=make_user_request_input( + options=make_request_options( extra_headers=extra_headers, + extra_body=extra_body, timeout=timeout, query={ "after": after, @@ -79,6 +93,24 @@ def list( ), ) + def cancel( + self, + fine_tuning_job_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # noqa: E501 + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FineTuningJob: + if not fine_tuning_job_id: + raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}") + return self._post( + f"/fine_tuning/jobs/{fine_tuning_job_id}/cancel", + options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout), + cast_type=FineTuningJob, + ) + def list_events( self, fine_tuning_job_id: str, @@ -86,13 +118,15 @@ def list_events( after: str | NotGiven = NOT_GIVEN, limit: int | NotGiven = NOT_GIVEN, extra_headers: Headers | None = None, + extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> FineTuningJobEvent: return self._get( f"/fine_tuning/jobs/{fine_tuning_job_id}/events", cast_type=FineTuningJobEvent, - options=make_user_request_input( + options=make_request_options( extra_headers=extra_headers, + extra_body=extra_body, timeout=timeout, query={ "after": after, @@ -100,3 +134,19 @@ def list_events( }, ), ) + + def delete( + self, + fine_tuning_job_id: str, + *, + extra_headers: Headers | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FineTuningJob: + if not fine_tuning_job_id: + raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}") + return self._delete( + f"/fine_tuning/jobs/{fine_tuning_job_id}", + options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout), + cast_type=FineTuningJob, + ) diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/fine_tuning/models/__init__.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/fine_tuning/models/__init__.py new file mode 100644 index 0000000000000..d832635bafbc6 --- /dev/null +++ 
b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/fine_tuning/models/__init__.py @@ -0,0 +1,3 @@ +from .fine_tuned_models import FineTunedModels + +__all__ = ["FineTunedModels"] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/fine_tuning/models/fine_tuned_models.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/fine_tuning/models/fine_tuned_models.py new file mode 100644 index 0000000000000..29c023e3b1cd5 --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/fine_tuning/models/fine_tuned_models.py @@ -0,0 +1,41 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +import httpx + +from ....core import ( + NOT_GIVEN, + BaseAPI, + Body, + Headers, + NotGiven, + make_request_options, +) +from ....types.fine_tuning.models import FineTunedModelsStatus + +if TYPE_CHECKING: + from ...._client import ZhipuAI + +__all__ = ["FineTunedModels"] + + +class FineTunedModels(BaseAPI): + def __init__(self, client: ZhipuAI) -> None: + super().__init__(client) + + def delete( + self, + fine_tuned_model: str, + *, + extra_headers: Headers | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FineTunedModelsStatus: + if not fine_tuned_model: + raise ValueError(f"Expected a non-empty value for `fine_tuned_model` but received {fine_tuned_model!r}") + return self._delete( + f"fine_tuning/fine_tuned_models/{fine_tuned_model}", + options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout), + cast_type=FineTunedModelsStatus, + ) diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/images.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/images.py index 2692b093af8b4..8ad411913fa11 100644 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/images.py +++ 
b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/images.py @@ -4,10 +4,9 @@ import httpx -from ..core._base_api import BaseAPI -from ..core._base_type import NOT_GIVEN, Body, Headers, NotGiven -from ..core._http_client import make_user_request_input +from ..core import NOT_GIVEN, BaseAPI, Body, Headers, NotGiven, make_request_options from ..types.image import ImagesResponded +from ..types.sensitive_word_check import SensitiveWordCheckRequest if TYPE_CHECKING: from .._client import ZhipuAI @@ -27,8 +26,10 @@ def generations( response_format: Optional[str] | NotGiven = NOT_GIVEN, size: Optional[str] | NotGiven = NOT_GIVEN, style: Optional[str] | NotGiven = NOT_GIVEN, + sensitive_word_check: Optional[SensitiveWordCheckRequest] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, request_id: Optional[str] | NotGiven = NOT_GIVEN, + user_id: Optional[str] | NotGiven = NOT_GIVEN, extra_headers: Headers | None = None, extra_body: Body | None = None, disable_strict_validation: Optional[bool] | None = None, @@ -45,12 +46,14 @@ def generations( "n": n, "quality": quality, "response_format": response_format, + "sensitive_word_check": sensitive_word_check, "size": size, "style": style, "user": user, + "user_id": user_id, "request_id": request_id, }, - options=make_user_request_input(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout), + options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout), cast_type=_cast_type, - enable_stream=False, + stream=False, ) diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/knowledge/__init__.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/knowledge/__init__.py new file mode 100644 index 0000000000000..5a67d743c35b9 --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/knowledge/__init__.py @@ -0,0 +1,3 @@ +from .knowledge import Knowledge + +__all__ = 
["Knowledge"] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/knowledge/document/__init__.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/knowledge/document/__init__.py new file mode 100644 index 0000000000000..fd289e2232b95 --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/knowledge/document/__init__.py @@ -0,0 +1,3 @@ +from .document import Document + +__all__ = ["Document"] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/knowledge/document/document.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/knowledge/document/document.py new file mode 100644 index 0000000000000..2c4066d893034 --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/knowledge/document/document.py @@ -0,0 +1,217 @@ +from __future__ import annotations + +from collections.abc import Mapping +from typing import TYPE_CHECKING, Literal, Optional, cast + +import httpx + +from ....core import ( + NOT_GIVEN, + BaseAPI, + Body, + FileTypes, + Headers, + NotGiven, + deepcopy_minimal, + extract_files, + make_request_options, + maybe_transform, +) +from ....types.files import UploadDetail, file_create_params +from ....types.knowledge.document import DocumentData, DocumentObject, document_edit_params, document_list_params +from ....types.knowledge.document.document_list_resp import DocumentPage + +if TYPE_CHECKING: + from ...._client import ZhipuAI + +__all__ = ["Document"] + + +class Document(BaseAPI): + def __init__(self, client: ZhipuAI) -> None: + super().__init__(client) + + def create( + self, + *, + file: FileTypes = None, + custom_separator: Optional[list[str]] = None, + upload_detail: list[UploadDetail] = None, + purpose: Literal["retrieval"], + knowledge_id: str = None, + sentence_size: int = None, + extra_headers: Headers | None = None, + extra_body: Body | None = None, + timeout: float | 
httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> DocumentObject: + if not file and not upload_detail: + raise ValueError("At least one of `file` and `upload_detail` must be provided.") + body = deepcopy_minimal( + { + "file": file, + "upload_detail": upload_detail, + "purpose": purpose, + "custom_separator": custom_separator, + "knowledge_id": knowledge_id, + "sentence_size": sentence_size, + } + ) + files = extract_files(cast(Mapping[str, object], body), paths=[["file"]]) + if files: + # It should be noted that the actual Content-Type header that will be + # sent to the server will contain a `boundary` parameter, e.g. + # multipart/form-data; boundary=---abc-- + extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} + return self._post( + "/files", + body=maybe_transform(body, file_create_params.FileCreateParams), + files=files, + options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout), + cast_type=DocumentObject, + ) + + def edit( + self, + document_id: str, + knowledge_type: str, + *, + custom_separator: Optional[list[str]] = None, + sentence_size: Optional[int] = None, + callback_url: Optional[str] = None, + callback_header: Optional[dict[str, str]] = None, + extra_headers: Headers | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> httpx.Response: + """ + + Args: + document_id: 知识id + knowledge_type: 知识类型: + 1:文章知识: 支持pdf,url,docx + 2.问答知识-文档: 支持pdf,url,docx + 3.问答知识-表格: 支持xlsx + 4.商品库-表格: 支持xlsx + 5.自定义: 支持pdf,url,docx + extra_headers: Send extra headers + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + :param knowledge_type: + :param document_id: + :param timeout: + :param extra_body: + :param callback_header: + :param sentence_size: + :param extra_headers: + :param callback_url: + :param custom_separator: + """ + if not 
document_id: + raise ValueError(f"Expected a non-empty value for `document_id` but received {document_id!r}") + + body = deepcopy_minimal( + { + "id": document_id, + "knowledge_type": knowledge_type, + "custom_separator": custom_separator, + "sentence_size": sentence_size, + "callback_url": callback_url, + "callback_header": callback_header, + } + ) + + return self._put( + f"/document/{document_id}", + body=maybe_transform(body, document_edit_params.DocumentEditParams), + options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout), + cast_type=httpx.Response, + ) + + def list( + self, + knowledge_id: str, + *, + purpose: str | NotGiven = NOT_GIVEN, + page: str | NotGiven = NOT_GIVEN, + limit: str | NotGiven = NOT_GIVEN, + order: Literal["desc", "asc"] | NotGiven = NOT_GIVEN, + extra_headers: Headers | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> DocumentPage: + return self._get( + "/files", + options=make_request_options( + extra_headers=extra_headers, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "knowledge_id": knowledge_id, + "purpose": purpose, + "page": page, + "limit": limit, + "order": order, + }, + document_list_params.DocumentListParams, + ), + ), + cast_type=DocumentPage, + ) + + def delete( + self, + document_id: str, + *, + extra_headers: Headers | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> httpx.Response: + """ + Delete a file. 
+ + Args: + + document_id: 知识id + extra_headers: Send extra headers + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not document_id: + raise ValueError(f"Expected a non-empty value for `document_id` but received {document_id!r}") + + return self._delete( + f"/document/{document_id}", + options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout), + cast_type=httpx.Response, + ) + + def retrieve( + self, + document_id: str, + *, + extra_headers: Headers | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> DocumentData: + """ + + Args: + extra_headers: Send extra headers + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not document_id: + raise ValueError(f"Expected a non-empty value for `document_id` but received {document_id!r}") + + return self._get( + f"/document/{document_id}", + options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout), + cast_type=DocumentData, + ) diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/knowledge/knowledge.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/knowledge/knowledge.py new file mode 100644 index 0000000000000..fea4c73ac997c --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/knowledge/knowledge.py @@ -0,0 +1,173 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING, Literal, Optional + +import httpx + +from ...core import ( + NOT_GIVEN, + BaseAPI, + Body, + Headers, + NotGiven, + cached_property, + deepcopy_minimal, + make_request_options, + maybe_transform, +) +from ...types.knowledge import KnowledgeInfo, KnowledgeUsed, knowledge_create_params, 
knowledge_list_params +from ...types.knowledge.knowledge_list_resp import KnowledgePage +from .document import Document + +if TYPE_CHECKING: + from ..._client import ZhipuAI + +__all__ = ["Knowledge"] + + +class Knowledge(BaseAPI): + def __init__(self, client: ZhipuAI) -> None: + super().__init__(client) + + @cached_property + def document(self) -> Document: + return Document(self._client) + + def create( + self, + embedding_id: int, + name: str, + *, + customer_identifier: Optional[str] = None, + description: Optional[str] = None, + background: Optional[Literal["blue", "red", "orange", "purple", "sky"]] = None, + icon: Optional[Literal["question", "book", "seal", "wrench", "tag", "horn", "house"]] = None, + bucket_id: Optional[str] = None, + extra_headers: Headers | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KnowledgeInfo: + body = deepcopy_minimal( + { + "embedding_id": embedding_id, + "name": name, + "customer_identifier": customer_identifier, + "description": description, + "background": background, + "icon": icon, + "bucket_id": bucket_id, + } + ) + return self._post( + "/knowledge", + body=maybe_transform(body, knowledge_create_params.KnowledgeBaseParams), + options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout), + cast_type=KnowledgeInfo, + ) + + def modify( + self, + knowledge_id: str, + embedding_id: int, + *, + name: str, + description: Optional[str] = None, + background: Optional[Literal["blue", "red", "orange", "purple", "sky"]] = None, + icon: Optional[Literal["question", "book", "seal", "wrench", "tag", "horn", "house"]] = None, + extra_headers: Headers | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> httpx.Response: + body = deepcopy_minimal( + { + "id": knowledge_id, + "embedding_id": embedding_id, + "name": name, + "description": description, + "background": 
background, + "icon": icon, + } + ) + return self._put( + f"/knowledge/{knowledge_id}", + body=maybe_transform(body, knowledge_create_params.KnowledgeBaseParams), + options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout), + cast_type=httpx.Response, + ) + + def query( + self, + *, + page: int | NotGiven = 1, + size: int | NotGiven = 10, + extra_headers: Headers | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KnowledgePage: + return self._get( + "/knowledge", + options=make_request_options( + extra_headers=extra_headers, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "size": size, + }, + knowledge_list_params.KnowledgeListParams, + ), + ), + cast_type=KnowledgePage, + ) + + def delete( + self, + knowledge_id: str, + *, + extra_headers: Headers | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> httpx.Response: + """ + Delete a file. + + Args: + knowledge_id: 知识库ID + extra_headers: Send extra headers + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not knowledge_id: + raise ValueError("Expected a non-empty value for `knowledge_id`") + + return self._delete( + f"/knowledge/{knowledge_id}", + options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout), + cast_type=httpx.Response, + ) + + def used( + self, + *, + extra_headers: Headers | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KnowledgeUsed: + """ + Returns the contents of the specified file. 
+ + Args: + extra_headers: Send extra headers + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/knowledge/capacity", + options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout), + cast_type=KnowledgeUsed, + ) diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/tools/__init__.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/tools/__init__.py new file mode 100644 index 0000000000000..43e4e37da1779 --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/tools/__init__.py @@ -0,0 +1,3 @@ +from .tools import Tools + +__all__ = ["Tools"] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/tools/tools.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/tools/tools.py new file mode 100644 index 0000000000000..3c3a630aff47d --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/tools/tools.py @@ -0,0 +1,65 @@ +from __future__ import annotations + +import logging +from typing import TYPE_CHECKING, Literal, Optional, Union + +import httpx + +from ...core import ( + NOT_GIVEN, + BaseAPI, + Body, + Headers, + NotGiven, + StreamResponse, + deepcopy_minimal, + make_request_options, + maybe_transform, +) +from ...types.tools import WebSearch, WebSearchChunk, tools_web_search_params + +logger = logging.getLogger(__name__) + +if TYPE_CHECKING: + from ..._client import ZhipuAI + +__all__ = ["Tools"] + + +class Tools(BaseAPI): + def __init__(self, client: ZhipuAI) -> None: + super().__init__(client) + + def web_search( + self, + *, + model: str, + request_id: Optional[str] | NotGiven = NOT_GIVEN, + stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, + messages: Union[str, list[str], list[int], object, None], + 
scope: Optional[str] | NotGiven = NOT_GIVEN, + location: Optional[str] | NotGiven = NOT_GIVEN, + recent_days: Optional[int] | NotGiven = NOT_GIVEN, + extra_headers: Headers | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> WebSearch | StreamResponse[WebSearchChunk]: + body = deepcopy_minimal( + { + "model": model, + "request_id": request_id, + "messages": messages, + "stream": stream, + "scope": scope, + "location": location, + "recent_days": recent_days, + } + ) + return self._post( + "/tools", + body=maybe_transform(body, tools_web_search_params.WebSearchParams), + options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout), + cast_type=WebSearch, + stream=stream or False, + stream_cls=StreamResponse[WebSearchChunk], + ) diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/videos/__init__.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/videos/__init__.py new file mode 100644 index 0000000000000..6b0f99ed09efe --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/videos/__init__.py @@ -0,0 +1,7 @@ +from .videos import ( + Videos, +) + +__all__ = [ + "Videos", +] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/videos/videos.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/videos/videos.py new file mode 100644 index 0000000000000..f1f1c08036a66 --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/videos/videos.py @@ -0,0 +1,77 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING, Optional + +import httpx + +from ...core import ( + NOT_GIVEN, + BaseAPI, + Body, + Headers, + NotGiven, + deepcopy_minimal, + make_request_options, + maybe_transform, +) +from ...types.sensitive_word_check import SensitiveWordCheckRequest +from ...types.video 
import VideoObject, video_create_params + +if TYPE_CHECKING: + from ..._client import ZhipuAI + +__all__ = ["Videos"] + + +class Videos(BaseAPI): + def __init__(self, client: ZhipuAI) -> None: + super().__init__(client) + + def generations( + self, + model: str, + *, + prompt: str = None, + image_url: str = None, + sensitive_word_check: Optional[SensitiveWordCheckRequest] | NotGiven = NOT_GIVEN, + request_id: str = None, + user_id: str = None, + extra_headers: Headers | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> VideoObject: + if not model and not prompt: + raise ValueError("At least one of `model` and `prompt` must be provided.") + body = deepcopy_minimal( + { + "model": model, + "prompt": prompt, + "image_url": image_url, + "sensitive_word_check": sensitive_word_check, + "request_id": request_id, + "user_id": user_id, + } + ) + return self._post( + "/videos/generations", + body=maybe_transform(body, video_create_params.VideoCreateParams), + options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout), + cast_type=VideoObject, + ) + + def retrieve_videos_result( + self, + id: str, + *, + extra_headers: Headers | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> VideoObject: + if not id: + raise ValueError("Expected a non-empty value for `id`") + + return self._get( + f"/async-result/{id}", + options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout), + cast_type=VideoObject, + ) diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/__init__.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/__init__.py index e69de29bb2d1d..3d6466d279861 100644 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/__init__.py +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/__init__.py @@ 
-0,0 +1,108 @@ +from ._base_api import BaseAPI +from ._base_compat import ( + PYDANTIC_V2, + ConfigDict, + GenericModel, + cached_property, + field_get_default, + get_args, + get_model_config, + get_model_fields, + get_origin, + is_literal_type, + is_union, + parse_obj, +) +from ._base_models import BaseModel, construct_type +from ._base_type import ( + NOT_GIVEN, + Body, + FileTypes, + Headers, + IncEx, + ModelT, + NotGiven, + Query, +) +from ._constants import ( + ZHIPUAI_DEFAULT_LIMITS, + ZHIPUAI_DEFAULT_MAX_RETRIES, + ZHIPUAI_DEFAULT_TIMEOUT, +) +from ._errors import ( + APIAuthenticationError, + APIConnectionError, + APIInternalError, + APIReachLimitError, + APIRequestFailedError, + APIResponseError, + APIResponseValidationError, + APIServerFlowExceedError, + APIStatusError, + APITimeoutError, + ZhipuAIError, +) +from ._files import is_file_content +from ._http_client import HttpClient, make_request_options +from ._sse_client import StreamResponse +from ._utils import ( + deepcopy_minimal, + drop_prefix_image_data, + extract_files, + is_given, + is_list, + is_mapping, + maybe_transform, + parse_date, + parse_datetime, +) + +__all__ = [ + "BaseModel", + "construct_type", + "BaseAPI", + "NOT_GIVEN", + "Headers", + "NotGiven", + "Body", + "IncEx", + "ModelT", + "Query", + "FileTypes", + "PYDANTIC_V2", + "ConfigDict", + "GenericModel", + "get_args", + "is_union", + "parse_obj", + "get_origin", + "is_literal_type", + "get_model_config", + "get_model_fields", + "field_get_default", + "is_file_content", + "ZhipuAIError", + "APIStatusError", + "APIRequestFailedError", + "APIAuthenticationError", + "APIReachLimitError", + "APIInternalError", + "APIServerFlowExceedError", + "APIResponseError", + "APIResponseValidationError", + "APITimeoutError", + "make_request_options", + "HttpClient", + "ZHIPUAI_DEFAULT_TIMEOUT", + "ZHIPUAI_DEFAULT_MAX_RETRIES", + "ZHIPUAI_DEFAULT_LIMITS", + "is_list", + "is_mapping", + "parse_date", + "parse_datetime", + "is_given", + 
"maybe_transform", + "deepcopy_minimal", + "extract_files", + "StreamResponse", +] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_base_api.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_base_api.py index 10b46ff8e381a..3592ea6bacd17 100644 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_base_api.py +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_base_api.py @@ -16,3 +16,4 @@ def __init__(self, client: ZhipuAI) -> None: self._post = client.post self._put = client.put self._patch = client.patch + self._get_api_list = client.get_api_list diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_base_compat.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_base_compat.py new file mode 100644 index 0000000000000..92a5d683be673 --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_base_compat.py @@ -0,0 +1,209 @@ +from __future__ import annotations + +from collections.abc import Callable +from datetime import date, datetime +from typing import TYPE_CHECKING, Any, Generic, TypeVar, Union, cast, overload + +import pydantic +from pydantic.fields import FieldInfo +from typing_extensions import Self + +from ._base_type import StrBytesIntFloat + +_T = TypeVar("_T") +_ModelT = TypeVar("_ModelT", bound=pydantic.BaseModel) + +# --------------- Pydantic v2 compatibility --------------- + +# Pyright incorrectly reports some of our functions as overriding a method when they don't +# pyright: reportIncompatibleMethodOverride=false + +PYDANTIC_V2 = pydantic.VERSION.startswith("2.") + +# v1 re-exports +if TYPE_CHECKING: + + def parse_date(value: date | StrBytesIntFloat) -> date: ... + + def parse_datetime(value: Union[datetime, StrBytesIntFloat]) -> datetime: ... + + def get_args(t: type[Any]) -> tuple[Any, ...]: ... + + def is_union(tp: type[Any] | None) -> bool: ... 
+ + def get_origin(t: type[Any]) -> type[Any] | None: ... + + def is_literal_type(type_: type[Any]) -> bool: ... + + def is_typeddict(type_: type[Any]) -> bool: ... + +else: + if PYDANTIC_V2: + from pydantic.v1.typing import ( # noqa: I001 + get_args as get_args, # noqa: PLC0414 + is_union as is_union, # noqa: PLC0414 + get_origin as get_origin, # noqa: PLC0414 + is_typeddict as is_typeddict, # noqa: PLC0414 + is_literal_type as is_literal_type, # noqa: PLC0414 + ) + from pydantic.v1.datetime_parse import parse_date as parse_date, parse_datetime as parse_datetime # noqa: PLC0414 + else: + from pydantic.typing import ( # noqa: I001 + get_args as get_args, # noqa: PLC0414 + is_union as is_union, # noqa: PLC0414 + get_origin as get_origin, # noqa: PLC0414 + is_typeddict as is_typeddict, # noqa: PLC0414 + is_literal_type as is_literal_type, # noqa: PLC0414 + ) + from pydantic.datetime_parse import parse_date as parse_date, parse_datetime as parse_datetime # noqa: PLC0414 + + +# refactored config +if TYPE_CHECKING: + from pydantic import ConfigDict +else: + if PYDANTIC_V2: + from pydantic import ConfigDict + else: + # TODO: provide an error message here? 
+ ConfigDict = None + + +# renamed methods / properties +def parse_obj(model: type[_ModelT], value: object) -> _ModelT: + if PYDANTIC_V2: + return model.model_validate(value) + else: + # pyright: ignore[reportDeprecated, reportUnnecessaryCast] + return cast(_ModelT, model.parse_obj(value)) + + +def field_is_required(field: FieldInfo) -> bool: + if PYDANTIC_V2: + return field.is_required() + return field.required # type: ignore + + +def field_get_default(field: FieldInfo) -> Any: + value = field.get_default() + if PYDANTIC_V2: + from pydantic_core import PydanticUndefined + + if value == PydanticUndefined: + return None + return value + return value + + +def field_outer_type(field: FieldInfo) -> Any: + if PYDANTIC_V2: + return field.annotation + return field.outer_type_ # type: ignore + + +def get_model_config(model: type[pydantic.BaseModel]) -> Any: + if PYDANTIC_V2: + return model.model_config + return model.__config__ # type: ignore + + +def get_model_fields(model: type[pydantic.BaseModel]) -> dict[str, FieldInfo]: + if PYDANTIC_V2: + return model.model_fields + return model.__fields__ # type: ignore + + +def model_copy(model: _ModelT) -> _ModelT: + if PYDANTIC_V2: + return model.model_copy() + return model.copy() # type: ignore + + +def model_json(model: pydantic.BaseModel, *, indent: int | None = None) -> str: + if PYDANTIC_V2: + return model.model_dump_json(indent=indent) + return model.json(indent=indent) # type: ignore + + +def model_dump( + model: pydantic.BaseModel, + *, + exclude_unset: bool = False, + exclude_defaults: bool = False, +) -> dict[str, Any]: + if PYDANTIC_V2: + return model.model_dump( + exclude_unset=exclude_unset, + exclude_defaults=exclude_defaults, + ) + return cast( + "dict[str, Any]", + model.dict( # pyright: ignore[reportDeprecated, reportUnnecessaryCast] + exclude_unset=exclude_unset, + exclude_defaults=exclude_defaults, + ), + ) + + +def model_parse(model: type[_ModelT], data: Any) -> _ModelT: + if PYDANTIC_V2: + return 
model.model_validate(data) + return model.parse_obj(data) # pyright: ignore[reportDeprecated] + + +# generic models +if TYPE_CHECKING: + + class GenericModel(pydantic.BaseModel): ... + +else: + if PYDANTIC_V2: + # there no longer needs to be a distinction in v2 but + # we still have to create our own subclass to avoid + # inconsistent MRO ordering errors + class GenericModel(pydantic.BaseModel): ... + + else: + import pydantic.generics + + class GenericModel(pydantic.generics.GenericModel, pydantic.BaseModel): ... + + +# cached properties +if TYPE_CHECKING: + cached_property = property + + # we define a separate type (copied from typeshed) + # that represents that `cached_property` is `set`able + # at runtime, which differs from `@property`. + # + # this is a separate type as editors likely special case + # `@property` and we don't want to cause issues just to have + # more helpful internal types. + + class typed_cached_property(Generic[_T]): # noqa: N801 + func: Callable[[Any], _T] + attrname: str | None + + def __init__(self, func: Callable[[Any], _T]) -> None: ... + + @overload + def __get__(self, instance: None, owner: type[Any] | None = None) -> Self: ... + + @overload + def __get__(self, instance: object, owner: type[Any] | None = None) -> _T: ... + + def __get__(self, instance: object, owner: type[Any] | None = None) -> _T | Self: + raise NotImplementedError() + + def __set_name__(self, owner: type[Any], name: str) -> None: ... + + # __set__ is not defined at runtime, but @cached_property is designed to be settable + def __set__(self, instance: object, value: _T) -> None: ... 
+else: + try: + from functools import cached_property + except ImportError: + from cached_property import cached_property + + typed_cached_property = cached_property diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_base_models.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_base_models.py new file mode 100644 index 0000000000000..5e9a7e0a987e2 --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_base_models.py @@ -0,0 +1,671 @@ +from __future__ import annotations + +import inspect +import os +from collections.abc import Callable +from datetime import date, datetime +from typing import TYPE_CHECKING, Any, ClassVar, Generic, Literal, TypeGuard, TypeVar, cast + +import pydantic +import pydantic.generics +from pydantic.fields import FieldInfo +from typing_extensions import ( + ParamSpec, + Protocol, + override, + runtime_checkable, +) + +from ._base_compat import ( + PYDANTIC_V2, + ConfigDict, + field_get_default, + get_args, + get_model_config, + get_model_fields, + get_origin, + is_literal_type, + is_union, + parse_obj, +) +from ._base_compat import ( + GenericModel as BaseGenericModel, +) +from ._base_type import ( + IncEx, + ModelT, +) +from ._utils import ( + PropertyInfo, + coerce_boolean, + extract_type_arg, + is_annotated_type, + is_list, + is_mapping, + parse_date, + parse_datetime, + strip_annotated_type, +) + +if TYPE_CHECKING: + from pydantic_core.core_schema import LiteralSchema, ModelField, ModelFieldsSchema + +__all__ = ["BaseModel", "GenericModel"] +_BaseModelT = TypeVar("_BaseModelT", bound="BaseModel") + +_T = TypeVar("_T") +P = ParamSpec("P") + + +@runtime_checkable +class _ConfigProtocol(Protocol): + allow_population_by_field_name: bool + + +class BaseModel(pydantic.BaseModel): + if PYDANTIC_V2: + model_config: ClassVar[ConfigDict] = ConfigDict( + extra="allow", defer_build=coerce_boolean(os.environ.get("DEFER_PYDANTIC_BUILD", "true")) + ) + else: + + @property + 
@override + def model_fields_set(self) -> set[str]: + # a forwards-compat shim for pydantic v2 + return self.__fields_set__ # type: ignore + + class Config(pydantic.BaseConfig): # pyright: ignore[reportDeprecated] + extra: Any = pydantic.Extra.allow # type: ignore + + def to_dict( + self, + *, + mode: Literal["json", "python"] = "python", + use_api_names: bool = True, + exclude_unset: bool = True, + exclude_defaults: bool = False, + exclude_none: bool = False, + warnings: bool = True, + ) -> dict[str, object]: + """Recursively generate a dictionary representation of the model, optionally specifying which fields to include or exclude. + + By default, fields that were not set by the API will not be included, + and keys will match the API response, *not* the property names from the model. + + For example, if the API responds with `"fooBar": true` but we've defined a `foo_bar: bool` property, + the output will use the `"fooBar"` key (unless `use_api_names=False` is passed). + + Args: + mode: + If mode is 'json', the dictionary will only contain JSON serializable types. e.g. `datetime` will be turned into a string, `"2024-3-22T18:11:19.117000Z"`. + If mode is 'python', the dictionary may contain any Python objects. e.g. `datetime(2024, 3, 22)` + + use_api_names: Whether to use the key that the API responded with or the property name. Defaults to `True`. + exclude_unset: Whether to exclude fields that have not been explicitly set. + exclude_defaults: Whether to exclude fields that are set to their default value from the output. + exclude_none: Whether to exclude fields that have a value of `None` from the output. + warnings: Whether to log warnings when invalid fields are encountered. This is only supported in Pydantic v2. 
+ """ # noqa: E501 + return self.model_dump( + mode=mode, + by_alias=use_api_names, + exclude_unset=exclude_unset, + exclude_defaults=exclude_defaults, + exclude_none=exclude_none, + warnings=warnings, + ) + + def to_json( + self, + *, + indent: int | None = 2, + use_api_names: bool = True, + exclude_unset: bool = True, + exclude_defaults: bool = False, + exclude_none: bool = False, + warnings: bool = True, + ) -> str: + """Generates a JSON string representing this model as it would be received from or sent to the API (but with indentation). + + By default, fields that were not set by the API will not be included, + and keys will match the API response, *not* the property names from the model. + + For example, if the API responds with `"fooBar": true` but we've defined a `foo_bar: bool` property, + the output will use the `"fooBar"` key (unless `use_api_names=False` is passed). + + Args: + indent: Indentation to use in the JSON output. If `None` is passed, the output will be compact. Defaults to `2` + use_api_names: Whether to use the key that the API responded with or the property name. Defaults to `True`. + exclude_unset: Whether to exclude fields that have not been explicitly set. + exclude_defaults: Whether to exclude fields that have the default value. + exclude_none: Whether to exclude fields that have a value of `None`. + warnings: Whether to show any warnings that occurred during serialization. This is only supported in Pydantic v2. + """ # noqa: E501 + return self.model_dump_json( + indent=indent, + by_alias=use_api_names, + exclude_unset=exclude_unset, + exclude_defaults=exclude_defaults, + exclude_none=exclude_none, + warnings=warnings, + ) + + @override + def __str__(self) -> str: + # mypy complains about an invalid self arg + return f'{self.__repr_name__()}({self.__repr_str__(", ")})' # type: ignore[misc] + + # Override the 'construct' method in a way that supports recursive parsing without validation. 
+ # Based on https://github.com/samuelcolvin/pydantic/issues/1168#issuecomment-817742836. + @classmethod + @override + def construct( + cls: type[ModelT], + _fields_set: set[str] | None = None, + **values: object, + ) -> ModelT: + m = cls.__new__(cls) + fields_values: dict[str, object] = {} + + config = get_model_config(cls) + populate_by_name = ( + config.allow_population_by_field_name + if isinstance(config, _ConfigProtocol) + else config.get("populate_by_name") + ) + + if _fields_set is None: + _fields_set = set() + + model_fields = get_model_fields(cls) + for name, field in model_fields.items(): + key = field.alias + if key is None or (key not in values and populate_by_name): + key = name + + if key in values: + fields_values[name] = _construct_field(value=values[key], field=field, key=key) + _fields_set.add(name) + else: + fields_values[name] = field_get_default(field) + + _extra = {} + for key, value in values.items(): + if key not in model_fields: + if PYDANTIC_V2: + _extra[key] = value + else: + _fields_set.add(key) + fields_values[key] = value + + object.__setattr__(m, "__dict__", fields_values) # noqa: PLC2801 + + if PYDANTIC_V2: + # these properties are copied from Pydantic's `model_construct()` method + object.__setattr__(m, "__pydantic_private__", None) # noqa: PLC2801 + object.__setattr__(m, "__pydantic_extra__", _extra) # noqa: PLC2801 + object.__setattr__(m, "__pydantic_fields_set__", _fields_set) # noqa: PLC2801 + else: + # init_private_attributes() does not exist in v2 + m._init_private_attributes() # type: ignore + + # copied from Pydantic v1's `construct()` method + object.__setattr__(m, "__fields_set__", _fields_set) # noqa: PLC2801 + + return m + + if not TYPE_CHECKING: + # type checkers incorrectly complain about this assignment + # because the type signatures are technically different + # although not in practice + model_construct = construct + + if not PYDANTIC_V2: + # we define aliases for some of the new pydantic v2 methods so + # that we 
can just document these methods without having to specify + # a specific pydantic version as some users may not know which + # pydantic version they are currently using + + @override + def model_dump( + self, + *, + mode: Literal["json", "python"] | str = "python", + include: IncEx = None, + exclude: IncEx = None, + by_alias: bool = False, + exclude_unset: bool = False, + exclude_defaults: bool = False, + exclude_none: bool = False, + round_trip: bool = False, + warnings: bool | Literal["none", "warn", "error"] = True, + context: dict[str, Any] | None = None, + serialize_as_any: bool = False, + ) -> dict[str, Any]: + """Usage docs: https://docs.pydantic.dev/2.4/concepts/serialization/#modelmodel_dump + + Generate a dictionary representation of the model, optionally specifying which fields to include or exclude. + + Args: + mode: The mode in which `to_python` should run. + If mode is 'json', the dictionary will only contain JSON serializable types. + If mode is 'python', the dictionary may contain any Python objects. + include: A list of fields to include in the output. + exclude: A list of fields to exclude from the output. + by_alias: Whether to use the field's alias in the dictionary key if defined. + exclude_unset: Whether to exclude fields that are unset or None from the output. + exclude_defaults: Whether to exclude fields that are set to their default value from the output. + exclude_none: Whether to exclude fields that have a value of `None` from the output. + round_trip: Whether to enable serialization and deserialization round-trip support. + warnings: Whether to log warnings when invalid fields are encountered. + + Returns: + A dictionary representation of the model. 
+ """ + if mode != "python": + raise ValueError("mode is only supported in Pydantic v2") + if round_trip != False: + raise ValueError("round_trip is only supported in Pydantic v2") + if warnings != True: + raise ValueError("warnings is only supported in Pydantic v2") + if context is not None: + raise ValueError("context is only supported in Pydantic v2") + if serialize_as_any != False: + raise ValueError("serialize_as_any is only supported in Pydantic v2") + return super().dict( # pyright: ignore[reportDeprecated] + include=include, + exclude=exclude, + by_alias=by_alias, + exclude_unset=exclude_unset, + exclude_defaults=exclude_defaults, + exclude_none=exclude_none, + ) + + @override + def model_dump_json( + self, + *, + indent: int | None = None, + include: IncEx = None, + exclude: IncEx = None, + by_alias: bool = False, + exclude_unset: bool = False, + exclude_defaults: bool = False, + exclude_none: bool = False, + round_trip: bool = False, + warnings: bool | Literal["none", "warn", "error"] = True, + context: dict[str, Any] | None = None, + serialize_as_any: bool = False, + ) -> str: + """Usage docs: https://docs.pydantic.dev/2.4/concepts/serialization/#modelmodel_dump_json + + Generates a JSON representation of the model using Pydantic's `to_json` method. + + Args: + indent: Indentation to use in the JSON output. If None is passed, the output will be compact. + include: Field(s) to include in the JSON output. Can take either a string or set of strings. + exclude: Field(s) to exclude from the JSON output. Can take either a string or set of strings. + by_alias: Whether to serialize using field aliases. + exclude_unset: Whether to exclude fields that have not been explicitly set. + exclude_defaults: Whether to exclude fields that have the default value. + exclude_none: Whether to exclude fields that have a value of `None`. + round_trip: Whether to use serialization/deserialization between JSON and class instance. 
+ warnings: Whether to show any warnings that occurred during serialization. + + Returns: + A JSON string representation of the model. + """ + if round_trip != False: + raise ValueError("round_trip is only supported in Pydantic v2") + if warnings != True: + raise ValueError("warnings is only supported in Pydantic v2") + if context is not None: + raise ValueError("context is only supported in Pydantic v2") + if serialize_as_any != False: + raise ValueError("serialize_as_any is only supported in Pydantic v2") + return super().json( # type: ignore[reportDeprecated] + indent=indent, + include=include, + exclude=exclude, + by_alias=by_alias, + exclude_unset=exclude_unset, + exclude_defaults=exclude_defaults, + exclude_none=exclude_none, + ) + + +def _construct_field(value: object, field: FieldInfo, key: str) -> object: + if value is None: + return field_get_default(field) + + if PYDANTIC_V2: + type_ = field.annotation + else: + type_ = cast(type, field.outer_type_) # type: ignore + + if type_ is None: + raise RuntimeError(f"Unexpected field type is None for {key}") + + return construct_type(value=value, type_=type_) + + +def is_basemodel(type_: type) -> bool: + """Returns whether or not the given type is either a `BaseModel` or a union of `BaseModel`""" + if is_union(type_): + return any(is_basemodel(variant) for variant in get_args(type_)) + + return is_basemodel_type(type_) + + +def is_basemodel_type(type_: type) -> TypeGuard[type[BaseModel] | type[GenericModel]]: + origin = get_origin(type_) or type_ + return issubclass(origin, BaseModel) or issubclass(origin, GenericModel) + + +def build( + base_model_cls: Callable[P, _BaseModelT], + *args: P.args, + **kwargs: P.kwargs, +) -> _BaseModelT: + """Construct a BaseModel class without validation. + + This is useful for cases where you need to instantiate a `BaseModel` + from an API response as this provides type-safe params which isn't supported + by helpers like `construct_type()`. 
+ + ```py + build(MyModel, my_field_a="foo", my_field_b=123) + ``` + """ + if args: + raise TypeError( + "Received positional arguments which are not supported; Keyword arguments must be used instead", + ) + + return cast(_BaseModelT, construct_type(type_=base_model_cls, value=kwargs)) + + +def construct_type_unchecked(*, value: object, type_: type[_T]) -> _T: + """Loose coercion to the expected type with construction of nested values. + + Note: the returned value from this function is not guaranteed to match the + given type. + """ + return cast(_T, construct_type(value=value, type_=type_)) + + +def construct_type(*, value: object, type_: type) -> object: + """Loose coercion to the expected type with construction of nested values. + + If the given value does not match the expected type then it is returned as-is. + """ + # we allow `object` as the input type because otherwise, passing things like + # `Literal['value']` will be reported as a type error by type checkers + type_ = cast("type[object]", type_) + + # unwrap `Annotated[T, ...]` -> `T` + if is_annotated_type(type_): + meta: tuple[Any, ...] = get_args(type_)[1:] + type_ = extract_type_arg(type_, 0) + else: + meta = () + # we need to use the origin class for any types that are subscripted generics + # e.g. Dict[str, object] + origin = get_origin(type_) or type_ + args = get_args(type_) + + if is_union(origin): + try: + return validate_type(type_=cast("type[object]", type_), value=value) + except Exception: + pass + + # if the type is a discriminated union then we want to construct the right variant + # in the union, even if the data doesn't match exactly, otherwise we'd break code + # that relies on the constructed class types, e.g. 
+ # + # class FooType: + # kind: Literal['foo'] + # value: str + # + # class BarType: + # kind: Literal['bar'] + # value: int + # + # without this block, if the data we get is something like `{'kind': 'bar', 'value': 'foo'}` then + # we'd end up constructing `FooType` when it should be `BarType`. + discriminator = _build_discriminated_union_meta(union=type_, meta_annotations=meta) + if discriminator and is_mapping(value): + variant_value = value.get(discriminator.field_alias_from or discriminator.field_name) + if variant_value and isinstance(variant_value, str): + variant_type = discriminator.mapping.get(variant_value) + if variant_type: + return construct_type(type_=variant_type, value=value) + + # if the data is not valid, use the first variant that doesn't fail while deserializing + for variant in args: + try: + return construct_type(value=value, type_=variant) + except Exception: + continue + + raise RuntimeError(f"Could not convert data into a valid instance of {type_}") + if origin == dict: + if not is_mapping(value): + return value + + _, items_type = get_args(type_) # Dict[_, items_type] + return {key: construct_type(value=item, type_=items_type) for key, item in value.items()} + + if not is_literal_type(type_) and (issubclass(origin, BaseModel) or issubclass(origin, GenericModel)): + if is_list(value): + return [cast(Any, type_).construct(**entry) if is_mapping(entry) else entry for entry in value] + + if is_mapping(value): + if issubclass(type_, BaseModel): + return type_.construct(**value) # type: ignore[arg-type] + + return cast(Any, type_).construct(**value) + + if origin == list: + if not is_list(value): + return value + + inner_type = args[0] # List[inner_type] + return [construct_type(value=entry, type_=inner_type) for entry in value] + + if origin == float: + if isinstance(value, int): + coerced = float(value) + if coerced != value: + return value + return coerced + + return value + + if type_ == datetime: + try: + return parse_datetime(value) # 
type: ignore + except Exception: + return value + + if type_ == date: + try: + return parse_date(value) # type: ignore + except Exception: + return value + + return value + + +@runtime_checkable +class CachedDiscriminatorType(Protocol): + __discriminator__: DiscriminatorDetails + + +class DiscriminatorDetails: + field_name: str + """The name of the discriminator field in the variant class, e.g. + + ```py + class Foo(BaseModel): + type: Literal['foo'] + ``` + + Will result in field_name='type' + """ + + field_alias_from: str | None + """The name of the discriminator field in the API response, e.g. + + ```py + class Foo(BaseModel): + type: Literal['foo'] = Field(alias='type_from_api') + ``` + + Will result in field_alias_from='type_from_api' + """ + + mapping: dict[str, type] + """Mapping of discriminator value to variant type, e.g. + + {'foo': FooVariant, 'bar': BarVariant} + """ + + def __init__( + self, + *, + mapping: dict[str, type], + discriminator_field: str, + discriminator_alias: str | None, + ) -> None: + self.mapping = mapping + self.field_name = discriminator_field + self.field_alias_from = discriminator_alias + + +def _build_discriminated_union_meta(*, union: type, meta_annotations: tuple[Any, ...]) -> DiscriminatorDetails | None: + if isinstance(union, CachedDiscriminatorType): + return union.__discriminator__ + + discriminator_field_name: str | None = None + + for annotation in meta_annotations: + if isinstance(annotation, PropertyInfo) and annotation.discriminator is not None: + discriminator_field_name = annotation.discriminator + break + + if not discriminator_field_name: + return None + + mapping: dict[str, type] = {} + discriminator_alias: str | None = None + + for variant in get_args(union): + variant = strip_annotated_type(variant) + if is_basemodel_type(variant): + if PYDANTIC_V2: + field = _extract_field_schema_pv2(variant, discriminator_field_name) + if not field: + continue + + # Note: if one variant defines an alias then they all should + 
discriminator_alias = field.get("serialization_alias") + + field_schema = field["schema"] + + if field_schema["type"] == "literal": + for entry in cast("LiteralSchema", field_schema)["expected"]: + if isinstance(entry, str): + mapping[entry] = variant + else: + field_info = cast("dict[str, FieldInfo]", variant.__fields__).get(discriminator_field_name) # pyright: ignore[reportDeprecated, reportUnnecessaryCast] + if not field_info: + continue + + # Note: if one variant defines an alias then they all should + discriminator_alias = field_info.alias + + if field_info.annotation and is_literal_type(field_info.annotation): + for entry in get_args(field_info.annotation): + if isinstance(entry, str): + mapping[entry] = variant + + if not mapping: + return None + + details = DiscriminatorDetails( + mapping=mapping, + discriminator_field=discriminator_field_name, + discriminator_alias=discriminator_alias, + ) + cast(CachedDiscriminatorType, union).__discriminator__ = details + return details + + +def _extract_field_schema_pv2(model: type[BaseModel], field_name: str) -> ModelField | None: + schema = model.__pydantic_core_schema__ + if schema["type"] != "model": + return None + + fields_schema = schema["schema"] + if fields_schema["type"] != "model-fields": + return None + + fields_schema = cast("ModelFieldsSchema", fields_schema) + + field = fields_schema["fields"].get(field_name) + if not field: + return None + + return cast("ModelField", field) # pyright: ignore[reportUnnecessaryCast] + + +def validate_type(*, type_: type[_T], value: object) -> _T: + """Strict validation that the given value matches the expected type""" + if inspect.isclass(type_) and issubclass(type_, pydantic.BaseModel): + return cast(_T, parse_obj(type_, value)) + + return cast(_T, _validate_non_model_type(type_=type_, value=value)) + + +# our use of subclasssing here causes weirdness for type checkers, +# so we just pretend that we don't subclass +if TYPE_CHECKING: + GenericModel = BaseModel +else: + + 
class GenericModel(BaseGenericModel, BaseModel): + pass + + +if PYDANTIC_V2: + from pydantic import TypeAdapter + + def _validate_non_model_type(*, type_: type[_T], value: object) -> _T: + return TypeAdapter(type_).validate_python(value) + +elif not TYPE_CHECKING: + + class TypeAdapter(Generic[_T]): + """Used as a placeholder to easily convert runtime types to a Pydantic format + to provide validation. + + For example: + ```py + validated = RootModel[int](__root__="5").__root__ + # validated: 5 + ``` + """ + + def __init__(self, type_: type[_T]): + self.type_ = type_ + + def validate_python(self, value: Any) -> _T: + if not isinstance(value, self.type_): + raise ValueError(f"Invalid type: {value} is not of type {self.type_}") + return value + + def _validate_non_model_type(*, type_: type[_T], value: object) -> _T: + return TypeAdapter(type_).validate_python(value) diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_base_type.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_base_type.py index 7a91f9b79627c..ea1d3f09dc42e 100644 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_base_type.py +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_base_type.py @@ -1,11 +1,21 @@ from __future__ import annotations -from collections.abc import Mapping, Sequence +from collections.abc import Callable, Mapping, Sequence from os import PathLike -from typing import IO, TYPE_CHECKING, Any, Literal, TypeVar, Union +from typing import ( + IO, + TYPE_CHECKING, + Any, + Literal, + Optional, + TypeAlias, + TypeVar, + Union, +) import pydantic -from typing_extensions import override +from httpx import Response +from typing_extensions import Protocol, TypedDict, override, runtime_checkable Query = Mapping[str, object] Body = object @@ -22,7 +32,7 @@ # Sentinel class used until PEP 0661 is accepted -class NotGiven(pydantic.BaseModel): +class NotGiven: """ A sentinel singleton class used to distinguish 
omitted keyword arguments from those passed in with the value None (which may have different behavior). @@ -50,7 +60,7 @@ def __repr__(self) -> str: NOT_GIVEN = NotGiven() -class Omit(pydantic.BaseModel): +class Omit: """In certain situations you need to be able to represent a case where a default value has to be explicitly removed and `None` is not an appropriate substitute, for example: @@ -71,37 +81,90 @@ def __bool__(self) -> Literal[False]: return False +@runtime_checkable +class ModelBuilderProtocol(Protocol): + @classmethod + def build( + cls: type[_T], + *, + response: Response, + data: object, + ) -> _T: ... + + Headers = Mapping[str, Union[str, Omit]] + +class HeadersLikeProtocol(Protocol): + def get(self, __key: str) -> str | None: ... + + +HeadersLike = Union[Headers, HeadersLikeProtocol] + ResponseT = TypeVar( "ResponseT", - bound="Union[str, None, BaseModel, list[Any], Dict[str, Any], Response, UnknownResponse, ModelBuilderProtocol," - " BinaryResponseContent]", + bound="Union[str, None, BaseModel, list[Any], dict[str, Any], Response, UnknownResponse, ModelBuilderProtocol, BinaryResponseContent]", # noqa: E501 ) +StrBytesIntFloat = Union[str, bytes, int, float] + +# Note: copied from Pydantic +# https://github.com/pydantic/pydantic/blob/32ea570bf96e84234d2992e1ddf40ab8a565925a/pydantic/main.py#L49 +IncEx: TypeAlias = "set[int] | set[str] | dict[int, Any] | dict[str, Any] | None" + +PostParser = Callable[[Any], Any] + + +@runtime_checkable +class InheritsGeneric(Protocol): + """Represents a type that has inherited from `Generic` + + The `__orig_bases__` property can be used to determine the resolved + type variable for a given base class. 
+ """ + + __orig_bases__: tuple[_GenericAlias] + + +class _GenericAlias(Protocol): + __origin__: type[object] + + +class HttpxSendArgs(TypedDict, total=False): + auth: httpx.Auth + + # for user input files if TYPE_CHECKING: + Base64FileInput = Union[IO[bytes], PathLike[str]] FileContent = Union[IO[bytes], bytes, PathLike[str]] else: + Base64FileInput = Union[IO[bytes], PathLike] FileContent = Union[IO[bytes], bytes, PathLike] FileTypes = Union[ - FileContent, # file content - tuple[str, FileContent], # (filename, file) - tuple[str, FileContent, str], # (filename, file , content_type) - tuple[str, FileContent, str, Mapping[str, str]], # (filename, file , content_type, headers) + # file (or bytes) + FileContent, + # (filename, file (or bytes)) + tuple[Optional[str], FileContent], + # (filename, file (or bytes), content_type) + tuple[Optional[str], FileContent, Optional[str]], + # (filename, file (or bytes), content_type, headers) + tuple[Optional[str], FileContent, Optional[str], Mapping[str, str]], ] - RequestFiles = Union[Mapping[str, FileTypes], Sequence[tuple[str, FileTypes]]] -# for httpx client supported files - +# duplicate of the above but without our custom file support HttpxFileContent = Union[bytes, IO[bytes]] HttpxFileTypes = Union[ - FileContent, # file content - tuple[str, HttpxFileContent], # (filename, file) - tuple[str, HttpxFileContent, str], # (filename, file , content_type) - tuple[str, HttpxFileContent, str, Mapping[str, str]], # (filename, file , content_type, headers) + # file (or bytes) + HttpxFileContent, + # (filename, file (or bytes)) + tuple[Optional[str], HttpxFileContent], + # (filename, file (or bytes), content_type) + tuple[Optional[str], HttpxFileContent, Optional[str]], + # (filename, file (or bytes), content_type, headers) + tuple[Optional[str], HttpxFileContent, Optional[str], Mapping[str, str]], ] HttpxRequestFiles = Union[Mapping[str, HttpxFileTypes], Sequence[tuple[str, HttpxFileTypes]]] diff --git 
a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_constants.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_constants.py new file mode 100644 index 0000000000000..8e43bdebecb61 --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_constants.py @@ -0,0 +1,12 @@ +import httpx + +RAW_RESPONSE_HEADER = "X-Stainless-Raw-Response" +# 通过 `Timeout` 控制接口`connect` 和 `read` 超时时间,默认为`timeout=300.0, connect=8.0` +ZHIPUAI_DEFAULT_TIMEOUT = httpx.Timeout(timeout=300.0, connect=8.0) +# 通过 `retry` 参数控制重试次数,默认为3次 +ZHIPUAI_DEFAULT_MAX_RETRIES = 3 +# 通过 `Limits` 控制最大连接数和保持连接数,默认为`max_connections=50, max_keepalive_connections=10` +ZHIPUAI_DEFAULT_LIMITS = httpx.Limits(max_connections=50, max_keepalive_connections=10) + +INITIAL_RETRY_DELAY = 0.5 +MAX_RETRY_DELAY = 8.0 diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_errors.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_errors.py index 1027c1bc5b1e5..e2c9d24c6c0d2 100644 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_errors.py +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_errors.py @@ -13,6 +13,7 @@ "APIResponseError", "APIResponseValidationError", "APITimeoutError", + "APIConnectionError", ] @@ -24,7 +25,7 @@ def __init__( super().__init__(message) -class APIStatusError(Exception): +class APIStatusError(ZhipuAIError): response: httpx.Response status_code: int @@ -49,7 +50,7 @@ class APIInternalError(APIStatusError): ... class APIServerFlowExceedError(APIStatusError): ... 
-class APIResponseError(Exception): +class APIResponseError(ZhipuAIError): message: str request: httpx.Request json_data: object @@ -75,9 +76,11 @@ def __init__(self, response: httpx.Response, json_data: object | None, *, messag self.status_code = response.status_code -class APITimeoutError(Exception): - request: httpx.Request +class APIConnectionError(APIResponseError): + def __init__(self, *, message: str = "Connection error.", request: httpx.Request) -> None: + super().__init__(message, request, json_data=None) - def __init__(self, request: httpx.Request): - self.request = request - super().__init__("Request Timeout") + +class APITimeoutError(APIConnectionError): + def __init__(self, request: httpx.Request) -> None: + super().__init__(message="Request timed out.", request=request) diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_files.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_files.py index 0796bfe11cc65..f9d2e14d9ecb9 100644 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_files.py +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_files.py @@ -2,40 +2,74 @@ import io import os -from collections.abc import Mapping, Sequence -from pathlib import Path +import pathlib +from typing import TypeGuard, overload -from ._base_type import FileTypes, HttpxFileTypes, HttpxRequestFiles, RequestFiles +from ._base_type import ( + Base64FileInput, + FileContent, + FileTypes, + HttpxFileContent, + HttpxFileTypes, + HttpxRequestFiles, + RequestFiles, +) +from ._utils import is_mapping_t, is_sequence_t, is_tuple_t -def is_file_content(obj: object) -> bool: +def is_base64_file_input(obj: object) -> TypeGuard[Base64FileInput]: + return isinstance(obj, io.IOBase | os.PathLike) + + +def is_file_content(obj: object) -> TypeGuard[FileContent]: return isinstance(obj, bytes | tuple | io.IOBase | os.PathLike) -def _transform_file(file: FileTypes) -> HttpxFileTypes: - if 
is_file_content(file): - if isinstance(file, os.PathLike): - path = Path(file) - return path.name, path.read_bytes() - else: - return file - if isinstance(file, tuple): - if isinstance(file[1], os.PathLike): - return (file[0], Path(file[1]).read_bytes(), *file[2:]) - else: - return (file[0], file[1], *file[2:]) - else: - raise TypeError(f"Unexpected input file with type {type(file)},Expected FileContent type or tuple type") +def assert_is_file_content(obj: object, *, key: str | None = None) -> None: + if not is_file_content(obj): + prefix = f"Expected entry at `{key}`" if key is not None else f"Expected file input `{obj!r}`" + raise RuntimeError( + f"{prefix} to be bytes, an io.IOBase instance, PathLike or a tuple but received {type(obj)} instead. See https://github.com/openai/openai-python/tree/main#file-uploads" + ) from None -def make_httpx_files(files: RequestFiles | None) -> HttpxRequestFiles | None: +@overload +def to_httpx_files(files: None) -> None: ... + + +@overload +def to_httpx_files(files: RequestFiles) -> HttpxRequestFiles: ... 
+ + +def to_httpx_files(files: RequestFiles | None) -> HttpxRequestFiles | None: if files is None: return None - if isinstance(files, Mapping): + if is_mapping_t(files): files = {key: _transform_file(file) for key, file in files.items()} - elif isinstance(files, Sequence): + elif is_sequence_t(files): files = [(key, _transform_file(file)) for key, file in files] else: - raise TypeError(f"Unexpected input file with type {type(files)}, excepted Mapping or Sequence") + raise TypeError(f"Unexpected file type input {type(files)}, expected mapping or sequence") + return files + + +def _transform_file(file: FileTypes) -> HttpxFileTypes: + if is_file_content(file): + if isinstance(file, os.PathLike): + path = pathlib.Path(file) + return (path.name, path.read_bytes()) + + return file + + if is_tuple_t(file): + return (file[0], _read_file_content(file[1]), *file[2:]) + + raise TypeError("Expected file types input to be a FileContent type or to be a tuple") + + +def _read_file_content(file: FileContent) -> HttpxFileContent: + if isinstance(file, os.PathLike): + return pathlib.Path(file).read_bytes() + return file diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_http_client.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_http_client.py index 5f7f6d04f20d9..d0f933d814138 100644 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_http_client.py +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_http_client.py @@ -1,23 +1,70 @@ from __future__ import annotations import inspect -from collections.abc import Mapping -from typing import Any, Union, cast +import logging +import time +import warnings +from collections.abc import Iterator, Mapping +from itertools import starmap +from random import random +from typing import TYPE_CHECKING, Any, Generic, Literal, Optional, TypeVar, Union, cast, overload import httpx import pydantic from httpx import URL, Timeout -from tenacity import retry -from 
tenacity.stop import stop_after_attempt - -from . import _errors -from ._base_type import NOT_GIVEN, AnyMapping, Body, Data, Headers, NotGiven, Query, RequestFiles, ResponseT -from ._errors import APIResponseValidationError, APIStatusError, APITimeoutError -from ._files import make_httpx_files -from ._request_opt import ClientRequestParam, UserRequestInput -from ._response import HttpResponse + +from . import _errors, get_origin +from ._base_compat import model_copy +from ._base_models import GenericModel, construct_type, validate_type +from ._base_type import ( + NOT_GIVEN, + AnyMapping, + Body, + Data, + Headers, + HttpxSendArgs, + ModelBuilderProtocol, + NotGiven, + Omit, + PostParser, + Query, + RequestFiles, + ResponseT, +) +from ._constants import ( + INITIAL_RETRY_DELAY, + MAX_RETRY_DELAY, + RAW_RESPONSE_HEADER, + ZHIPUAI_DEFAULT_LIMITS, + ZHIPUAI_DEFAULT_MAX_RETRIES, + ZHIPUAI_DEFAULT_TIMEOUT, +) +from ._errors import APIConnectionError, APIResponseValidationError, APIStatusError, APITimeoutError +from ._files import to_httpx_files +from ._legacy_response import LegacyAPIResponse +from ._request_opt import FinalRequestOptions, UserRequestInput +from ._response import APIResponse, BaseAPIResponse, extract_response_type from ._sse_client import StreamResponse -from ._utils import flatten +from ._utils import flatten, is_given, is_mapping + +log: logging.Logger = logging.getLogger(__name__) + +# TODO: make base page type vars covariant +SyncPageT = TypeVar("SyncPageT", bound="BaseSyncPage[Any]") +# AsyncPageT = TypeVar("AsyncPageT", bound="BaseAsyncPage[Any]") + +_T = TypeVar("_T") +_T_co = TypeVar("_T_co", covariant=True) + +if TYPE_CHECKING: + from httpx._config import DEFAULT_TIMEOUT_CONFIG as HTTPX_DEFAULT_TIMEOUT +else: + try: + from httpx._config import DEFAULT_TIMEOUT_CONFIG as HTTPX_DEFAULT_TIMEOUT + except ImportError: + # taken from https://github.com/encode/httpx/blob/3ba5fe0d7ac70222590e759c31442b1cab263791/httpx/_config.py#L366 + 
HTTPX_DEFAULT_TIMEOUT = Timeout(5.0) + headers = { "Accept": "application/json", @@ -25,50 +72,180 @@ } -def _merge_map(map1: Mapping, map2: Mapping) -> Mapping: - merged = {**map1, **map2} - return {key: val for key, val in merged.items() if val is not None} +class PageInfo: + """Stores the necessary information to build the request to retrieve the next page. + Either `url` or `params` must be set. + """ -from itertools import starmap + url: URL | NotGiven + params: Query | NotGiven + + @overload + def __init__( + self, + *, + url: URL, + ) -> None: ... + + @overload + def __init__( + self, + *, + params: Query, + ) -> None: ... + + def __init__( + self, + *, + url: URL | NotGiven = NOT_GIVEN, + params: Query | NotGiven = NOT_GIVEN, + ) -> None: + self.url = url + self.params = params + + +class BasePage(GenericModel, Generic[_T]): + """ + Defines the core interface for pagination. + + Type Args: + ModelT: The pydantic model that represents an item in the response. + + Methods: + has_next_page(): Check if there is another page available + next_page_info(): Get the necessary information to make a request for the next page + """ + + _options: FinalRequestOptions = pydantic.PrivateAttr() + _model: type[_T] = pydantic.PrivateAttr() -from httpx._config import DEFAULT_TIMEOUT_CONFIG as HTTPX_DEFAULT_TIMEOUT + def has_next_page(self) -> bool: + items = self._get_page_items() + if not items: + return False + return self.next_page_info() is not None -ZHIPUAI_DEFAULT_TIMEOUT = httpx.Timeout(timeout=300.0, connect=8.0) -ZHIPUAI_DEFAULT_MAX_RETRIES = 3 -ZHIPUAI_DEFAULT_LIMITS = httpx.Limits(max_connections=5, max_keepalive_connections=5) + def next_page_info(self) -> Optional[PageInfo]: ... + + def _get_page_items(self) -> Iterable[_T]: # type: ignore[empty-body] + ... + + def _params_from_url(self, url: URL) -> httpx.QueryParams: + # TODO: do we have to preprocess params here? 
+ return httpx.QueryParams(cast(Any, self._options.params)).merge(url.params) + + def _info_to_options(self, info: PageInfo) -> FinalRequestOptions: + options = model_copy(self._options) + options._strip_raw_response_header() + + if not isinstance(info.params, NotGiven): + options.params = {**options.params, **info.params} + return options + + if not isinstance(info.url, NotGiven): + params = self._params_from_url(info.url) + url = info.url.copy_with(params=params) + options.params = dict(url.params) + options.url = str(url) + return options + + raise ValueError("Unexpected PageInfo state") + + +class BaseSyncPage(BasePage[_T], Generic[_T]): + _client: HttpClient = pydantic.PrivateAttr() + + def _set_private_attributes( + self, + client: HttpClient, + model: type[_T], + options: FinalRequestOptions, + ) -> None: + self._model = model + self._client = client + self._options = options + + # Pydantic uses a custom `__iter__` method to support casting BaseModels + # to dictionaries. e.g. dict(model). + # As we want to support `for item in page`, this is inherently incompatible + # with the default pydantic behaviour. It is not possible to support both + # use cases at once. Fortunately, this is not a big deal as all other pydantic + # methods should continue to work as expected as there is an alternative method + # to cast a model to a dictionary, model.dict(), which is used internally + # by pydantic. + def __iter__(self) -> Iterator[_T]: # type: ignore + for page in self.iter_pages(): + yield from page._get_page_items() + + def iter_pages(self: SyncPageT) -> Iterator[SyncPageT]: + page = self + while True: + yield page + if page.has_next_page(): + page = page.get_next_page() + else: + return + + def get_next_page(self: SyncPageT) -> SyncPageT: + info = self.next_page_info() + if not info: + raise RuntimeError( + "No next page expected; please check `.has_next_page()` before calling `.get_next_page()`." 
+ ) + + options = self._info_to_options(info) + return self._client._request_api_list(self._model, page=self.__class__, options=options) class HttpClient: _client: httpx.Client _version: str _base_url: URL - + max_retries: int timeout: Union[float, Timeout, None] _limits: httpx.Limits _has_custom_http_client: bool _default_stream_cls: type[StreamResponse[Any]] | None = None + _strict_response_validation: bool + def __init__( self, *, version: str, base_url: URL, + _strict_response_validation: bool, + max_retries: int = ZHIPUAI_DEFAULT_MAX_RETRIES, timeout: Union[float, Timeout, None], + limits: httpx.Limits | None = None, custom_httpx_client: httpx.Client | None = None, custom_headers: Mapping[str, str] | None = None, ) -> None: - if timeout is None or isinstance(timeout, NotGiven): + if limits is not None: + warnings.warn( + "The `connection_pool_limits` argument is deprecated. The `http_client` argument should be passed instead", # noqa: E501 + category=DeprecationWarning, + stacklevel=3, + ) + if custom_httpx_client is not None: + raise ValueError("The `http_client` argument is mutually exclusive with `connection_pool_limits`") + else: + limits = ZHIPUAI_DEFAULT_LIMITS + + if not is_given(timeout): if custom_httpx_client and custom_httpx_client.timeout != HTTPX_DEFAULT_TIMEOUT: timeout = custom_httpx_client.timeout else: timeout = ZHIPUAI_DEFAULT_TIMEOUT - self.timeout = cast(Timeout, timeout) + self.max_retries = max_retries + self.timeout = timeout + self._limits = limits self._has_custom_http_client = bool(custom_httpx_client) self._client = custom_httpx_client or httpx.Client( base_url=base_url, timeout=self.timeout, - limits=ZHIPUAI_DEFAULT_LIMITS, + limits=limits, ) self._version = version url = URL(url=base_url) @@ -76,6 +253,7 @@ def __init__( url = url.copy_with(raw_path=url.raw_path + b"/") self._base_url = url self._custom_headers = custom_headers or {} + self._strict_response_validation = _strict_response_validation def _prepare_url(self, url: str) 
-> URL: sub_url = URL(url) @@ -93,55 +271,101 @@ def _default_headers(self): "ZhipuAI-SDK-Ver": self._version, "source_type": "zhipu-sdk-python", "x-request-sdk": "zhipu-sdk-python", - **self._auth_headers, + **self.auth_headers, **self._custom_headers, } @property - def _auth_headers(self): + def custom_auth(self) -> httpx.Auth | None: + return None + + @property + def auth_headers(self): return {} - def _prepare_headers(self, request_param: ClientRequestParam) -> httpx.Headers: - custom_headers = request_param.headers or {} - headers_dict = _merge_map(self._default_headers, custom_headers) + def _prepare_headers(self, options: FinalRequestOptions) -> httpx.Headers: + custom_headers = options.headers or {} + headers_dict = _merge_mappings(self._default_headers, custom_headers) httpx_headers = httpx.Headers(headers_dict) return httpx_headers - def _prepare_request(self, request_param: ClientRequestParam) -> httpx.Request: + def _remaining_retries( + self, + remaining_retries: Optional[int], + options: FinalRequestOptions, + ) -> int: + return remaining_retries if remaining_retries is not None else options.get_max_retries(self.max_retries) + + def _calculate_retry_timeout( + self, + remaining_retries: int, + options: FinalRequestOptions, + response_headers: Optional[httpx.Headers] = None, + ) -> float: + max_retries = options.get_max_retries(self.max_retries) + + # If the API asks us to wait a certain amount of time (and it's a reasonable amount), just do what it says. + # retry_after = self._parse_retry_after_header(response_headers) + # if retry_after is not None and 0 < retry_after <= 60: + # return retry_after + + nb_retries = max_retries - remaining_retries + + # Apply exponential backoff, but not more than the max. + sleep_seconds = min(INITIAL_RETRY_DELAY * pow(2.0, nb_retries), MAX_RETRY_DELAY) + + # Apply some jitter, plus-or-minus half a second. 
+ jitter = 1 - 0.25 * random() + timeout = sleep_seconds * jitter + return max(timeout, 0) + + def _build_request(self, options: FinalRequestOptions) -> httpx.Request: kwargs: dict[str, Any] = {} - json_data = request_param.json_data - headers = self._prepare_headers(request_param) - url = self._prepare_url(request_param.url) - json_data = request_param.json_data + headers = self._prepare_headers(options) + url = self._prepare_url(options.url) + json_data = options.json_data + if options.extra_json is not None: + if json_data is None: + json_data = cast(Body, options.extra_json) + elif is_mapping(json_data): + json_data = _merge_mappings(json_data, options.extra_json) + else: + raise RuntimeError(f"Unexpected JSON data type, {type(json_data)}, cannot merge with `extra_body`") + + content_type = headers.get("Content-Type") + # multipart/form-data; boundary=---abc-- if headers.get("Content-Type") == "multipart/form-data": - headers.pop("Content-Type") + if "boundary" not in content_type: + # only remove the header if the boundary hasn't been explicitly set + # as the caller doesn't want httpx to come up with their own boundary + headers.pop("Content-Type") if json_data: kwargs["data"] = self._make_multipartform(json_data) return self._client.build_request( headers=headers, - timeout=self.timeout if isinstance(request_param.timeout, NotGiven) else request_param.timeout, - method=request_param.method, + timeout=self.timeout if isinstance(options.timeout, NotGiven) else options.timeout, + method=options.method, url=url, json=json_data, - files=request_param.files, - params=request_param.params, + files=options.files, + params=options.params, **kwargs, ) - def _object_to_formdata(self, key: str, value: Data | Mapping[object, object]) -> list[tuple[str, str]]: + def _object_to_formfata(self, key: str, value: Data | Mapping[object, object]) -> list[tuple[str, str]]: items = [] if isinstance(value, Mapping): for k, v in value.items(): - 
items.extend(self._object_to_formdata(f"{key}[{k}]", v)) + items.extend(self._object_to_formfata(f"{key}[{k}]", v)) return items if isinstance(value, list | tuple): for v in value: - items.extend(self._object_to_formdata(key + "[]", v)) + items.extend(self._object_to_formfata(key + "[]", v)) return items def _primitive_value_to_str(val) -> str: @@ -161,7 +385,7 @@ def _primitive_value_to_str(val) -> str: return [(key, str_data)] def _make_multipartform(self, data: Mapping[object, object]) -> dict[str, object]: - items = flatten(list(starmap(self._object_to_formdata, data.items()))) + items = flatten(list(starmap(self._object_to_formfata, data.items()))) serialized: dict[str, object] = {} for key, value in items: @@ -170,20 +394,6 @@ def _make_multipartform(self, data: Mapping[object, object]) -> dict[str, object serialized[key] = value return serialized - def _parse_response( - self, - *, - cast_type: type[ResponseT], - response: httpx.Response, - enable_stream: bool, - request_param: ClientRequestParam, - stream_cls: type[StreamResponse[Any]] | None = None, - ) -> HttpResponse: - http_response = HttpResponse( - raw_response=response, cast_type=cast_type, client=self, enable_stream=enable_stream, stream_cls=stream_cls - ) - return http_response.parse() - def _process_response_data( self, *, @@ -194,14 +404,58 @@ def _process_response_data( if data is None: return cast(ResponseT, None) + if cast_type is object: + return cast(ResponseT, data) + try: - if inspect.isclass(cast_type) and issubclass(cast_type, pydantic.BaseModel): - return cast(ResponseT, cast_type.validate(data)) + if inspect.isclass(cast_type) and issubclass(cast_type, ModelBuilderProtocol): + return cast(ResponseT, cast_type.build(response=response, data=data)) + + if self._strict_response_validation: + return cast(ResponseT, validate_type(type_=cast_type, value=data)) - return cast(ResponseT, pydantic.TypeAdapter(cast_type).validate_python(data)) + return cast(ResponseT, 
construct_type(type_=cast_type, value=data)) except pydantic.ValidationError as err: raise APIResponseValidationError(response=response, json_data=data) from err + def _should_stream_response_body(self, request: httpx.Request) -> bool: + return request.headers.get(RAW_RESPONSE_HEADER) == "stream" # type: ignore[no-any-return] + + def _should_retry(self, response: httpx.Response) -> bool: + # Note: this is not a standard header + should_retry_header = response.headers.get("x-should-retry") + + # If the server explicitly says whether or not to retry, obey. + if should_retry_header == "true": + log.debug("Retrying as header `x-should-retry` is set to `true`") + return True + if should_retry_header == "false": + log.debug("Not retrying as header `x-should-retry` is set to `false`") + return False + + # Retry on request timeouts. + if response.status_code == 408: + log.debug("Retrying due to status code %i", response.status_code) + return True + + # Retry on lock timeouts. + if response.status_code == 409: + log.debug("Retrying due to status code %i", response.status_code) + return True + + # Retry on rate limits. + if response.status_code == 429: + log.debug("Retrying due to status code %i", response.status_code) + return True + + # Retry internal errors. 
+ if response.status_code >= 500: + log.debug("Retrying due to status code %i", response.status_code) + return True + + log.debug("Not retrying") + return False + def is_closed(self) -> bool: return self._client.is_closed @@ -214,117 +468,385 @@ def __enter__(self): def __exit__(self, exc_type, exc_val, exc_tb): self.close() - @retry(stop=stop_after_attempt(ZHIPUAI_DEFAULT_MAX_RETRIES)) def request( + self, + cast_type: type[ResponseT], + options: FinalRequestOptions, + remaining_retries: Optional[int] = None, + *, + stream: bool = False, + stream_cls: type[StreamResponse] | None = None, + ) -> ResponseT | StreamResponse: + return self._request( + cast_type=cast_type, + options=options, + stream=stream, + stream_cls=stream_cls, + remaining_retries=remaining_retries, + ) + + def _request( self, *, cast_type: type[ResponseT], - params: ClientRequestParam, - enable_stream: bool = False, - stream_cls: type[StreamResponse[Any]] | None = None, + options: FinalRequestOptions, + remaining_retries: int | None, + stream: bool, + stream_cls: type[StreamResponse] | None, ) -> ResponseT | StreamResponse: - request = self._prepare_request(params) + retries = self._remaining_retries(remaining_retries, options) + request = self._build_request(options) + kwargs: HttpxSendArgs = {} + if self.custom_auth is not None: + kwargs["auth"] = self.custom_auth try: response = self._client.send( request, - stream=enable_stream, + stream=stream or self._should_stream_response_body(request=request), + **kwargs, ) - response.raise_for_status() except httpx.TimeoutException as err: + log.debug("Encountered httpx.TimeoutException", exc_info=True) + + if retries > 0: + return self._retry_request( + options, + cast_type, + retries, + stream=stream, + stream_cls=stream_cls, + response_headers=None, + ) + + log.debug("Raising timeout error") raise APITimeoutError(request=request) from err - except httpx.HTTPStatusError as err: - err.response.read() - # raise err - raise 
self._make_status_error(err.response) from None - except Exception as err: - raise err + log.debug("Encountered Exception", exc_info=True) + + if retries > 0: + return self._retry_request( + options, + cast_type, + retries, + stream=stream, + stream_cls=stream_cls, + response_headers=None, + ) + + log.debug("Raising connection error") + raise APIConnectionError(request=request) from err + + log.debug( + 'HTTP Request: %s %s "%i %s"', request.method, request.url, response.status_code, response.reason_phrase + ) - return self._parse_response( + try: + response.raise_for_status() + except httpx.HTTPStatusError as err: # thrown on 4xx and 5xx status code + log.debug("Encountered httpx.HTTPStatusError", exc_info=True) + + if retries > 0 and self._should_retry(err.response): + err.response.close() + return self._retry_request( + options, + cast_type, + retries, + err.response.headers, + stream=stream, + stream_cls=stream_cls, + ) + + # If the response is streamed then we need to explicitly read the response + # to completion before attempting to access the response text. 
+ if not err.response.is_closed: + err.response.read() + + log.debug("Re-raising status error") + raise self._make_status_error(err.response) from None + + # return self._parse_response( + # cast_type=cast_type, + # options=options, + # response=response, + # stream=stream, + # stream_cls=stream_cls, + # ) + return self._process_response( cast_type=cast_type, - request_param=params, + options=options, response=response, - enable_stream=enable_stream, + stream=stream, + stream_cls=stream_cls, + ) + + def _retry_request( + self, + options: FinalRequestOptions, + cast_type: type[ResponseT], + remaining_retries: int, + response_headers: httpx.Headers | None, + *, + stream: bool, + stream_cls: type[StreamResponse] | None, + ) -> ResponseT | StreamResponse: + remaining = remaining_retries - 1 + if remaining == 1: + log.debug("1 retry left") + else: + log.debug("%i retries left", remaining) + + timeout = self._calculate_retry_timeout(remaining, options, response_headers) + log.info("Retrying request to %s in %f seconds", options.url, timeout) + + # In a synchronous context we are blocking the entire thread. Up to the library user to run the client in a + # different thread if necessary. 
+ time.sleep(timeout) + + return self._request( + options=options, + cast_type=cast_type, + remaining_retries=remaining, + stream=stream, + stream_cls=stream_cls, + ) + + def _process_response( + self, + *, + cast_type: type[ResponseT], + options: FinalRequestOptions, + response: httpx.Response, + stream: bool, + stream_cls: type[StreamResponse] | None, + ) -> ResponseT: + # _legacy_response with raw_response_header to paser method + if response.request.headers.get(RAW_RESPONSE_HEADER) == "true": + return cast( + ResponseT, + LegacyAPIResponse( + raw=response, + client=self, + cast_type=cast_type, + stream=stream, + stream_cls=stream_cls, + options=options, + ), + ) + + origin = get_origin(cast_type) or cast_type + + if inspect.isclass(origin) and issubclass(origin, BaseAPIResponse): + if not issubclass(origin, APIResponse): + raise TypeError(f"API Response types must subclass {APIResponse}; Received {origin}") + + response_cls = cast("type[BaseAPIResponse[Any]]", cast_type) + return cast( + ResponseT, + response_cls( + raw=response, + client=self, + cast_type=extract_response_type(response_cls), + stream=stream, + stream_cls=stream_cls, + options=options, + ), + ) + + if cast_type == httpx.Response: + return cast(ResponseT, response) + + api_response = APIResponse( + raw=response, + client=self, + cast_type=cast("type[ResponseT]", cast_type), # pyright: ignore[reportUnnecessaryCast] + stream=stream, stream_cls=stream_cls, + options=options, ) + if bool(response.request.headers.get(RAW_RESPONSE_HEADER)): + return cast(ResponseT, api_response) + + return api_response.parse() + + def _request_api_list( + self, + model: type[object], + page: type[SyncPageT], + options: FinalRequestOptions, + ) -> SyncPageT: + def _parser(resp: SyncPageT) -> SyncPageT: + resp._set_private_attributes( + client=self, + model=model, + options=options, + ) + return resp + + options.post_parser = _parser + + return self.request(page, options, stream=False) + @overload def get( self, path: 
str, *, cast_type: type[ResponseT], options: UserRequestInput = {}, - enable_stream: bool = False, - ) -> ResponseT | StreamResponse: - opts = ClientRequestParam.construct(method="get", url=path, **options) - return self.request(cast_type=cast_type, params=opts, enable_stream=enable_stream) + stream: Literal[False] = False, + ) -> ResponseT: ... + @overload + def get( + self, + path: str, + *, + cast_type: type[ResponseT], + options: UserRequestInput = {}, + stream: Literal[True], + stream_cls: type[StreamResponse], + ) -> StreamResponse: ... + + @overload + def get( + self, + path: str, + *, + cast_type: type[ResponseT], + options: UserRequestInput = {}, + stream: bool, + stream_cls: type[StreamResponse] | None = None, + ) -> ResponseT | StreamResponse: ... + + def get( + self, + path: str, + *, + cast_type: type[ResponseT], + options: UserRequestInput = {}, + stream: bool = False, + stream_cls: type[StreamResponse] | None = None, + ) -> ResponseT: + opts = FinalRequestOptions.construct(method="get", url=path, **options) + return cast(ResponseT, self.request(cast_type, opts, stream=stream, stream_cls=stream_cls)) + + @overload def post( self, path: str, *, + cast_type: type[ResponseT], body: Body | None = None, + options: UserRequestInput = {}, + files: RequestFiles | None = None, + stream: Literal[False] = False, + ) -> ResponseT: ... + + @overload + def post( + self, + path: str, + *, cast_type: type[ResponseT], + body: Body | None = None, options: UserRequestInput = {}, files: RequestFiles | None = None, - enable_stream: bool = False, + stream: Literal[True], + stream_cls: type[StreamResponse], + ) -> StreamResponse: ... + + @overload + def post( + self, + path: str, + *, + cast_type: type[ResponseT], + body: Body | None = None, + options: UserRequestInput = {}, + files: RequestFiles | None = None, + stream: bool, + stream_cls: type[StreamResponse] | None = None, + ) -> ResponseT | StreamResponse: ... 
+ + def post( + self, + path: str, + *, + cast_type: type[ResponseT], + body: Body | None = None, + options: UserRequestInput = {}, + files: RequestFiles | None = None, + stream: bool = False, stream_cls: type[StreamResponse[Any]] | None = None, ) -> ResponseT | StreamResponse: - opts = ClientRequestParam.construct( - method="post", json_data=body, files=make_httpx_files(files), url=path, **options + opts = FinalRequestOptions.construct( + method="post", url=path, json_data=body, files=to_httpx_files(files), **options ) - return self.request(cast_type=cast_type, params=opts, enable_stream=enable_stream, stream_cls=stream_cls) + return cast(ResponseT, self.request(cast_type, opts, stream=stream, stream_cls=stream_cls)) def patch( self, path: str, *, - body: Body | None = None, cast_type: type[ResponseT], + body: Body | None = None, options: UserRequestInput = {}, ) -> ResponseT: - opts = ClientRequestParam.construct(method="patch", url=path, json_data=body, **options) + opts = FinalRequestOptions.construct(method="patch", url=path, json_data=body, **options) return self.request( cast_type=cast_type, - params=opts, + options=opts, ) def put( self, path: str, *, - body: Body | None = None, cast_type: type[ResponseT], + body: Body | None = None, options: UserRequestInput = {}, files: RequestFiles | None = None, ) -> ResponseT | StreamResponse: - opts = ClientRequestParam.construct( - method="put", url=path, json_data=body, files=make_httpx_files(files), **options + opts = FinalRequestOptions.construct( + method="put", url=path, json_data=body, files=to_httpx_files(files), **options ) return self.request( cast_type=cast_type, - params=opts, + options=opts, ) def delete( self, path: str, *, - body: Body | None = None, cast_type: type[ResponseT], + body: Body | None = None, options: UserRequestInput = {}, ) -> ResponseT | StreamResponse: - opts = ClientRequestParam.construct(method="delete", url=path, json_data=body, **options) + opts = 
FinalRequestOptions.construct(method="delete", url=path, json_data=body, **options) return self.request( cast_type=cast_type, - params=opts, + options=opts, ) + def get_api_list( + self, + path: str, + *, + model: type[object], + page: type[SyncPageT], + body: Body | None = None, + options: UserRequestInput = {}, + method: str = "get", + ) -> SyncPageT: + opts = FinalRequestOptions.construct(method=method, url=path, json_data=body, **options) + return self._request_api_list(model, page, opts) + def _make_status_error(self, response) -> APIStatusError: response_text = response.text.strip() status_code = response.status_code @@ -343,24 +865,46 @@ def _make_status_error(self, response) -> APIStatusError: return APIStatusError(message=error_msg, response=response) -def make_user_request_input( - max_retries: int | None = None, - timeout: float | Timeout | None | NotGiven = NOT_GIVEN, - extra_headers: Headers = None, - extra_body: Body | None = None, +def make_request_options( + *, query: Query | None = None, + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + post_parser: PostParser | NotGiven = NOT_GIVEN, ) -> UserRequestInput: + """Create a dict of type RequestOptions without keys of NotGiven values.""" options: UserRequestInput = {} - if extra_headers is not None: options["headers"] = extra_headers - if max_retries is not None: - options["max_retries"] = max_retries - if not isinstance(timeout, NotGiven): - options["timeout"] = timeout - if query is not None: - options["params"] = query + if extra_body is not None: options["extra_json"] = cast(AnyMapping, extra_body) + if query is not None: + options["params"] = query + + if extra_query is not None: + options["params"] = {**options.get("params", {}), **extra_query} + + if not isinstance(timeout, NotGiven): + options["timeout"] = timeout + + if is_given(post_parser): + # internal + 
options["post_parser"] = post_parser # type: ignore + return options + + +def _merge_mappings( + obj1: Mapping[_T_co, Union[_T, Omit]], + obj2: Mapping[_T_co, Union[_T, Omit]], +) -> dict[_T_co, _T]: + """Merge two mappings of the same type, removing any values that are instances of `Omit`. + + In cases with duplicate keys the second mapping takes precedence. + """ + merged = {**obj1, **obj2} + return {key: value for key, value in merged.items() if not isinstance(value, Omit)} diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_jwt_token.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_jwt_token.py index b0a91d04a9944..21f158a5f4525 100644 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_jwt_token.py +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_jwt_token.py @@ -3,9 +3,11 @@ import cachetools.func import jwt -API_TOKEN_TTL_SECONDS = 3 * 60 +# 缓存时间 3分钟 +CACHE_TTL_SECONDS = 3 * 60 -CACHE_TTL_SECONDS = API_TOKEN_TTL_SECONDS - 30 +# token 有效期比缓存时间 多30秒 +API_TOKEN_TTL_SECONDS = CACHE_TTL_SECONDS + 30 @cachetools.func.ttl_cache(maxsize=10, ttl=CACHE_TTL_SECONDS) diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_legacy_binary_response.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_legacy_binary_response.py new file mode 100644 index 0000000000000..51623bd860951 --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_legacy_binary_response.py @@ -0,0 +1,207 @@ +from __future__ import annotations + +import os +from collections.abc import AsyncIterator, Iterator +from typing import Any + +import httpx + + +class HttpxResponseContent: + @property + def content(self) -> bytes: + raise NotImplementedError("This method is not implemented for this class.") + + @property + def text(self) -> str: + raise NotImplementedError("This method is not implemented for this class.") + + @property + def encoding(self) -> 
str | None: + raise NotImplementedError("This method is not implemented for this class.") + + @property + def charset_encoding(self) -> str | None: + raise NotImplementedError("This method is not implemented for this class.") + + def json(self, **kwargs: Any) -> Any: + raise NotImplementedError("This method is not implemented for this class.") + + def read(self) -> bytes: + raise NotImplementedError("This method is not implemented for this class.") + + def iter_bytes(self, chunk_size: int | None = None) -> Iterator[bytes]: + raise NotImplementedError("This method is not implemented for this class.") + + def iter_text(self, chunk_size: int | None = None) -> Iterator[str]: + raise NotImplementedError("This method is not implemented for this class.") + + def iter_lines(self) -> Iterator[str]: + raise NotImplementedError("This method is not implemented for this class.") + + def iter_raw(self, chunk_size: int | None = None) -> Iterator[bytes]: + raise NotImplementedError("This method is not implemented for this class.") + + def write_to_file( + self, + file: str | os.PathLike[str], + ) -> None: + raise NotImplementedError("This method is not implemented for this class.") + + def stream_to_file( + self, + file: str | os.PathLike[str], + *, + chunk_size: int | None = None, + ) -> None: + raise NotImplementedError("This method is not implemented for this class.") + + def close(self) -> None: + raise NotImplementedError("This method is not implemented for this class.") + + async def aread(self) -> bytes: + raise NotImplementedError("This method is not implemented for this class.") + + async def aiter_bytes(self, chunk_size: int | None = None) -> AsyncIterator[bytes]: + raise NotImplementedError("This method is not implemented for this class.") + + async def aiter_text(self, chunk_size: int | None = None) -> AsyncIterator[str]: + raise NotImplementedError("This method is not implemented for this class.") + + async def aiter_lines(self) -> AsyncIterator[str]: + raise 
NotImplementedError("This method is not implemented for this class.") + + async def aiter_raw(self, chunk_size: int | None = None) -> AsyncIterator[bytes]: + raise NotImplementedError("This method is not implemented for this class.") + + async def astream_to_file( + self, + file: str | os.PathLike[str], + *, + chunk_size: int | None = None, + ) -> None: + raise NotImplementedError("This method is not implemented for this class.") + + async def aclose(self) -> None: + raise NotImplementedError("This method is not implemented for this class.") + + +class HttpxBinaryResponseContent(HttpxResponseContent): + response: httpx.Response + + def __init__(self, response: httpx.Response) -> None: + self.response = response + + @property + def content(self) -> bytes: + return self.response.content + + @property + def encoding(self) -> str | None: + return self.response.encoding + + @property + def charset_encoding(self) -> str | None: + return self.response.charset_encoding + + def read(self) -> bytes: + return self.response.read() + + def text(self) -> str: + raise NotImplementedError("Not implemented for binary response content") + + def json(self, **kwargs: Any) -> Any: + raise NotImplementedError("Not implemented for binary response content") + + def iter_text(self, chunk_size: int | None = None) -> Iterator[str]: + raise NotImplementedError("Not implemented for binary response content") + + def iter_lines(self) -> Iterator[str]: + raise NotImplementedError("Not implemented for binary response content") + + async def aiter_text(self, chunk_size: int | None = None) -> AsyncIterator[str]: + raise NotImplementedError("Not implemented for binary response content") + + async def aiter_lines(self) -> AsyncIterator[str]: + raise NotImplementedError("Not implemented for binary response content") + + def iter_bytes(self, chunk_size: int | None = None) -> Iterator[bytes]: + return self.response.iter_bytes(chunk_size) + + def iter_raw(self, chunk_size: int | None = None) -> 
Iterator[bytes]: + return self.response.iter_raw(chunk_size) + + def write_to_file( + self, + file: str | os.PathLike[str], + ) -> None: + """Write the output to the given file. + + Accepts a filename or any path-like object, e.g. pathlib.Path + + Note: if you want to stream the data to the file instead of writing + all at once then you should use `.with_streaming_response` when making + the API request, e.g. `client.with_streaming_response.foo().stream_to_file('my_filename.txt')` + """ + with open(file, mode="wb") as f: + for data in self.response.iter_bytes(): + f.write(data) + + def stream_to_file( + self, + file: str | os.PathLike[str], + *, + chunk_size: int | None = None, + ) -> None: + with open(file, mode="wb") as f: + for data in self.response.iter_bytes(chunk_size): + f.write(data) + + def close(self) -> None: + return self.response.close() + + async def aread(self) -> bytes: + return await self.response.aread() + + async def aiter_bytes(self, chunk_size: int | None = None) -> AsyncIterator[bytes]: + return self.response.aiter_bytes(chunk_size) + + async def aiter_raw(self, chunk_size: int | None = None) -> AsyncIterator[bytes]: + return self.response.aiter_raw(chunk_size) + + async def astream_to_file( + self, + file: str | os.PathLike[str], + *, + chunk_size: int | None = None, + ) -> None: + path = anyio.Path(file) + async with await path.open(mode="wb") as f: + async for data in self.response.aiter_bytes(chunk_size): + await f.write(data) + + async def aclose(self) -> None: + return await self.response.aclose() + + +class HttpxTextBinaryResponseContent(HttpxBinaryResponseContent): + response: httpx.Response + + @property + def text(self) -> str: + return self.response.text + + def json(self, **kwargs: Any) -> Any: + return self.response.json(**kwargs) + + def iter_text(self, chunk_size: int | None = None) -> Iterator[str]: + return self.response.iter_text(chunk_size) + + def iter_lines(self) -> Iterator[str]: + return self.response.iter_lines() + + 
async def aiter_text(self, chunk_size: int | None = None) -> AsyncIterator[str]: + return self.response.aiter_text(chunk_size) + + async def aiter_lines(self) -> AsyncIterator[str]: + return self.response.aiter_lines() diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_legacy_response.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_legacy_response.py new file mode 100644 index 0000000000000..47183b9eee9c0 --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_legacy_response.py @@ -0,0 +1,341 @@ +from __future__ import annotations + +import datetime +import functools +import inspect +import logging +from collections.abc import Callable +from typing import TYPE_CHECKING, Any, Generic, TypeVar, Union, cast, get_origin, overload + +import httpx +import pydantic +from typing_extensions import ParamSpec, override + +from ._base_models import BaseModel, is_basemodel +from ._base_type import NoneType +from ._constants import RAW_RESPONSE_HEADER +from ._errors import APIResponseValidationError +from ._legacy_binary_response import HttpxResponseContent, HttpxTextBinaryResponseContent +from ._sse_client import StreamResponse, extract_stream_chunk_type, is_stream_class_type +from ._utils import extract_type_arg, is_annotated_type, is_given + +if TYPE_CHECKING: + from ._http_client import HttpClient + from ._request_opt import FinalRequestOptions + +P = ParamSpec("P") +R = TypeVar("R") +_T = TypeVar("_T") + +log: logging.Logger = logging.getLogger(__name__) + + +class LegacyAPIResponse(Generic[R]): + """This is a legacy class as it will be replaced by `APIResponse` + and `AsyncAPIResponse` in the `_response.py` file in the next major + release. + + For the sync client this will mostly be the same with the exception + of `content` & `text` will be methods instead of properties. In the + async client, all methods will be async. 
+ + A migration script will be provided & the migration in general should + be smooth. + """ + + _cast_type: type[R] + _client: HttpClient + _parsed_by_type: dict[type[Any], Any] + _stream: bool + _stream_cls: type[StreamResponse[Any]] | None + _options: FinalRequestOptions + + http_response: httpx.Response + + def __init__( + self, + *, + raw: httpx.Response, + cast_type: type[R], + client: HttpClient, + stream: bool, + stream_cls: type[StreamResponse[Any]] | None, + options: FinalRequestOptions, + ) -> None: + self._cast_type = cast_type + self._client = client + self._parsed_by_type = {} + self._stream = stream + self._stream_cls = stream_cls + self._options = options + self.http_response = raw + + @property + def request_id(self) -> str | None: + return self.http_response.headers.get("x-request-id") # type: ignore[no-any-return] + + @overload + def parse(self, *, to: type[_T]) -> _T: ... + + @overload + def parse(self) -> R: ... + + def parse(self, *, to: type[_T] | None = None) -> R | _T: + """Returns the rich python representation of this response's data. + + NOTE: For the async client: this will become a coroutine in the next major version. + + For lower-level control, see `.read()`, `.json()`, `.iter_bytes()`. + + You can customise the type that the response is parsed into through + the `to` argument, e.g. 
+ + ```py + from zhipuai import BaseModel + + + class MyModel(BaseModel): + foo: str + + + obj = response.parse(to=MyModel) + print(obj.foo) + ``` + + We support parsing: + - `BaseModel` + - `dict` + - `list` + - `Union` + - `str` + - `int` + - `float` + - `httpx.Response` + """ + cache_key = to if to is not None else self._cast_type + cached = self._parsed_by_type.get(cache_key) + if cached is not None: + return cached # type: ignore[no-any-return] + + parsed = self._parse(to=to) + if is_given(self._options.post_parser): + parsed = self._options.post_parser(parsed) + + self._parsed_by_type[cache_key] = parsed + return parsed + + @property + def headers(self) -> httpx.Headers: + return self.http_response.headers + + @property + def http_request(self) -> httpx.Request: + return self.http_response.request + + @property + def status_code(self) -> int: + return self.http_response.status_code + + @property + def url(self) -> httpx.URL: + return self.http_response.url + + @property + def method(self) -> str: + return self.http_request.method + + @property + def content(self) -> bytes: + """Return the binary response content. + + NOTE: this will be removed in favour of `.read()` in the + next major version. + """ + return self.http_response.content + + @property + def text(self) -> str: + """Return the decoded response content. + + NOTE: this will be turned into a method in the next major version. 
+ """ + return self.http_response.text + + @property + def http_version(self) -> str: + return self.http_response.http_version + + @property + def is_closed(self) -> bool: + return self.http_response.is_closed + + @property + def elapsed(self) -> datetime.timedelta: + """The time taken for the complete request/response cycle to complete.""" + return self.http_response.elapsed + + def _parse(self, *, to: type[_T] | None = None) -> R | _T: + # unwrap `Annotated[T, ...]` -> `T` + if to and is_annotated_type(to): + to = extract_type_arg(to, 0) + + if self._stream: + if to: + if not is_stream_class_type(to): + raise TypeError(f"Expected custom parse type to be a subclass of {StreamResponse}") + + return cast( + _T, + to( + cast_type=extract_stream_chunk_type( + to, + failure_message="Expected custom stream type to be passed with a type argument, e.g. StreamResponse[ChunkType]", # noqa: E501 + ), + response=self.http_response, + client=cast(Any, self._client), + ), + ) + + if self._stream_cls: + return cast( + R, + self._stream_cls( + cast_type=extract_stream_chunk_type(self._stream_cls), + response=self.http_response, + client=cast(Any, self._client), + ), + ) + + stream_cls = cast("type[StreamResponse[Any]] | None", self._client._default_stream_cls) + if stream_cls is None: + raise MissingStreamClassError() + + return cast( + R, + stream_cls( + cast_type=self._cast_type, + response=self.http_response, + client=cast(Any, self._client), + ), + ) + + cast_type = to if to is not None else self._cast_type + + # unwrap `Annotated[T, ...]` -> `T` + if is_annotated_type(cast_type): + cast_type = extract_type_arg(cast_type, 0) + + if cast_type is NoneType: + return cast(R, None) + + response = self.http_response + if cast_type == str: + return cast(R, response.text) + + if cast_type == int: + return cast(R, int(response.text)) + + if cast_type == float: + return cast(R, float(response.text)) + + origin = get_origin(cast_type) or cast_type + + if inspect.isclass(origin) and 
issubclass(origin, HttpxResponseContent): + # in the response, e.g. mime file + *_, filename = response.headers.get("content-disposition", "").split("filename=") + # 判断文件类型是jsonl类型的使用HttpxTextBinaryResponseContent + if filename and filename.endswith(".jsonl") or filename and filename.endswith(".xlsx"): + return cast(R, HttpxTextBinaryResponseContent(response)) + else: + return cast(R, cast_type(response)) # type: ignore + + if origin == LegacyAPIResponse: + raise RuntimeError("Unexpected state - cast_type is `APIResponse`") + + if inspect.isclass(origin) and issubclass(origin, httpx.Response): + # Because of the invariance of our ResponseT TypeVar, users can subclass httpx.Response + # and pass that class to our request functions. We cannot change the variance to be either + # covariant or contravariant as that makes our usage of ResponseT illegal. We could construct + # the response class ourselves but that is something that should be supported directly in httpx + # as it would be easy to incorrectly construct the Response object due to the multitude of arguments. + if cast_type != httpx.Response: + raise ValueError("Subclasses of httpx.Response cannot be passed to `cast_type`") + return cast(R, response) + + if inspect.isclass(origin) and not issubclass(origin, BaseModel) and issubclass(origin, pydantic.BaseModel): + raise TypeError("Pydantic models must subclass our base model type, e.g. `from openai import BaseModel`") + + if ( + cast_type is not object + and origin is not list + and origin is not dict + and origin is not Union + and not issubclass(origin, BaseModel) + ): + raise RuntimeError( + f"Unsupported type, expected {cast_type} to be a subclass of {BaseModel}, {dict}, {list}, {Union}, {NoneType}, {str} or {httpx.Response}." # noqa: E501 + ) + + # split is required to handle cases where additional information is included + # in the response, e.g. 
application/json; charset=utf-8 + content_type, *_ = response.headers.get("content-type", "*").split(";") + if content_type != "application/json": + if is_basemodel(cast_type): + try: + data = response.json() + except Exception as exc: + log.debug("Could not read JSON from response data due to %s - %s", type(exc), exc) + else: + return self._client._process_response_data( + data=data, + cast_type=cast_type, # type: ignore + response=response, + ) + + if self._client._strict_response_validation: + raise APIResponseValidationError( + response=response, + message=f"Expected Content-Type response header to be `application/json` but received `{content_type}` instead.", # noqa: E501 + json_data=response.text, + ) + + # If the API responds with content that isn't JSON then we just return + # the (decoded) text without performing any parsing so that you can still + # handle the response however you need to. + return response.text # type: ignore + + data = response.json() + + return self._client._process_response_data( + data=data, + cast_type=cast_type, # type: ignore + response=response, + ) + + @override + def __repr__(self) -> str: + return f"" + + +class MissingStreamClassError(TypeError): + def __init__(self) -> None: + super().__init__( + "The `stream` argument was set to `True` but the `stream_cls` argument was not given. See `openai._streaming` for reference", # noqa: E501 + ) + + +def to_raw_response_wrapper(func: Callable[P, R]) -> Callable[P, LegacyAPIResponse[R]]: + """Higher order function that takes one of our bound API methods and wraps it + to support returning the raw `APIResponse` object directly. 
+ """ + + @functools.wraps(func) + def wrapped(*args: P.args, **kwargs: P.kwargs) -> LegacyAPIResponse[R]: + extra_headers: dict[str, str] = {**(cast(Any, kwargs.get("extra_headers")) or {})} + extra_headers[RAW_RESPONSE_HEADER] = "true" + + kwargs["extra_headers"] = extra_headers + + return cast(LegacyAPIResponse[R], func(*args, **kwargs)) + + return wrapped diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_request_opt.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_request_opt.py index ac459151fc3a4..c3b894b3a3d88 100644 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_request_opt.py +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_request_opt.py @@ -1,48 +1,97 @@ from __future__ import annotations -from typing import Any, ClassVar, Union +from collections.abc import Callable +from typing import TYPE_CHECKING, Any, ClassVar, Union, cast +import pydantic.generics from httpx import Timeout -from pydantic import ConfigDict -from typing_extensions import TypedDict, Unpack +from typing_extensions import Required, TypedDict, Unpack, final -from ._base_type import Body, Headers, HttpxRequestFiles, NotGiven, Query -from ._utils import remove_notgiven_indict +from ._base_compat import PYDANTIC_V2, ConfigDict +from ._base_type import AnyMapping, Body, Headers, HttpxRequestFiles, NotGiven, Query +from ._constants import RAW_RESPONSE_HEADER +from ._utils import is_given, strip_not_given class UserRequestInput(TypedDict, total=False): + headers: Headers max_retries: int timeout: float | Timeout | None + params: Query + extra_json: AnyMapping + + +class FinalRequestOptionsInput(TypedDict, total=False): + method: Required[str] + url: Required[str] + params: Query headers: Headers - params: Query | None + max_retries: int + timeout: float | Timeout | None + files: HttpxRequestFiles | None + json_data: Body + extra_json: AnyMapping -class ClientRequestParam: +@final +class 
FinalRequestOptions(pydantic.BaseModel): method: str url: str - max_retries: Union[int, NotGiven] = NotGiven() - timeout: Union[float, NotGiven] = NotGiven() + params: Query = {} headers: Union[Headers, NotGiven] = NotGiven() - json_data: Union[Body, None] = None + max_retries: Union[int, NotGiven] = NotGiven() + timeout: Union[float, Timeout, None, NotGiven] = NotGiven() files: Union[HttpxRequestFiles, None] = None - params: Query = {} - model_config: ClassVar[ConfigDict] = ConfigDict(arbitrary_types_allowed=True) + idempotency_key: Union[str, None] = None + post_parser: Union[Callable[[Any], Any], NotGiven] = NotGiven() + + # It should be noted that we cannot use `json` here as that would override + # a BaseModel method in an incompatible fashion. + json_data: Union[Body, None] = None + extra_json: Union[AnyMapping, None] = None + + if PYDANTIC_V2: + model_config: ClassVar[ConfigDict] = ConfigDict(arbitrary_types_allowed=True) + else: - def get_max_retries(self, max_retries) -> int: + class Config(pydantic.BaseConfig): # pyright: ignore[reportDeprecated] + arbitrary_types_allowed: bool = True + + def get_max_retries(self, max_retries: int) -> int: if isinstance(self.max_retries, NotGiven): return max_retries return self.max_retries + def _strip_raw_response_header(self) -> None: + if not is_given(self.headers): + return + + if self.headers.get(RAW_RESPONSE_HEADER): + self.headers = {**self.headers} + self.headers.pop(RAW_RESPONSE_HEADER) + + # override the `construct` method so that we can run custom transformations. 
+ # this is necessary as we don't want to do any actual runtime type checking + # (which means we can't use validators) but we do want to ensure that `NotGiven` + # values are not present + # + # type ignore required because we're adding explicit types to `**values` @classmethod def construct( # type: ignore cls, _fields_set: set[str] | None = None, **values: Unpack[UserRequestInput], - ) -> ClientRequestParam: - kwargs: dict[str, Any] = {key: remove_notgiven_indict(value) for key, value in values.items()} - client = cls() - client.__dict__.update(kwargs) - - return client + ) -> FinalRequestOptions: + kwargs: dict[str, Any] = { + # we unconditionally call `strip_not_given` on any value + # as it will just ignore any non-mapping types + key: strip_not_given(value) + for key, value in values.items() + } + if PYDANTIC_V2: + return super().model_construct(_fields_set, **kwargs) + return cast(FinalRequestOptions, super().construct(_fields_set, **kwargs)) # pyright: ignore[reportDeprecated] - model_construct = construct + if not TYPE_CHECKING: + # type checkers incorrectly complain about this assignment + model_construct = construct diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_response.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_response.py index 56e60a793407c..45443da662d57 100644 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_response.py +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_response.py @@ -1,87 +1,193 @@ from __future__ import annotations import datetime -from typing import TYPE_CHECKING, Any, Generic, TypeVar, cast, get_args, get_origin +import inspect +import logging +from collections.abc import Iterator +from typing import TYPE_CHECKING, Any, Generic, TypeVar, Union, cast, get_origin, overload import httpx import pydantic -from typing_extensions import ParamSpec +from typing_extensions import ParamSpec, override +from ._base_models import BaseModel, 
is_basemodel from ._base_type import NoneType -from ._sse_client import StreamResponse +from ._errors import APIResponseValidationError, ZhipuAIError +from ._sse_client import StreamResponse, extract_stream_chunk_type, is_stream_class_type +from ._utils import extract_type_arg, extract_type_var_from_base, is_annotated_type, is_given if TYPE_CHECKING: from ._http_client import HttpClient + from ._request_opt import FinalRequestOptions P = ParamSpec("P") R = TypeVar("R") +_T = TypeVar("_T") +_APIResponseT = TypeVar("_APIResponseT", bound="APIResponse[Any]") +log: logging.Logger = logging.getLogger(__name__) -class HttpResponse(Generic[R]): +class BaseAPIResponse(Generic[R]): _cast_type: type[R] _client: HttpClient - _parsed: R | None - _enable_stream: bool + _parsed_by_type: dict[type[Any], Any] + _is_sse_stream: bool _stream_cls: type[StreamResponse[Any]] + _options: FinalRequestOptions http_response: httpx.Response def __init__( self, *, - raw_response: httpx.Response, + raw: httpx.Response, cast_type: type[R], client: HttpClient, - enable_stream: bool = False, + stream: bool, stream_cls: type[StreamResponse[Any]] | None = None, + options: FinalRequestOptions, ) -> None: self._cast_type = cast_type self._client = client - self._parsed = None + self._parsed_by_type = {} + self._is_sse_stream = stream self._stream_cls = stream_cls - self._enable_stream = enable_stream - self.http_response = raw_response + self._options = options + self.http_response = raw - def parse(self) -> R: - self._parsed = self._parse() - return self._parsed + def _parse(self, *, to: type[_T] | None = None) -> R | _T: + # unwrap `Annotated[T, ...]` -> `T` + if to and is_annotated_type(to): + to = extract_type_arg(to, 0) - def _parse(self) -> R: - if self._enable_stream: - self._parsed = cast( + if self._is_sse_stream: + if to: + if not is_stream_class_type(to): + raise TypeError(f"Expected custom parse type to be a subclass of {StreamResponse}") + + return cast( + _T, + to( + 
cast_type=extract_stream_chunk_type( + to, + failure_message="Expected custom stream type to be passed with a type argument, e.g. StreamResponse[ChunkType]", # noqa: E501 + ), + response=self.http_response, + client=cast(Any, self._client), + ), + ) + + if self._stream_cls: + return cast( + R, + self._stream_cls( + cast_type=extract_stream_chunk_type(self._stream_cls), + response=self.http_response, + client=cast(Any, self._client), + ), + ) + + stream_cls = cast("type[Stream[Any]] | None", self._client._default_stream_cls) + if stream_cls is None: + raise MissingStreamClassError() + + return cast( R, - self._stream_cls( - cast_type=cast(type, get_args(self._stream_cls)[0]), + stream_cls( + cast_type=self._cast_type, response=self.http_response, - client=self._client, + client=cast(Any, self._client), ), ) - return self._parsed - cast_type = self._cast_type + + cast_type = to if to is not None else self._cast_type + + # unwrap `Annotated[T, ...]` -> `T` + if is_annotated_type(cast_type): + cast_type = extract_type_arg(cast_type, 0) + if cast_type is NoneType: return cast(R, None) - http_response = self.http_response + + response = self.http_response if cast_type == str: - return cast(R, http_response.text) + return cast(R, response.text) + + if cast_type == bytes: + return cast(R, response.content) + + if cast_type == int: + return cast(R, int(response.text)) + + if cast_type == float: + return cast(R, float(response.text)) - content_type, *_ = http_response.headers.get("content-type", "application/json").split(";") origin = get_origin(cast_type) or cast_type + + # handle the legacy binary response case + if inspect.isclass(cast_type) and cast_type.__name__ == "HttpxBinaryResponseContent": + return cast(R, cast_type(response)) # type: ignore + + if origin == APIResponse: + raise RuntimeError("Unexpected state - cast_type is `APIResponse`") + + if inspect.isclass(origin) and issubclass(origin, httpx.Response): + # Because of the invariance of our ResponseT TypeVar, 
users can subclass httpx.Response + # and pass that class to our request functions. We cannot change the variance to be either + # covariant or contravariant as that makes our usage of ResponseT illegal. We could construct + # the response class ourselves but that is something that should be supported directly in httpx + # as it would be easy to incorrectly construct the Response object due to the multitude of arguments. + if cast_type != httpx.Response: + raise ValueError("Subclasses of httpx.Response cannot be passed to `cast_type`") + return cast(R, response) + + if inspect.isclass(origin) and not issubclass(origin, BaseModel) and issubclass(origin, pydantic.BaseModel): + raise TypeError("Pydantic models must subclass our base model type, e.g. `from openai import BaseModel`") + + if ( + cast_type is not object + and origin is not list + and origin is not dict + and origin is not Union + and not issubclass(origin, BaseModel) + ): + raise RuntimeError( + f"Unsupported type, expected {cast_type} to be a subclass of {BaseModel}, {dict}, {list}, {Union}, {NoneType}, {str} or {httpx.Response}." # noqa: E501 + ) + + # split is required to handle cases where additional information is included + # in the response, e.g. 
application/json; charset=utf-8 + content_type, *_ = response.headers.get("content-type", "*").split(";") if content_type != "application/json": - if issubclass(origin, pydantic.BaseModel): - data = http_response.json() - return self._client._process_response_data( - data=data, - cast_type=cast_type, # type: ignore - response=http_response, + if is_basemodel(cast_type): + try: + data = response.json() + except Exception as exc: + log.debug("Could not read JSON from response data due to %s - %s", type(exc), exc) + else: + return self._client._process_response_data( + data=data, + cast_type=cast_type, # type: ignore + response=response, + ) + + if self._client._strict_response_validation: + raise APIResponseValidationError( + response=response, + message=f"Expected Content-Type response header to be `application/json` but received `{content_type}` instead.", # noqa: E501 + json_data=response.text, ) - return http_response.text + # If the API responds with content that isn't JSON then we just return + # the (decoded) text without performing any parsing so that you can still + # handle the response however you need to. 
+ return response.text # type: ignore - data = http_response.json() + data = response.json() return self._client._process_response_data( data=data, cast_type=cast_type, # type: ignore - response=http_response, + response=response, ) @property @@ -90,6 +196,7 @@ def headers(self) -> httpx.Headers: @property def http_request(self) -> httpx.Request: + """Returns the httpx Request instance associated with the current response.""" return self.http_response.request @property @@ -98,24 +205,194 @@ def status_code(self) -> int: @property def url(self) -> httpx.URL: + """Returns the URL for which the request was made.""" return self.http_response.url @property def method(self) -> str: return self.http_request.method - @property - def content(self) -> bytes: - return self.http_response.content - - @property - def text(self) -> str: - return self.http_response.text - @property def http_version(self) -> str: return self.http_response.http_version @property def elapsed(self) -> datetime.timedelta: + """The time taken for the complete request/response cycle to complete.""" return self.http_response.elapsed + + @property + def is_closed(self) -> bool: + """Whether or not the response body has been closed. + + If this is False then there is response data that has not been read yet. + You must either fully consume the response body or call `.close()` + before discarding the response to prevent resource leaks. + """ + return self.http_response.is_closed + + @override + def __repr__(self) -> str: + return f"<{self.__class__.__name__} [{self.status_code} {self.http_response.reason_phrase}] type={self._cast_type}>" # noqa: E501 + + +class APIResponse(BaseAPIResponse[R]): + @property + def request_id(self) -> str | None: + return self.http_response.headers.get("x-request-id") # type: ignore[no-any-return] + + @overload + def parse(self, *, to: type[_T]) -> _T: ... + + @overload + def parse(self) -> R: ... 
+ + def parse(self, *, to: type[_T] | None = None) -> R | _T: + """Returns the rich python representation of this response's data. + + For lower-level control, see `.read()`, `.json()`, `.iter_bytes()`. + + You can customise the type that the response is parsed into through + the `to` argument, e.g. + + ```py + from openai import BaseModel + + + class MyModel(BaseModel): + foo: str + + + obj = response.parse(to=MyModel) + print(obj.foo) + ``` + + We support parsing: + - `BaseModel` + - `dict` + - `list` + - `Union` + - `str` + - `int` + - `float` + - `httpx.Response` + """ + cache_key = to if to is not None else self._cast_type + cached = self._parsed_by_type.get(cache_key) + if cached is not None: + return cached # type: ignore[no-any-return] + + if not self._is_sse_stream: + self.read() + + parsed = self._parse(to=to) + if is_given(self._options.post_parser): + parsed = self._options.post_parser(parsed) + + self._parsed_by_type[cache_key] = parsed + return parsed + + def read(self) -> bytes: + """Read and return the binary response content.""" + try: + return self.http_response.read() + except httpx.StreamConsumed as exc: + # The default error raised by httpx isn't very + # helpful in our case so we re-raise it with + # a different error message. + raise StreamAlreadyConsumed() from exc + + def text(self) -> str: + """Read and decode the response content into a string.""" + self.read() + return self.http_response.text + + def json(self) -> object: + """Read and decode the JSON response content.""" + self.read() + return self.http_response.json() + + def close(self) -> None: + """Close the response and release the connection. + + Automatically called if the response body is read to completion. + """ + self.http_response.close() + + def iter_bytes(self, chunk_size: int | None = None) -> Iterator[bytes]: + """ + A byte-iterator over the decoded response content. + + This automatically handles gzip, deflate and brotli encoded responses. 
+ """ + yield from self.http_response.iter_bytes(chunk_size) + + def iter_text(self, chunk_size: int | None = None) -> Iterator[str]: + """A str-iterator over the decoded response content + that handles both gzip, deflate, etc but also detects the content's + string encoding. + """ + yield from self.http_response.iter_text(chunk_size) + + def iter_lines(self) -> Iterator[str]: + """Like `iter_text()` but will only yield chunks for each line""" + yield from self.http_response.iter_lines() + + +class MissingStreamClassError(TypeError): + def __init__(self) -> None: + super().__init__( + "The `stream` argument was set to `True` but the `stream_cls` argument was not given. See `openai._streaming` for reference", # noqa: E501 + ) + + +class StreamAlreadyConsumed(ZhipuAIError): # noqa: N818 + """ + Attempted to read or stream content, but the content has already + been streamed. + + This can happen if you use a method like `.iter_lines()` and then attempt + to read th entire response body afterwards, e.g. + + ```py + response = await client.post(...) + async for line in response.iter_lines(): + ... # do something with `line` + + content = await response.read() + # ^ error + ``` + + If you want this behaviour you'll need to either manually accumulate the response + content or call `await response.read()` before iterating over the stream. + """ + + def __init__(self) -> None: + message = ( + "Attempted to read or stream some content, but the content has " + "already been streamed. " + "This could be due to attempting to stream the response " + "content more than once." + "\n\n" + "You can fix this by manually accumulating the response content while streaming " + "or by calling `.read()` before starting to stream." + ) + super().__init__(message) + + +def extract_response_type(typ: type[BaseAPIResponse[Any]]) -> type: + """Given a type like `APIResponse[T]`, returns the generic type variable `T`. + + This also handles the case where a concrete subclass is given, e.g. 
+ ```py + class MyResponse(APIResponse[bytes]): + ... + + extract_response_type(MyResponse) -> bytes + ``` + """ + return extract_type_var_from_base( + typ, + generic_bases=cast("tuple[type, ...]", (BaseAPIResponse, APIResponse)), + index=0, + ) diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_sse_client.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_sse_client.py index ec2745d05912d..cbc449d24421d 100644 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_sse_client.py +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_sse_client.py @@ -1,13 +1,16 @@ from __future__ import annotations +import inspect import json from collections.abc import Iterator, Mapping -from typing import TYPE_CHECKING, Generic +from typing import TYPE_CHECKING, Generic, TypeGuard, cast import httpx +from . import get_origin from ._base_type import ResponseT from ._errors import APIResponseError +from ._utils import extract_type_var_from_base, is_mapping _FIELD_SEPARATOR = ":" @@ -53,8 +56,41 @@ def __stream__(self) -> Iterator[ResponseT]: request=self.response.request, json_data=data["error"], ) + if sse.event is None: + data = sse.json_data() + if is_mapping(data) and data.get("error"): + message = None + error = data.get("error") + if is_mapping(error): + message = error.get("message") + if not message or not isinstance(message, str): + message = "An error occurred during streaming" + raise APIResponseError( + message=message, + request=self.response.request, + json_data=data["error"], + ) yield self._data_process_func(data=data, cast_type=self._cast_type, response=self.response) + + else: + data = sse.json_data() + + if sse.event == "error" and is_mapping(data) and data.get("error"): + message = None + error = data.get("error") + if is_mapping(error): + message = error.get("message") + if not message or not isinstance(message, str): + message = "An error occurred during streaming" + + raise 
APIResponseError( + message=message, + request=self.response.request, + json_data=data["error"], + ) + yield self._data_process_func(data=data, cast_type=self._cast_type, response=self.response) + for sse in iterator: pass @@ -138,3 +174,33 @@ def decode_line(self, line: str): except (TypeError, ValueError): pass return + + +def is_stream_class_type(typ: type) -> TypeGuard[type[StreamResponse[object]]]: + """TypeGuard for determining whether or not the given type is a subclass of `Stream` / `AsyncStream`""" + origin = get_origin(typ) or typ + return inspect.isclass(origin) and issubclass(origin, StreamResponse) + + +def extract_stream_chunk_type( + stream_cls: type, + *, + failure_message: str | None = None, +) -> type: + """Given a type like `StreamResponse[T]`, returns the generic type variable `T`. + + This also handles the case where a concrete subclass is given, e.g. + ```py + class MyStream(StreamResponse[bytes]): + ... + + extract_stream_chunk_type(MyStream) -> bytes + ``` + """ + + return extract_type_var_from_base( + stream_cls, + index=0, + generic_bases=cast("tuple[type, ...]", (StreamResponse,)), + failure_message=failure_message, + ) diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_utils.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_utils.py deleted file mode 100644 index 6b610567daa09..0000000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_utils.py +++ /dev/null @@ -1,19 +0,0 @@ -from __future__ import annotations - -from collections.abc import Iterable, Mapping -from typing import TypeVar - -from ._base_type import NotGiven - - -def remove_notgiven_indict(obj): - if obj is None or (not isinstance(obj, Mapping)): - return obj - return {key: value for key, value in obj.items() if not isinstance(value, NotGiven)} - - -_T = TypeVar("_T") - - -def flatten(t: Iterable[Iterable[_T]]) -> list[_T]: - return [item for sublist in t for item in sublist] diff --git 
a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_utils/__init__.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_utils/__init__.py new file mode 100644 index 0000000000000..a66b095816b8b --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_utils/__init__.py @@ -0,0 +1,52 @@ +from ._utils import ( # noqa: I001 + remove_notgiven_indict as remove_notgiven_indict, # noqa: PLC0414 + flatten as flatten, # noqa: PLC0414 + is_dict as is_dict, # noqa: PLC0414 + is_list as is_list, # noqa: PLC0414 + is_given as is_given, # noqa: PLC0414 + is_tuple as is_tuple, # noqa: PLC0414 + is_mapping as is_mapping, # noqa: PLC0414 + is_tuple_t as is_tuple_t, # noqa: PLC0414 + parse_date as parse_date, # noqa: PLC0414 + is_iterable as is_iterable, # noqa: PLC0414 + is_sequence as is_sequence, # noqa: PLC0414 + coerce_float as coerce_float, # noqa: PLC0414 + is_mapping_t as is_mapping_t, # noqa: PLC0414 + removeprefix as removeprefix, # noqa: PLC0414 + removesuffix as removesuffix, # noqa: PLC0414 + extract_files as extract_files, # noqa: PLC0414 + is_sequence_t as is_sequence_t, # noqa: PLC0414 + required_args as required_args, # noqa: PLC0414 + coerce_boolean as coerce_boolean, # noqa: PLC0414 + coerce_integer as coerce_integer, # noqa: PLC0414 + file_from_path as file_from_path, # noqa: PLC0414 + parse_datetime as parse_datetime, # noqa: PLC0414 + strip_not_given as strip_not_given, # noqa: PLC0414 + deepcopy_minimal as deepcopy_minimal, # noqa: PLC0414 + get_async_library as get_async_library, # noqa: PLC0414 + maybe_coerce_float as maybe_coerce_float, # noqa: PLC0414 + get_required_header as get_required_header, # noqa: PLC0414 + maybe_coerce_boolean as maybe_coerce_boolean, # noqa: PLC0414 + maybe_coerce_integer as maybe_coerce_integer, # noqa: PLC0414 + drop_prefix_image_data as drop_prefix_image_data, # noqa: PLC0414 +) + + +from ._typing import ( + is_list_type as is_list_type, # noqa: PLC0414 + is_union_type 
as is_union_type, # noqa: PLC0414 + extract_type_arg as extract_type_arg, # noqa: PLC0414 + is_iterable_type as is_iterable_type, # noqa: PLC0414 + is_required_type as is_required_type, # noqa: PLC0414 + is_annotated_type as is_annotated_type, # noqa: PLC0414 + strip_annotated_type as strip_annotated_type, # noqa: PLC0414 + extract_type_var_from_base as extract_type_var_from_base, # noqa: PLC0414 +) + +from ._transform import ( + PropertyInfo as PropertyInfo, # noqa: PLC0414 + transform as transform, # noqa: PLC0414 + async_transform as async_transform, # noqa: PLC0414 + maybe_transform as maybe_transform, # noqa: PLC0414 + async_maybe_transform as async_maybe_transform, # noqa: PLC0414 +) diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_utils/_transform.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_utils/_transform.py new file mode 100644 index 0000000000000..e8ef1f79358a9 --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_utils/_transform.py @@ -0,0 +1,383 @@ +from __future__ import annotations + +import base64 +import io +import pathlib +from collections.abc import Mapping +from datetime import date, datetime +from typing import Any, Literal, TypeVar, cast, get_args, get_type_hints + +import anyio +import pydantic +from typing_extensions import override + +from .._base_compat import is_typeddict, model_dump +from .._files import is_base64_file_input +from ._typing import ( + extract_type_arg, + is_annotated_type, + is_iterable_type, + is_list_type, + is_required_type, + is_union_type, + strip_annotated_type, +) +from ._utils import ( + is_iterable, + is_list, + is_mapping, +) + +_T = TypeVar("_T") + + +# TODO: support for drilling globals() and locals() +# TODO: ensure works correctly with forward references in all cases + + +PropertyFormat = Literal["iso8601", "base64", "custom"] + + +class PropertyInfo: + """Metadata class to be used in Annotated types to provide information 
about a given type. + + For example: + + class MyParams(TypedDict): + account_holder_name: Annotated[str, PropertyInfo(alias='accountHolderName')] + + This means that {'account_holder_name': 'Robert'} will be transformed to {'accountHolderName': 'Robert'} before being sent to the API. + """ # noqa: E501 + + alias: str | None + format: PropertyFormat | None + format_template: str | None + discriminator: str | None + + def __init__( + self, + *, + alias: str | None = None, + format: PropertyFormat | None = None, + format_template: str | None = None, + discriminator: str | None = None, + ) -> None: + self.alias = alias + self.format = format + self.format_template = format_template + self.discriminator = discriminator + + @override + def __repr__(self) -> str: + return f"{self.__class__.__name__}(alias='{self.alias}', format={self.format}, format_template='{self.format_template}', discriminator='{self.discriminator}')" # noqa: E501 + + +def maybe_transform( + data: object, + expected_type: object, +) -> Any | None: + """Wrapper over `transform()` that allows `None` to be passed. + + See `transform()` for more details. + """ + if data is None: + return None + return transform(data, expected_type) + + +# Wrapper over _transform_recursive providing fake types +def transform( + data: _T, + expected_type: object, +) -> _T: + """Transform dictionaries based off of type information from the given type, for example: + + ```py + class Params(TypedDict, total=False): + card_id: Required[Annotated[str, PropertyInfo(alias="cardID")]] + + + transformed = transform({"card_id": ""}, Params) + # {'cardID': ''} + ``` + + Any keys / data that does not have type information given will be included as is. + + It should be noted that the transformations that this function does are not represented in the type system. 
+ """ + transformed = _transform_recursive(data, annotation=cast(type, expected_type)) + return cast(_T, transformed) + + +def _get_annotated_type(type_: type) -> type | None: + """If the given type is an `Annotated` type then it is returned, if not `None` is returned. + + This also unwraps the type when applicable, e.g. `Required[Annotated[T, ...]]` + """ + if is_required_type(type_): + # Unwrap `Required[Annotated[T, ...]]` to `Annotated[T, ...]` + type_ = get_args(type_)[0] + + if is_annotated_type(type_): + return type_ + + return None + + +def _maybe_transform_key(key: str, type_: type) -> str: + """Transform the given `data` based on the annotations provided in `type_`. + + Note: this function only looks at `Annotated` types that contain `PropertInfo` metadata. + """ + annotated_type = _get_annotated_type(type_) + if annotated_type is None: + # no `Annotated` definition for this type, no transformation needed + return key + + # ignore the first argument as it is the actual type + annotations = get_args(annotated_type)[1:] + for annotation in annotations: + if isinstance(annotation, PropertyInfo) and annotation.alias is not None: + return annotation.alias + + return key + + +def _transform_recursive( + data: object, + *, + annotation: type, + inner_type: type | None = None, +) -> object: + """Transform the given data against the expected type. + + Args: + annotation: The direct type annotation given to the particular piece of data. + This may or may not be wrapped in metadata types, e.g. `Required[T]`, `Annotated[T, ...]` etc + + inner_type: If applicable, this is the "inside" type. This is useful in certain cases where the outside type + is a container type such as `List[T]`. In that case `inner_type` should be set to `T` so that each entry in + the list can be transformed using the metadata from the container type. + + Defaults to the same value as the `annotation` argument. 
+ """ + if inner_type is None: + inner_type = annotation + + stripped_type = strip_annotated_type(inner_type) + if is_typeddict(stripped_type) and is_mapping(data): + return _transform_typeddict(data, stripped_type) + + if ( + # List[T] + (is_list_type(stripped_type) and is_list(data)) + # Iterable[T] + or (is_iterable_type(stripped_type) and is_iterable(data) and not isinstance(data, str)) + ): + inner_type = extract_type_arg(stripped_type, 0) + return [_transform_recursive(d, annotation=annotation, inner_type=inner_type) for d in data] + + if is_union_type(stripped_type): + # For union types we run the transformation against all subtypes to ensure that everything is transformed. + # + # TODO: there may be edge cases where the same normalized field name will transform to two different names + # in different subtypes. + for subtype in get_args(stripped_type): + data = _transform_recursive(data, annotation=annotation, inner_type=subtype) + return data + + if isinstance(data, pydantic.BaseModel): + return model_dump(data, exclude_unset=True) + + annotated_type = _get_annotated_type(annotation) + if annotated_type is None: + return data + + # ignore the first argument as it is the actual type + annotations = get_args(annotated_type)[1:] + for annotation in annotations: + if isinstance(annotation, PropertyInfo) and annotation.format is not None: + return _format_data(data, annotation.format, annotation.format_template) + + return data + + +def _format_data(data: object, format_: PropertyFormat, format_template: str | None) -> object: + if isinstance(data, date | datetime): + if format_ == "iso8601": + return data.isoformat() + + if format_ == "custom" and format_template is not None: + return data.strftime(format_template) + + if format_ == "base64" and is_base64_file_input(data): + binary: str | bytes | None = None + + if isinstance(data, pathlib.Path): + binary = data.read_bytes() + elif isinstance(data, io.IOBase): + binary = data.read() + + if isinstance(binary, 
str): # type: ignore[unreachable] + binary = binary.encode() + + if not isinstance(binary, bytes): + raise RuntimeError(f"Could not read bytes from {data}; Received {type(binary)}") + + return base64.b64encode(binary).decode("ascii") + + return data + + +def _transform_typeddict( + data: Mapping[str, object], + expected_type: type, +) -> Mapping[str, object]: + result: dict[str, object] = {} + annotations = get_type_hints(expected_type, include_extras=True) + for key, value in data.items(): + type_ = annotations.get(key) + if type_ is None: + # we do not have a type annotation for this field, leave it as is + result[key] = value + else: + result[_maybe_transform_key(key, type_)] = _transform_recursive(value, annotation=type_) + return result + + +async def async_maybe_transform( + data: object, + expected_type: object, +) -> Any | None: + """Wrapper over `async_transform()` that allows `None` to be passed. + + See `async_transform()` for more details. + """ + if data is None: + return None + return await async_transform(data, expected_type) + + +async def async_transform( + data: _T, + expected_type: object, +) -> _T: + """Transform dictionaries based off of type information from the given type, for example: + + ```py + class Params(TypedDict, total=False): + card_id: Required[Annotated[str, PropertyInfo(alias="cardID")]] + + + transformed = transform({"card_id": ""}, Params) + # {'cardID': ''} + ``` + + Any keys / data that does not have type information given will be included as is. + + It should be noted that the transformations that this function does are not represented in the type system. + """ + transformed = await _async_transform_recursive(data, annotation=cast(type, expected_type)) + return cast(_T, transformed) + + +async def _async_transform_recursive( + data: object, + *, + annotation: type, + inner_type: type | None = None, +) -> object: + """Transform the given data against the expected type. 
+ + Args: + annotation: The direct type annotation given to the particular piece of data. + This may or may not be wrapped in metadata types, e.g. `Required[T]`, `Annotated[T, ...]` etc + + inner_type: If applicable, this is the "inside" type. This is useful in certain cases where the outside type + is a container type such as `List[T]`. In that case `inner_type` should be set to `T` so that each entry in + the list can be transformed using the metadata from the container type. + + Defaults to the same value as the `annotation` argument. + """ + if inner_type is None: + inner_type = annotation + + stripped_type = strip_annotated_type(inner_type) + if is_typeddict(stripped_type) and is_mapping(data): + return await _async_transform_typeddict(data, stripped_type) + + if ( + # List[T] + (is_list_type(stripped_type) and is_list(data)) + # Iterable[T] + or (is_iterable_type(stripped_type) and is_iterable(data) and not isinstance(data, str)) + ): + inner_type = extract_type_arg(stripped_type, 0) + return [await _async_transform_recursive(d, annotation=annotation, inner_type=inner_type) for d in data] + + if is_union_type(stripped_type): + # For union types we run the transformation against all subtypes to ensure that everything is transformed. + # + # TODO: there may be edge cases where the same normalized field name will transform to two different names + # in different subtypes. 
+ for subtype in get_args(stripped_type): + data = await _async_transform_recursive(data, annotation=annotation, inner_type=subtype) + return data + + if isinstance(data, pydantic.BaseModel): + return model_dump(data, exclude_unset=True) + + annotated_type = _get_annotated_type(annotation) + if annotated_type is None: + return data + + # ignore the first argument as it is the actual type + annotations = get_args(annotated_type)[1:] + for annotation in annotations: + if isinstance(annotation, PropertyInfo) and annotation.format is not None: + return await _async_format_data(data, annotation.format, annotation.format_template) + + return data + + +async def _async_format_data(data: object, format_: PropertyFormat, format_template: str | None) -> object: + if isinstance(data, date | datetime): + if format_ == "iso8601": + return data.isoformat() + + if format_ == "custom" and format_template is not None: + return data.strftime(format_template) + + if format_ == "base64" and is_base64_file_input(data): + binary: str | bytes | None = None + + if isinstance(data, pathlib.Path): + binary = await anyio.Path(data).read_bytes() + elif isinstance(data, io.IOBase): + binary = data.read() + + if isinstance(binary, str): # type: ignore[unreachable] + binary = binary.encode() + + if not isinstance(binary, bytes): + raise RuntimeError(f"Could not read bytes from {data}; Received {type(binary)}") + + return base64.b64encode(binary).decode("ascii") + + return data + + +async def _async_transform_typeddict( + data: Mapping[str, object], + expected_type: type, +) -> Mapping[str, object]: + result: dict[str, object] = {} + annotations = get_type_hints(expected_type, include_extras=True) + for key, value in data.items(): + type_ = annotations.get(key) + if type_ is None: + # we do not have a type annotation for this field, leave it as is + result[key] = value + else: + result[_maybe_transform_key(key, type_)] = await _async_transform_recursive(value, annotation=type_) + return result 
diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_utils/_typing.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_utils/_typing.py new file mode 100644 index 0000000000000..c7c54dcc37458 --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_utils/_typing.py @@ -0,0 +1,122 @@ +from __future__ import annotations + +from collections import abc as _c_abc +from collections.abc import Iterable +from typing import Annotated, Any, TypeVar, cast, get_args, get_origin + +from typing_extensions import Required + +from .._base_compat import is_union as _is_union +from .._base_type import InheritsGeneric + + +def is_annotated_type(typ: type) -> bool: + return get_origin(typ) == Annotated + + +def is_list_type(typ: type) -> bool: + return (get_origin(typ) or typ) == list + + +def is_iterable_type(typ: type) -> bool: + """If the given type is `typing.Iterable[T]`""" + origin = get_origin(typ) or typ + return origin in {Iterable, _c_abc.Iterable} + + +def is_union_type(typ: type) -> bool: + return _is_union(get_origin(typ)) + + +def is_required_type(typ: type) -> bool: + return get_origin(typ) == Required + + +def is_typevar(typ: type) -> bool: + # type ignore is required because type checkers + # think this expression will always return False + return type(typ) == TypeVar # type: ignore + + +# Extracts T from Annotated[T, ...] 
or from Required[Annotated[T, ...]] +def strip_annotated_type(typ: type) -> type: + if is_required_type(typ) or is_annotated_type(typ): + return strip_annotated_type(cast(type, get_args(typ)[0])) + + return typ + + +def extract_type_arg(typ: type, index: int) -> type: + args = get_args(typ) + try: + return cast(type, args[index]) + except IndexError as err: + raise RuntimeError(f"Expected type {typ} to have a type argument at index {index} but it did not") from err + + +def extract_type_var_from_base( + typ: type, + *, + generic_bases: tuple[type, ...], + index: int, + failure_message: str | None = None, +) -> type: + """Given a type like `Foo[T]`, returns the generic type variable `T`. + + This also handles the case where a concrete subclass is given, e.g. + ```py + class MyResponse(Foo[bytes]): + ... + + extract_type_var(MyResponse, bases=(Foo,), index=0) -> bytes + ``` + + And where a generic subclass is given: + ```py + _T = TypeVar('_T') + class MyResponse(Foo[_T]): + ... + + extract_type_var(MyResponse[bytes], bases=(Foo,), index=0) -> bytes + ``` + """ + cls = cast(object, get_origin(typ) or typ) + if cls in generic_bases: + # we're given the class directly + return extract_type_arg(typ, index) + + # if a subclass is given + # --- + # this is needed as __orig_bases__ is not present in the typeshed stubs + # because it is intended to be for internal use only, however there does + # not seem to be a way to resolve generic TypeVars for inherited subclasses + # without using it. + if isinstance(cls, InheritsGeneric): + target_base_class: Any | None = None + for base in cls.__orig_bases__: + if base.__origin__ in generic_bases: + target_base_class = base + break + + if target_base_class is None: + raise RuntimeError( + "Could not find the generic base class;\n" + "This should never happen;\n" + f"Does {cls} inherit from one of {generic_bases} ?" 
+ ) + + extracted = extract_type_arg(target_base_class, index) + if is_typevar(extracted): + # If the extracted type argument is itself a type variable + # then that means the subclass itself is generic, so we have + # to resolve the type argument from the class itself, not + # the base class. + # + # Note: if there is more than 1 type argument, the subclass could + # change the ordering of the type arguments, this is not currently + # supported. + return extract_type_arg(typ, index) + + return extracted + + raise RuntimeError(failure_message or f"Could not resolve inner type variable at index {index} for {typ}") diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_utils/_utils.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_utils/_utils.py new file mode 100644 index 0000000000000..ce5e7786aa293 --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_utils/_utils.py @@ -0,0 +1,409 @@ +from __future__ import annotations + +import functools +import inspect +import os +import re +from collections.abc import Callable, Iterable, Mapping, Sequence +from pathlib import Path +from typing import ( + Any, + TypeGuard, + TypeVar, + Union, + cast, + overload, +) + +import sniffio + +from .._base_compat import parse_date as parse_date # noqa: PLC0414 +from .._base_compat import parse_datetime as parse_datetime # noqa: PLC0414 +from .._base_type import FileTypes, Headers, HeadersLike, NotGiven, NotGivenOr + + +def remove_notgiven_indict(obj): + if obj is None or (not isinstance(obj, Mapping)): + return obj + return {key: value for key, value in obj.items() if not isinstance(value, NotGiven)} + + +_T = TypeVar("_T") +_TupleT = TypeVar("_TupleT", bound=tuple[object, ...]) +_MappingT = TypeVar("_MappingT", bound=Mapping[str, object]) +_SequenceT = TypeVar("_SequenceT", bound=Sequence[object]) +CallableT = TypeVar("CallableT", bound=Callable[..., Any]) + + +def flatten(t: Iterable[Iterable[_T]]) -> list[_T]: 
+ return [item for sublist in t for item in sublist] + + +def extract_files( + # TODO: this needs to take Dict but variance issues..... + # create protocol type ? + query: Mapping[str, object], + *, + paths: Sequence[Sequence[str]], +) -> list[tuple[str, FileTypes]]: + """Recursively extract files from the given dictionary based on specified paths. + + A path may look like this ['foo', 'files', '', 'data']. + + Note: this mutates the given dictionary. + """ + files: list[tuple[str, FileTypes]] = [] + for path in paths: + files.extend(_extract_items(query, path, index=0, flattened_key=None)) + return files + + +def _extract_items( + obj: object, + path: Sequence[str], + *, + index: int, + flattened_key: str | None, +) -> list[tuple[str, FileTypes]]: + try: + key = path[index] + except IndexError: + if isinstance(obj, NotGiven): + # no value was provided - we can safely ignore + return [] + + # cyclical import + from .._files import assert_is_file_content + + # We have exhausted the path, return the entry we found. + assert_is_file_content(obj, key=flattened_key) + assert flattened_key is not None + return [(flattened_key, cast(FileTypes, obj))] + + index += 1 + if is_dict(obj): + try: + # We are at the last entry in the path so we must remove the field + if (len(path)) == index: + item = obj.pop(key) + else: + item = obj[key] + except KeyError: + # Key was not present in the dictionary, this is not indicative of an error + # as the given path may not point to a required field. We also do not want + # to enforce required fields as the API may differ from the spec in some cases. 
+ return [] + if flattened_key is None: + flattened_key = key + else: + flattened_key += f"[{key}]" + return _extract_items( + item, + path, + index=index, + flattened_key=flattened_key, + ) + elif is_list(obj): + if key != "": + return [] + + return flatten( + [ + _extract_items( + item, + path, + index=index, + flattened_key=flattened_key + "[]" if flattened_key is not None else "[]", + ) + for item in obj + ] + ) + + # Something unexpected was passed, just ignore it. + return [] + + +def is_given(obj: NotGivenOr[_T]) -> TypeGuard[_T]: + return not isinstance(obj, NotGiven) + + +# Type safe methods for narrowing types with TypeVars. +# The default narrowing for isinstance(obj, dict) is dict[unknown, unknown], +# however this cause Pyright to rightfully report errors. As we know we don't +# care about the contained types we can safely use `object` in it's place. +# +# There are two separate functions defined, `is_*` and `is_*_t` for different use cases. +# `is_*` is for when you're dealing with an unknown input +# `is_*_t` is for when you're narrowing a known union type to a specific subset + + +def is_tuple(obj: object) -> TypeGuard[tuple[object, ...]]: + return isinstance(obj, tuple) + + +def is_tuple_t(obj: _TupleT | object) -> TypeGuard[_TupleT]: + return isinstance(obj, tuple) + + +def is_sequence(obj: object) -> TypeGuard[Sequence[object]]: + return isinstance(obj, Sequence) + + +def is_sequence_t(obj: _SequenceT | object) -> TypeGuard[_SequenceT]: + return isinstance(obj, Sequence) + + +def is_mapping(obj: object) -> TypeGuard[Mapping[str, object]]: + return isinstance(obj, Mapping) + + +def is_mapping_t(obj: _MappingT | object) -> TypeGuard[_MappingT]: + return isinstance(obj, Mapping) + + +def is_dict(obj: object) -> TypeGuard[dict[object, object]]: + return isinstance(obj, dict) + + +def is_list(obj: object) -> TypeGuard[list[object]]: + return isinstance(obj, list) + + +def is_iterable(obj: object) -> TypeGuard[Iterable[object]]: + return 
isinstance(obj, Iterable) + + +def deepcopy_minimal(item: _T) -> _T: + """Minimal reimplementation of copy.deepcopy() that will only copy certain object types: + + - mappings, e.g. `dict` + - list + + This is done for performance reasons. + """ + if is_mapping(item): + return cast(_T, {k: deepcopy_minimal(v) for k, v in item.items()}) + if is_list(item): + return cast(_T, [deepcopy_minimal(entry) for entry in item]) + return item + + +# copied from https://github.com/Rapptz/RoboDanny +def human_join(seq: Sequence[str], *, delim: str = ", ", final: str = "or") -> str: + size = len(seq) + if size == 0: + return "" + + if size == 1: + return seq[0] + + if size == 2: + return f"{seq[0]} {final} {seq[1]}" + + return delim.join(seq[:-1]) + f" {final} {seq[-1]}" + + +def quote(string: str) -> str: + """Add single quotation marks around the given string. Does *not* do any escaping.""" + return f"'{string}'" + + +def required_args(*variants: Sequence[str]) -> Callable[[CallableT], CallableT]: + """Decorator to enforce a given set of arguments or variants of arguments are passed to the decorated function. + + Useful for enforcing runtime validation of overloaded functions. + + Example usage: + ```py + @overload + def foo(*, a: str) -> str: + ... + + + @overload + def foo(*, b: bool) -> str: + ... + + + # This enforces the same constraints that a static type checker would + # i.e. that either a or b must be passed to the function + @required_args(["a"], ["b"]) + def foo(*, a: str | None = None, b: bool | None = None) -> str: + ... 
+ ``` + """ + + def inner(func: CallableT) -> CallableT: + params = inspect.signature(func).parameters + positional = [ + name + for name, param in params.items() + if param.kind + in { + param.POSITIONAL_ONLY, + param.POSITIONAL_OR_KEYWORD, + } + ] + + @functools.wraps(func) + def wrapper(*args: object, **kwargs: object) -> object: + given_params: set[str] = set() + for i, _ in enumerate(args): + try: + given_params.add(positional[i]) + except IndexError: + raise TypeError( + f"{func.__name__}() takes {len(positional)} argument(s) but {len(args)} were given" + ) from None + + given_params.update(kwargs.keys()) + + for variant in variants: + matches = all(param in given_params for param in variant) + if matches: + break + else: # no break + if len(variants) > 1: + variations = human_join( + ["(" + human_join([quote(arg) for arg in variant], final="and") + ")" for variant in variants] + ) + msg = f"Missing required arguments; Expected either {variations} arguments to be given" + else: + # TODO: this error message is not deterministic + missing = list(set(variants[0]) - given_params) + if len(missing) > 1: + msg = f"Missing required arguments: {human_join([quote(arg) for arg in missing])}" + else: + msg = f"Missing required argument: {quote(missing[0])}" + raise TypeError(msg) + return func(*args, **kwargs) + + return wrapper # type: ignore + + return inner + + +_K = TypeVar("_K") +_V = TypeVar("_V") + + +@overload +def strip_not_given(obj: None) -> None: ... + + +@overload +def strip_not_given(obj: Mapping[_K, _V | NotGiven]) -> dict[_K, _V]: ... + + +@overload +def strip_not_given(obj: object) -> object: ... 
+ + +def strip_not_given(obj: object | None) -> object: + """Remove all top-level keys where their values are instances of `NotGiven`""" + if obj is None: + return None + + if not is_mapping(obj): + return obj + + return {key: value for key, value in obj.items() if not isinstance(value, NotGiven)} + + +def coerce_integer(val: str) -> int: + return int(val, base=10) + + +def coerce_float(val: str) -> float: + return float(val) + + +def coerce_boolean(val: str) -> bool: + return val in {"true", "1", "on"} + + +def maybe_coerce_integer(val: str | None) -> int | None: + if val is None: + return None + return coerce_integer(val) + + +def maybe_coerce_float(val: str | None) -> float | None: + if val is None: + return None + return coerce_float(val) + + +def maybe_coerce_boolean(val: str | None) -> bool | None: + if val is None: + return None + return coerce_boolean(val) + + +def removeprefix(string: str, prefix: str) -> str: + """Remove a prefix from a string. + + Backport of `str.removeprefix` for Python < 3.9 + """ + if string.startswith(prefix): + return string[len(prefix) :] + return string + + +def removesuffix(string: str, suffix: str) -> str: + """Remove a suffix from a string. 
+ + Backport of `str.removesuffix` for Python < 3.9 + """ + if string.endswith(suffix): + return string[: -len(suffix)] + return string + + +def file_from_path(path: str) -> FileTypes: + contents = Path(path).read_bytes() + file_name = os.path.basename(path) + return (file_name, contents) + + +def get_required_header(headers: HeadersLike, header: str) -> str: + lower_header = header.lower() + if isinstance(headers, Mapping): + headers = cast(Headers, headers) + for k, v in headers.items(): + if k.lower() == lower_header and isinstance(v, str): + return v + + """ to deal with the case where the header looks like Stainless-Event-Id """ + intercaps_header = re.sub(r"([^\w])(\w)", lambda pat: pat.group(1) + pat.group(2).upper(), header.capitalize()) + + for normalized_header in [header, lower_header, header.upper(), intercaps_header]: + value = headers.get(normalized_header) + if value: + return value + + raise ValueError(f"Could not find {header} header") + + +def get_async_library() -> str: + try: + return sniffio.current_async_library() + except Exception: + return "false" + + +def drop_prefix_image_data(content: Union[str, list[dict]]) -> Union[str, list[dict]]: + """ + 删除 ;base64, 前缀 + :param image_data: + :return: + """ + if isinstance(content, list): + for data in content: + if data.get("type") == "image_url": + image_data = data.get("image_url").get("url") + if image_data.startswith("data:image/"): + image_data = image_data.split("base64,")[-1] + data["image_url"]["url"] = image_data + + return content diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/logs.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/logs.py new file mode 100644 index 0000000000000..e5fce94c00e9e --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/logs.py @@ -0,0 +1,78 @@ +import logging +import os +import time + +logger = logging.getLogger(__name__) + + +class LoggerNameFilter(logging.Filter): + def filter(self, 
record): + # return record.name.startswith("loom_core") or record.name in "ERROR" or ( + # record.name.startswith("uvicorn.error") + # and record.getMessage().startswith("Uvicorn running on") + # ) + return True + + +def get_log_file(log_path: str, sub_dir: str): + """ + sub_dir should contain a timestamp. + """ + log_dir = os.path.join(log_path, sub_dir) + # Here should be creating a new directory each time, so `exist_ok=False` + os.makedirs(log_dir, exist_ok=False) + return os.path.join(log_dir, "zhipuai.log") + + +def get_config_dict(log_level: str, log_file_path: str, log_backup_count: int, log_max_bytes: int) -> dict: + # for windows, the path should be a raw string. + log_file_path = log_file_path.encode("unicode-escape").decode() if os.name == "nt" else log_file_path + log_level = log_level.upper() + config_dict = { + "version": 1, + "disable_existing_loggers": False, + "formatters": { + "formatter": {"format": ("%(asctime)s %(name)-12s %(process)d %(levelname)-8s %(message)s")}, + }, + "filters": { + "logger_name_filter": { + "()": __name__ + ".LoggerNameFilter", + }, + }, + "handlers": { + "stream_handler": { + "class": "logging.StreamHandler", + "formatter": "formatter", + "level": log_level, + # "stream": "ext://sys.stdout", + # "filters": ["logger_name_filter"], + }, + "file_handler": { + "class": "logging.handlers.RotatingFileHandler", + "formatter": "formatter", + "level": log_level, + "filename": log_file_path, + "mode": "a", + "maxBytes": log_max_bytes, + "backupCount": log_backup_count, + "encoding": "utf8", + }, + }, + "loggers": { + "loom_core": { + "handlers": ["stream_handler", "file_handler"], + "level": log_level, + "propagate": False, + } + }, + "root": { + "level": log_level, + "handlers": ["stream_handler", "file_handler"], + }, + } + return config_dict + + +def get_timestamp_ms(): + t = time.time() + return int(round(t * 1000)) diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/pagination.py 
b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/pagination.py new file mode 100644 index 0000000000000..7f0b1b91d9855 --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/pagination.py @@ -0,0 +1,62 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Any, Generic, Optional, TypeVar, cast + +from typing_extensions import Protocol, override, runtime_checkable + +from ._http_client import BasePage, BaseSyncPage, PageInfo + +__all__ = ["SyncPage", "SyncCursorPage"] + +_T = TypeVar("_T") + + +@runtime_checkable +class CursorPageItem(Protocol): + id: Optional[str] + + +class SyncPage(BaseSyncPage[_T], BasePage[_T], Generic[_T]): + """Note: no pagination actually occurs yet, this is for forwards-compatibility.""" + + data: list[_T] + object: str + + @override + def _get_page_items(self) -> list[_T]: + data = self.data + if not data: + return [] + return data + + @override + def next_page_info(self) -> None: + """ + This page represents a response that isn't actually paginated at the API level + so there will never be a next page. 
+ """ + return None + + +class SyncCursorPage(BaseSyncPage[_T], BasePage[_T], Generic[_T]): + data: list[_T] + + @override + def _get_page_items(self) -> list[_T]: + data = self.data + if not data: + return [] + return data + + @override + def next_page_info(self) -> Optional[PageInfo]: + data = self.data + if not data: + return None + + item = cast(Any, data[-1]) + if not isinstance(item, CursorPageItem) or item.id is None: + # TODO emit warning log + return None + + return PageInfo(params={"after": item.id}) diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/__init__.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/__init__.py new file mode 100644 index 0000000000000..9f941fb91c877 --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/__init__.py @@ -0,0 +1,5 @@ +from .assistant_completion import AssistantCompletion + +__all__ = [ + "AssistantCompletion", +] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/assistant_completion.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/assistant_completion.py new file mode 100644 index 0000000000000..cbfb6edaeb1f1 --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/assistant_completion.py @@ -0,0 +1,40 @@ +from typing import Any, Optional + +from ...core import BaseModel +from .message import MessageContent + +__all__ = ["AssistantCompletion", "CompletionUsage"] + + +class ErrorInfo(BaseModel): + code: str # 错误码 + message: str # 错误信息 + + +class AssistantChoice(BaseModel): + index: int # 结果下标 + delta: MessageContent # 当前会话输出消息体 + finish_reason: str + """ + # 推理结束原因 stop代表推理自然结束或触发停止词。 sensitive 代表模型推理内容被安全审核接口拦截。请注意,针对此类内容,请用户自行判断并决定是否撤回已公开的内容。 + # network_error 代表模型推理服务异常。 + """ # noqa: E501 + metadata: dict # 元信息,拓展字段 + + +class CompletionUsage(BaseModel): + prompt_tokens: int # 输入的 tokens 数量 + 
completion_tokens: int # 输出的 tokens 数量 + total_tokens: int # 总 tokens 数量 + + +class AssistantCompletion(BaseModel): + id: str # 请求 ID + conversation_id: str # 会话 ID + assistant_id: str # 智能体 ID + created: int # 请求创建时间,Unix 时间戳 + status: str # 返回状态,包括:`completed` 表示生成结束`in_progress`表示生成中 `failed` 表示生成异常 + last_error: Optional[ErrorInfo] # 异常信息 + choices: list[AssistantChoice] # 增量返回的信息 + metadata: Optional[dict[str, Any]] # 元信息,拓展字段 + usage: Optional[CompletionUsage] # tokens 数量统计 diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/assistant_conversation_params.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/assistant_conversation_params.py new file mode 100644 index 0000000000000..03f14f4238f37 --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/assistant_conversation_params.py @@ -0,0 +1,7 @@ +from typing import TypedDict + + +class ConversationParameters(TypedDict, total=False): + assistant_id: str # 智能体 ID + page: int # 当前分页 + page_size: int # 分页数量 diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/assistant_conversation_resp.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/assistant_conversation_resp.py new file mode 100644 index 0000000000000..d1833d220a2e3 --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/assistant_conversation_resp.py @@ -0,0 +1,29 @@ +from ...core import BaseModel + +__all__ = ["ConversationUsageListResp"] + + +class Usage(BaseModel): + prompt_tokens: int # 用户输入的 tokens 数量 + completion_tokens: int # 模型输入的 tokens 数量 + total_tokens: int # 总 tokens 数量 + + +class ConversationUsage(BaseModel): + id: str # 会话 id + assistant_id: str # 智能体Assistant id + create_time: int # 创建时间 + update_time: int # 更新时间 + usage: Usage # 会话中 tokens 数量统计 + + +class ConversationUsageList(BaseModel): + assistant_id: str # 智能体id + has_more: bool # 
是否还有更多页 + conversation_list: list[ConversationUsage] # 返回的会话列表 + + +class ConversationUsageListResp(BaseModel): + code: int + msg: str + data: ConversationUsageList diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/assistant_create_params.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/assistant_create_params.py new file mode 100644 index 0000000000000..2def1025cd2b3 --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/assistant_create_params.py @@ -0,0 +1,32 @@ +from typing import Optional, TypedDict, Union + + +class AssistantAttachments: + file_id: str + + +class MessageTextContent: + type: str # 目前支持 type = text + text: str + + +MessageContent = Union[MessageTextContent] + + +class ConversationMessage(TypedDict): + """会话消息体""" + + role: str # 用户的输入角色,例如 'user' + content: list[MessageContent] # 会话消息体的内容 + + +class AssistantParameters(TypedDict, total=False): + """智能体参数类""" + + assistant_id: str # 智能体 ID + conversation_id: Optional[str] # 会话 ID,不传则创建新会话 + model: str # 模型名称,默认为 'GLM-4-Assistant' + stream: bool # 是否支持流式 SSE,需要传入 True + messages: list[ConversationMessage] # 会话消息体 + attachments: Optional[list[AssistantAttachments]] # 会话指定的文件,非必填 + metadata: Optional[dict] # 元信息,拓展字段,非必填 diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/assistant_support_resp.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/assistant_support_resp.py new file mode 100644 index 0000000000000..0709cdbcad25e --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/assistant_support_resp.py @@ -0,0 +1,21 @@ +from ...core import BaseModel + +__all__ = ["AssistantSupportResp"] + + +class AssistantSupport(BaseModel): + assistant_id: str # 智能体的 Assistant id,用于智能体会话 + created_at: int # 创建时间 + updated_at: int # 更新时间 + name: str # 智能体名称 + avatar: str # 智能体头像 + description: str # 智能体描述 
+ status: str # 智能体状态,目前只有 publish + tools: list[str] # 智能体支持的工具名 + starter_prompts: list[str] # 智能体启动推荐的 prompt + + +class AssistantSupportResp(BaseModel): + code: int + msg: str + data: list[AssistantSupport] # 智能体列表 diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/__init__.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/__init__.py new file mode 100644 index 0000000000000..562e0151e53b4 --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/__init__.py @@ -0,0 +1,3 @@ +from .message_content import MessageContent + +__all__ = ["MessageContent"] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/message_content.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/message_content.py new file mode 100644 index 0000000000000..6a1a438a6fe03 --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/message_content.py @@ -0,0 +1,13 @@ +from typing import Annotated, TypeAlias, Union + +from ....core._utils import PropertyInfo +from .text_content_block import TextContentBlock +from .tools_delta_block import ToolsDeltaBlock + +__all__ = ["MessageContent"] + + +MessageContent: TypeAlias = Annotated[ + Union[ToolsDeltaBlock, TextContentBlock], + PropertyInfo(discriminator="type"), +] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/text_content_block.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/text_content_block.py new file mode 100644 index 0000000000000..865fb1139e2f7 --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/text_content_block.py @@ -0,0 +1,14 @@ +from typing import Literal + +from ....core import BaseModel + +__all__ = ["TextContentBlock"] + + +class 
TextContentBlock(BaseModel): + content: str + + role: str = "assistant" + + type: Literal["content"] = "content" + """Always `content`.""" diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/tools/code_interpreter_delta_block.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/tools/code_interpreter_delta_block.py new file mode 100644 index 0000000000000..9d569b282ef9f --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/tools/code_interpreter_delta_block.py @@ -0,0 +1,27 @@ +from typing import Literal + +__all__ = ["CodeInterpreterToolBlock"] + +from .....core import BaseModel + + +class CodeInterpreterToolOutput(BaseModel): + """代码工具输出结果""" + + type: str # 代码执行日志,目前只有 logs + logs: str # 代码执行的日志结果 + error_msg: str # 错误信息 + + +class CodeInterpreter(BaseModel): + """代码解释器""" + + input: str # 生成的代码片段,输入给代码沙盒 + outputs: list[CodeInterpreterToolOutput] # 代码执行后的输出结果 + + +class CodeInterpreterToolBlock(BaseModel): + """代码工具块""" + + code_interpreter: CodeInterpreter # 代码解释器对象 + type: Literal["code_interpreter"] # 调用工具的类型,始终为 `code_interpreter` diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/tools/drawing_tool_delta_block.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/tools/drawing_tool_delta_block.py new file mode 100644 index 0000000000000..0b6895556b616 --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/tools/drawing_tool_delta_block.py @@ -0,0 +1,21 @@ +from typing import Literal + +from .....core import BaseModel + +__all__ = ["DrawingToolBlock"] + + +class DrawingToolOutput(BaseModel): + image: str + + +class DrawingTool(BaseModel): + input: str + outputs: list[DrawingToolOutput] + + +class DrawingToolBlock(BaseModel): + drawing_tool: DrawingTool + + type: Literal["drawing_tool"] + """Always 
`drawing_tool`.""" diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/tools/function_delta_block.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/tools/function_delta_block.py new file mode 100644 index 0000000000000..c439bc4b3fbbb --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/tools/function_delta_block.py @@ -0,0 +1,22 @@ +from typing import Literal, Union + +__all__ = ["FunctionToolBlock"] + +from .....core import BaseModel + + +class FunctionToolOutput(BaseModel): + content: str + + +class FunctionTool(BaseModel): + name: str + arguments: Union[str, dict] + outputs: list[FunctionToolOutput] + + +class FunctionToolBlock(BaseModel): + function: FunctionTool + + type: Literal["function"] + """Always `function`.""" diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/tools/retrieval_delta_black.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/tools/retrieval_delta_black.py new file mode 100644 index 0000000000000..4789e9378a8a3 --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/tools/retrieval_delta_black.py @@ -0,0 +1,41 @@ +from typing import Literal + +from .....core import BaseModel + + +class RetrievalToolOutput(BaseModel): + """ + This class represents the output of a retrieval tool. + + Attributes: + - text (str): The text snippet retrieved from the knowledge base. + - document (str): The name of the document from which the text snippet was retrieved, returned only in intelligent configuration. + """ # noqa: E501 + + text: str + document: str + + +class RetrievalTool(BaseModel): + """ + This class represents the outputs of a retrieval tool. 
+ + Attributes: + - outputs (List[RetrievalToolOutput]): A list of text snippets and their respective document names retrieved from the knowledge base. + """ # noqa: E501 + + outputs: list[RetrievalToolOutput] + + +class RetrievalToolBlock(BaseModel): + """ + This class represents a block for invoking the retrieval tool. + + Attributes: + - retrieval (RetrievalTool): An instance of the RetrievalTool class containing the retrieval outputs. + - type (Literal["retrieval"]): The type of tool being used, always set to "retrieval". + """ + + retrieval: RetrievalTool + type: Literal["retrieval"] + """Always `retrieval`.""" diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/tools/tools_type.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/tools/tools_type.py new file mode 100644 index 0000000000000..98544053d4c83 --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/tools/tools_type.py @@ -0,0 +1,16 @@ +from typing import Annotated, TypeAlias, Union + +from .....core._utils import PropertyInfo +from .code_interpreter_delta_block import CodeInterpreterToolBlock +from .drawing_tool_delta_block import DrawingToolBlock +from .function_delta_block import FunctionToolBlock +from .retrieval_delta_black import RetrievalToolBlock +from .web_browser_delta_block import WebBrowserToolBlock + +__all__ = ["ToolsType"] + + +ToolsType: TypeAlias = Annotated[ + Union[DrawingToolBlock, CodeInterpreterToolBlock, WebBrowserToolBlock, RetrievalToolBlock, FunctionToolBlock], + PropertyInfo(discriminator="type"), +] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/tools/web_browser_delta_block.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/tools/web_browser_delta_block.py new file mode 100644 index 0000000000000..966e6fe0c84fe --- /dev/null +++ 
b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/tools/web_browser_delta_block.py @@ -0,0 +1,48 @@ +from typing import Literal + +from .....core import BaseModel + +__all__ = ["WebBrowserToolBlock"] + + +class WebBrowserOutput(BaseModel): + """ + This class represents the output of a web browser search result. + + Attributes: + - title (str): The title of the search result. + - link (str): The URL link to the search result's webpage. + - content (str): The textual content extracted from the search result. + - error_msg (str): Any error message encountered during the search or retrieval process. + """ + + title: str + link: str + content: str + error_msg: str + + +class WebBrowser(BaseModel): + """ + This class represents the input and outputs of a web browser search. + + Attributes: + - input (str): The input query for the web browser search. + - outputs (List[WebBrowserOutput]): A list of search results returned by the web browser. + """ + + input: str + outputs: list[WebBrowserOutput] + + +class WebBrowserToolBlock(BaseModel): + """ + This class represents a block for invoking the web browser tool. + + Attributes: + - web_browser (WebBrowser): An instance of the WebBrowser class containing the search input and outputs. + - type (Literal["web_browser"]): The type of tool being used, always set to "web_browser". 
+ """ + + web_browser: WebBrowser + type: Literal["web_browser"] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/tools_delta_block.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/tools_delta_block.py new file mode 100644 index 0000000000000..781a1ab819c28 --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/tools_delta_block.py @@ -0,0 +1,16 @@ +from typing import Literal + +from ....core import BaseModel +from .tools.tools_type import ToolsType + +__all__ = ["ToolsDeltaBlock"] + + +class ToolsDeltaBlock(BaseModel): + tool_calls: list[ToolsType] + """The index of the content part in the message.""" + + role: str = "tool" + + type: Literal["tool_calls"] = "tool_calls" + """Always `tool_calls`.""" diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/batch.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/batch.py new file mode 100644 index 0000000000000..560562915c9d3 --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/batch.py @@ -0,0 +1,82 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +import builtins +from typing import Literal, Optional + +from ..core import BaseModel +from .batch_error import BatchError +from .batch_request_counts import BatchRequestCounts + +__all__ = ["Batch", "Errors"] + + +class Errors(BaseModel): + data: Optional[list[BatchError]] = None + + object: Optional[str] = None + """这个类型,一直是`list`。""" + + +class Batch(BaseModel): + id: str + + completion_window: str + """用于执行请求的地址信息。""" + + created_at: int + """这是 Unix timestamp (in seconds) 表示的创建时间。""" + + endpoint: str + """这是ZhipuAI endpoint的地址。""" + + input_file_id: str + """标记为batch的输入文件的ID。""" + + object: Literal["batch"] + """这个类型,一直是`batch`.""" + + status: Literal[ + "validating", "failed", "in_progress", "finalizing", "completed", "expired", "cancelling", "cancelled" + ] + """batch 的状态。""" + + cancelled_at: Optional[int] = None + """Unix timestamp (in seconds) 表示的取消时间。""" + + cancelling_at: Optional[int] = None + """Unix timestamp (in seconds) 表示发起取消的请求时间 """ + + completed_at: Optional[int] = None + """Unix timestamp (in seconds) 表示的完成时间。""" + + error_file_id: Optional[str] = None + """这个文件id包含了执行请求失败的请求的输出。""" + + errors: Optional[Errors] = None + + expired_at: Optional[int] = None + """Unix timestamp (in seconds) 表示的将在过期时间。""" + + expires_at: Optional[int] = None + """Unix timestamp (in seconds) 触发过期""" + + failed_at: Optional[int] = None + """Unix timestamp (in seconds) 表示的失败时间。""" + + finalizing_at: Optional[int] = None + """Unix timestamp (in seconds) 表示的最终时间。""" + + in_progress_at: Optional[int] = None + """Unix timestamp (in seconds) 表示的开始处理时间。""" + + metadata: Optional[builtins.object] = None + """ + key:value形式的元数据,以便将信息存储 + 结构化格式。键的长度是64个字符,值最长512个字符 + """ + + output_file_id: Optional[str] = None + """完成请求的输出文件的ID。""" + + request_counts: Optional[BatchRequestCounts] = None + """批次中不同状态的请求计数""" diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/batch_create_params.py 
b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/batch_create_params.py new file mode 100644 index 0000000000000..3dae65ea46fcb --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/batch_create_params.py @@ -0,0 +1,37 @@ +from __future__ import annotations + +from typing import Literal, Optional + +from typing_extensions import Required, TypedDict + +__all__ = ["BatchCreateParams"] + + +class BatchCreateParams(TypedDict, total=False): + completion_window: Required[str] + """The time frame within which the batch should be processed. + + Currently only `24h` is supported. + """ + + endpoint: Required[Literal["/v1/chat/completions", "/v1/embeddings"]] + """The endpoint to be used for all requests in the batch. + + Currently `/v1/chat/completions` and `/v1/embeddings` are supported. + """ + + input_file_id: Required[str] + """The ID of an uploaded file that contains requests for the new batch. + + See [upload file](https://platform.openai.com/docs/api-reference/files/create) + for how to upload a file. + + Your input file must be formatted as a + [JSONL file](https://platform.openai.com/docs/api-reference/batch/requestInput), + and must be uploaded with the purpose `batch`. + """ + + metadata: Optional[dict[str, str]] + """Optional custom metadata for the batch.""" + + auto_delete_input_file: Optional[bool] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/batch_error.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/batch_error.py new file mode 100644 index 0000000000000..f934db19781e4 --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/batch_error.py @@ -0,0 +1,21 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Optional + +from ..core import BaseModel + +__all__ = ["BatchError"] + + +class BatchError(BaseModel): + code: Optional[str] = None + """定义的业务错误码""" + + line: Optional[int] = None + """文件中的行号""" + + message: Optional[str] = None + """关于对话文件中的错误的描述""" + + param: Optional[str] = None + """参数名称,如果有的话""" diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/batch_list_params.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/batch_list_params.py new file mode 100644 index 0000000000000..1a681671320ec --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/batch_list_params.py @@ -0,0 +1,20 @@ +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["BatchListParams"] + + +class BatchListParams(TypedDict, total=False): + after: str + """分页的游标,用于获取下一页的数据。 + + `after` 是一个指向当前页面的游标,用于获取下一页的数据。如果没有提供 `after`,则返回第一页的数据。 + list. + """ + + limit: int + """这个参数用于限制返回的结果数量。 + + Limit 用于限制返回的结果数量。默认值为 10 + """ diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/batch_request_counts.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/batch_request_counts.py new file mode 100644 index 0000000000000..ca3ccae625052 --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/batch_request_counts.py @@ -0,0 +1,14 @@ +from ..core import BaseModel + +__all__ = ["BatchRequestCounts"] + + +class BatchRequestCounts(BaseModel): + completed: int + """这个数字表示已经完成的请求。""" + + failed: int + """这个数字表示失败的请求。""" + + total: int + """这个数字表示总的请求。""" diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/chat/async_chat_completion.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/chat/async_chat_completion.py index a0645b0916882..c1eed070f32d9 100644 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/chat/async_chat_completion.py +++ 
b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/chat/async_chat_completion.py @@ -1,10 +1,9 @@ from typing import Optional -from pydantic import BaseModel - +from ...core import BaseModel from .chat_completion import CompletionChoice, CompletionUsage -__all__ = ["AsyncTaskStatus"] +__all__ = ["AsyncTaskStatus", "AsyncCompletion"] class AsyncTaskStatus(BaseModel): diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/chat/chat_completion.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/chat/chat_completion.py index 4b3a929a2b816..1945a826cda2d 100644 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/chat/chat_completion.py +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/chat/chat_completion.py @@ -1,6 +1,6 @@ from typing import Optional -from pydantic import BaseModel +from ...core import BaseModel __all__ = ["Completion", "CompletionUsage"] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/chat/chat_completion_chunk.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/chat/chat_completion_chunk.py index c250699741981..27fad0008a1dd 100644 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/chat/chat_completion_chunk.py +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/chat/chat_completion_chunk.py @@ -1,8 +1,9 @@ -from typing import Optional +from typing import Any, Optional -from pydantic import BaseModel +from ...core import BaseModel __all__ = [ + "CompletionUsage", "ChatCompletionChunk", "Choice", "ChoiceDelta", @@ -53,3 +54,4 @@ class ChatCompletionChunk(BaseModel): created: Optional[int] = None model: Optional[str] = None usage: Optional[CompletionUsage] = None + extra_json: dict[str, Any] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/chat/code_geex/code_geex_params.py 
b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/chat/code_geex/code_geex_params.py new file mode 100644 index 0000000000000..666b38855cd63 --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/chat/code_geex/code_geex_params.py @@ -0,0 +1,146 @@ +from typing import Literal, Optional + +from typing_extensions import Required, TypedDict + +__all__ = [ + "CodeGeexTarget", + "CodeGeexContext", + "CodeGeexExtra", +] + + +class CodeGeexTarget(TypedDict, total=False): + """补全的内容参数""" + + path: Optional[str] + """文件路径""" + language: Required[ + Literal[ + "c", + "c++", + "cpp", + "c#", + "csharp", + "c-sharp", + "css", + "cuda", + "dart", + "lua", + "objectivec", + "objective-c", + "objective-c++", + "python", + "perl", + "prolog", + "swift", + "lisp", + "java", + "scala", + "tex", + "jsx", + "tsx", + "vue", + "markdown", + "html", + "php", + "js", + "javascript", + "typescript", + "go", + "shell", + "rust", + "sql", + "kotlin", + "vb", + "ruby", + "pascal", + "r", + "fortran", + "lean", + "matlab", + "delphi", + "scheme", + "basic", + "assembly", + "groovy", + "abap", + "gdscript", + "haskell", + "julia", + "elixir", + "excel", + "clojure", + "actionscript", + "solidity", + "powershell", + "erlang", + "cobol", + "alloy", + "awk", + "thrift", + "sparql", + "augeas", + "cmake", + "f-sharp", + "stan", + "isabelle", + "dockerfile", + "rmarkdown", + "literate-agda", + "tcl", + "glsl", + "antlr", + "verilog", + "racket", + "standard-ml", + "elm", + "yaml", + "smalltalk", + "ocaml", + "idris", + "visual-basic", + "protocol-buffer", + "bluespec", + "applescript", + "makefile", + "tcsh", + "maple", + "systemverilog", + "literate-coffeescript", + "vhdl", + "restructuredtext", + "sas", + "literate-haskell", + "java-server-pages", + "coffeescript", + "emacs-lisp", + "mathematica", + "xslt", + "zig", + "common-lisp", + "stata", + "agda", + "ada", + ] + ] + """代码语言类型,如python""" + code_prefix: Required[str] + """补全位置的前文""" + code_suffix: 
Required[str] + """补全位置的后文""" + + +class CodeGeexContext(TypedDict, total=False): + """附加代码""" + + path: Required[str] + """附加代码文件的路径""" + code: Required[str] + """附加的代码内容""" + + +class CodeGeexExtra(TypedDict, total=False): + target: Required[CodeGeexTarget] + """补全的内容参数""" + contexts: Optional[list[CodeGeexContext]] + """附加代码""" diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/embeddings.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/embeddings.py index e01f2c815fb38..8425b5c86688d 100644 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/embeddings.py +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/embeddings.py @@ -2,8 +2,7 @@ from typing import Optional -from pydantic import BaseModel - +from ..core import BaseModel from .chat.chat_completion import CompletionUsage __all__ = ["Embedding", "EmbeddingsResponded"] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/files/__init__.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/files/__init__.py new file mode 100644 index 0000000000000..bbaf59e4d7d17 --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/files/__init__.py @@ -0,0 +1,5 @@ +from .file_deleted import FileDeleted +from .file_object import FileObject, ListOfFileObject +from .upload_detail import UploadDetail + +__all__ = ["FileObject", "ListOfFileObject", "UploadDetail", "FileDeleted"] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/files/file_create_params.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/files/file_create_params.py new file mode 100644 index 0000000000000..4ef93b1c05aca --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/files/file_create_params.py @@ -0,0 +1,38 @@ +from __future__ import annotations + +from typing import Literal, Optional + +from typing_extensions import Required, 
TypedDict + +__all__ = ["FileCreateParams"] + +from ...core import FileTypes +from . import UploadDetail + + +class FileCreateParams(TypedDict, total=False): + file: FileTypes + """file和 upload_detail二选一必填""" + + upload_detail: list[UploadDetail] + """file和 upload_detail二选一必填""" + + purpose: Required[Literal["fine-tune", "retrieval", "batch"]] + """ + 上传文件的用途,支持 "fine-tune和 "retrieval" + retrieval支持上传Doc、Docx、PDF、Xlsx、URL类型文件,且单个文件的大小不超过 5MB。 + fine-tune支持上传.jsonl文件且当前单个文件的大小最大可为 100 MB ,文件中语料格式需满足微调指南中所描述的格式。 + """ + custom_separator: Optional[list[str]] + """ + 当 purpose 为 retrieval 且文件类型为 pdf, url, docx 时上传,切片规则默认为 `\n`。 + """ + knowledge_id: str + """ + 当文件上传目的为 retrieval 时,需要指定知识库ID进行上传。 + """ + + sentence_size: int + """ + 当文件上传目的为 retrieval 时,需要指定知识库ID进行上传。 + """ diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/files/file_deleted.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/files/file_deleted.py new file mode 100644 index 0000000000000..a384b1a69a573 --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/files/file_deleted.py @@ -0,0 +1,13 @@ +from typing import Literal + +from ...core import BaseModel + +__all__ = ["FileDeleted"] + + +class FileDeleted(BaseModel): + id: str + + deleted: bool + + object: Literal["file"] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/file_object.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/files/file_object.py similarity index 86% rename from api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/file_object.py rename to api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/files/file_object.py index 75f76fe969faf..8f9d0fbb8e6ce 100644 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/file_object.py +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/files/file_object.py @@ -1,8 +1,8 @@ from typing import Optional -from pydantic 
import BaseModel +from ...core import BaseModel -__all__ = ["FileObject"] +__all__ = ["FileObject", "ListOfFileObject"] class FileObject(BaseModel): diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/files/upload_detail.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/files/upload_detail.py new file mode 100644 index 0000000000000..8f1ca5ce5756a --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/files/upload_detail.py @@ -0,0 +1,13 @@ +from typing import Optional + +from ...core import BaseModel + + +class UploadDetail(BaseModel): + url: str + knowledge_type: int + file_name: Optional[str] = None + sentence_size: Optional[int] = None + custom_separator: Optional[list[str]] = None + callback_url: Optional[str] = None + callback_header: Optional[dict[str, str]] = None diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/fine_tuning/fine_tuning_job.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/fine_tuning/fine_tuning_job.py index 1d3930286b89d..75c7553dbe35c 100644 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/fine_tuning/fine_tuning_job.py +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/fine_tuning/fine_tuning_job.py @@ -1,6 +1,6 @@ from typing import Optional, Union -from pydantic import BaseModel +from ...core import BaseModel __all__ = ["FineTuningJob", "Error", "Hyperparameters", "ListOfFineTuningJob"] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/fine_tuning/fine_tuning_job_event.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/fine_tuning/fine_tuning_job_event.py index e26b448534246..f996cff11430b 100644 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/fine_tuning/fine_tuning_job_event.py +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/fine_tuning/fine_tuning_job_event.py @@ -1,6 +1,6 @@ 
from typing import Optional, Union -from pydantic import BaseModel +from ...core import BaseModel __all__ = ["FineTuningJobEvent", "Metric", "JobEvent"] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/fine_tuning/models/__init__.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/fine_tuning/models/__init__.py new file mode 100644 index 0000000000000..57d0d2511dbc1 --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/fine_tuning/models/__init__.py @@ -0,0 +1 @@ +from .fine_tuned_models import FineTunedModelsStatus diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/fine_tuning/models/fine_tuned_models.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/fine_tuning/models/fine_tuned_models.py new file mode 100644 index 0000000000000..b286a5b5774d3 --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/fine_tuning/models/fine_tuned_models.py @@ -0,0 +1,13 @@ +from typing import ClassVar + +from ....core import PYDANTIC_V2, BaseModel, ConfigDict + +__all__ = ["FineTunedModelsStatus"] + + +class FineTunedModelsStatus(BaseModel): + if PYDANTIC_V2: + model_config: ClassVar[ConfigDict] = ConfigDict(extra="allow", protected_namespaces=()) + request_id: str # 请求id + model_name: str # 模型名称 + delete_status: str # 删除状态 deleting(删除中), deleted (已删除) diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/image.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/image.py index b352ce0954ad5..3bcad0acabd21 100644 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/image.py +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/image.py @@ -2,7 +2,7 @@ from typing import Optional -from pydantic import BaseModel +from ..core import BaseModel __all__ = ["GeneratedImage", "ImagesResponded"] diff --git 
a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/__init__.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/__init__.py new file mode 100644 index 0000000000000..8c81d703e214a --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/__init__.py @@ -0,0 +1,8 @@ +from .knowledge import KnowledgeInfo +from .knowledge_used import KnowledgeStatistics, KnowledgeUsed + +__all__ = [ + "KnowledgeInfo", + "KnowledgeStatistics", + "KnowledgeUsed", +] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/document/__init__.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/document/__init__.py new file mode 100644 index 0000000000000..32e23e6dab307 --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/document/__init__.py @@ -0,0 +1,8 @@ +from .document import DocumentData, DocumentFailedInfo, DocumentObject, DocumentSuccessinfo + +__all__ = [ + "DocumentData", + "DocumentObject", + "DocumentSuccessinfo", + "DocumentFailedInfo", +] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/document/document.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/document/document.py new file mode 100644 index 0000000000000..b9a1646391ece --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/document/document.py @@ -0,0 +1,51 @@ +from typing import Optional + +from ....core import BaseModel + +__all__ = ["DocumentData", "DocumentObject", "DocumentSuccessinfo", "DocumentFailedInfo"] + + +class DocumentSuccessinfo(BaseModel): + documentId: Optional[str] = None + """文件id""" + filename: Optional[str] = None + """文件名称""" + + +class DocumentFailedInfo(BaseModel): + failReason: Optional[str] = None + """上传失败的原因,包括:文件格式不支持、文件大小超出限制、知识库容量已满、容量上限为 50 万字。""" + filename: Optional[str] = None + 
"""文件名称""" + documentId: Optional[str] = None + """知识库id""" + + +class DocumentObject(BaseModel): + """文档信息""" + + successInfos: Optional[list[DocumentSuccessinfo]] = None + """上传成功的文件信息""" + failedInfos: Optional[list[DocumentFailedInfo]] = None + """上传失败的文件信息""" + + +class DocumentDataFailInfo(BaseModel): + """失败原因""" + + embedding_code: Optional[int] = ( + None # 失败码 10001:知识不可用,知识库空间已达上限 10002:知识不可用,知识库空间已达上限(字数超出限制) + ) + embedding_msg: Optional[str] = None # 失败原因 + + +class DocumentData(BaseModel): + id: str = None # 知识唯一id + custom_separator: list[str] = None # 切片规则 + sentence_size: str = None # 切片大小 + length: int = None # 文件大小(字节) + word_num: int = None # 文件字数 + name: str = None # 文件名 + url: str = None # 文件下载链接 + embedding_stat: int = None # 0:向量化中 1:向量化完成 2:向量化失败 + failInfo: Optional[DocumentDataFailInfo] = None # 失败原因 向量化失败embedding_stat=2的时候 会有此值 diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/document/document_edit_params.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/document/document_edit_params.py new file mode 100644 index 0000000000000..509cb3a451af5 --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/document/document_edit_params.py @@ -0,0 +1,29 @@ +from typing import Optional, TypedDict + +__all__ = ["DocumentEditParams"] + + +class DocumentEditParams(TypedDict): + """ + 知识参数类型定义 + + Attributes: + id (str): 知识ID + knowledge_type (int): 知识类型: + 1:文章知识: 支持pdf,url,docx + 2.问答知识-文档: 支持pdf,url,docx + 3.问答知识-表格: 支持xlsx + 4.商品库-表格: 支持xlsx + 5.自定义: 支持pdf,url,docx + custom_separator (Optional[List[str]]): 当前知识类型为自定义(knowledge_type=5)时的切片规则,默认\n + sentence_size (Optional[int]): 当前知识类型为自定义(knowledge_type=5)时的切片字数,取值范围: 20-2000,默认300 + callback_url (Optional[str]): 回调地址 + callback_header (Optional[dict]): 回调时携带的header + """ + + id: str + knowledge_type: int + custom_separator: Optional[list[str]] + sentence_size: Optional[int] + callback_url: 
Optional[str] + callback_header: Optional[dict[str, str]] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/document/document_list_params.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/document/document_list_params.py new file mode 100644 index 0000000000000..910c8c045e1b9 --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/document/document_list_params.py @@ -0,0 +1,26 @@ +from __future__ import annotations + +from typing import Optional + +from typing_extensions import TypedDict + + +class DocumentListParams(TypedDict, total=False): + """ + 文件查询参数类型定义 + + Attributes: + purpose (Optional[str]): 文件用途 + knowledge_id (Optional[str]): 当文件用途为 retrieval 时,需要提供查询的知识库ID + page (Optional[int]): 页,默认1 + limit (Optional[int]): 查询文件列表数,默认10 + after (Optional[str]): 查询指定fileID之后的文件列表(当文件用途为 fine-tune 时需要) + order (Optional[str]): 排序规则,可选值['desc', 'asc'],默认desc(当文件用途为 fine-tune 时需要) + """ + + purpose: Optional[str] + knowledge_id: Optional[str] + page: Optional[int] + limit: Optional[int] + after: Optional[str] + order: Optional[str] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/document/document_list_resp.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/document/document_list_resp.py new file mode 100644 index 0000000000000..acae4fad9ff36 --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/document/document_list_resp.py @@ -0,0 +1,11 @@ +from __future__ import annotations + +from ....core import BaseModel +from . 
import DocumentData + +__all__ = ["DocumentPage"] + + +class DocumentPage(BaseModel): + list: list[DocumentData] + object: str diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/knowledge.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/knowledge.py new file mode 100644 index 0000000000000..bc6f159eb211e --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/knowledge.py @@ -0,0 +1,21 @@ +from typing import Optional + +from ...core import BaseModel + +__all__ = ["KnowledgeInfo"] + + +class KnowledgeInfo(BaseModel): + id: Optional[str] = None + """知识库唯一 id""" + embedding_id: Optional[str] = ( + None # 知识库绑定的向量化模型 见模型列表 [内部服务开放接口文档](https://lslfd0slxc.feishu.cn/docx/YauWdbBiMopV0FxB7KncPWCEn8f#H15NduiQZo3ugmxnWQFcfAHpnQ4) + ) + name: Optional[str] = None # 知识库名称 100字限制 + customer_identifier: Optional[str] = None # 用户标识 长度32位以内 + description: Optional[str] = None # 知识库描述 500字限制 + background: Optional[str] = None # 背景颜色(给枚举)'blue', 'red', 'orange', 'purple', 'sky' + icon: Optional[str] = ( + None # 知识库图标(给枚举) question: 问号、book: 书籍、seal: 印章、wrench: 扳手、tag: 标签、horn: 喇叭、house: 房子 # noqa: E501 + ) + bucket_id: Optional[str] = None # 桶id 限制32位 diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/knowledge_create_params.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/knowledge_create_params.py new file mode 100644 index 0000000000000..c3da201727c34 --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/knowledge_create_params.py @@ -0,0 +1,30 @@ +from __future__ import annotations + +from typing import Literal, Optional + +from typing_extensions import TypedDict + +__all__ = ["KnowledgeBaseParams"] + + +class KnowledgeBaseParams(TypedDict): + """ + 知识库参数类型定义 + + Attributes: + embedding_id (int): 知识库绑定的向量化模型ID + name (str): 知识库名称,限制100字 + customer_identifier 
(Optional[str]): 用户标识,长度32位以内 + description (Optional[str]): 知识库描述,限制500字 + background (Optional[Literal['blue', 'red', 'orange', 'purple', 'sky']]): 背景颜色 + icon (Optional[Literal['question', 'book', 'seal', 'wrench', 'tag', 'horn', 'house']]): 知识库图标 + bucket_id (Optional[str]): 桶ID,限制32位 + """ + + embedding_id: int + name: str + customer_identifier: Optional[str] + description: Optional[str] + background: Optional[Literal["blue", "red", "orange", "purple", "sky"]] = None + icon: Optional[Literal["question", "book", "seal", "wrench", "tag", "horn", "house"]] = None + bucket_id: Optional[str] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/knowledge_list_params.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/knowledge_list_params.py new file mode 100644 index 0000000000000..a221b28e4603b --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/knowledge_list_params.py @@ -0,0 +1,15 @@ +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["KnowledgeListParams"] + + +class KnowledgeListParams(TypedDict, total=False): + page: int = 1 + """ 页码,默认 1,第一页 + """ + + size: int = 10 + """每页数量 默认10 + """ diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/knowledge_list_resp.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/knowledge_list_resp.py new file mode 100644 index 0000000000000..e462eddc550d6 --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/knowledge_list_resp.py @@ -0,0 +1,11 @@ +from __future__ import annotations + +from ...core import BaseModel +from . 
import KnowledgeInfo + +__all__ = ["KnowledgePage"] + + +class KnowledgePage(BaseModel): + list: list[KnowledgeInfo] + object: str diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/knowledge_used.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/knowledge_used.py new file mode 100644 index 0000000000000..cfda7097026c5 --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/knowledge_used.py @@ -0,0 +1,21 @@ +from typing import Optional + +from ...core import BaseModel + +__all__ = ["KnowledgeStatistics", "KnowledgeUsed"] + + +class KnowledgeStatistics(BaseModel): + """ + 使用量统计 + """ + + word_num: Optional[int] = None + length: Optional[int] = None + + +class KnowledgeUsed(BaseModel): + used: Optional[KnowledgeStatistics] = None + """已使用量""" + total: Optional[KnowledgeStatistics] = None + """知识库总量""" diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/sensitive_word_check/__init__.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/sensitive_word_check/__init__.py new file mode 100644 index 0000000000000..c9bd60419ce60 --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/sensitive_word_check/__init__.py @@ -0,0 +1,3 @@ +from .sensitive_word_check import SensitiveWordCheckRequest + +__all__ = ["SensitiveWordCheckRequest"] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/sensitive_word_check/sensitive_word_check.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/sensitive_word_check/sensitive_word_check.py new file mode 100644 index 0000000000000..0c37d99e65329 --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/sensitive_word_check/sensitive_word_check.py @@ -0,0 +1,14 @@ +from typing import Optional + +from typing_extensions import TypedDict + + +class SensitiveWordCheckRequest(TypedDict, total=False): + 
type: Optional[str] + """敏感词类型,当前仅支持ALL""" + status: Optional[str] + """敏感词启用禁用状态 + 启用:ENABLE + 禁用:DISABLE + 备注:默认开启敏感词校验,如果要关闭敏感词校验,需联系商务获取对应权限,否则敏感词禁用不生效。 + """ diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/tools/__init__.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/tools/__init__.py new file mode 100644 index 0000000000000..62f77344eee56 --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/tools/__init__.py @@ -0,0 +1,9 @@ +from .web_search import ( + SearchIntent, + SearchRecommend, + SearchResult, + WebSearch, +) +from .web_search_chunk import WebSearchChunk + +__all__ = ["WebSearch", "SearchIntent", "SearchResult", "SearchRecommend", "WebSearchChunk"] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/tools/tools_web_search_params.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/tools/tools_web_search_params.py new file mode 100644 index 0000000000000..b3a3b26f07ee5 --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/tools/tools_web_search_params.py @@ -0,0 +1,35 @@ +from __future__ import annotations + +from typing import Optional, Union + +from typing_extensions import TypedDict + +__all__ = ["WebSearchParams"] + + +class WebSearchParams(TypedDict): + """ + 工具名:web-search-pro参数类型定义 + + Attributes: + :param model: str, 模型名称 + :param request_id: Optional[str], 请求ID + :param stream: Optional[bool], 是否流式 + :param messages: Union[str, List[str], List[int], object, None], + 包含历史对话上下文的内容,按照 {"role": "user", "content": "你好"} 的json 数组形式进行传参 + 当前版本仅支持 User Message 单轮对话,工具会理解User Message并进行搜索, + 请尽可能传入不带指令格式的用户原始提问,以提高搜索准确率。 + :param scope: Optional[str], 指定搜索范围,全网、学术等,默认全网 + :param location: Optional[str], 指定搜索用户地区 location 提高相关性 + :param recent_days: Optional[int],支持指定返回 N 天(1-30)更新的搜索结果 + + + """ + + model: str + request_id: Optional[str] + stream: Optional[bool] + messages: Union[str, list[str], 
list[int], object, None] + scope: Optional[str] = None + location: Optional[str] = None + recent_days: Optional[int] = None diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/tools/web_search.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/tools/web_search.py new file mode 100644 index 0000000000000..ac9fa3821e979 --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/tools/web_search.py @@ -0,0 +1,71 @@ +from typing import Optional + +from ...core import BaseModel + +__all__ = [ + "WebSearch", + "SearchIntent", + "SearchResult", + "SearchRecommend", +] + + +class SearchIntent(BaseModel): + index: int + # 搜索轮次,默认为 0 + query: str + # 搜索优化 query + intent: str + # 判断的意图类型 + keywords: str + # 搜索关键词 + + +class SearchResult(BaseModel): + index: int + # 搜索轮次,默认为 0 + title: str + # 标题 + link: str + # 链接 + content: str + # 内容 + icon: str + # 图标 + media: str + # 来源媒体 + refer: str + # 角标序号 [ref_1] + + +class SearchRecommend(BaseModel): + index: int + # 搜索轮次,默认为 0 + query: str + # 推荐query + + +class WebSearchMessageToolCall(BaseModel): + id: str + search_intent: Optional[SearchIntent] + search_result: Optional[SearchResult] + search_recommend: Optional[SearchRecommend] + type: str + + +class WebSearchMessage(BaseModel): + role: str + tool_calls: Optional[list[WebSearchMessageToolCall]] = None + + +class WebSearchChoice(BaseModel): + index: int + finish_reason: str + message: WebSearchMessage + + +class WebSearch(BaseModel): + created: Optional[int] = None + choices: list[WebSearchChoice] + request_id: Optional[str] = None + id: Optional[str] = None diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/tools/web_search_chunk.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/tools/web_search_chunk.py new file mode 100644 index 0000000000000..7fb0e02bb5871 --- /dev/null +++ 
b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/tools/web_search_chunk.py @@ -0,0 +1,33 @@ +from typing import Optional + +from ...core import BaseModel +from .web_search import SearchIntent, SearchRecommend, SearchResult + +__all__ = ["WebSearchChunk"] + + +class ChoiceDeltaToolCall(BaseModel): + index: int + id: Optional[str] = None + + search_intent: Optional[SearchIntent] = None + search_result: Optional[SearchResult] = None + search_recommend: Optional[SearchRecommend] = None + type: Optional[str] = None + + +class ChoiceDelta(BaseModel): + role: Optional[str] = None + tool_calls: Optional[list[ChoiceDeltaToolCall]] = None + + +class Choice(BaseModel): + delta: ChoiceDelta + finish_reason: Optional[str] = None + index: int + + +class WebSearchChunk(BaseModel): + id: Optional[str] = None + choices: list[Choice] + created: Optional[int] = None diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/video/__init__.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/video/__init__.py new file mode 100644 index 0000000000000..b14072b1a771a --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/video/__init__.py @@ -0,0 +1,3 @@ +from .video_object import VideoObject, VideoResult + +__all__ = ["VideoObject", "VideoResult"] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/video/video_create_params.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/video/video_create_params.py new file mode 100644 index 0000000000000..f5489d708e722 --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/video/video_create_params.py @@ -0,0 +1,27 @@ +from __future__ import annotations + +from typing import Optional + +from typing_extensions import TypedDict + +__all__ = ["VideoCreateParams"] + +from ..sensitive_word_check import SensitiveWordCheckRequest + + +class VideoCreateParams(TypedDict, total=False): + model: str + 
"""模型编码""" + prompt: str + """所需视频的文本描述""" + image_url: str + """所需视频的文本描述""" + sensitive_word_check: Optional[SensitiveWordCheckRequest] + """支持 URL 或者 Base64、传入 image 奖进行图生视频 + * 图片格式: + * 图片大小:""" + request_id: str + """由用户端传参,需保证唯一性;用于区分每次请求的唯一标识,用户端不传时平台会默认生成。""" + + user_id: str + """用户端。""" diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/video/video_object.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/video/video_object.py new file mode 100644 index 0000000000000..85c3844d8a791 --- /dev/null +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/video/video_object.py @@ -0,0 +1,30 @@ +from typing import Optional + +from ...core import BaseModel + +__all__ = ["VideoObject", "VideoResult"] + + +class VideoResult(BaseModel): + url: str + """视频url""" + cover_image_url: str + """预览图""" + + +class VideoObject(BaseModel): + id: Optional[str] = None + """智谱 AI 开放平台生成的任务订单号,调用请求结果接口时请使用此订单号""" + + model: str + """模型名称""" + + video_result: list[VideoResult] + """视频生成结果""" + + task_status: str + """处理状态,PROCESSING(处理中),SUCCESS(成功),FAIL(失败) + 注:处理中状态需通过查询获取结果""" + + request_id: str + """用户在客户端请求时提交的任务编号或者平台生成的任务编号""" diff --git a/api/core/tools/provider/builtin/cogview/tools/cogview3.py b/api/core/tools/provider/builtin/cogview/tools/cogview3.py index 9039708588df1..085084ca38355 100644 --- a/api/core/tools/provider/builtin/cogview/tools/cogview3.py +++ b/api/core/tools/provider/builtin/cogview/tools/cogview3.py @@ -21,15 +21,22 @@ def _invoke( ) size_mapping = { "square": "1024x1024", - "vertical": "1024x1792", - "horizontal": "1792x1024", + "vertical_768": "768x1344", + "vertical_864": "864x1152", + "horizontal_1344": "1344x768", + "horizontal_1152": "1152x864", + "widescreen_1440": "1440x720", + "tallscreen_720": "720x1440", } # prompt prompt = tool_parameters.get("prompt", "") if not prompt: return self.create_text_message("Please input prompt") - # get size - size = 
size_mapping[tool_parameters.get("size", "square")] + # get size key + size_key = tool_parameters.get("size", "square") + # cogview-3-plus get size + if size_key != "cogview_3": + size = size_mapping[size_key] # get n n = tool_parameters.get("n", 1) # get quality @@ -43,16 +50,29 @@ def _invoke( # set extra body seed_id = tool_parameters.get("seed_id", self._generate_random_id(8)) extra_body = {"seed": seed_id} - response = client.images.generations( - prompt=prompt, - model="cogview-3", - size=size, - n=n, - extra_body=extra_body, - style=style, - quality=quality, - response_format="b64_json", - ) + # cogview-3-plus + if size_key != "cogview_3": + response = client.images.generations( + prompt=prompt, + model="cogview-3-plus", + size=size, + n=n, + extra_body=extra_body, + style=style, + quality=quality, + response_format="b64_json", + ) + # cogview-3 + else: + response = client.images.generations( + prompt=prompt, + model="cogview-3", + n=n, + extra_body=extra_body, + style=style, + quality=quality, + response_format="b64_json", + ) result = [] for image in response.data: result.append(self.create_image_message(image=image.url)) diff --git a/api/core/tools/provider/builtin/cogview/tools/cogview3.yaml b/api/core/tools/provider/builtin/cogview/tools/cogview3.yaml index 1de3f599b6ac0..9ab5c2729bf7a 100644 --- a/api/core/tools/provider/builtin/cogview/tools/cogview3.yaml +++ b/api/core/tools/provider/builtin/cogview/tools/cogview3.yaml @@ -42,21 +42,46 @@ parameters: pt_BR: Image size form: form options: + - value: cogview_3 + label: + en_US: Square_cogview_3(1024x1024) + zh_Hans: 方_cogview_3(1024x1024) + pt_BR: Square_cogview_3(1024x1024) - value: square label: - en_US: Squre(1024x1024) + en_US: Square(1024x1024) zh_Hans: 方(1024x1024) - pt_BR: Squre(1024x1024) - - value: vertical + pt_BR: Square(1024x1024) + - value: vertical_768 + label: + en_US: Vertical(768x1344) + zh_Hans: 竖屏(768x1344) + pt_BR: Vertical(768x1344) + - value: vertical_864 + label: + en_US: 
Vertical(864x1152) + zh_Hans: 竖屏(864x1152) + pt_BR: Vertical(864x1152) + - value: horizontal_1344 + label: + en_US: Horizontal(1344x768) + zh_Hans: 横屏(1344x768) + pt_BR: Horizontal(1344x768) + - value: horizontal_1152 + label: + en_US: Horizontal(1152x864) + zh_Hans: 横屏(1152x864) + pt_BR: Horizontal(1152x864) + - value: widescreen_1440 label: - en_US: Vertical(1024x1792) - zh_Hans: 竖屏(1024x1792) - pt_BR: Vertical(1024x1792) - - value: horizontal + en_US: Widescreen(1440x720) + zh_Hans: 宽屏(1440x720) + pt_BR: Widescreen(1440x720) + - value: tallscreen_720 label: - en_US: Horizontal(1792x1024) - zh_Hans: 横屏(1792x1024) - pt_BR: Horizontal(1792x1024) + en_US: Tallscreen(720x1440) + zh_Hans: 高屏(720x1440) + pt_BR: Tallscreen(720x1440) default: square - name: n type: number From c9f1e18df1e400ce5c6599d7a528ce4d50e1bdea Mon Sep 17 00:00:00 2001 From: AAEE86 <33052466+AAEE86@users.noreply.github.com> Date: Sun, 22 Sep 2024 10:14:33 +0800 Subject: [PATCH 017/235] Add model parameter translation (#8509) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: swingchen01 Co-authored-by: 陈长君 --- .../model_providers/ollama/llm/llm.py | 83 ++++++++++++------- .../openai_api_compatible/llm/llm.py | 35 ++++++-- 2 files changed, 82 insertions(+), 36 deletions(-) diff --git a/api/core/model_runtime/model_providers/ollama/llm/llm.py b/api/core/model_runtime/model_providers/ollama/llm/llm.py index 1ed77a2ee8ed4..ff732e69259e2 100644 --- a/api/core/model_runtime/model_providers/ollama/llm/llm.py +++ b/api/core/model_runtime/model_providers/ollama/llm/llm.py @@ -472,12 +472,13 @@ def get_customizable_model_schema(self, model: str, credentials: dict) -> AIMode ParameterRule( name=DefaultParameterName.TEMPERATURE.value, use_template=DefaultParameterName.TEMPERATURE.value, - label=I18nObject(en_US="Temperature"), + label=I18nObject(en_US="Temperature", zh_Hans="温度"), type=ParameterType.FLOAT, help=I18nObject( en_US="The temperature of the 
model. " "Increasing the temperature will make the model answer " - "more creatively. (Default: 0.8)" + "more creatively. (Default: 0.8)", + zh_Hans="模型的温度。增加温度将使模型的回答更具创造性。(默认值:0.8)", ), default=0.1, min=0, @@ -486,12 +487,13 @@ def get_customizable_model_schema(self, model: str, credentials: dict) -> AIMode ParameterRule( name=DefaultParameterName.TOP_P.value, use_template=DefaultParameterName.TOP_P.value, - label=I18nObject(en_US="Top P"), + label=I18nObject(en_US="Top P", zh_Hans="Top P"), type=ParameterType.FLOAT, help=I18nObject( en_US="Works together with top-k. A higher value (e.g., 0.95) will lead to " "more diverse text, while a lower value (e.g., 0.5) will generate more " - "focused and conservative text. (Default: 0.9)" + "focused and conservative text. (Default: 0.9)", + zh_Hans="与top-k一起工作。较高的值(例如,0.95)会导致生成更多样化的文本,而较低的值(例如,0.5)会生成更专注和保守的文本。(默认值:0.9)", ), default=0.9, min=0, @@ -499,12 +501,13 @@ def get_customizable_model_schema(self, model: str, credentials: dict) -> AIMode ), ParameterRule( name="top_k", - label=I18nObject(en_US="Top K"), + label=I18nObject(en_US="Top K", zh_Hans="Top K"), type=ParameterType.INT, help=I18nObject( en_US="Reduces the probability of generating nonsense. " "A higher value (e.g. 100) will give more diverse answers, " - "while a lower value (e.g. 10) will be more conservative. (Default: 40)" + "while a lower value (e.g. 10) will be more conservative. (Default: 40)", + zh_Hans="减少生成无意义内容的可能性。较高的值(例如100)将提供更多样化的答案,而较低的值(例如10)将更为保守。(默认值:40)", ), min=1, max=100, @@ -516,7 +519,8 @@ def get_customizable_model_schema(self, model: str, credentials: dict) -> AIMode help=I18nObject( en_US="Sets how strongly to penalize repetitions. " "A higher value (e.g., 1.5) will penalize repetitions more strongly, " - "while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1)" + "while a lower value (e.g., 0.9) will be more lenient. 
(Default: 1.1)", + zh_Hans="设置对重复内容的惩罚强度。一个较高的值(例如,1.5)会更强地惩罚重复内容,而一个较低的值(例如,0.9)则会相对宽容。(默认值:1.1)", ), min=-2, max=2, @@ -524,11 +528,12 @@ def get_customizable_model_schema(self, model: str, credentials: dict) -> AIMode ParameterRule( name="num_predict", use_template="max_tokens", - label=I18nObject(en_US="Num Predict"), + label=I18nObject(en_US="Num Predict", zh_Hans="最大令牌数预测"), type=ParameterType.INT, help=I18nObject( en_US="Maximum number of tokens to predict when generating text. " - "(Default: 128, -1 = infinite generation, -2 = fill context)" + "(Default: 128, -1 = infinite generation, -2 = fill context)", + zh_Hans="生成文本时预测的最大令牌数。(默认值:128,-1 = 无限生成,-2 = 填充上下文)", ), default=(512 if int(credentials.get("max_tokens", 4096)) >= 768 else 128), min=-2, @@ -536,121 +541,137 @@ def get_customizable_model_schema(self, model: str, credentials: dict) -> AIMode ), ParameterRule( name="mirostat", - label=I18nObject(en_US="Mirostat sampling"), + label=I18nObject(en_US="Mirostat sampling", zh_Hans="Mirostat 采样"), type=ParameterType.INT, help=I18nObject( en_US="Enable Mirostat sampling for controlling perplexity. " - "(default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0)" + "(default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0)", + zh_Hans="启用 Mirostat 采样以控制困惑度。" + "(默认值:0,0 = 禁用,1 = Mirostat,2 = Mirostat 2.0)", ), min=0, max=2, ), ParameterRule( name="mirostat_eta", - label=I18nObject(en_US="Mirostat Eta"), + label=I18nObject(en_US="Mirostat Eta", zh_Hans="学习率"), type=ParameterType.FLOAT, help=I18nObject( en_US="Influences how quickly the algorithm responds to feedback from " "the generated text. A lower learning rate will result in slower adjustments, " "while a higher learning rate will make the algorithm more responsive. 
" - "(Default: 0.1)" + "(Default: 0.1)", + zh_Hans="影响算法对生成文本反馈响应的速度。较低的学习率会导致调整速度变慢,而较高的学习率会使得算法更加灵敏。(默认值:0.1)", ), precision=1, ), ParameterRule( name="mirostat_tau", - label=I18nObject(en_US="Mirostat Tau"), + label=I18nObject(en_US="Mirostat Tau", zh_Hans="文本连贯度"), type=ParameterType.FLOAT, help=I18nObject( en_US="Controls the balance between coherence and diversity of the output. " - "A lower value will result in more focused and coherent text. (Default: 5.0)" + "A lower value will result in more focused and coherent text. (Default: 5.0)", + zh_Hans="控制输出的连贯性和多样性之间的平衡。较低的值会导致更专注和连贯的文本。(默认值:5.0)", ), precision=1, ), ParameterRule( name="num_ctx", - label=I18nObject(en_US="Size of context window"), + label=I18nObject(en_US="Size of context window", zh_Hans="上下文窗口大小"), type=ParameterType.INT, help=I18nObject( - en_US="Sets the size of the context window used to generate the next token. (Default: 2048)" + en_US="Sets the size of the context window used to generate the next token. (Default: 2048)", + zh_Hans="设置用于生成下一个标记的上下文窗口大小。(默认值:2048)", ), default=2048, min=1, ), ParameterRule( name="num_gpu", - label=I18nObject(en_US="GPU Layers"), + label=I18nObject(en_US="GPU Layers", zh_Hans="GPU 层数"), type=ParameterType.INT, help=I18nObject( en_US="The number of layers to offload to the GPU(s). " "On macOS it defaults to 1 to enable metal support, 0 to disable." "As long as a model fits into one gpu it stays in one. " - "It does not set the number of GPU(s). " + "It does not set the number of GPU(s). ", + zh_Hans="加载到 GPU 的层数。在 macOS 上,默认为 1 以启用 Metal 支持,设置为 0 则禁用。" + "只要模型适合一个 GPU,它就保留在其中。它不设置 GPU 的数量。", ), min=-1, default=1, ), ParameterRule( name="num_thread", - label=I18nObject(en_US="Num Thread"), + label=I18nObject(en_US="Num Thread", zh_Hans="线程数"), type=ParameterType.INT, help=I18nObject( en_US="Sets the number of threads to use during computation. " "By default, Ollama will detect this for optimal performance. 
" "It is recommended to set this value to the number of physical CPU cores " - "your system has (as opposed to the logical number of cores)." + "your system has (as opposed to the logical number of cores).", + zh_Hans="设置计算过程中使用的线程数。默认情况下,Ollama会检测以获得最佳性能。建议将此值设置为系统拥有的物理CPU核心数(而不是逻辑核心数)。", ), min=1, ), ParameterRule( name="repeat_last_n", - label=I18nObject(en_US="Repeat last N"), + label=I18nObject(en_US="Repeat last N", zh_Hans="回溯内容"), type=ParameterType.INT, help=I18nObject( en_US="Sets how far back for the model to look back to prevent repetition. " - "(Default: 64, 0 = disabled, -1 = num_ctx)" + "(Default: 64, 0 = disabled, -1 = num_ctx)", + zh_Hans="设置模型回溯多远的内容以防止重复。(默认值:64,0 = 禁用,-1 = num_ctx)", ), min=-1, ), ParameterRule( name="tfs_z", - label=I18nObject(en_US="TFS Z"), + label=I18nObject(en_US="TFS Z", zh_Hans="减少标记影响"), type=ParameterType.FLOAT, help=I18nObject( en_US="Tail free sampling is used to reduce the impact of less probable tokens " "from the output. A higher value (e.g., 2.0) will reduce the impact more, " - "while a value of 1.0 disables this setting. (default: 1)" + "while a value of 1.0 disables this setting. (default: 1)", + zh_Hans="用于减少输出中不太可能的标记的影响。较高的值(例如,2.0)会更多地减少这种影响,而1.0的值则会禁用此设置。(默认值:1)", ), precision=1, ), ParameterRule( name="seed", - label=I18nObject(en_US="Seed"), + label=I18nObject(en_US="Seed", zh_Hans="随机数种子"), type=ParameterType.INT, help=I18nObject( en_US="Sets the random number seed to use for generation. Setting this to " "a specific number will make the model generate the same text for " - "the same prompt. (Default: 0)" + "the same prompt. (Default: 0)", + zh_Hans="设置用于生成的随机数种子。将此设置为特定数字将使模型对相同的提示生成相同的文本。(默认值:0)", ), ), ParameterRule( name="keep_alive", - label=I18nObject(en_US="Keep Alive"), + label=I18nObject(en_US="Keep Alive", zh_Hans="模型存活时间"), type=ParameterType.STRING, help=I18nObject( en_US="Sets how long the model is kept in memory after generating a response. 
" "This must be a duration string with a unit (e.g., '10m' for 10 minutes or '24h' for 24 hours)." " A negative number keeps the model loaded indefinitely, and '0' unloads the model" " immediately after generating a response." - " Valid time units are 's','m','h'. (Default: 5m)" + " Valid time units are 's','m','h'. (Default: 5m)", + zh_Hans="设置模型在生成响应后在内存中保留的时间。" + "这必须是一个带有单位的持续时间字符串(例如,'10m' 表示10分钟,'24h' 表示24小时)。" + "负数表示无限期地保留模型,'0'表示在生成响应后立即卸载模型。" + "有效的时间单位有 's'(秒)、'm'(分钟)、'h'(小时)。(默认值:5m)", ), ), ParameterRule( name="format", - label=I18nObject(en_US="Format"), + label=I18nObject(en_US="Format", zh_Hans="返回格式"), type=ParameterType.STRING, help=I18nObject( - en_US="the format to return a response in. Currently the only accepted value is json." + en_US="the format to return a response in. Currently the only accepted value is json.", + zh_Hans="返回响应的格式。目前唯一接受的值是json。", ), options=["json"], ), diff --git a/api/core/model_runtime/model_providers/openai_api_compatible/llm/llm.py b/api/core/model_runtime/model_providers/openai_api_compatible/llm/llm.py index 5a8a754f72422..c2ffe653c8b70 100644 --- a/api/core/model_runtime/model_providers/openai_api_compatible/llm/llm.py +++ b/api/core/model_runtime/model_providers/openai_api_compatible/llm/llm.py @@ -205,7 +205,13 @@ def get_customizable_model_schema(self, model: str, credentials: dict) -> AIMode parameter_rules=[ ParameterRule( name=DefaultParameterName.TEMPERATURE.value, - label=I18nObject(en_US="Temperature"), + label=I18nObject(en_US="Temperature", zh_Hans="温度"), + help=I18nObject( + en_US="Kernel sampling threshold. Used to determine the randomness of the results." + "The higher the value, the stronger the randomness." 
+ "The higher the possibility of getting different answers to the same question.", + zh_Hans="核采样阈值。用于决定结果随机性,取值越高随机性越强即相同的问题得到的不同答案的可能性越高。", + ), type=ParameterType.FLOAT, default=float(credentials.get("temperature", 0.7)), min=0, @@ -214,7 +220,13 @@ def get_customizable_model_schema(self, model: str, credentials: dict) -> AIMode ), ParameterRule( name=DefaultParameterName.TOP_P.value, - label=I18nObject(en_US="Top P"), + label=I18nObject(en_US="Top P", zh_Hans="Top P"), + help=I18nObject( + en_US="The probability threshold of the nucleus sampling method during the generation process." + "The larger the value is, the higher the randomness of generation will be." + "The smaller the value is, the higher the certainty of generation will be.", + zh_Hans="生成过程中核采样方法概率阈值。取值越大,生成的随机性越高;取值越小,生成的确定性越高。", + ), type=ParameterType.FLOAT, default=float(credentials.get("top_p", 1)), min=0, @@ -223,7 +235,12 @@ def get_customizable_model_schema(self, model: str, credentials: dict) -> AIMode ), ParameterRule( name=DefaultParameterName.FREQUENCY_PENALTY.value, - label=I18nObject(en_US="Frequency Penalty"), + label=I18nObject(en_US="Frequency Penalty", zh_Hans="频率惩罚"), + help=I18nObject( + en_US="For controlling the repetition rate of words used by the model." + "Increasing this can reduce the repetition of the same words in the model's output.", + zh_Hans="用于控制模型已使用字词的重复率。 提高此项可以降低模型在输出中重复相同字词的重复度。", + ), type=ParameterType.FLOAT, default=float(credentials.get("frequency_penalty", 0)), min=-2, @@ -231,7 +248,12 @@ def get_customizable_model_schema(self, model: str, credentials: dict) -> AIMode ), ParameterRule( name=DefaultParameterName.PRESENCE_PENALTY.value, - label=I18nObject(en_US="Presence Penalty"), + label=I18nObject(en_US="Presence Penalty", zh_Hans="存在惩罚"), + help=I18nObject( + en_US="Used to control the repetition rate when generating models." 
+ "Increasing this can reduce the repetition rate of model generation.", + zh_Hans="用于控制模型生成时的重复度。提高此项可以降低模型生成的重复度。", + ), type=ParameterType.FLOAT, default=float(credentials.get("presence_penalty", 0)), min=-2, @@ -239,7 +261,10 @@ def get_customizable_model_schema(self, model: str, credentials: dict) -> AIMode ), ParameterRule( name=DefaultParameterName.MAX_TOKENS.value, - label=I18nObject(en_US="Max Tokens"), + label=I18nObject(en_US="Max Tokens", zh_Hans="最大标记"), + help=I18nObject( + en_US="Maximum length of tokens for the model response.", zh_Hans="模型回答的tokens的最大长度。" + ), type=ParameterType.INT, default=512, min=1, From 6c2fa8defc8a2c6a66be45ade7b13e2388fb4b69 Mon Sep 17 00:00:00 2001 From: HJY <1398145450@qq.com> Date: Sun, 22 Sep 2024 10:14:43 +0800 Subject: [PATCH 018/235] fix: form input add tabIndex (#8478) --- web/app/activate/activateForm.tsx | 2 ++ web/app/signin/normalForm.tsx | 2 ++ 2 files changed, 4 insertions(+) diff --git a/web/app/activate/activateForm.tsx b/web/app/activate/activateForm.tsx index 3b1eed6f09a5d..8e9691b35476d 100644 --- a/web/app/activate/activateForm.tsx +++ b/web/app/activate/activateForm.tsx @@ -143,6 +143,7 @@ const ActivateForm = () => { onChange={e => setName(e.target.value)} placeholder={t('login.namePlaceholder') || ''} className={'appearance-none block w-full rounded-lg pl-[14px] px-3 py-2 border border-gray-200 hover:border-gray-300 hover:shadow-sm focus:outline-none focus:ring-primary-500 focus:border-primary-500 placeholder-gray-400 caret-primary-600 sm:text-sm pr-10'} + tabIndex={1} />
@@ -159,6 +160,7 @@ const ActivateForm = () => { onChange={e => setPassword(e.target.value)} placeholder={t('login.passwordPlaceholder') || ''} className={'appearance-none block w-full rounded-lg pl-[14px] px-3 py-2 border border-gray-200 hover:border-gray-300 hover:shadow-sm focus:outline-none focus:ring-primary-500 focus:border-primary-500 placeholder-gray-400 caret-primary-600 sm:text-sm pr-10'} + tabIndex={2} />
{t('login.error.passwordInvalid')}
diff --git a/web/app/signin/normalForm.tsx b/web/app/signin/normalForm.tsx index 7f23c7d22e5ff..816df8007d9e7 100644 --- a/web/app/signin/normalForm.tsx +++ b/web/app/signin/normalForm.tsx @@ -217,6 +217,7 @@ const NormalForm = () => { autoComplete="email" placeholder={t('login.emailPlaceholder') || ''} className={'appearance-none block w-full rounded-lg pl-[14px] px-3 py-2 border border-gray-200 hover:border-gray-300 hover:shadow-sm focus:outline-none focus:ring-primary-500 focus:border-primary-500 placeholder-gray-400 caret-primary-600 sm:text-sm'} + tabIndex={1} /> @@ -241,6 +242,7 @@ const NormalForm = () => { autoComplete="current-password" placeholder={t('login.passwordPlaceholder') || ''} className={'appearance-none block w-full rounded-lg pl-[14px] px-3 py-2 border border-gray-200 hover:border-gray-300 hover:shadow-sm focus:outline-none focus:ring-primary-500 focus:border-primary-500 placeholder-gray-400 caret-primary-600 sm:text-sm pr-10'} + tabIndex={2} />
+ +
+ + + { + selectedProvider === DataSourceProvider.fireCrawl + ? sources.find(source => source.provider === DataSourceProvider.fireCrawl) + ? ( + + ) + : ( + + ) + : sources.find(source => source.provider === DataSourceProvider.jinaReader) + ? ( + + ) + : ( + + ) + } ) } diff --git a/web/app/components/datasets/create/website/jina-reader/header.tsx b/web/app/components/datasets/create/website/jina-reader/header.tsx new file mode 100644 index 0000000000000..85014a30ee2b1 --- /dev/null +++ b/web/app/components/datasets/create/website/jina-reader/header.tsx @@ -0,0 +1,42 @@ +'use client' +import type { FC } from 'react' +import React from 'react' +import { useTranslation } from 'react-i18next' +import { Settings01 } from '@/app/components/base/icons/src/vender/line/general' +import { BookOpen01 } from '@/app/components/base/icons/src/vender/line/education' + +const I18N_PREFIX = 'datasetCreation.stepOne.website' + +type Props = { + onSetting: () => void +} + +const Header: FC = ({ + onSetting, +}) => { + const { t } = useTranslation() + + return ( + + ) +} +export default React.memo(Header) diff --git a/web/app/components/datasets/create/website/jina-reader/index.tsx b/web/app/components/datasets/create/website/jina-reader/index.tsx new file mode 100644 index 0000000000000..51d77d712140b --- /dev/null +++ b/web/app/components/datasets/create/website/jina-reader/index.tsx @@ -0,0 +1,232 @@ +'use client' +import type { FC } from 'react' +import React, { useCallback, useEffect, useState } from 'react' +import { useTranslation } from 'react-i18next' +import UrlInput from '../base/url-input' +import OptionsWrap from '../base/options-wrap' +import CrawledResult from '../base/crawled-result' +import Crawling from '../base/crawling' +import ErrorMessage from '../base/error-message' +import Header from './header' +import Options from './options' +import cn from '@/utils/classnames' +import { useModalContext } from '@/context/modal-context' +import Toast from 
'@/app/components/base/toast' +import { checkJinaReaderTaskStatus, createJinaReaderTask } from '@/service/datasets' +import { sleep } from '@/utils' +import type { CrawlOptions, CrawlResultItem } from '@/models/datasets' + +const ERROR_I18N_PREFIX = 'common.errorMsg' +const I18N_PREFIX = 'datasetCreation.stepOne.website' + +type Props = { + onPreview: (payload: CrawlResultItem) => void + checkedCrawlResult: CrawlResultItem[] + onCheckedCrawlResultChange: (payload: CrawlResultItem[]) => void + onJobIdChange: (jobId: string) => void + crawlOptions: CrawlOptions + onCrawlOptionsChange: (payload: CrawlOptions) => void +} + +enum Step { + init = 'init', + running = 'running', + finished = 'finished', +} + +const JinaReader: FC = ({ + onPreview, + checkedCrawlResult, + onCheckedCrawlResultChange, + onJobIdChange, + crawlOptions, + onCrawlOptionsChange, +}) => { + const { t } = useTranslation() + const [step, setStep] = useState(Step.init) + const [controlFoldOptions, setControlFoldOptions] = useState(0) + useEffect(() => { + if (step !== Step.init) + setControlFoldOptions(Date.now()) + }, [step]) + const { setShowAccountSettingModal } = useModalContext() + const handleSetting = useCallback(() => { + setShowAccountSettingModal({ + payload: 'data-source', + }) + }, [setShowAccountSettingModal]) + + const checkValid = useCallback((url: string) => { + let errorMsg = '' + if (!url) { + errorMsg = t(`${ERROR_I18N_PREFIX}.fieldRequired`, { + field: 'url', + }) + } + + if (!errorMsg && !((url.startsWith('http://') || url.startsWith('https://')))) + errorMsg = t(`${ERROR_I18N_PREFIX}.urlError`) + + if (!errorMsg && (crawlOptions.limit === null || crawlOptions.limit === undefined || crawlOptions.limit === '')) { + errorMsg = t(`${ERROR_I18N_PREFIX}.fieldRequired`, { + field: t(`${I18N_PREFIX}.limit`), + }) + } + + return { + isValid: !errorMsg, + errorMsg, + } + }, [crawlOptions, t]) + + const isInit = step === Step.init + const isCrawlFinished = step === Step.finished + const 
isRunning = step === Step.running + const [crawlResult, setCrawlResult] = useState<{ + current: number + total: number + data: CrawlResultItem[] + time_consuming: number | string + } | undefined>(undefined) + const [crawlErrorMessage, setCrawlErrorMessage] = useState('') + const showError = isCrawlFinished && crawlErrorMessage + + const waitForCrawlFinished = useCallback(async (jobId: string) => { + try { + const res = await checkJinaReaderTaskStatus(jobId) as any + console.log('res', res) + if (res.status === 'completed') { + return { + isError: false, + data: { + ...res, + total: Math.min(res.total, parseFloat(crawlOptions.limit as string)), + }, + } + } + if (res.status === 'failed' || !res.status) { + return { + isError: true, + errorMessage: res.message, + data: { + data: [], + }, + } + } + // update the progress + setCrawlResult({ + ...res, + total: Math.min(res.total, parseFloat(crawlOptions.limit as string)), + }) + onCheckedCrawlResultChange(res.data || []) // default select the crawl result + await sleep(2500) + return await waitForCrawlFinished(jobId) + } + catch (e: any) { + const errorBody = await e.json() + return { + isError: true, + errorMessage: errorBody.message, + data: { + data: [], + }, + } + } + }, [crawlOptions.limit]) + + const handleRun = useCallback(async (url: string) => { + const { isValid, errorMsg } = checkValid(url) + if (!isValid) { + Toast.notify({ + message: errorMsg!, + type: 'error', + }) + return + } + setStep(Step.running) + try { + const startTime = Date.now() + const res = await createJinaReaderTask({ + url, + options: crawlOptions, + }) as any + + if (res.data) { + const data = { + current: 1, + total: 1, + data: [{ + title: res.data.title, + markdown: res.data.content, + description: res.data.description, + source_url: res.data.url, + }], + time_consuming: (Date.now() - startTime) / 1000, + } + setCrawlResult(data) + onCheckedCrawlResultChange(data.data || []) + setCrawlErrorMessage('') + } + else if (res.job_id) { + const 
jobId = res.job_id + onJobIdChange(jobId) + const { isError, data, errorMessage } = await waitForCrawlFinished(jobId) + if (isError) { + setCrawlErrorMessage(errorMessage || t(`${I18N_PREFIX}.unknownError`)) + } + else { + setCrawlResult(data) + onCheckedCrawlResultChange(data.data || []) // default select the crawl result + setCrawlErrorMessage('') + } + } + } + catch (e) { + setCrawlErrorMessage(t(`${I18N_PREFIX}.unknownError`)!) + console.log(e) + } + finally { + setStep(Step.finished) + } + }, [checkValid, crawlOptions, onJobIdChange, t, waitForCrawlFinished]) + + return ( +
+
+
+ + + + + + {!isInit && ( +
+ {isRunning + && } + {showError && ( + + )} + {isCrawlFinished && !showError + && + } +
+ )} +
+
+ ) +} +export default React.memo(JinaReader) diff --git a/web/app/components/datasets/create/website/jina-reader/options.tsx b/web/app/components/datasets/create/website/jina-reader/options.tsx new file mode 100644 index 0000000000000..52cfaa8b3b40f --- /dev/null +++ b/web/app/components/datasets/create/website/jina-reader/options.tsx @@ -0,0 +1,59 @@ +'use client' +import type { FC } from 'react' +import React, { useCallback } from 'react' +import { useTranslation } from 'react-i18next' +import CheckboxWithLabel from '../base/checkbox-with-label' +import Field from '../base/field' +import cn from '@/utils/classnames' +import type { CrawlOptions } from '@/models/datasets' + +const I18N_PREFIX = 'datasetCreation.stepOne.website' + +type Props = { + className?: string + payload: CrawlOptions + onChange: (payload: CrawlOptions) => void +} + +const Options: FC = ({ + className = '', + payload, + onChange, +}) => { + const { t } = useTranslation() + + const handleChange = useCallback((key: keyof CrawlOptions) => { + return (value: any) => { + onChange({ + ...payload, + [key]: value, + }) + } + }, [payload, onChange]) + return ( +
+ + +
+ +
+
+ ) +} +export default React.memo(Options) diff --git a/web/app/components/datasets/create/website/no-data.tsx b/web/app/components/datasets/create/website/no-data.tsx index 13e5ee7dfbd50..8a508a48c6bb8 100644 --- a/web/app/components/datasets/create/website/no-data.tsx +++ b/web/app/components/datasets/create/website/no-data.tsx @@ -2,35 +2,56 @@ import type { FC } from 'react' import React from 'react' import { useTranslation } from 'react-i18next' +import s from './index.module.css' import { Icon3Dots } from '@/app/components/base/icons/src/vender/line/others' import Button from '@/app/components/base/button' +import { DataSourceProvider } from '@/models/common' const I18N_PREFIX = 'datasetCreation.stepOne.website' type Props = { onConfig: () => void + provider: DataSourceProvider } const NoData: FC = ({ onConfig, + provider, }) => { const { t } = useTranslation() + const providerConfig = { + [DataSourceProvider.jinaReader]: { + emoji: , + title: t(`${I18N_PREFIX}.jinaReaderNotConfigured`), + description: t(`${I18N_PREFIX}.jinaReaderNotConfiguredDescription`), + }, + [DataSourceProvider.fireCrawl]: { + emoji: '🔥', + title: t(`${I18N_PREFIX}.fireCrawlNotConfigured`), + description: t(`${I18N_PREFIX}.fireCrawlNotConfiguredDescription`), + }, + } + + const currentProvider = providerConfig[provider] + return ( -
-
- 🔥 -
-
- {t(`${I18N_PREFIX}.fireCrawlNotConfigured`)} -
- {t(`${I18N_PREFIX}.fireCrawlNotConfiguredDescription`)} + <> +
+
+ {currentProvider.emoji} +
+
+ {currentProvider.title} +
+ {currentProvider.description} +
+
- -
+ ) } export default React.memo(NoData) diff --git a/web/app/components/header/account-setting/data-source-page/data-source-website/config-firecrawl-modal.tsx b/web/app/components/header/account-setting/data-source-page/data-source-website/config-firecrawl-modal.tsx index d68fc79b0d5a7..a4a8b9b63722b 100644 --- a/web/app/components/header/account-setting/data-source-page/data-source-website/config-firecrawl-modal.tsx +++ b/web/app/components/header/account-setting/data-source-page/data-source-website/config-firecrawl-modal.tsx @@ -9,7 +9,7 @@ import { import { Lock01 } from '@/app/components/base/icons/src/vender/solid/security' import Button from '@/app/components/base/button' import type { FirecrawlConfig } from '@/models/common' -import Field from '@/app/components/datasets/create/website/firecrawl/base/field' +import Field from '@/app/components/datasets/create/website/base/field' import Toast from '@/app/components/base/toast' import { createDataSourceApiKeyBinding } from '@/service/datasets' import { LinkExternal02 } from '@/app/components/base/icons/src/vender/line/general' diff --git a/web/app/components/header/account-setting/data-source-page/data-source-website/config-jina-reader-modal.tsx b/web/app/components/header/account-setting/data-source-page/data-source-website/config-jina-reader-modal.tsx new file mode 100644 index 0000000000000..c6d6ad02565cb --- /dev/null +++ b/web/app/components/header/account-setting/data-source-page/data-source-website/config-jina-reader-modal.tsx @@ -0,0 +1,140 @@ +'use client' +import type { FC } from 'react' +import React, { useCallback, useState } from 'react' +import { useTranslation } from 'react-i18next' +import { + PortalToFollowElem, + PortalToFollowElemContent, +} from '@/app/components/base/portal-to-follow-elem' +import { Lock01 } from '@/app/components/base/icons/src/vender/solid/security' +import Button from '@/app/components/base/button' +import { DataSourceProvider } from '@/models/common' +import Field from 
'@/app/components/datasets/create/website/base/field' +import Toast from '@/app/components/base/toast' +import { createDataSourceApiKeyBinding } from '@/service/datasets' +import { LinkExternal02 } from '@/app/components/base/icons/src/vender/line/general' +type Props = { + onCancel: () => void + onSaved: () => void +} + +const I18N_PREFIX = 'datasetCreation.jinaReader' + +const ConfigJinaReaderModal: FC = ({ + onCancel, + onSaved, +}) => { + const { t } = useTranslation() + const [isSaving, setIsSaving] = useState(false) + const [apiKey, setApiKey] = useState('') + + const handleSave = useCallback(async () => { + if (isSaving) + return + let errorMsg = '' + if (!errorMsg) { + if (!apiKey) { + errorMsg = t('common.errorMsg.fieldRequired', { + field: 'API Key', + }) + } + } + + if (errorMsg) { + Toast.notify({ + type: 'error', + message: errorMsg, + }) + return + } + const postData = { + category: 'website', + provider: DataSourceProvider.jinaReader, + credentials: { + auth_type: 'bearer', + config: { + api_key: apiKey, + }, + }, + } + try { + setIsSaving(true) + await createDataSourceApiKeyBinding(postData) + Toast.notify({ + type: 'success', + message: t('common.api.success'), + }) + } + finally { + setIsSaving(false) + } + + onSaved() + }, [apiKey, onSaved, t, isSaving]) + + return ( + + +
+
+
+
+
{t(`${I18N_PREFIX}.configJinaReader`)}
+
+ +
+ setApiKey(value as string)} + placeholder={t(`${I18N_PREFIX}.apiKeyPlaceholder`)!} + /> +
+
+ + {t(`${I18N_PREFIX}.getApiKeyLinkText`)} + + +
+ + +
+ +
+
+
+
+ + {t('common.modelProvider.encrypted.front')} + + PKCS1_OAEP + + {t('common.modelProvider.encrypted.back')} +
+
+
+
+
+
+ ) +} +export default React.memo(ConfigJinaReaderModal) diff --git a/web/app/components/header/account-setting/data-source-page/data-source-website/index.tsx b/web/app/components/header/account-setting/data-source-page/data-source-website/index.tsx index 21f7660ef1dd1..628510c5dd387 100644 --- a/web/app/components/header/account-setting/data-source-page/data-source-website/index.tsx +++ b/web/app/components/header/account-setting/data-source-page/data-source-website/index.tsx @@ -2,11 +2,12 @@ import type { FC } from 'react' import React, { useCallback, useEffect, useState } from 'react' import { useTranslation } from 'react-i18next' -import { useBoolean } from 'ahooks' import Panel from '../panel' import { DataSourceType } from '../panel/types' import ConfigFirecrawlModal from './config-firecrawl-modal' +import ConfigJinaReaderModal from './config-jina-reader-modal' import cn from '@/utils/classnames' +import s from '@/app/components/datasets/create/website/index.module.css' import { fetchDataSources, removeDataSourceApiKeyBinding } from '@/service/datasets' import type { @@ -19,9 +20,11 @@ import { } from '@/models/common' import Toast from '@/app/components/base/toast' -type Props = {} +type Props = { + provider: DataSourceProvider +} -const DataSourceWebsite: FC = () => { +const DataSourceWebsite: FC = ({ provider }) => { const { t } = useTranslation() const { isCurrentWorkspaceManager } = useAppContext() const [sources, setSources] = useState([]) @@ -36,22 +39,26 @@ const DataSourceWebsite: FC = () => { // eslint-disable-next-line react-hooks/exhaustive-deps }, []) - const [isShowConfig, { - setTrue: showConfig, - setFalse: hideConfig, - }] = useBoolean(false) + const [configTarget, setConfigTarget] = useState(null) + const showConfig = useCallback((provider: DataSourceProvider) => { + setConfigTarget(provider) + }, [setConfigTarget]) + + const hideConfig = useCallback(() => { + setConfigTarget(null) + }, [setConfigTarget]) const handleAdded = useCallback(() 
=> { checkSetApiKey() hideConfig() }, [checkSetApiKey, hideConfig]) - const getIdByProvider = (provider: string): string | undefined => { + const getIdByProvider = (provider: DataSourceProvider): string | undefined => { const source = sources.find(item => item.provider === provider) return source?.id } - const handleRemove = useCallback((provider: string) => { + const handleRemove = useCallback((provider: DataSourceProvider) => { return async () => { const dataSourceId = getIdByProvider(provider) if (dataSourceId) { @@ -69,22 +76,34 @@ const DataSourceWebsite: FC = () => { <> 0} - onConfigure={showConfig} + provider={provider} + isConfigured={sources.find(item => item.provider === provider) !== undefined} + onConfigure={() => showConfig(provider)} readOnly={!isCurrentWorkspaceManager} - configuredList={sources.map(item => ({ + configuredList={sources.filter(item => item.provider === provider).map(item => ({ id: item.id, logo: ({ className }: { className: string }) => ( -
🔥
+ item.provider === DataSourceProvider.fireCrawl + ? ( +
🔥
+ ) + : ( +
+ +
+ ) ), - name: 'Firecrawl', + name: item.provider === DataSourceProvider.fireCrawl ? 'Firecrawl' : 'Jina Reader', isActive: true, }))} - onRemove={handleRemove(DataSourceProvider.fireCrawl)} + onRemove={handleRemove(provider)} /> - {isShowConfig && ( + {configTarget === DataSourceProvider.fireCrawl && ( )} + {configTarget === DataSourceProvider.jinaReader && ( + + )} ) diff --git a/web/app/components/header/account-setting/data-source-page/index.tsx b/web/app/components/header/account-setting/data-source-page/index.tsx index ede83152b223e..c3da977ca4e20 100644 --- a/web/app/components/header/account-setting/data-source-page/index.tsx +++ b/web/app/components/header/account-setting/data-source-page/index.tsx @@ -3,6 +3,7 @@ import { useTranslation } from 'react-i18next' import DataSourceNotion from './data-source-notion' import DataSourceWebsite from './data-source-website' import { fetchDataSource } from '@/service/common' +import { DataSourceProvider } from '@/models/common' export default function DataSourcePage() { const { t } = useTranslation() @@ -13,7 +14,8 @@ export default function DataSourcePage() {
{t('common.dataSource.add')}
- + +
) } diff --git a/web/app/components/header/account-setting/data-source-page/panel/index.tsx b/web/app/components/header/account-setting/data-source-page/panel/index.tsx index 988aedcaf7476..4a810020b440e 100644 --- a/web/app/components/header/account-setting/data-source-page/panel/index.tsx +++ b/web/app/components/header/account-setting/data-source-page/panel/index.tsx @@ -8,10 +8,12 @@ import ConfigItem from './config-item' import s from './style.module.css' import { DataSourceType } from './types' +import { DataSourceProvider } from '@/models/common' import cn from '@/utils/classnames' type Props = { type: DataSourceType + provider: DataSourceProvider isConfigured: boolean onConfigure: () => void readOnly: boolean @@ -25,6 +27,7 @@ type Props = { const Panel: FC = ({ type, + provider, isConfigured, onConfigure, readOnly, @@ -46,7 +49,7 @@ const Panel: FC = ({
{t(`common.dataSource.${type}.title`)}
{isWebsite && (
- {t('common.dataSource.website.with')} 🔥 Firecrawl + {t('common.dataSource.website.with')} { provider === DataSourceProvider.fireCrawl ? '🔥 Firecrawl' : 'Jina Reader'}
)}
diff --git a/web/i18n/en-US/dataset-creation.ts b/web/i18n/en-US/dataset-creation.ts index 32f9d596ca6ac..1849b12757f9d 100644 --- a/web/i18n/en-US/dataset-creation.ts +++ b/web/i18n/en-US/dataset-creation.ts @@ -16,6 +16,11 @@ const translation = { apiKeyPlaceholder: 'API key from firecrawl.dev', getApiKeyLinkText: 'Get your API key from firecrawl.dev', }, + jinaReader: { + configJinaReader: 'Configure Jina Reader', + apiKeyPlaceholder: 'API key from jina.ai', + getApiKeyLinkText: 'Get your free API key at jina.ai', + }, stepOne: { filePreview: 'File Preview', pagePreview: 'Page Preview', @@ -56,13 +61,21 @@ const translation = { failed: 'Creation failed', }, website: { + chooseProvider: 'Select a provider', fireCrawlNotConfigured: 'Firecrawl is not configured', fireCrawlNotConfiguredDescription: 'Configure Firecrawl with API key to use it.', + jinaReaderNotConfigured: 'Jina Reader is not configured', + jinaReaderNotConfiguredDescription: 'Set up Jina Reader by entering your free API key for access.', configure: 'Configure', run: 'Run', firecrawlTitle: 'Extract web content with 🔥Firecrawl', firecrawlDoc: 'Firecrawl docs', firecrawlDocLink: 'https://docs.dify.ai/guides/knowledge-base/sync-from-website', + jinaReaderTitle: 'Convert the entire site to Markdown', + jinaReaderDoc: 'Learn more about Jina Reader', + jinaReaderDocLink: 'https://jina.ai/reader', + useSitemap: 'Use sitemap', + useSitemapTooltip: 'Follow the sitemap to crawl the site. 
If not, Jina Reader will crawl iteratively based on page relevance, yielding fewer but higher-quality pages.', options: 'Options', crawlSubPage: 'Crawl sub-pages', limit: 'Limit', @@ -70,7 +83,7 @@ const translation = { excludePaths: 'Exclude paths', includeOnlyPaths: 'Include only paths', extractOnlyMainContent: 'Extract only main content (no headers, navs, footers, etc.)', - exceptionErrorTitle: 'An exception occurred while running Firecrawl job:', + exceptionErrorTitle: 'An exception occurred while running crawling job:', unknownError: 'Unknown error', totalPageScraped: 'Total pages scraped:', selectAll: 'Select All', diff --git a/web/i18n/zh-Hans/dataset-creation.ts b/web/i18n/zh-Hans/dataset-creation.ts index 78f5170791877..4f6786a1919b0 100644 --- a/web/i18n/zh-Hans/dataset-creation.ts +++ b/web/i18n/zh-Hans/dataset-creation.ts @@ -16,6 +16,11 @@ const translation = { apiKeyPlaceholder: '从 firecrawl.dev 获取 API Key', getApiKeyLinkText: '从 firecrawl.dev 获取您的 API Key', }, + jinaReader: { + configJinaReader: '配置 Jina Reader', + apiKeyPlaceholder: '从 jina.ai 获取 API Key', + getApiKeyLinkText: '从 jina.ai 获取您的免费 API Key', + }, stepOne: { filePreview: '文件预览', pagePreview: '页面预览', @@ -56,13 +61,21 @@ const translation = { failed: '创建失败', }, website: { + chooseProvider: '选择工具', fireCrawlNotConfigured: 'Firecrawl 未配置', fireCrawlNotConfiguredDescription: '请配置 Firecrawl 的 API 密钥以使用它。', + jinaReaderNotConfigured: 'Jina Reader 未配置', + jinaReaderNotConfiguredDescription: '请配置 Jina Reader 的免费 API 密钥以访问它。', configure: '配置', run: '运行', firecrawlTitle: '使用 🔥Firecrawl 提取网页内容', firecrawlDoc: 'Firecrawl 文档', firecrawlDocLink: 'https://docs.dify.ai/v/zh-hans/guides/knowledge-base/sync-from-website', + jinaReaderTitle: '将整个站点内容转换为 Markdown 格式', + jinaReaderDoc: '了解更多关于 Jina Reader', + jinaReaderDocLink: 'https://jina.ai/reader', + useSitemap: '使用 sitemap', + useSitemapTooltip: '根据 sitemap 爬取站点。否则,Jina Reader 将基于页面相关性迭代爬取,抓取较少的页面,但质量更高。', options: '选项', crawlSubPage: '爬取子页面', limit: 
'限制数量', @@ -70,7 +83,7 @@ const translation = { excludePaths: '排除路径', includeOnlyPaths: '仅包含路径', extractOnlyMainContent: '仅提取主要内容(无标题、导航、页脚等)', - exceptionErrorTitle: '运行 Firecrawl 时发生异常:', + exceptionErrorTitle: '运行时发生异常:', unknownError: '未知错误', totalPageScraped: '抓取页面总数:', selectAll: '全选', diff --git a/web/models/common.ts b/web/models/common.ts index 78f09bee09649..204e89ed9b6b7 100644 --- a/web/models/common.ts +++ b/web/models/common.ts @@ -177,6 +177,7 @@ export enum DataSourceCategory { } export enum DataSourceProvider { fireCrawl = 'firecrawl', + jinaReader = 'jinareader', } export type FirecrawlConfig = { diff --git a/web/models/datasets.ts b/web/models/datasets.ts index 23d1fe6136b85..9358f6fcb9fb4 100644 --- a/web/models/datasets.ts +++ b/web/models/datasets.ts @@ -49,6 +49,7 @@ export type CrawlOptions = { excludes: string limit: number | string max_depth: number | string + use_sitemap: boolean } export type CrawlResultItem = { diff --git a/web/service/datasets.ts b/web/service/datasets.ts index 4ca269a7d6e21..689ac7c4035f1 100644 --- a/web/service/datasets.ts +++ b/web/service/datasets.ts @@ -23,7 +23,7 @@ import type { SegmentsResponse, createDocumentResponse, } from '@/models/datasets' -import type { CommonResponse, DataSourceNotionWorkspace } from '@/models/common' +import { type CommonResponse, type DataSourceNotionWorkspace, DataSourceProvider } from '@/models/common' import type { ApiKeysListResponse, CreateApiKeyResponse, @@ -253,7 +253,7 @@ export const createFirecrawlTask: Fetcher> = return post('website/crawl', { body: { ...body, - provider: 'firecrawl', + provider: DataSourceProvider.fireCrawl, }, }) } @@ -261,7 +261,26 @@ export const createFirecrawlTask: Fetcher> = export const checkFirecrawlTaskStatus: Fetcher = (jobId: string) => { return get(`website/crawl/status/${jobId}`, { params: { - provider: 'firecrawl', + provider: DataSourceProvider.fireCrawl, + }, + }, { + silent: true, + }) +} + +export const createJinaReaderTask: Fetcher> = 
(body) => { + return post('website/crawl', { + body: { + ...body, + provider: DataSourceProvider.jinaReader, + }, + }) +} + +export const checkJinaReaderTaskStatus: Fetcher = (jobId: string) => { + return get(`website/crawl/status/${jobId}`, { + params: { + provider: 'jinareader', }, }, { silent: true, From 3af65b2f452ae765cde4916507350b8c59ea2aea Mon Sep 17 00:00:00 2001 From: -LAN- Date: Mon, 30 Sep 2024 11:12:26 +0800 Subject: [PATCH 109/235] feat(api): add version comparison logic (#8902) --- api/controllers/console/version.py | 49 +++++++++++++++++-- .../controllers/test_compare_versions.py | 38 ++++++++++++++ 2 files changed, 83 insertions(+), 4 deletions(-) create mode 100644 api/tests/unit_tests/controllers/test_compare_versions.py diff --git a/api/controllers/console/version.py b/api/controllers/console/version.py index 76adbfe6a9d6c..deda1a0d02730 100644 --- a/api/controllers/console/version.py +++ b/api/controllers/console/version.py @@ -38,11 +38,52 @@ def get(self): return result content = json.loads(response.content) - result["version"] = content["version"] - result["release_date"] = content["releaseDate"] - result["release_notes"] = content["releaseNotes"] - result["can_auto_update"] = content["canAutoUpdate"] + if _has_new_version(latest_version=content["version"], current_version=f"{args.get('current_version')}"): + result["version"] = content["version"] + result["release_date"] = content["releaseDate"] + result["release_notes"] = content["releaseNotes"] + result["can_auto_update"] = content["canAutoUpdate"] return result +def _has_new_version(*, latest_version: str, current_version: str) -> bool: + def parse_version(version: str) -> tuple: + # Split version into parts and pre-release suffix if any + parts = version.split("-") + version_parts = parts[0].split(".") + pre_release = parts[1] if len(parts) > 1 else None + + # Validate version format + if len(version_parts) != 3: + raise ValueError(f"Invalid version format: {version}") + + try: + # 
Convert version parts to integers + major, minor, patch = map(int, version_parts) + return (major, minor, patch, pre_release) + except ValueError: + raise ValueError(f"Invalid version format: {version}") + + latest = parse_version(latest_version) + current = parse_version(current_version) + + # Compare major, minor, and patch versions + for latest_part, current_part in zip(latest[:3], current[:3]): + if latest_part > current_part: + return True + elif latest_part < current_part: + return False + + # If versions are equal, check pre-release suffixes + if latest[3] is None and current[3] is not None: + return True + elif latest[3] is not None and current[3] is None: + return False + elif latest[3] is not None and current[3] is not None: + # Simple string comparison for pre-release versions + return latest[3] > current[3] + + return False + + api.add_resource(VersionApi, "/version") diff --git a/api/tests/unit_tests/controllers/test_compare_versions.py b/api/tests/unit_tests/controllers/test_compare_versions.py new file mode 100644 index 0000000000000..87902b6d44e66 --- /dev/null +++ b/api/tests/unit_tests/controllers/test_compare_versions.py @@ -0,0 +1,38 @@ +import pytest + +from controllers.console.version import _has_new_version + + +@pytest.mark.parametrize( + ("latest_version", "current_version", "expected"), + [ + ("1.0.1", "1.0.0", True), + ("1.1.0", "1.0.0", True), + ("2.0.0", "1.9.9", True), + ("1.0.0", "1.0.0", False), + ("1.0.0", "1.0.1", False), + ("1.0.0", "2.0.0", False), + ("1.0.1", "1.0.0-beta", True), + ("1.0.0", "1.0.0-alpha", True), + ("1.0.0-beta", "1.0.0-alpha", True), + ("1.0.0", "1.0.0-rc1", True), + ("1.0.0", "0.9.9", True), + ("1.0.0", "1.0.0-dev", True), + ], +) +def test_has_new_version(latest_version, current_version, expected): + assert _has_new_version(latest_version=latest_version, current_version=current_version) == expected + + +def test_has_new_version_invalid_input(): + with pytest.raises(ValueError): + 
_has_new_version(latest_version="1.0", current_version="1.0.0") + + with pytest.raises(ValueError): + _has_new_version(latest_version="1.0.0", current_version="1.0") + + with pytest.raises(ValueError): + _has_new_version(latest_version="invalid", current_version="1.0.0") + + with pytest.raises(ValueError): + _has_new_version(latest_version="1.0.0", current_version="invalid") From ada9d408ac66bc8775d0a2c72568c43666946b1a Mon Sep 17 00:00:00 2001 From: -LAN- Date: Mon, 30 Sep 2024 12:48:58 +0800 Subject: [PATCH 110/235] refactor(api/variables): VariableError as a ValueError. (#8554) --- api/core/app/segments/exc.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/core/app/segments/exc.py b/api/core/app/segments/exc.py index d15d6d500ffa4..5cf67c3baccac 100644 --- a/api/core/app/segments/exc.py +++ b/api/core/app/segments/exc.py @@ -1,2 +1,2 @@ -class VariableError(Exception): +class VariableError(ValueError): pass From 503561f464c50a9a8469614603407f8accfd025f Mon Sep 17 00:00:00 2001 From: zhuhao <37029601+hwzhuhao@users.noreply.github.com> Date: Mon, 30 Sep 2024 12:52:18 +0800 Subject: [PATCH 111/235] fix: fix the data validation consistency issue in keyword content review (#8908) --- api/core/moderation/keywords/keywords.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/api/core/moderation/keywords/keywords.py b/api/core/moderation/keywords/keywords.py index dc6a7ec5644a7..4846da8f93076 100644 --- a/api/core/moderation/keywords/keywords.py +++ b/api/core/moderation/keywords/keywords.py @@ -18,8 +18,12 @@ def validate_config(cls, tenant_id: str, config: dict) -> None: if not config.get("keywords"): raise ValueError("keywords is required") - if len(config.get("keywords")) > 1000: - raise ValueError("keywords length must be less than 1000") + if len(config.get("keywords")) > 10000: + raise ValueError("keywords length must be less than 10000") + + keywords_row_len = config["keywords"].split("\n") + if 
len(keywords_row_len) > 100: + raise ValueError("the number of rows for the keywords must be less than 100") def moderation_for_inputs(self, inputs: dict, query: str = "") -> ModerationInputsResult: flagged = False From 77aef9ff1db220dfbab5da6e48061051b98b7cbd Mon Sep 17 00:00:00 2001 From: zhuhao <37029601+hwzhuhao@users.noreply.github.com> Date: Mon, 30 Sep 2024 12:55:01 +0800 Subject: [PATCH 112/235] refactor: optimize the calculation of rerank threshold and the logic for forbidden characters in model_uid (#8879) --- .../model_providers/xinference/llm/llm.py | 3 ++- .../model_providers/xinference/rerank/rerank.py | 8 +++----- .../xinference/speech2text/speech2text.py | 3 ++- .../xinference/text_embedding/text_embedding.py | 4 ++-- .../model_providers/xinference/tts/tts.py | 4 ++-- .../model_providers/xinference/xinference_helper.py | 13 +++++++++++++ 6 files changed, 24 insertions(+), 11 deletions(-) diff --git a/api/core/model_runtime/model_providers/xinference/llm/llm.py b/api/core/model_runtime/model_providers/xinference/llm/llm.py index 286640079b02a..0c9d08679ad04 100644 --- a/api/core/model_runtime/model_providers/xinference/llm/llm.py +++ b/api/core/model_runtime/model_providers/xinference/llm/llm.py @@ -59,6 +59,7 @@ from core.model_runtime.model_providers.xinference.xinference_helper import ( XinferenceHelper, XinferenceModelExtraParameter, + validate_model_uid, ) from core.model_runtime.utils import helper @@ -114,7 +115,7 @@ def validate_credentials(self, model: str, credentials: dict) -> None: } """ try: - if "/" in credentials["model_uid"] or "?" 
in credentials["model_uid"] or "#" in credentials["model_uid"]: + if not validate_model_uid(credentials): raise CredentialsValidateFailedError("model_uid should not contain /, ?, or #") extra_param = XinferenceHelper.get_xinference_extra_parameter( diff --git a/api/core/model_runtime/model_providers/xinference/rerank/rerank.py b/api/core/model_runtime/model_providers/xinference/rerank/rerank.py index 8f18bc42d2339..6368cd76dc97b 100644 --- a/api/core/model_runtime/model_providers/xinference/rerank/rerank.py +++ b/api/core/model_runtime/model_providers/xinference/rerank/rerank.py @@ -15,6 +15,7 @@ ) from core.model_runtime.errors.validate import CredentialsValidateFailedError from core.model_runtime.model_providers.__base.rerank_model import RerankModel +from core.model_runtime.model_providers.xinference.xinference_helper import validate_model_uid class XinferenceRerankModel(RerankModel): @@ -77,10 +78,7 @@ def _invoke( ) # score threshold check - if score_threshold is not None: - if result["relevance_score"] >= score_threshold: - rerank_documents.append(rerank_document) - else: + if score_threshold is None or result["relevance_score"] >= score_threshold: rerank_documents.append(rerank_document) return RerankResult(model=model, docs=rerank_documents) @@ -94,7 +92,7 @@ def validate_credentials(self, model: str, credentials: dict) -> None: :return: """ try: - if "/" in credentials["model_uid"] or "?" 
in credentials["model_uid"] or "#" in credentials["model_uid"]: + if not validate_model_uid(credentials): raise CredentialsValidateFailedError("model_uid should not contain /, ?, or #") credentials["server_url"] = credentials["server_url"].removesuffix("/") diff --git a/api/core/model_runtime/model_providers/xinference/speech2text/speech2text.py b/api/core/model_runtime/model_providers/xinference/speech2text/speech2text.py index a6c5b8a0a571e..c5ad38391185a 100644 --- a/api/core/model_runtime/model_providers/xinference/speech2text/speech2text.py +++ b/api/core/model_runtime/model_providers/xinference/speech2text/speech2text.py @@ -14,6 +14,7 @@ ) from core.model_runtime.errors.validate import CredentialsValidateFailedError from core.model_runtime.model_providers.__base.speech2text_model import Speech2TextModel +from core.model_runtime.model_providers.xinference.xinference_helper import validate_model_uid class XinferenceSpeech2TextModel(Speech2TextModel): @@ -42,7 +43,7 @@ def validate_credentials(self, model: str, credentials: dict) -> None: :return: """ try: - if "/" in credentials["model_uid"] or "?" 
in credentials["model_uid"] or "#" in credentials["model_uid"]: + if not validate_model_uid(credentials): raise CredentialsValidateFailedError("model_uid should not contain /, ?, or #") credentials["server_url"] = credentials["server_url"].removesuffix("/") diff --git a/api/core/model_runtime/model_providers/xinference/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/xinference/text_embedding/text_embedding.py index 16272391320d5..ddc21b365c1b7 100644 --- a/api/core/model_runtime/model_providers/xinference/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/xinference/text_embedding/text_embedding.py @@ -17,7 +17,7 @@ ) from core.model_runtime.errors.validate import CredentialsValidateFailedError from core.model_runtime.model_providers.__base.text_embedding_model import TextEmbeddingModel -from core.model_runtime.model_providers.xinference.xinference_helper import XinferenceHelper +from core.model_runtime.model_providers.xinference.xinference_helper import XinferenceHelper, validate_model_uid class XinferenceTextEmbeddingModel(TextEmbeddingModel): @@ -110,7 +110,7 @@ def validate_credentials(self, model: str, credentials: dict) -> None: :return: """ try: - if "/" in credentials["model_uid"] or "?" 
in credentials["model_uid"] or "#" in credentials["model_uid"]: + if not validate_model_uid(credentials): raise CredentialsValidateFailedError("model_uid should not contain /, ?, or #") server_url = credentials["server_url"] diff --git a/api/core/model_runtime/model_providers/xinference/tts/tts.py b/api/core/model_runtime/model_providers/xinference/tts/tts.py index 81dbe397d2f10..6290e8551d802 100644 --- a/api/core/model_runtime/model_providers/xinference/tts/tts.py +++ b/api/core/model_runtime/model_providers/xinference/tts/tts.py @@ -15,7 +15,7 @@ ) from core.model_runtime.errors.validate import CredentialsValidateFailedError from core.model_runtime.model_providers.__base.tts_model import TTSModel -from core.model_runtime.model_providers.xinference.xinference_helper import XinferenceHelper +from core.model_runtime.model_providers.xinference.xinference_helper import XinferenceHelper, validate_model_uid class XinferenceText2SpeechModel(TTSModel): @@ -70,7 +70,7 @@ def validate_credentials(self, model: str, credentials: dict) -> None: :return: """ try: - if "/" in credentials["model_uid"] or "?" 
in credentials["model_uid"] or "#" in credentials["model_uid"]: + if not validate_model_uid(credentials): raise CredentialsValidateFailedError("model_uid should not contain /, ?, or #") credentials["server_url"] = credentials["server_url"].removesuffix("/") diff --git a/api/core/model_runtime/model_providers/xinference/xinference_helper.py b/api/core/model_runtime/model_providers/xinference/xinference_helper.py index 619ee1492a927..baa3ccbe8adbc 100644 --- a/api/core/model_runtime/model_providers/xinference/xinference_helper.py +++ b/api/core/model_runtime/model_providers/xinference/xinference_helper.py @@ -132,3 +132,16 @@ def _get_xinference_extra_parameter(server_url: str, model_uid: str, api_key: st context_length=context_length, model_family=model_family, ) + + +def validate_model_uid(credentials: dict) -> bool: + """ + Validate the model_uid within the credentials dictionary to ensure it does not + contain forbidden characters ("/", "?", "#"). + + param credentials: model credentials + :return: True if the model_uid does not contain forbidden characters ("/", "?", "#"), else False. 
+ """ + forbidden_characters = ["/", "?", "#"] + model_uid = credentials.get("model_uid", "") + return not any(char in forbidden_characters for char in model_uid) From 9d221a5e19f8613fa1a47984649378b22a0fd66e Mon Sep 17 00:00:00 2001 From: Jyong <76649700+JohnJyong@users.noreply.github.com> Date: Mon, 30 Sep 2024 15:38:43 +0800 Subject: [PATCH 113/235] external knowledge api (#8913) Co-authored-by: Yi --- api/controllers/console/__init__.py | 11 +- api/controllers/console/datasets/datasets.py | 54 +- api/controllers/console/datasets/external.py | 239 +++ .../console/datasets/hit_testing.py | 2 + .../service_api/dataset/dataset.py | 27 +- .../index_tool_callback_handler.py | 2 +- api/core/rag/datasource/retrieval_service.py | 14 + api/core/rag/entities/context_entities.py | 10 + api/core/rag/models/document.py | 2 + api/core/rag/rerank/rerank_model.py | 17 +- api/core/rag/retrieval/dataset_retrieval.py | 300 ++-- .../knowledge_retrieval_node.py | 44 +- api/fields/dataset_fields.py | 13 + api/fields/external_dataset_fields.py | 11 + ...-6af6a521a53e_update_retrieval_resource.py | 48 + ...434-33f5fac87f29_external_knowledge_api.py | 73 + ...fca025d3b60f_add_dataset_retrival_model.py | 2 +- api/models/dataset.py | 106 ++ api/models/model.py | 6 +- api/poetry.lock | 1542 +++++++++-------- api/pyproject.toml | 1 + api/schedule/clean_unused_messages_task.py | 92 + api/services/dataset_service.py | 191 +- .../external_knowledge_entities.py | 26 + api/services/external_knowledge_service.py | 274 +++ api/services/hit_testing_service.py | 68 +- api/tasks/external_document_indexing_task.py | 93 + .../[datasetId]/layout.tsx | 26 +- web/app/(commonLayout)/datasets/Container.tsx | 20 +- .../(commonLayout)/datasets/DatasetCard.tsx | 29 +- .../datasets/NewDatasetCard.tsx | 27 +- .../(commonLayout)/datasets/connect/page.tsx | 8 + web/app/(commonLayout)/datasets/layout.tsx | 14 + web/app/(commonLayout)/datasets/page.tsx | 4 +- web/app/(commonLayout)/datasets/store.ts | 11 + 
web/app/components/app-sidebar/basic.tsx | 7 +- web/app/components/app-sidebar/index.tsx | 4 +- .../dataset-config/card-item/item.tsx | 10 +- .../params-config/config-content.tsx | 20 +- .../dataset-config/params-config/index.tsx | 15 +- .../dataset-config/select-dataset/index.tsx | 7 +- .../dataset-config/settings-modal/index.tsx | 148 +- .../components/base/corner-label/index.tsx | 21 + .../solid/development/api-connection-mod.svg | 5 + .../assets/vender/solid/shapes/corner.svg | 3 + .../solid/development/ApiConnectionMod.json | 38 + .../solid/development/ApiConnectionMod.tsx | 16 + .../src/vender/solid/development/index.ts | 1 + .../icons/src/vender/solid/shapes/Corner.json | 27 + .../icons/src/vender/solid/shapes/Corner.tsx | 16 + .../icons/src/vender/solid/shapes/index.ts | 1 + web/app/components/base/modal/index.css | 2 +- web/app/components/base/param-item/index.tsx | 1 + web/app/components/base/select/index.tsx | 2 +- .../detail/completed/SegmentCard.tsx | 20 +- .../datasets/external-api/declarations.ts | 16 + .../external-api/external-api-modal/Form.tsx | 90 + .../external-api/external-api-modal/index.tsx | 218 +++ .../external-api/external-api-panel/index.tsx | 90 + .../external-knowledge-api-card/index.tsx | 151 ++ .../connector/index.tsx | 36 + .../create/ExternalApiSelect.tsx | 110 ++ .../create/ExternalApiSelection.tsx | 96 + .../create/InfoPanel.tsx | 33 + .../create/KnowledgeBaseInfo.tsx | 65 + .../create/RetrievalSettings.tsx | 67 + .../create/declarations.ts | 12 + .../external-knowledge-base/create/index.tsx | 128 ++ .../datasets/hit-testing/hit-detail.tsx | 10 +- .../components/datasets/hit-testing/index.tsx | 111 +- .../modify-external-retrieval-modal.tsx | 71 + .../datasets/hit-testing/textarea.tsx | 91 +- .../datasets/rename-modal/index.tsx | 15 +- .../datasets/settings/form/index.tsx | 122 +- .../components/header/dataset-nav/index.tsx | 2 +- .../components/dataset-item.tsx | 58 +- .../nodes/knowledge-retrieval/use-config.ts | 10 +- 
.../nodes/knowledge-retrieval/utils.ts | 31 +- web/context/external-api-panel-context.tsx | 28 + .../external-knowledge-api-context.tsx | 46 + web/context/modal-context.tsx | 39 + web/i18n/en-US/dataset-hit-testing.ts | 4 +- web/i18n/en-US/dataset-settings.ts | 11 +- web/i18n/en-US/dataset.ts | 74 + web/i18n/zh-Hans/dataset-hit-testing.ts | 4 +- web/i18n/zh-Hans/dataset-settings.ts | 6 +- web/i18n/zh-Hans/dataset.ts | 74 + web/i18n/zh-Hant/dataset-hit-testing.ts | 1 + web/models/datasets.ts | 81 + web/service/datasets.ts | 42 +- 90 files changed, 4633 insertions(+), 1181 deletions(-) create mode 100644 api/controllers/console/datasets/external.py create mode 100644 api/core/rag/entities/context_entities.py create mode 100644 api/fields/external_dataset_fields.py create mode 100644 api/migrations/versions/2024_09_24_0922-6af6a521a53e_update_retrieval_resource.py create mode 100644 api/migrations/versions/2024_09_25_0434-33f5fac87f29_external_knowledge_api.py create mode 100644 api/schedule/clean_unused_messages_task.py create mode 100644 api/services/entities/external_knowledge_entities/external_knowledge_entities.py create mode 100644 api/services/external_knowledge_service.py create mode 100644 api/tasks/external_document_indexing_task.py create mode 100644 web/app/(commonLayout)/datasets/connect/page.tsx create mode 100644 web/app/(commonLayout)/datasets/layout.tsx create mode 100644 web/app/(commonLayout)/datasets/store.ts create mode 100644 web/app/components/base/corner-label/index.tsx create mode 100644 web/app/components/base/icons/assets/vender/solid/development/api-connection-mod.svg create mode 100644 web/app/components/base/icons/assets/vender/solid/shapes/corner.svg create mode 100644 web/app/components/base/icons/src/vender/solid/development/ApiConnectionMod.json create mode 100644 web/app/components/base/icons/src/vender/solid/development/ApiConnectionMod.tsx create mode 100644 web/app/components/base/icons/src/vender/solid/shapes/Corner.json create 
mode 100644 web/app/components/base/icons/src/vender/solid/shapes/Corner.tsx create mode 100644 web/app/components/datasets/external-api/declarations.ts create mode 100644 web/app/components/datasets/external-api/external-api-modal/Form.tsx create mode 100644 web/app/components/datasets/external-api/external-api-modal/index.tsx create mode 100644 web/app/components/datasets/external-api/external-api-panel/index.tsx create mode 100644 web/app/components/datasets/external-api/external-knowledge-api-card/index.tsx create mode 100644 web/app/components/datasets/external-knowledge-base/connector/index.tsx create mode 100644 web/app/components/datasets/external-knowledge-base/create/ExternalApiSelect.tsx create mode 100644 web/app/components/datasets/external-knowledge-base/create/ExternalApiSelection.tsx create mode 100644 web/app/components/datasets/external-knowledge-base/create/InfoPanel.tsx create mode 100644 web/app/components/datasets/external-knowledge-base/create/KnowledgeBaseInfo.tsx create mode 100644 web/app/components/datasets/external-knowledge-base/create/RetrievalSettings.tsx create mode 100644 web/app/components/datasets/external-knowledge-base/create/declarations.ts create mode 100644 web/app/components/datasets/external-knowledge-base/create/index.tsx create mode 100644 web/app/components/datasets/hit-testing/modify-external-retrieval-modal.tsx create mode 100644 web/context/external-api-panel-context.tsx create mode 100644 web/context/external-knowledge-api-context.tsx diff --git a/api/controllers/console/__init__.py b/api/controllers/console/__init__.py index eb7c1464d3972..c7282fcf14531 100644 --- a/api/controllers/console/__init__.py +++ b/api/controllers/console/__init__.py @@ -37,7 +37,16 @@ from .billing import billing # Import datasets controllers -from .datasets import data_source, datasets, datasets_document, datasets_segments, file, hit_testing, website +from .datasets import ( + data_source, + datasets, + datasets_document, + 
datasets_segments, + external, + file, + hit_testing, + website, +) # Import explore controllers from .explore import ( diff --git a/api/controllers/console/datasets/datasets.py b/api/controllers/console/datasets/datasets.py index 5a763b3457c69..9561fd8b70e4b 100644 --- a/api/controllers/console/datasets/datasets.py +++ b/api/controllers/console/datasets/datasets.py @@ -49,7 +49,7 @@ def get(self): page = request.args.get("page", default=1, type=int) limit = request.args.get("limit", default=20, type=int) ids = request.args.getlist("ids") - provider = request.args.get("provider", default="vendor") + # provider = request.args.get("provider", default="vendor") search = request.args.get("keyword", default=None, type=str) tag_ids = request.args.getlist("tag_ids") @@ -57,7 +57,7 @@ def get(self): datasets, total = DatasetService.get_datasets_by_ids(ids, current_user.current_tenant_id) else: datasets, total = DatasetService.get_datasets( - page, limit, provider, current_user.current_tenant_id, current_user, search, tag_ids + page, limit, current_user.current_tenant_id, current_user, search, tag_ids ) # check embedding setting @@ -110,6 +110,26 @@ def post(self): nullable=True, help="Invalid indexing technique.", ) + parser.add_argument( + "external_knowledge_api_id", + type=str, + nullable=True, + required=False, + ) + parser.add_argument( + "provider", + type=str, + nullable=True, + choices=Dataset.PROVIDER_LIST, + required=False, + default="vendor", + ) + parser.add_argument( + "external_knowledge_id", + type=str, + nullable=True, + required=False, + ) args = parser.parse_args() # The role of the current user in the ta table must be admin, owner, or editor, or dataset_operator @@ -123,6 +143,9 @@ def post(self): indexing_technique=args["indexing_technique"], account=current_user, permission=DatasetPermissionEnum.ONLY_ME, + provider=args["provider"], + external_knowledge_api_id=args["external_knowledge_api_id"], + external_knowledge_id=args["external_knowledge_id"], ) 
except services.errors.dataset.DatasetNameDuplicateError: raise DatasetNameDuplicateError() @@ -211,6 +234,33 @@ def patch(self, dataset_id): ) parser.add_argument("retrieval_model", type=dict, location="json", help="Invalid retrieval model.") parser.add_argument("partial_member_list", type=list, location="json", help="Invalid parent user list.") + + parser.add_argument( + "external_retrieval_model", + type=dict, + required=False, + nullable=True, + location="json", + help="Invalid external retrieval model.", + ) + + parser.add_argument( + "external_knowledge_id", + type=str, + required=False, + nullable=True, + location="json", + help="Invalid external knowledge id.", + ) + + parser.add_argument( + "external_knowledge_api_id", + type=str, + required=False, + nullable=True, + location="json", + help="Invalid external knowledge api id.", + ) args = parser.parse_args() data = request.get_json() diff --git a/api/controllers/console/datasets/external.py b/api/controllers/console/datasets/external.py new file mode 100644 index 0000000000000..dcef979360ba6 --- /dev/null +++ b/api/controllers/console/datasets/external.py @@ -0,0 +1,239 @@ +from flask import request +from flask_login import current_user +from flask_restful import Resource, marshal, reqparse +from werkzeug.exceptions import Forbidden, InternalServerError, NotFound + +import services +from controllers.console import api +from controllers.console.datasets.error import DatasetNameDuplicateError +from controllers.console.setup import setup_required +from controllers.console.wraps import account_initialization_required +from fields.dataset_fields import dataset_detail_fields +from libs.login import login_required +from services.dataset_service import DatasetService +from services.external_knowledge_service import ExternalDatasetService +from services.hit_testing_service import HitTestingService + + +def _validate_name(name): + if not name or len(name) < 1 or len(name) > 100: + raise ValueError("Name must be 
between 1 to 100 characters.") + return name + + +def _validate_description_length(description): + if description and len(description) > 400: + raise ValueError("Description cannot exceed 400 characters.") + return description + + +class ExternalApiTemplateListApi(Resource): + @setup_required + @login_required + @account_initialization_required + def get(self): + page = request.args.get("page", default=1, type=int) + limit = request.args.get("limit", default=20, type=int) + search = request.args.get("keyword", default=None, type=str) + + external_knowledge_apis, total = ExternalDatasetService.get_external_knowledge_apis( + page, limit, current_user.current_tenant_id, search + ) + response = { + "data": [item.to_dict() for item in external_knowledge_apis], + "has_more": len(external_knowledge_apis) == limit, + "limit": limit, + "total": total, + "page": page, + } + return response, 200 + + @setup_required + @login_required + @account_initialization_required + def post(self): + parser = reqparse.RequestParser() + parser.add_argument( + "name", + nullable=False, + required=True, + help="Name is required. 
Name must be between 1 to 100 characters.", + type=_validate_name, + ) + parser.add_argument( + "settings", + type=dict, + location="json", + nullable=False, + required=True, + ) + args = parser.parse_args() + + ExternalDatasetService.validate_api_list(args["settings"]) + + # The role of the current user in the ta table must be admin, owner, or editor, or dataset_operator + if not current_user.is_dataset_editor: + raise Forbidden() + + try: + external_knowledge_api = ExternalDatasetService.create_external_knowledge_api( + tenant_id=current_user.current_tenant_id, user_id=current_user.id, args=args + ) + except services.errors.dataset.DatasetNameDuplicateError: + raise DatasetNameDuplicateError() + + return external_knowledge_api.to_dict(), 201 + + +class ExternalApiTemplateApi(Resource): + @setup_required + @login_required + @account_initialization_required + def get(self, external_knowledge_api_id): + external_knowledge_api_id = str(external_knowledge_api_id) + external_knowledge_api = ExternalDatasetService.get_external_knowledge_api(external_knowledge_api_id) + if external_knowledge_api is None: + raise NotFound("API template not found.") + + return external_knowledge_api.to_dict(), 200 + + @setup_required + @login_required + @account_initialization_required + def patch(self, external_knowledge_api_id): + external_knowledge_api_id = str(external_knowledge_api_id) + + parser = reqparse.RequestParser() + parser.add_argument( + "name", + nullable=False, + required=True, + help="type is required. 
Name must be between 1 to 100 characters.", + type=_validate_name, + ) + parser.add_argument( + "settings", + type=dict, + location="json", + nullable=False, + required=True, + ) + args = parser.parse_args() + ExternalDatasetService.validate_api_list(args["settings"]) + + external_knowledge_api = ExternalDatasetService.update_external_knowledge_api( + tenant_id=current_user.current_tenant_id, + user_id=current_user.id, + external_knowledge_api_id=external_knowledge_api_id, + args=args, + ) + + return external_knowledge_api.to_dict(), 200 + + @setup_required + @login_required + @account_initialization_required + def delete(self, external_knowledge_api_id): + external_knowledge_api_id = str(external_knowledge_api_id) + + # The role of the current user in the ta table must be admin, owner, or editor + if not current_user.is_editor or current_user.is_dataset_operator: + raise Forbidden() + + ExternalDatasetService.delete_external_knowledge_api(current_user.current_tenant_id, external_knowledge_api_id) + return {"result": "success"}, 200 + + +class ExternalApiUseCheckApi(Resource): + @setup_required + @login_required + @account_initialization_required + def get(self, external_knowledge_api_id): + external_knowledge_api_id = str(external_knowledge_api_id) + + external_knowledge_api_is_using, count = ExternalDatasetService.external_knowledge_api_use_check( + external_knowledge_api_id + ) + return {"is_using": external_knowledge_api_is_using, "count": count}, 200 + + +class ExternalDatasetCreateApi(Resource): + @setup_required + @login_required + @account_initialization_required + def post(self): + # The role of the current user in the ta table must be admin, owner, or editor + if not current_user.is_editor: + raise Forbidden() + + parser = reqparse.RequestParser() + parser.add_argument("external_knowledge_api_id", type=str, required=True, nullable=False, location="json") + parser.add_argument("external_knowledge_id", type=str, required=True, nullable=False, 
location="json") + parser.add_argument( + "name", + nullable=False, + required=True, + help="name is required. Name must be between 1 to 100 characters.", + type=_validate_name, + ) + parser.add_argument("description", type=str, required=False, nullable=True, location="json") + parser.add_argument("external_retrieval_model", type=dict, required=False, location="json") + + args = parser.parse_args() + + # The role of the current user in the ta table must be admin, owner, or editor, or dataset_operator + if not current_user.is_dataset_editor: + raise Forbidden() + + try: + dataset = ExternalDatasetService.create_external_dataset( + tenant_id=current_user.current_tenant_id, + user_id=current_user.id, + args=args, + ) + except services.errors.dataset.DatasetNameDuplicateError: + raise DatasetNameDuplicateError() + + return marshal(dataset, dataset_detail_fields), 201 + + +class ExternalKnowledgeHitTestingApi(Resource): + @setup_required + @login_required + @account_initialization_required + def post(self, dataset_id): + dataset_id_str = str(dataset_id) + dataset = DatasetService.get_dataset(dataset_id_str) + if dataset is None: + raise NotFound("Dataset not found.") + + try: + DatasetService.check_dataset_permission(dataset, current_user) + except services.errors.account.NoPermissionError as e: + raise Forbidden(str(e)) + + parser = reqparse.RequestParser() + parser.add_argument("query", type=str, location="json") + parser.add_argument("external_retrieval_model", type=dict, required=False, location="json") + args = parser.parse_args() + + HitTestingService.hit_testing_args_check(args) + + try: + response = HitTestingService.external_retrieve( + dataset=dataset, + query=args["query"], + account=current_user, + external_retrieval_model=args["external_retrieval_model"], + ) + + return response + except Exception as e: + raise InternalServerError(str(e)) + + +api.add_resource(ExternalKnowledgeHitTestingApi, "/datasets//external-hit-testing") 
+api.add_resource(ExternalDatasetCreateApi, "/datasets/external") +api.add_resource(ExternalApiTemplateListApi, "/datasets/external-knowledge-api") +api.add_resource(ExternalApiTemplateApi, "/datasets/external-knowledge-api/") +api.add_resource(ExternalApiUseCheckApi, "/datasets/external-knowledge-api//use-check") diff --git a/api/controllers/console/datasets/hit_testing.py b/api/controllers/console/datasets/hit_testing.py index 0b4a7be986e16..6e6d8c0bd76f2 100644 --- a/api/controllers/console/datasets/hit_testing.py +++ b/api/controllers/console/datasets/hit_testing.py @@ -47,6 +47,7 @@ def post(self, dataset_id): parser = reqparse.RequestParser() parser.add_argument("query", type=str, location="json") parser.add_argument("retrieval_model", type=dict, required=False, location="json") + parser.add_argument("external_retrieval_model", type=dict, required=False, location="json") args = parser.parse_args() HitTestingService.hit_testing_args_check(args) @@ -57,6 +58,7 @@ def post(self, dataset_id): query=args["query"], account=current_user, retrieval_model=args["retrieval_model"], + external_retrieval_model=args["external_retrieval_model"], limit=10, ) diff --git a/api/controllers/service_api/dataset/dataset.py b/api/controllers/service_api/dataset/dataset.py index c2c0672a0346d..f076cff6c8770 100644 --- a/api/controllers/service_api/dataset/dataset.py +++ b/api/controllers/service_api/dataset/dataset.py @@ -28,11 +28,11 @@ def get(self, tenant_id): page = request.args.get("page", default=1, type=int) limit = request.args.get("limit", default=20, type=int) - provider = request.args.get("provider", default="vendor") + # provider = request.args.get("provider", default="vendor") search = request.args.get("keyword", default=None, type=str) tag_ids = request.args.getlist("tag_ids") - datasets, total = DatasetService.get_datasets(page, limit, provider, tenant_id, current_user, search, tag_ids) + datasets, total = DatasetService.get_datasets(page, limit, tenant_id, 
current_user, search, tag_ids) # check embedding setting provider_manager = ProviderManager() configurations = provider_manager.get_configurations(tenant_id=current_user.current_tenant_id) @@ -82,6 +82,26 @@ def post(self, tenant_id): required=False, nullable=False, ) + parser.add_argument( + "external_knowledge_api_id", + type=str, + nullable=True, + required=False, + default="_validate_name", + ) + parser.add_argument( + "provider", + type=str, + nullable=True, + required=False, + default="vendor", + ) + parser.add_argument( + "external_knowledge_id", + type=str, + nullable=True, + required=False, + ) args = parser.parse_args() try: @@ -91,6 +111,9 @@ def post(self, tenant_id): indexing_technique=args["indexing_technique"], account=current_user, permission=args["permission"], + provider=args["provider"], + external_knowledge_api_id=args["external_knowledge_api_id"], + external_knowledge_id=args["external_knowledge_id"], ) except services.errors.dataset.DatasetNameDuplicateError: raise DatasetNameDuplicateError() diff --git a/api/core/callback_handler/index_tool_callback_handler.py b/api/core/callback_handler/index_tool_callback_handler.py index 7cf472d984a38..23dc092895c8c 100644 --- a/api/core/callback_handler/index_tool_callback_handler.py +++ b/api/core/callback_handler/index_tool_callback_handler.py @@ -59,7 +59,7 @@ def return_retriever_resource_info(self, resource: list): for item in resource: dataset_retriever_resource = DatasetRetrieverResource( message_id=self._message_id, - position=item.get("position"), + position=item.get("position") or 0, dataset_id=item.get("dataset_id"), dataset_name=item.get("dataset_name"), document_id=item.get("document_id"), diff --git a/api/core/rag/datasource/retrieval_service.py b/api/core/rag/datasource/retrieval_service.py index afac1bf30086e..d3fd0c672ad27 100644 --- a/api/core/rag/datasource/retrieval_service.py +++ b/api/core/rag/datasource/retrieval_service.py @@ -10,6 +10,7 @@ from core.rag.retrieval.retrieval_methods 
import RetrievalMethod from extensions.ext_database import db from models.dataset import Dataset +from services.external_knowledge_service import ExternalDatasetService default_retrieval_model = { "search_method": RetrievalMethod.SEMANTIC_SEARCH.value, @@ -34,6 +35,9 @@ def retrieve( weights: Optional[dict] = None, ): dataset = db.session.query(Dataset).filter(Dataset.id == dataset_id).first() + if not dataset: + return [] + if not dataset or dataset.available_document_count == 0 or dataset.available_segment_count == 0: return [] all_documents = [] @@ -108,6 +112,16 @@ def retrieve( ) return all_documents + @classmethod + def external_retrieve(cls, dataset_id: str, query: str, external_retrieval_model: Optional[dict] = None): + dataset = db.session.query(Dataset).filter(Dataset.id == dataset_id).first() + if not dataset: + return [] + all_documents = ExternalDatasetService.fetch_external_knowledge_retrieval( + dataset.tenant_id, dataset_id, query, external_retrieval_model + ) + return all_documents + @classmethod def keyword_search( cls, flask_app: Flask, dataset_id: str, query: str, top_k: int, all_documents: list, exceptions: list diff --git a/api/core/rag/entities/context_entities.py b/api/core/rag/entities/context_entities.py new file mode 100644 index 0000000000000..dde3beccf6e46 --- /dev/null +++ b/api/core/rag/entities/context_entities.py @@ -0,0 +1,10 @@ +from pydantic import BaseModel + + +class DocumentContext(BaseModel): + """ + Model class for document context. + """ + + content: str + score: float diff --git a/api/core/rag/models/document.py b/api/core/rag/models/document.py index 0ff1fdb81cb87..1e9aaa24f04c9 100644 --- a/api/core/rag/models/document.py +++ b/api/core/rag/models/document.py @@ -17,6 +17,8 @@ class Document(BaseModel): """ metadata: Optional[dict] = Field(default_factory=dict) + provider: Optional[str] = "dify" + class BaseDocumentTransformer(ABC): """Abstract base class for document transformation systems. 
diff --git a/api/core/rag/rerank/rerank_model.py b/api/core/rag/rerank/rerank_model.py index 6356ff87ab8ef..27f86aed34a81 100644 --- a/api/core/rag/rerank/rerank_model.py +++ b/api/core/rag/rerank/rerank_model.py @@ -28,11 +28,16 @@ def run( docs = [] doc_id = [] unique_documents = [] - for document in documents: + dify_documents = [item for item in documents if item.provider == "dify"] + external_documents = [item for item in documents if item.provider == "external"] + for document in dify_documents: if document.metadata["doc_id"] not in doc_id: doc_id.append(document.metadata["doc_id"]) docs.append(document.page_content) unique_documents.append(document) + for document in external_documents: + docs.append(document.page_content) + unique_documents.append(document) documents = unique_documents @@ -46,14 +51,10 @@ def run( # format document rerank_document = Document( page_content=result.text, - metadata={ - "doc_id": documents[result.index].metadata["doc_id"], - "doc_hash": documents[result.index].metadata["doc_hash"], - "document_id": documents[result.index].metadata["document_id"], - "dataset_id": documents[result.index].metadata["dataset_id"], - "score": result.score, - }, + metadata=documents[result.index].metadata, + provider=documents[result.index].provider, ) + rerank_document.metadata["score"] = result.score rerank_documents.append(rerank_document) return rerank_documents diff --git a/api/core/rag/retrieval/dataset_retrieval.py b/api/core/rag/retrieval/dataset_retrieval.py index 4603957d68599..ae61ba7112243 100644 --- a/api/core/rag/retrieval/dataset_retrieval.py +++ b/api/core/rag/retrieval/dataset_retrieval.py @@ -20,6 +20,7 @@ from core.rag.data_post_processor.data_post_processor import DataPostProcessor from core.rag.datasource.keyword.jieba.jieba_keyword_table_handler import JiebaKeywordTableHandler from core.rag.datasource.retrieval_service import RetrievalService +from core.rag.entities.context_entities import DocumentContext from 
core.rag.models.document import Document from core.rag.retrieval.retrieval_methods import RetrievalMethod from core.rag.retrieval.router.multi_dataset_function_call_router import FunctionCallMultiDatasetRouter @@ -30,6 +31,7 @@ from extensions.ext_database import db from models.dataset import Dataset, DatasetQuery, DocumentSegment from models.dataset import Document as DatasetDocument +from services.external_knowledge_service import ExternalDatasetService default_retrieval_model = { "search_method": RetrievalMethod.SEMANTIC_SEARCH.value, @@ -110,7 +112,7 @@ def retrieve( continue # pass if dataset is not available - if dataset and dataset.available_document_count == 0: + if dataset and dataset.available_document_count == 0 and dataset.provider != "external": continue available_datasets.append(dataset) @@ -146,69 +148,93 @@ def retrieve( message_id, ) - document_score_list = {} - for item in all_documents: - if item.metadata.get("score"): - document_score_list[item.metadata["doc_id"]] = item.metadata["score"] - + dify_documents = [item for item in all_documents if item.provider == "dify"] + external_documents = [item for item in all_documents if item.provider == "external"] document_context_list = [] - index_node_ids = [document.metadata["doc_id"] for document in all_documents] - segments = DocumentSegment.query.filter( - DocumentSegment.dataset_id.in_(dataset_ids), - DocumentSegment.completed_at.isnot(None), - DocumentSegment.status == "completed", - DocumentSegment.enabled == True, - DocumentSegment.index_node_id.in_(index_node_ids), - ).all() - - if segments: - index_node_id_to_position = {id: position for position, id in enumerate(index_node_ids)} - sorted_segments = sorted( - segments, key=lambda segment: index_node_id_to_position.get(segment.index_node_id, float("inf")) - ) - for segment in sorted_segments: - if segment.answer: - document_context_list.append(f"question:{segment.get_sign_content()} answer:{segment.answer}") - else: - 
document_context_list.append(segment.get_sign_content()) - if show_retrieve_source: - context_list = [] - resource_number = 1 + retrieval_resource_list = [] + # deal with external documents + for item in external_documents: + document_context_list.append(DocumentContext(content=item.page_content, score=item.metadata.get("score"))) + source = { + "dataset_id": item.metadata.get("dataset_id"), + "dataset_name": item.metadata.get("dataset_name"), + "document_name": item.metadata.get("title"), + "data_source_type": "external", + "retriever_from": invoke_from.to_source(), + "score": item.metadata.get("score"), + "content": item.page_content, + } + retrieval_resource_list.append(source) + document_score_list = {} + # deal with dify documents + if dify_documents: + for item in dify_documents: + if item.metadata.get("score"): + document_score_list[item.metadata["doc_id"]] = item.metadata["score"] + + index_node_ids = [document.metadata["doc_id"] for document in dify_documents] + segments = DocumentSegment.query.filter( + DocumentSegment.dataset_id.in_(dataset_ids), + DocumentSegment.status == "completed", + DocumentSegment.enabled == True, + DocumentSegment.index_node_id.in_(index_node_ids), + ).all() + + if segments: + index_node_id_to_position = {id: position for position, id in enumerate(index_node_ids)} + sorted_segments = sorted( + segments, key=lambda segment: index_node_id_to_position.get(segment.index_node_id, float("inf")) + ) for segment in sorted_segments: - dataset = Dataset.query.filter_by(id=segment.dataset_id).first() - document = DatasetDocument.query.filter( - DatasetDocument.id == segment.document_id, - DatasetDocument.enabled == True, - DatasetDocument.archived == False, - ).first() - if dataset and document: - source = { - "position": resource_number, - "dataset_id": dataset.id, - "dataset_name": dataset.name, - "document_id": document.id, - "document_name": document.name, - "data_source_type": document.data_source_type, - "segment_id": segment.id, - 
"retriever_from": invoke_from.to_source(), - "score": document_score_list.get(segment.index_node_id, None), - } - - if invoke_from.to_source() == "dev": - source["hit_count"] = segment.hit_count - source["word_count"] = segment.word_count - source["segment_position"] = segment.position - source["index_node_hash"] = segment.index_node_hash - if segment.answer: - source["content"] = f"question:{segment.content} \nanswer:{segment.answer}" - else: - source["content"] = segment.content - context_list.append(source) - resource_number += 1 - if hit_callback: - hit_callback.return_retriever_resource_info(context_list) - - return str("\n".join(document_context_list)) + if segment.answer: + document_context_list.append( + DocumentContext( + content=f"question:{segment.get_sign_content()} answer:{segment.answer}", + score=document_score_list.get(segment.index_node_id, None), + ) + ) + else: + document_context_list.append( + DocumentContext( + content=segment.get_sign_content(), + score=document_score_list.get(segment.index_node_id, None), + ) + ) + if show_retrieve_source: + for segment in sorted_segments: + dataset = Dataset.query.filter_by(id=segment.dataset_id).first() + document = DatasetDocument.query.filter( + DatasetDocument.id == segment.document_id, + DatasetDocument.enabled == True, + DatasetDocument.archived == False, + ).first() + if dataset and document: + source = { + "dataset_id": dataset.id, + "dataset_name": dataset.name, + "document_id": document.id, + "document_name": document.name, + "data_source_type": document.data_source_type, + "segment_id": segment.id, + "retriever_from": invoke_from.to_source(), + "score": document_score_list.get(segment.index_node_id, None), + } + + if invoke_from.to_source() == "dev": + source["hit_count"] = segment.hit_count + source["word_count"] = segment.word_count + source["segment_position"] = segment.position + source["index_node_hash"] = segment.index_node_hash + if segment.answer: + source["content"] = 
f"question:{segment.content} \nanswer:{segment.answer}" + else: + source["content"] = segment.content + retrieval_resource_list.append(source) + if hit_callback and retrieval_resource_list: + hit_callback.return_retriever_resource_info(retrieval_resource_list) + if document_context_list: + document_context_list = sorted(document_context_list, key=lambda x: x.score, reverse=True) + return str("\n".join([document_context.content for document_context in document_context_list])) return "" def single_retrieve( @@ -256,36 +282,58 @@ def single_retrieve( # get retrieval model config dataset = db.session.query(Dataset).filter(Dataset.id == dataset_id).first() if dataset: - retrieval_model_config = dataset.retrieval_model or default_retrieval_model - - # get top k - top_k = retrieval_model_config["top_k"] - # get retrieval method - if dataset.indexing_technique == "economy": - retrieval_method = "keyword_search" - else: - retrieval_method = retrieval_model_config["search_method"] - # get reranking model - reranking_model = ( - retrieval_model_config["reranking_model"] if retrieval_model_config["reranking_enable"] else None - ) - # get score threshold - score_threshold = 0.0 - score_threshold_enabled = retrieval_model_config.get("score_threshold_enabled") - if score_threshold_enabled: - score_threshold = retrieval_model_config.get("score_threshold") - - with measure_time() as timer: - results = RetrievalService.retrieve( - retrieval_method=retrieval_method, - dataset_id=dataset.id, + results = [] + if dataset.provider == "external": + external_documents = ExternalDatasetService.fetch_external_knowledge_retrieval( + tenant_id=dataset.tenant_id, + dataset_id=dataset_id, query=query, - top_k=top_k, - score_threshold=score_threshold, - reranking_model=reranking_model, - reranking_mode=retrieval_model_config.get("reranking_mode", "reranking_model"), - weights=retrieval_model_config.get("weights", None), + external_retrieval_parameters=dataset.retrieval_model, + ) + for 
external_document in external_documents: + document = Document( + page_content=external_document.get("content"), + metadata=external_document.get("metadata"), + provider="external", + ) + document.metadata["score"] = external_document.get("score") + document.metadata["title"] = external_document.get("title") + document.metadata["dataset_id"] = dataset_id + document.metadata["dataset_name"] = dataset.name + results.append(document) + else: + retrieval_model_config = dataset.retrieval_model or default_retrieval_model + + # get top k + top_k = retrieval_model_config["top_k"] + # get retrieval method + if dataset.indexing_technique == "economy": + retrieval_method = "keyword_search" + else: + retrieval_method = retrieval_model_config["search_method"] + # get reranking model + reranking_model = ( + retrieval_model_config["reranking_model"] + if retrieval_model_config["reranking_enable"] + else None ) + # get score threshold + score_threshold = 0.0 + score_threshold_enabled = retrieval_model_config.get("score_threshold_enabled") + if score_threshold_enabled: + score_threshold = retrieval_model_config.get("score_threshold") + + with measure_time() as timer: + results = RetrievalService.retrieve( + retrieval_method=retrieval_method, + dataset_id=dataset.id, + query=query, + top_k=top_k, + score_threshold=score_threshold, + reranking_model=reranking_model, + reranking_mode=retrieval_model_config.get("reranking_mode", "reranking_model"), + weights=retrieval_model_config.get("weights", None), + ) self._on_query(query, [dataset_id], app_id, user_from, user_id) if results: @@ -356,7 +404,8 @@ def _on_retrieval_end( self, documents: list[Document], message_id: Optional[str] = None, timer: Optional[dict] = None ) -> None: """Handle retrieval end.""" - for document in documents: + dify_documents = [document for document in documents if document.provider == "dify"] + for document in dify_documents: query = db.session.query(DocumentSegment).filter( DocumentSegment.index_node_id == 
document.metadata["doc_id"] ) @@ -409,35 +458,54 @@ def _retriever(self, flask_app: Flask, dataset_id: str, query: str, top_k: int, if not dataset: return [] - # get retrieval model , if the model is not setting , using default - retrieval_model = dataset.retrieval_model or default_retrieval_model - - if dataset.indexing_technique == "economy": - # use keyword table query - documents = RetrievalService.retrieve( - retrieval_method="keyword_search", dataset_id=dataset.id, query=query, top_k=top_k + if dataset.provider == "external": + external_documents = ExternalDatasetService.fetch_external_knowledge_retrieval( + tenant_id=dataset.tenant_id, + dataset_id=dataset_id, + query=query, + external_retrieval_parameters=dataset.retrieval_model, ) - if documents: - all_documents.extend(documents) + for external_document in external_documents: + document = Document( + page_content=external_document.get("content"), + metadata=external_document.get("metadata"), + provider="external", + ) + document.metadata["score"] = external_document.get("score") + document.metadata["title"] = external_document.get("title") + document.metadata["dataset_id"] = dataset_id + document.metadata["dataset_name"] = dataset.name + all_documents.append(document) else: - if top_k > 0: - # retrieval source + # get retrieval model , if the model is not setting , using default + retrieval_model = dataset.retrieval_model or default_retrieval_model + + if dataset.indexing_technique == "economy": + # use keyword table query documents = RetrievalService.retrieve( - retrieval_method=retrieval_model["search_method"], - dataset_id=dataset.id, - query=query, - top_k=retrieval_model.get("top_k") or 2, - score_threshold=retrieval_model.get("score_threshold", 0.0) - if retrieval_model["score_threshold_enabled"] - else 0.0, - reranking_model=retrieval_model.get("reranking_model", None) - if retrieval_model["reranking_enable"] - else None, - reranking_mode=retrieval_model.get("reranking_mode") or "reranking_model", - 
weights=retrieval_model.get("weights", None), + retrieval_method="keyword_search", dataset_id=dataset.id, query=query, top_k=top_k ) - - all_documents.extend(documents) + if documents: + all_documents.extend(documents) + else: + if top_k > 0: + # retrieval source + documents = RetrievalService.retrieve( + retrieval_method=retrieval_model["search_method"], + dataset_id=dataset.id, + query=query, + top_k=retrieval_model.get("top_k") or 2, + score_threshold=retrieval_model.get("score_threshold", 0.0) + if retrieval_model["score_threshold_enabled"] + else 0.0, + reranking_model=retrieval_model.get("reranking_model", None) + if retrieval_model["reranking_enable"] + else None, + reranking_mode=retrieval_model.get("reranking_mode") or "reranking_model", + weights=retrieval_model.get("weights", None), + ) + + all_documents.extend(documents) def to_dataset_retriever_tool( self, diff --git a/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py b/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py index af55688a52c9b..c08dc143a6f32 100644 --- a/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py +++ b/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py @@ -156,16 +156,34 @@ def _fetch_dataset_retriever(self, node_data: KnowledgeRetrievalNodeData, query: weights, node_data.multiple_retrieval_config.reranking_enable, ) - - context_list = [] - if all_documents: + dify_documents = [item for item in all_documents if item.provider == "dify"] + external_documents = [item for item in all_documents if item.provider == "external"] + retrieval_resource_list = [] + # deal with external documents + for item in external_documents: + source = { + "metadata": { + "_source": "knowledge", + "dataset_id": item.metadata.get("dataset_id"), + "dataset_name": item.metadata.get("dataset_name"), + "document_name": item.metadata.get("title"), + "data_source_type": "external", + "retriever_from": "workflow", + "score": 
item.metadata.get("score"), + }, + "title": item.metadata.get("title"), + "content": item.page_content, + } + retrieval_resource_list.append(source) + document_score_list = {} + # deal with dify documents + if dify_documents: document_score_list = {} - page_number_list = {} - for item in all_documents: + for item in dify_documents: if item.metadata.get("score"): document_score_list[item.metadata["doc_id"]] = item.metadata["score"] - index_node_ids = [document.metadata["doc_id"] for document in all_documents] + index_node_ids = [document.metadata["doc_id"] for document in dify_documents] segments = DocumentSegment.query.filter( DocumentSegment.dataset_id.in_(dataset_ids), DocumentSegment.completed_at.isnot(None), @@ -186,13 +204,10 @@ def _fetch_dataset_retriever(self, node_data: KnowledgeRetrievalNodeData, query: Document.enabled == True, Document.archived == False, ).first() - - resource_number = 1 if dataset and document: source = { "metadata": { "_source": "knowledge", - "position": resource_number, "dataset_id": dataset.id, "dataset_name": dataset.name, "document_id": document.id, @@ -212,9 +227,14 @@ def _fetch_dataset_retriever(self, node_data: KnowledgeRetrievalNodeData, query: source["content"] = f"question:{segment.get_sign_content()} \nanswer:{segment.answer}" else: source["content"] = segment.get_sign_content() - context_list.append(source) - resource_number += 1 - return context_list + retrieval_resource_list.append(source) + if retrieval_resource_list: + retrieval_resource_list = sorted(retrieval_resource_list, key=lambda x: x.get("score"), reverse=True) + position = 1 + for item in retrieval_resource_list: + item["metadata"]["position"] = position + position += 1 + return retrieval_resource_list @classmethod def _extract_variable_selector_to_variable_mapping( diff --git a/api/fields/dataset_fields.py b/api/fields/dataset_fields.py index 9cf8da7acdc98..b32423f10c9dd 100644 --- a/api/fields/dataset_fields.py +++ b/api/fields/dataset_fields.py @@ -38,9 
+38,20 @@ "score_threshold_enabled": fields.Boolean, "score_threshold": fields.Float, } +external_retrieval_model_fields = { + "top_k": fields.Integer, + "score_threshold": fields.Float, +} tag_fields = {"id": fields.String, "name": fields.String, "type": fields.String} +external_knowledge_info_fields = { + "external_knowledge_id": fields.String, + "external_knowledge_api_id": fields.String, + "external_knowledge_api_name": fields.String, + "external_knowledge_api_endpoint": fields.String, +} + dataset_detail_fields = { "id": fields.String, "name": fields.String, @@ -61,6 +72,8 @@ "embedding_available": fields.Boolean, "retrieval_model_dict": fields.Nested(dataset_retrieval_model_fields), "tags": fields.List(fields.Nested(tag_fields)), + "external_knowledge_info": fields.Nested(external_knowledge_info_fields), + "external_retrieval_model": fields.Nested(external_retrieval_model_fields, allow_null=True), } dataset_query_detail_fields = { diff --git a/api/fields/external_dataset_fields.py b/api/fields/external_dataset_fields.py new file mode 100644 index 0000000000000..2281460fe2214 --- /dev/null +++ b/api/fields/external_dataset_fields.py @@ -0,0 +1,11 @@ +from flask_restful import fields + +from libs.helper import TimestampField + +external_knowledge_api_query_detail_fields = { + "id": fields.String, + "name": fields.String, + "setting": fields.String, + "created_by": fields.String, + "created_at": TimestampField, +} diff --git a/api/migrations/versions/2024_09_24_0922-6af6a521a53e_update_retrieval_resource.py b/api/migrations/versions/2024_09_24_0922-6af6a521a53e_update_retrieval_resource.py new file mode 100644 index 0000000000000..5337b340db769 --- /dev/null +++ b/api/migrations/versions/2024_09_24_0922-6af6a521a53e_update_retrieval_resource.py @@ -0,0 +1,48 @@ +"""update-retrieval-resource + +Revision ID: 6af6a521a53e +Revises: ec3df697ebbb +Create Date: 2024-09-24 09:22:43.570120 + +""" +from alembic import op +import models as models +import sqlalchemy as sa 
+from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. +revision = '6af6a521a53e' +down_revision = 'd57ba9ebb251' +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + with op.batch_alter_table('dataset_retriever_resources', schema=None) as batch_op: + batch_op.alter_column('document_id', + existing_type=sa.UUID(), + nullable=True) + batch_op.alter_column('data_source_type', + existing_type=sa.TEXT(), + nullable=True) + batch_op.alter_column('segment_id', + existing_type=sa.UUID(), + nullable=True) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + with op.batch_alter_table('dataset_retriever_resources', schema=None) as batch_op: + batch_op.alter_column('segment_id', + existing_type=sa.UUID(), + nullable=False) + batch_op.alter_column('data_source_type', + existing_type=sa.TEXT(), + nullable=False) + batch_op.alter_column('document_id', + existing_type=sa.UUID(), + nullable=False) + + # ### end Alembic commands ### diff --git a/api/migrations/versions/2024_09_25_0434-33f5fac87f29_external_knowledge_api.py b/api/migrations/versions/2024_09_25_0434-33f5fac87f29_external_knowledge_api.py new file mode 100644 index 0000000000000..3cb76e72c1ebb --- /dev/null +++ b/api/migrations/versions/2024_09_25_0434-33f5fac87f29_external_knowledge_api.py @@ -0,0 +1,73 @@ +"""external_knowledge_api + +Revision ID: 33f5fac87f29 +Revises: 6af6a521a53e +Create Date: 2024-09-25 04:34:57.249436 + +""" +from alembic import op +import models as models +import sqlalchemy as sa +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. +revision = '33f5fac87f29' +down_revision = '6af6a521a53e' +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.create_table('external_knowledge_apis', + sa.Column('id', models.types.StringUUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False), + sa.Column('name', sa.String(length=255), nullable=False), + sa.Column('description', sa.String(length=255), nullable=False), + sa.Column('tenant_id', models.types.StringUUID(), nullable=False), + sa.Column('settings', sa.Text(), nullable=True), + sa.Column('created_by', models.types.StringUUID(), nullable=False), + sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False), + sa.Column('updated_by', models.types.StringUUID(), nullable=True), + sa.Column('updated_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False), + sa.PrimaryKeyConstraint('id', name='external_knowledge_apis_pkey') + ) + with op.batch_alter_table('external_knowledge_apis', schema=None) as batch_op: + batch_op.create_index('external_knowledge_apis_name_idx', ['name'], unique=False) + batch_op.create_index('external_knowledge_apis_tenant_idx', ['tenant_id'], unique=False) + + op.create_table('external_knowledge_bindings', + sa.Column('id', models.types.StringUUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False), + sa.Column('tenant_id', models.types.StringUUID(), nullable=False), + sa.Column('external_knowledge_api_id', models.types.StringUUID(), nullable=False), + sa.Column('dataset_id', models.types.StringUUID(), nullable=False), + sa.Column('external_knowledge_id', sa.Text(), nullable=False), + sa.Column('created_by', models.types.StringUUID(), nullable=False), + sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False), + sa.Column('updated_by', models.types.StringUUID(), nullable=True), + sa.Column('updated_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False), + sa.PrimaryKeyConstraint('id', name='external_knowledge_bindings_pkey') + ) + with 
op.batch_alter_table('external_knowledge_bindings', schema=None) as batch_op: + batch_op.create_index('external_knowledge_bindings_dataset_idx', ['dataset_id'], unique=False) + batch_op.create_index('external_knowledge_bindings_external_knowledge_api_idx', ['external_knowledge_api_id'], unique=False) + batch_op.create_index('external_knowledge_bindings_external_knowledge_idx', ['external_knowledge_id'], unique=False) + batch_op.create_index('external_knowledge_bindings_tenant_idx', ['tenant_id'], unique=False) + + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + with op.batch_alter_table('external_knowledge_bindings', schema=None) as batch_op: + batch_op.drop_index('external_knowledge_bindings_tenant_idx') + batch_op.drop_index('external_knowledge_bindings_external_knowledge_idx') + batch_op.drop_index('external_knowledge_bindings_external_knowledge_api_idx') + batch_op.drop_index('external_knowledge_bindings_dataset_idx') + + op.drop_table('external_knowledge_bindings') + with op.batch_alter_table('external_knowledge_apis', schema=None) as batch_op: + batch_op.drop_index('external_knowledge_apis_tenant_idx') + batch_op.drop_index('external_knowledge_apis_name_idx') + + op.drop_table('external_knowledge_apis') + # ### end Alembic commands ### diff --git a/api/migrations/versions/fca025d3b60f_add_dataset_retrival_model.py b/api/migrations/versions/fca025d3b60f_add_dataset_retrival_model.py index 1f8250c3eb42a..52495be60a62e 100644 --- a/api/migrations/versions/fca025d3b60f_add_dataset_retrival_model.py +++ b/api/migrations/versions/fca025d3b60f_add_dataset_retrival_model.py @@ -1,4 +1,4 @@ -"""add-dataset-retrival-model +"""add-dataset-retrieval-model Revision ID: fca025d3b60f Revises: b3a09c049e8e diff --git a/api/models/dataset.py b/api/models/dataset.py index a2d2a3454d493..4224ee5e9cadd 100644 --- a/api/models/dataset.py +++ b/api/models/dataset.py @@ -38,6 +38,7 @@ class Dataset(db.Model): ) 
INDEXING_TECHNIQUE_LIST = ["high_quality", "economy", None] + PROVIDER_LIST = ["vendor", "external", None] id = db.Column(StringUUID, server_default=db.text("uuid_generate_v4()")) tenant_id = db.Column(StringUUID, nullable=False) @@ -71,6 +72,14 @@ def dataset_keyword_table(self): def index_struct_dict(self): return json.loads(self.index_struct) if self.index_struct else None + @property + def external_retrieval_model(self): + default_retrieval_model = { + "top_k": 2, + "score_threshold": 0.0, + } + return self.retrieval_model or default_retrieval_model + @property def created_by_account(self): return db.session.get(Account, self.created_by) @@ -162,6 +171,29 @@ def tags(self): return tags or [] + @property + def external_knowledge_info(self): + if self.provider != "external": + return None + external_knowledge_binding = ( + db.session.query(ExternalKnowledgeBindings).filter(ExternalKnowledgeBindings.dataset_id == self.id).first() + ) + if not external_knowledge_binding: + return None + external_knowledge_api = ( + db.session.query(ExternalKnowledgeApis) + .filter(ExternalKnowledgeApis.id == external_knowledge_binding.external_knowledge_api_id) + .first() + ) + if not external_knowledge_api: + return None + return { + "external_knowledge_id": external_knowledge_binding.external_knowledge_id, + "external_knowledge_api_id": external_knowledge_api.id, + "external_knowledge_api_name": external_knowledge_api.name, + "external_knowledge_api_endpoint": json.loads(external_knowledge_api.settings).get("endpoint", ""), + } + @staticmethod def gen_collection_name_by_id(dataset_id: str) -> str: normalized_dataset_id = dataset_id.replace("-", "_") @@ -687,3 +719,77 @@ class DatasetPermission(db.Model): tenant_id = db.Column(StringUUID, nullable=False) has_permission = db.Column(db.Boolean, nullable=False, server_default=db.text("true")) created_at = db.Column(db.DateTime, nullable=False, server_default=db.text("CURRENT_TIMESTAMP(0)")) + + +class ExternalKnowledgeApis(db.Model): 
+ __tablename__ = "external_knowledge_apis" + __table_args__ = ( + db.PrimaryKeyConstraint("id", name="external_knowledge_apis_pkey"), + db.Index("external_knowledge_apis_tenant_idx", "tenant_id"), + db.Index("external_knowledge_apis_name_idx", "name"), + ) + + id = db.Column(StringUUID, nullable=False, server_default=db.text("uuid_generate_v4()")) + name = db.Column(db.String(255), nullable=False) + description = db.Column(db.String(255), nullable=False) + tenant_id = db.Column(StringUUID, nullable=False) + settings = db.Column(db.Text, nullable=True) + created_by = db.Column(StringUUID, nullable=False) + created_at = db.Column(db.DateTime, nullable=False, server_default=db.text("CURRENT_TIMESTAMP(0)")) + updated_by = db.Column(StringUUID, nullable=True) + updated_at = db.Column(db.DateTime, nullable=False, server_default=db.text("CURRENT_TIMESTAMP(0)")) + + def to_dict(self): + return { + "id": self.id, + "tenant_id": self.tenant_id, + "name": self.name, + "description": self.description, + "settings": self.settings_dict, + "dataset_bindings": self.dataset_bindings, + "created_by": self.created_by, + "created_at": self.created_at.isoformat(), + } + + @property + def settings_dict(self): + try: + return json.loads(self.settings) if self.settings else None + except JSONDecodeError: + return None + + @property + def dataset_bindings(self): + external_knowledge_bindings = ( + db.session.query(ExternalKnowledgeBindings) + .filter(ExternalKnowledgeBindings.external_knowledge_api_id == self.id) + .all() + ) + dataset_ids = [binding.dataset_id for binding in external_knowledge_bindings] + datasets = db.session.query(Dataset).filter(Dataset.id.in_(dataset_ids)).all() + dataset_bindings = [] + for dataset in datasets: + dataset_bindings.append({"id": dataset.id, "name": dataset.name}) + + return dataset_bindings + + +class ExternalKnowledgeBindings(db.Model): + __tablename__ = "external_knowledge_bindings" + __table_args__ = ( + db.PrimaryKeyConstraint("id", 
name="external_knowledge_bindings_pkey"), + db.Index("external_knowledge_bindings_tenant_idx", "tenant_id"), + db.Index("external_knowledge_bindings_dataset_idx", "dataset_id"), + db.Index("external_knowledge_bindings_external_knowledge_idx", "external_knowledge_id"), + db.Index("external_knowledge_bindings_external_knowledge_api_idx", "external_knowledge_api_id"), + ) + + id = db.Column(StringUUID, nullable=False, server_default=db.text("uuid_generate_v4()")) + tenant_id = db.Column(StringUUID, nullable=False) + external_knowledge_api_id = db.Column(StringUUID, nullable=False) + dataset_id = db.Column(StringUUID, nullable=False) + external_knowledge_id = db.Column(db.Text, nullable=False) + created_by = db.Column(StringUUID, nullable=False) + created_at = db.Column(db.DateTime, nullable=False, server_default=db.text("CURRENT_TIMESTAMP(0)")) + updated_by = db.Column(StringUUID, nullable=True) + updated_at = db.Column(db.DateTime, nullable=False, server_default=db.text("CURRENT_TIMESTAMP(0)")) diff --git a/api/models/model.py b/api/models/model.py index 53940a5a16cc8..0ac93343217d4 100644 --- a/api/models/model.py +++ b/api/models/model.py @@ -1423,10 +1423,10 @@ class DatasetRetrieverResource(db.Model): position = db.Column(db.Integer, nullable=False) dataset_id = db.Column(StringUUID, nullable=False) dataset_name = db.Column(db.Text, nullable=False) - document_id = db.Column(StringUUID, nullable=False) + document_id = db.Column(StringUUID, nullable=True) document_name = db.Column(db.Text, nullable=False) - data_source_type = db.Column(db.Text, nullable=False) - segment_id = db.Column(StringUUID, nullable=False) + data_source_type = db.Column(db.Text, nullable=True) + segment_id = db.Column(StringUUID, nullable=True) score = db.Column(db.Float, nullable=True) content = db.Column(db.Text, nullable=False) hit_count = db.Column(db.Integer, nullable=True) diff --git a/api/poetry.lock b/api/poetry.lock index 85c68cd75f129..4fb8d54f11852 100644 --- a/api/poetry.lock +++ 
b/api/poetry.lock @@ -2,13 +2,13 @@ [[package]] name = "aiohappyeyeballs" -version = "2.4.0" +version = "2.4.2" description = "Happy Eyeballs for asyncio" optional = false python-versions = ">=3.8" files = [ - {file = "aiohappyeyeballs-2.4.0-py3-none-any.whl", hash = "sha256:7ce92076e249169a13c2f49320d1967425eaf1f407522d707d59cac7628d62bd"}, - {file = "aiohappyeyeballs-2.4.0.tar.gz", hash = "sha256:55a1714f084e63d49639800f95716da97a1f173d46a16dfcfda0016abb93b6b2"}, + {file = "aiohappyeyeballs-2.4.2-py3-none-any.whl", hash = "sha256:8522691d9a154ba1145b157d6d5c15e5c692527ce6a53c5e5f9876977f6dab2f"}, + {file = "aiohappyeyeballs-2.4.2.tar.gz", hash = "sha256:4ca893e6c5c1f5bf3888b04cb5a3bee24995398efef6e0b9f747b5e89d84fd74"}, ] [[package]] @@ -153,13 +153,13 @@ frozenlist = ">=1.1.0" [[package]] name = "alembic" -version = "1.13.2" +version = "1.13.3" description = "A database migration tool for SQLAlchemy." optional = false python-versions = ">=3.8" files = [ - {file = "alembic-1.13.2-py3-none-any.whl", hash = "sha256:6b8733129a6224a9a711e17c99b08462dbf7cc9670ba8f2e2ae9af860ceb1953"}, - {file = "alembic-1.13.2.tar.gz", hash = "sha256:1ff0ae32975f4fd96028c39ed9bb3c867fe3af956bd7bb37343b54c9fe7445ef"}, + {file = "alembic-1.13.3-py3-none-any.whl", hash = "sha256:908e905976d15235fae59c9ac42c4c5b75cfcefe3d27c0fbf7ae15a37715d80e"}, + {file = "alembic-1.13.3.tar.gz", hash = "sha256:203503117415561e203aa14541740643a611f641517f0209fcae63e9fa09f1a2"}, ] [package.dependencies] @@ -293,13 +293,13 @@ alibabacloud-tea = "*" [[package]] name = "alibabacloud-tea" -version = "0.3.9" +version = "0.3.10" description = "The tea module of alibabaCloud Python SDK." 
optional = false python-versions = ">=3.6" files = [ - {file = "alibabacloud-tea-0.3.9.tar.gz", hash = "sha256:a9689770003fa9313d1995812f9fe36a2be315e5cdfc8d58de0d96808219ced9"}, - {file = "alibabacloud_tea-0.3.9-py3-none-any.whl", hash = "sha256:402fd2a92e6729f228d8c0300b182f80019edce19d83afa497aeb15fd7947f9a"}, + {file = "alibabacloud-tea-0.3.10.tar.gz", hash = "sha256:bcf972416af5d8b5e671078c2ec20296dbc792e85e68acd685730a0a016afd2a"}, + {file = "alibabacloud_tea-0.3.10-py3-none-any.whl", hash = "sha256:9136f302a3baea8a1528f500bf5d47c3727b827a09b5c14b283ca53578e30082"}, ] [package.dependencies] @@ -321,17 +321,17 @@ alibabacloud-tea = ">=0.0.1" [[package]] name = "alibabacloud-tea-openapi" -version = "0.3.11" +version = "0.3.12" description = "Alibaba Cloud openapi SDK Library for Python" optional = false python-versions = ">=3.6" files = [ - {file = "alibabacloud_tea_openapi-0.3.11.tar.gz", hash = "sha256:3f5cace1b1aeb8a64587574097403cfd066b86ee4c3c9abde587f9abfcad38de"}, + {file = "alibabacloud_tea_openapi-0.3.12.tar.gz", hash = "sha256:2e14809f357438e62c1ef4976a7655110dd54a75bbfa7d905fa3798355cfd974"}, ] [package.dependencies] -alibabacloud_credentials = ">=0.3.1,<1.0.0" -alibabacloud_gateway_spi = ">=0.0.1,<1.0.0" +alibabacloud_credentials = ">=0.3.5,<1.0.0" +alibabacloud_gateway_spi = ">=0.0.2,<1.0.0" alibabacloud_openapi_util = ">=0.2.1,<1.0.0" alibabacloud_tea_util = ">=0.3.13,<1.0.0" alibabacloud_tea_xml = ">=0.0.2,<1.0.0" @@ -455,13 +455,13 @@ vertex = ["google-auth (>=2,<3)"] [[package]] name = "anyio" -version = "4.4.0" +version = "4.6.0" description = "High level compatibility layer for multiple asynchronous event loop implementations" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "anyio-4.4.0-py3-none-any.whl", hash = "sha256:c1b2d8f46a8a812513012e1107cb0e68c17159a7a594208005a57dc776e1bdc7"}, - {file = "anyio-4.4.0.tar.gz", hash = "sha256:5aadc6a1bbb7cdb0bede386cac5e2940f5e2ff3aa20277e991cf028e0585ce94"}, 
+ {file = "anyio-4.6.0-py3-none-any.whl", hash = "sha256:c7d2e9d63e31599eeb636c8c5c03a7e108d73b345f064f1c19fdc87b79036a9a"}, + {file = "anyio-4.6.0.tar.gz", hash = "sha256:137b4559cbb034c477165047febb6ff83f390fc3b20bf181c1fc0a728cb8beeb"}, ] [package.dependencies] @@ -471,9 +471,9 @@ sniffio = ">=1.1" typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""} [package.extras] -doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] -test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"] -trio = ["trio (>=0.23)"] +doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] +test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.21.0b1)"] +trio = ["trio (>=0.26.1)"] [[package]] name = "arxiv" @@ -698,23 +698,23 @@ msrest = ">=0.6.21" [[package]] name = "azure-storage-file-share" -version = "12.17.0" +version = "12.18.0" description = "Microsoft Azure Azure File Share Storage Client Library for Python" optional = false python-versions = ">=3.8" files = [ - {file = "azure-storage-file-share-12.17.0.tar.gz", hash = "sha256:f7b2c6cfc1b7cb80097a53b1ed2efa9e545b49a291430d369cdb49fafbc841d6"}, - {file = "azure_storage_file_share-12.17.0-py3-none-any.whl", hash = "sha256:c4652759a9d529bf08881bb53275bf38774bb643746b849d27c47118f9cf923d"}, + {file = "azure_storage_file_share-12.18.0-py3-none-any.whl", hash = "sha256:23ca35206a0cb8af0decd9d1363d0ad8ab31584a5e55cfc64528d7192ff748a9"}, + {file = "azure_storage_file_share-12.18.0.tar.gz", hash = "sha256:0a81daee5e13598accde3c73b1aeccc6edfdcec5e957bf40150c0622fb95dc76"}, ] [package.dependencies] -azure-core = ">=1.28.0" +azure-core = ">=1.30.0" cryptography = ">=2.1.4" isodate = 
">=0.6.1" typing-extensions = ">=4.6.0" [package.extras] -aio = ["azure-core[aio] (>=1.28.0)"] +aio = ["azure-core[aio] (>=1.30.0)"] [[package]] name = "backoff" @@ -787,13 +787,13 @@ lxml = ["lxml"] [[package]] name = "billiard" -version = "4.2.0" +version = "4.2.1" description = "Python multiprocessing fork with improvements and bugfixes" optional = false python-versions = ">=3.7" files = [ - {file = "billiard-4.2.0-py3-none-any.whl", hash = "sha256:07aa978b308f334ff8282bd4a746e681b3513db5c9a514cbdd810cbbdc19714d"}, - {file = "billiard-4.2.0.tar.gz", hash = "sha256:9a3c3184cb275aa17a732f93f65b20c525d3d9f253722d26a82194803ade5a2c"}, + {file = "billiard-4.2.1-py3-none-any.whl", hash = "sha256:40b59a4ac8806ba2c2369ea98d876bc6108b051c227baffd928c644d15d8f3cb"}, + {file = "billiard-4.2.1.tar.gz", hash = "sha256:12b641b0c539073fc8d3f5b8b7be998956665c4233c7c1fcd66a7e677c4fb36f"}, ] [[package]] @@ -828,13 +828,13 @@ crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] [[package]] name = "botocore" -version = "1.35.19" +version = "1.35.29" description = "Low-level, data-driven core of boto 3." optional = false python-versions = ">=3.8" files = [ - {file = "botocore-1.35.19-py3-none-any.whl", hash = "sha256:c83f7f0cacfe7c19b109b363ebfa8736e570d24922f16ed371681f58ebab44a9"}, - {file = "botocore-1.35.19.tar.gz", hash = "sha256:42d6d8db7250cbd7899f786f9861e02cab17dc238f64d6acb976098ed9809625"}, + {file = "botocore-1.35.29-py3-none-any.whl", hash = "sha256:f8e3ae0d84214eff3fb69cb4dc51cea6c43d3bde82027a94d00c52b941d6c3d5"}, + {file = "botocore-1.35.29.tar.gz", hash = "sha256:4ed28ab03675bb008a290c452c5ddd7aaa5d4e3fa1912aadbdf93057ee84362b"}, ] [package.dependencies] @@ -2077,13 +2077,13 @@ tokenizer = ["tiktoken"] [[package]] name = "dataclass-wizard" -version = "0.22.3" +version = "0.23.0" description = "Marshal dataclasses to/from JSON. Use field properties with initial values. Construct a dataclass schema with JSON input." 
optional = false python-versions = "*" files = [ - {file = "dataclass-wizard-0.22.3.tar.gz", hash = "sha256:4c46591782265058f1148cfd1f54a3a91221e63986fdd04c9d59f4ced61f4424"}, - {file = "dataclass_wizard-0.22.3-py2.py3-none-any.whl", hash = "sha256:63751203e54b9b9349212cc185331da73c1adc99c51312575eb73bb5c00c1962"}, + {file = "dataclass-wizard-0.23.0.tar.gz", hash = "sha256:da29ec19846d46a1eef0692ba7c59c8a86ecd3a9eaddc0511cfc7485ad6d9c50"}, + {file = "dataclass_wizard-0.23.0-py2.py3-none-any.whl", hash = "sha256:50207dec6d36494421366b49b7a9ba6a4d831e2650c0af25cb4c057103d4a97c"}, ] [package.extras] @@ -2164,13 +2164,13 @@ dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "sphinx (<2)", "tox"] [[package]] name = "dill" -version = "0.3.8" +version = "0.3.9" description = "serialize all of Python" optional = false python-versions = ">=3.8" files = [ - {file = "dill-0.3.8-py3-none-any.whl", hash = "sha256:c36ca9ffb54365bdd2f8eb3eff7d2a21237f8452b57ace88b1ac615b7e815bd7"}, - {file = "dill-0.3.8.tar.gz", hash = "sha256:3ebe3c479ad625c4553aca177444d89b486b1d84982eeacded644afc0cf797ca"}, + {file = "dill-0.3.9-py3-none-any.whl", hash = "sha256:468dff3b89520b474c0397703366b7b95eebe6303f108adf9b19da1f702be87a"}, + {file = "dill-0.3.9.tar.gz", hash = "sha256:81aa267dddf68cbfe8029c42ca9ec6a4ab3b22371d1c450abc54422577b4512c"}, ] [package.extras] @@ -2241,78 +2241,94 @@ typing_extensions = ">=4.0,<5.0" [[package]] name = "duckdb" -version = "1.1.0" +version = "1.1.1" description = "DuckDB in-process database" optional = false python-versions = ">=3.7.0" files = [ - {file = "duckdb-1.1.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:5e4cbc408e6e41146dea89b9044dae7356e353db0c96b183e5583ee02bc6ae5d"}, - {file = "duckdb-1.1.0-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:6370ae27ec8167ccfbefb94f58ad9fdc7bac142399960549d6d367f233189868"}, - {file = "duckdb-1.1.0-cp310-cp310-macosx_12_0_x86_64.whl", hash = 
"sha256:4e1c3414f7fd01f4810dc8b335deffc91933a159282d65fef11c1286bc0ded04"}, - {file = "duckdb-1.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c6bc2a58689adf5520303c5f68b065b9f980bd31f1366c541b8c7490abaf55cd"}, - {file = "duckdb-1.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d02be208d2885ca085d4c852b911493b8cdac9d6eae893259da32bd72a437c25"}, - {file = "duckdb-1.1.0-cp310-cp310-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:655df442ceebfc6f3fd6c8766e04b60d44dddedfa90275d794f9fab2d3180879"}, - {file = "duckdb-1.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:6e183729bb64be7798ccbfda6283ebf423c869268c25af2b56929e48f763be2f"}, - {file = "duckdb-1.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:61fb838da51e07ceb0222c4406b059b90e10efcc453c19a3650b73c0112138c4"}, - {file = "duckdb-1.1.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:7807e2f0d3344668e433f0dc1f54bfaddd410589611393e9a7ed56f8dec9514f"}, - {file = "duckdb-1.1.0-cp311-cp311-macosx_12_0_universal2.whl", hash = "sha256:3da30b7b466f710d52caa1fdc3ef0bf4176ad7f115953cd9f8b0fbf0f723778f"}, - {file = "duckdb-1.1.0-cp311-cp311-macosx_12_0_x86_64.whl", hash = "sha256:b9b6a77ef0183f561b1fc2945fcc762a71570ffd33fea4e3a855d413ed596fe4"}, - {file = "duckdb-1.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:16243e66a9fd0e64ee265f2634d137adc6593f54ddf3ef55cb8a29e1decf6e54"}, - {file = "duckdb-1.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42b910a149e00f40a1766dc74fa309d4255b912a5d2fdcc387287658048650f6"}, - {file = "duckdb-1.1.0-cp311-cp311-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:47849d546dc4238c0f20e95fe53b621aa5b08684e68fff91fd84a7092be91a17"}, - {file = "duckdb-1.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:11ec967b67159361ceade34095796a8d19368ea5c30cad988f44896b082b0816"}, - {file = 
"duckdb-1.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:510b5885ed6c267b9c0e1e7c6138fdffc2dd6f934a5a95b76da85da127213338"}, - {file = "duckdb-1.1.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:657bc7ac64d5faf069a782ae73afac51ef30ae2e5d0e09ce6a09d03db84ab35e"}, - {file = "duckdb-1.1.0-cp312-cp312-macosx_12_0_universal2.whl", hash = "sha256:89f3de8cba57d19b41cd3c47dd06d979bd2a2ffead115480e37afbe72b02896d"}, - {file = "duckdb-1.1.0-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:f6486323ab20656d22ffa8f3c6e109dde30d0b327b7c831f22ebcfe747f97fb0"}, - {file = "duckdb-1.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78a4510f82431ee3f14db689fe8727a4a9062c8f2fbb3bcfe3bfad3c1a198004"}, - {file = "duckdb-1.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64bf2a6e23840d662bd2ac09206a9bd4fa657418884d69e5c352d4456dc70b3c"}, - {file = "duckdb-1.1.0-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:23fc9aa0af74e3803ed90c8d98280fd5bcac8c940592bf6288e8fd60fb051d00"}, - {file = "duckdb-1.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1f3aea31341ce400640dd522e4399b941f66df17e39884f446638fe958d6117c"}, - {file = "duckdb-1.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:3db4ab31c20de4edaef152930836b38e7662cd71370748fdf2c38ba9cf854dc4"}, - {file = "duckdb-1.1.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e3b6b4fe1edfe35f64f403a9f0ab75258cee35abd964356893ee37424174b7e4"}, - {file = "duckdb-1.1.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aad02f50d5a2020822d1638fc1a9bcf082056f11d2e15ccfc1c1ed4d0f85a3be"}, - {file = "duckdb-1.1.0-cp37-cp37m-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:eb66e9e7391801928ea134dcab12d2e4c97f2ce0391c603a3e480bbb15830bc8"}, - {file = "duckdb-1.1.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = 
"sha256:069fb7bca459e31edb32a61f0eea95d7a8a766bef7b8318072563abf8e939593"}, - {file = "duckdb-1.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:e39f9b7b62e64e10d421ff04480290a70129c38067d1a4f600e9212b10542c5a"}, - {file = "duckdb-1.1.0-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:55ef98bcc7ba745752607f1b926e8d9b7ce32c42c423bbad10c44820aefe23a7"}, - {file = "duckdb-1.1.0-cp38-cp38-macosx_12_0_universal2.whl", hash = "sha256:e2a08175e43b865c1e9611efd18cacd29ddd69093de442b1ebdf312071df7719"}, - {file = "duckdb-1.1.0-cp38-cp38-macosx_12_0_x86_64.whl", hash = "sha256:0e3644b1f034012d82b9baa12a7ea306fe71dc6623731b28c753c4a617ff9499"}, - {file = "duckdb-1.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:211a33c1ddb5cc609f75eb43772b0b03b45d2fa89bec107e4715267ca907806a"}, - {file = "duckdb-1.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e74b6f8a5145abbf7e6c1a2a61f0adbcd493c19b358f524ec9a3cebdf362abb"}, - {file = "duckdb-1.1.0-cp38-cp38-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:58f1633dd2c5af5088ae2d119418e200855d0699d84f2fae9d46d30f404bcead"}, - {file = "duckdb-1.1.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:d18caea926b1e301c29b140418fca697aad728129e269b4f82c2795a184549e1"}, - {file = "duckdb-1.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:cd9fb1408942411ad360f8414bc3fbf0091c396ca903d947a10f2e31324d5cbd"}, - {file = "duckdb-1.1.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:bd11bc899cebf5ff936d1276a2dfb7b7db08aba3bcc42924afeafc2163bddb43"}, - {file = "duckdb-1.1.0-cp39-cp39-macosx_12_0_universal2.whl", hash = "sha256:53825a63193c582a78c152ea53de8d145744ddbeea18f452625a82ebc33eb14a"}, - {file = "duckdb-1.1.0-cp39-cp39-macosx_12_0_x86_64.whl", hash = "sha256:29dc18087de47563b3859a6b98bbed96e1c96ce5db829646dc3b16a916997e7d"}, - {file = "duckdb-1.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:ecb19319883564237a7a03a104dbe7f445e73519bb67108fcab3d19b6b91fe30"}, - {file = "duckdb-1.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aac2fcabe2d5072c252d0b3087365f431de812d8199705089fb073e4d039d19c"}, - {file = "duckdb-1.1.0-cp39-cp39-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d89eaaa5df8a57e7d2bc1f4c46493bb1fee319a00155f2015810ad2ace6570ae"}, - {file = "duckdb-1.1.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:d86a6926313913cd2cc7e08816d3e7f72ba340adf2959279b1a80058be6526d9"}, - {file = "duckdb-1.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:d8333f3e85fa2a0f1c222b752c2bd42ea875235ff88492f7bcbb6867d0f644eb"}, - {file = "duckdb-1.1.0.tar.gz", hash = "sha256:b4d4c12b1f98732151bd31377753e0da1a20f6423016d2d097d2e31953ec7c23"}, + {file = "duckdb-1.1.1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:e310610b692d30aa7f1f40d7878b26978a5b191f23fa8fa082bd17092c67c2fd"}, + {file = "duckdb-1.1.1-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:7acc97c3cc995850a4fa59dfa6ce713d7ea187c9696632161aa09d898f001a2b"}, + {file = "duckdb-1.1.1-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:c0a09d78daea0de7ddf3d6d1113e80ceed8c15537e93f8efaad53024ffbde245"}, + {file = "duckdb-1.1.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50c3b1667b0c73cb076b1b1f8fa0fd88fcef5c2bbb2b9acdef79e2eae429c248"}, + {file = "duckdb-1.1.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1499a9b159d4675ea46786b7ebdbabd8287c62b6b116ccfd529112318d47184e"}, + {file = "duckdb-1.1.1-cp310-cp310-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:876deda2ce97f4a9005a9ac862f0ebee9e5956d51d589a24955802ca91726d49"}, + {file = "duckdb-1.1.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:40be901b38c709076f699b0c2f42a0c5663a496647eba350530e3a77f46a239b"}, + {file = "duckdb-1.1.1-cp310-cp310-win_amd64.whl", hash = 
"sha256:5cb7642c5b21b8165b60029c274fc931c7c29cae3124b9a95ed73d050dd23584"}, + {file = "duckdb-1.1.1-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:959716b65cf1c94fc117ac9c9692eea0bd64ae53bc8ab6538d459087b474dbeb"}, + {file = "duckdb-1.1.1-cp311-cp311-macosx_12_0_universal2.whl", hash = "sha256:6ff3c52ce0f8d25478155eb01de043ad0a25badbd10e684a2cd74363f1b86cde"}, + {file = "duckdb-1.1.1-cp311-cp311-macosx_12_0_x86_64.whl", hash = "sha256:430294cf11ce866d3b726cf4530462316e20b773fed3cf2de3cf63eb89650da6"}, + {file = "duckdb-1.1.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dc9d48f772fafeea52568a0568cd11314cd79a10214069f3700dbcb31ebdf511"}, + {file = "duckdb-1.1.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:572095739024d9a5aa2dd8336c289af6a624c203004213e49b7e2469275e940f"}, + {file = "duckdb-1.1.1-cp311-cp311-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:660d9baf637b9a15e1ba74bbe02d3b4a20d82e8cbbd7d0712e0d59e3e9d6efea"}, + {file = "duckdb-1.1.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b91973605c8a30a38c4381a27895e7768cb3caa6700b2534ab76cc6b72cac390"}, + {file = "duckdb-1.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:f57c9e070cecf42d379145a75f325ec57fb1d410d6ff6592b5a28c2ff2b5792c"}, + {file = "duckdb-1.1.1-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:926a99b81c50b9a4a43ca26dcb781f934d35e773d22913548396601ab8d44c12"}, + {file = "duckdb-1.1.1-cp312-cp312-macosx_12_0_universal2.whl", hash = "sha256:55a2632d27b5a965f1d9fc74b03383e80a3f8e3dc9596807dfb02c8db08cfcb7"}, + {file = "duckdb-1.1.1-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:8d8174fe47caf48d830dc477a45cedc8c970722df09dc1456bddc760ff6ccf68"}, + {file = "duckdb-1.1.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9ad84023399002222fa8d5264a8dc2083053027910df728da92cabb07494a489"}, + {file = "duckdb-1.1.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:1c8adbc8b37444424c72043288f1521c860555a4f151ee4b744e6125f5d05729"}, + {file = "duckdb-1.1.1-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:550524c1b423eeb7ca0fdf1c2e6d29e723d7ec7cfab3050b9feb55a620ae927f"}, + {file = "duckdb-1.1.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:4064243e4d3f445975b78773677de0ccbe924f9c7058a7c2cfedb24bba2ba939"}, + {file = "duckdb-1.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:4f64516dc62dd0fcbb9785c5bc7532a4fca3e6016bbcc92a2b235aa972c631f6"}, + {file = "duckdb-1.1.1-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:4bf75a64c927470b6618496adcfbf0f316ef09d46a44cfe8e38b78e9ff40c8a0"}, + {file = "duckdb-1.1.1-cp313-cp313-macosx_12_0_universal2.whl", hash = "sha256:5c8cd6fd7107299b9a243836cd8163e4c08d6228f18cbee4ed9f535f53300096"}, + {file = "duckdb-1.1.1-cp313-cp313-macosx_12_0_x86_64.whl", hash = "sha256:fc81c02b4d73533a438a9bbae19499531d85b752233c905facc4df41bbde043c"}, + {file = "duckdb-1.1.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:baff4014caf6553b624a296e4db2926602670bd9be6e0fc75f3e970b085631b0"}, + {file = "duckdb-1.1.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e21b75a9a60f10b5b5033138c317d929018c92f355fadae5949b310a9179e0a7"}, + {file = "duckdb-1.1.1-cp313-cp313-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8220f039c5ea06dc126232464ab9b77197f80ae53d4611b0a41f73c54f6f3931"}, + {file = "duckdb-1.1.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:07384414ceae585d4106a7dc154331ae42f45390ed675ec81e3d01f2252a6b01"}, + {file = "duckdb-1.1.1-cp313-cp313-win_amd64.whl", hash = "sha256:82776b3999e71a962db0bdc3f0258407ef41453f63eb47c33da29b644f8eb530"}, + {file = "duckdb-1.1.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:35d4323655be4053fb90d47e85222c93fd56aea0e8ab0ac44bd8f7249ba85697"}, + {file = 
"duckdb-1.1.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:990d0799e0f543a4369413dc6caf7782cbbab49955c08c28ac56d5dab5ccef11"}, + {file = "duckdb-1.1.1-cp37-cp37m-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5ef3ba36b317abe000f502702eaaefdd8c3651a25aa0ad409f9487b286e2fb28"}, + {file = "duckdb-1.1.1-cp37-cp37m-win_amd64.whl", hash = "sha256:2c6e513a572967cd2bab0f20ce265f8eaf95ea7b554eecf1c233717c38569abc"}, + {file = "duckdb-1.1.1-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:567471cb964a0e54a7874c578e81af7b6ab474676ae6469ae1c33c2353f76fb1"}, + {file = "duckdb-1.1.1-cp38-cp38-macosx_12_0_universal2.whl", hash = "sha256:a41d8eb4dc538d17660b78f2f4ecd0ba29666a396453bb71d6f4972bf2b3959e"}, + {file = "duckdb-1.1.1-cp38-cp38-macosx_12_0_x86_64.whl", hash = "sha256:31be0b9bc1909fb60abda7cd30615fe0224d1e451160d79e8e0313d6205417b0"}, + {file = "duckdb-1.1.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:541fb49da108e080d4f2984d2fdabaee36d65967a33642f8bce03373b29952f0"}, + {file = "duckdb-1.1.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e1c54f836dac5eddbe369fa654811e979bb07688638a52d1c006172feb5b75a5"}, + {file = "duckdb-1.1.1-cp38-cp38-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:afb97970ee72e554b507c6f2e40b356bdbf8fc1f466e7c4d1797183eb66c0809"}, + {file = "duckdb-1.1.1-cp38-cp38-win_amd64.whl", hash = "sha256:a2cdcb68247f02017a35a0b617ceb1d36a02a7c0588d7e2ed91c9a4e9f14c3f6"}, + {file = "duckdb-1.1.1-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:36d71969cb98d10dc2391d8755921258d197995cc8c69e6c82fc377c2f71940a"}, + {file = "duckdb-1.1.1-cp39-cp39-macosx_12_0_universal2.whl", hash = "sha256:3693f464409379a21aff4e35b5f67eb6c96fc402649d9ffddbda4ee9ee9ba9b6"}, + {file = "duckdb-1.1.1-cp39-cp39-macosx_12_0_x86_64.whl", hash = "sha256:06ca7f4ca785cc86e9f9aa23d16b67b82dc454b14c396b2e0ff4c09698c7838e"}, + {file = 
"duckdb-1.1.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5ed92f3229bf70897a742e7648f648aa8b0c81a7489072aec5515c5635f3303c"}, + {file = "duckdb-1.1.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a80ebf52c03f81265b67720abc06a5c7770d08df82b30cabbe266012bd526229"}, + {file = "duckdb-1.1.1-cp39-cp39-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:402a42b992227ebb371a48681ce71b6d1c0661385454b269e6aa379f77a8a83a"}, + {file = "duckdb-1.1.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:a182d3cbf2e352aaddf392887331bbac460c473cbd55c65d6b6121ef7b43f174"}, + {file = "duckdb-1.1.1-cp39-cp39-win_amd64.whl", hash = "sha256:fafc7d1ec4401787597a5f983d4ef8a9b0638f31e1674a458c57383911166f27"}, + {file = "duckdb-1.1.1.tar.gz", hash = "sha256:74fb07c1334a73e0ead1b0a03646d349921dac655762d916c8e45194c8218d30"}, ] [[package]] name = "duckduckgo-search" -version = "6.2.12" +version = "6.2.13" description = "Search for words, documents, images, news, maps and text translation using the DuckDuckGo.com search engine." 
optional = false python-versions = ">=3.8" files = [ - {file = "duckduckgo_search-6.2.12-py3-none-any.whl", hash = "sha256:0d379c1f845b632a41553efb13d571788f19ad289229e641a27b5710d92097a6"}, - {file = "duckduckgo_search-6.2.12.tar.gz", hash = "sha256:04f9f1459763668d268344c7a32d943173d0e060dad53a5c2df4b4d3ca9a74cf"}, + {file = "duckduckgo_search-6.2.13-py3-none-any.whl", hash = "sha256:a6618fb2744fa1d081b1bf2e47ef8051de993276a15c20f4e879a150726472de"}, + {file = "duckduckgo_search-6.2.13.tar.gz", hash = "sha256:f89a9782f0f47d18c01a761c83453d0aef7a4335d1b6466fc47709652d5ca791"}, ] [package.dependencies] click = ">=8.1.7" -primp = ">=0.6.2" +primp = ">=0.6.3" [package.extras] dev = ["mypy (>=1.11.1)", "pytest (>=8.3.1)", "pytest-asyncio (>=0.23.8)", "ruff (>=0.6.1)"] lxml = ["lxml (>=5.2.2)"] +[[package]] +name = "durationpy" +version = "0.7" +description = "Module for converting between datetime.timedelta and Go's Duration strings." +optional = false +python-versions = "*" +files = [ + {file = "durationpy-0.7.tar.gz", hash = "sha256:8447c43df4f1a0b434e70c15a38d77f5c9bd17284bfc1ff1d430f233d5083732"}, +] + [[package]] name = "elastic-transport" version = "8.15.0" @@ -2356,18 +2372,15 @@ vectorstore-mmr = ["numpy (>=1)", "simsimd (>=3)"] [[package]] name = "emoji" -version = "2.12.1" +version = "2.13.2" description = "Emoji for Python" optional = false python-versions = ">=3.7" files = [ - {file = "emoji-2.12.1-py3-none-any.whl", hash = "sha256:a00d62173bdadc2510967a381810101624a2f0986145b8da0cffa42e29430235"}, - {file = "emoji-2.12.1.tar.gz", hash = "sha256:4aa0488817691aa58d83764b6c209f8a27c0b3ab3f89d1b8dceca1a62e4973eb"}, + {file = "emoji-2.13.2-py3-none-any.whl", hash = "sha256:ef6f2ee63b245e934c763b1a9a0637713955aa3d9e322432e036bb60559de4d6"}, + {file = "emoji-2.13.2.tar.gz", hash = "sha256:f95d10d96c5f21299ed2c4b32511611ba890b8c07f5f2bf5b04d5d3eee91fd19"}, ] -[package.dependencies] -typing-extensions = ">=4.7.0" - [package.extras] dev = ["coverage", "pytest 
(>=7.4.4)"] @@ -2432,13 +2445,13 @@ test = ["pytest (>=6)"] [[package]] name = "fastapi" -version = "0.114.2" +version = "0.115.0" description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production" optional = false python-versions = ">=3.8" files = [ - {file = "fastapi-0.114.2-py3-none-any.whl", hash = "sha256:44474a22913057b1acb973ab90f4b671ba5200482e7622816d79105dcece1ac5"}, - {file = "fastapi-0.114.2.tar.gz", hash = "sha256:0adb148b62edb09e8c6eeefa3ea934e8f276dabc038c5a82989ea6346050c3da"}, + {file = "fastapi-0.115.0-py3-none-any.whl", hash = "sha256:17ea427674467486e997206a5ab25760f6b09e069f099b96f5b55a32fb6f1631"}, + {file = "fastapi-0.115.0.tar.gz", hash = "sha256:f93b4ca3529a8ebc6fc3fcf710e5efa8de3df9b41570958abf1d97d843138004"}, ] [package.dependencies] @@ -2527,18 +2540,18 @@ sgmllib3k = "*" [[package]] name = "filelock" -version = "3.16.0" +version = "3.16.1" description = "A platform independent file lock." optional = false python-versions = ">=3.8" files = [ - {file = "filelock-3.16.0-py3-none-any.whl", hash = "sha256:f6ed4c963184f4c84dd5557ce8fece759a3724b37b80c6c4f20a2f63a4dc6609"}, - {file = "filelock-3.16.0.tar.gz", hash = "sha256:81de9eb8453c769b63369f87f11131a7ab04e367f8d97ad39dc230daa07e3bec"}, + {file = "filelock-3.16.1-py3-none-any.whl", hash = "sha256:2082e5703d51fbf98ea75855d9d5527e33d8ff23099bec374a134febee6946b0"}, + {file = "filelock-3.16.1.tar.gz", hash = "sha256:c249fbfcd5db47e5e2d6d62198e565475ee65e4831e2561c8e313fa7eb961435"}, ] [package.extras] -docs = ["furo (>=2024.8.6)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4)"] -testing = ["covdefaults (>=2.3)", "coverage (>=7.6.1)", "diff-cover (>=9.1.1)", "pytest (>=8.3.2)", "pytest-asyncio (>=0.24)", "pytest-cov (>=5)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.26.3)"] +docs = ["furo (>=2024.8.6)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4.1)"] +testing = ["covdefaults (>=2.3)", "coverage (>=7.6.1)", 
"diff-cover (>=9.2)", "pytest (>=8.3.3)", "pytest-asyncio (>=0.24)", "pytest-cov (>=5)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.26.4)"] typing = ["typing-extensions (>=4.12.2)"] [[package]] @@ -2701,53 +2714,59 @@ files = [ [[package]] name = "fonttools" -version = "4.53.1" +version = "4.54.1" description = "Tools to manipulate font files" optional = false python-versions = ">=3.8" files = [ - {file = "fonttools-4.53.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0679a30b59d74b6242909945429dbddb08496935b82f91ea9bf6ad240ec23397"}, - {file = "fonttools-4.53.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e8bf06b94694251861ba7fdeea15c8ec0967f84c3d4143ae9daf42bbc7717fe3"}, - {file = "fonttools-4.53.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b96cd370a61f4d083c9c0053bf634279b094308d52fdc2dd9a22d8372fdd590d"}, - {file = "fonttools-4.53.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a1c7c5aa18dd3b17995898b4a9b5929d69ef6ae2af5b96d585ff4005033d82f0"}, - {file = "fonttools-4.53.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e013aae589c1c12505da64a7d8d023e584987e51e62006e1bb30d72f26522c41"}, - {file = "fonttools-4.53.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:9efd176f874cb6402e607e4cc9b4a9cd584d82fc34a4b0c811970b32ba62501f"}, - {file = "fonttools-4.53.1-cp310-cp310-win32.whl", hash = "sha256:c8696544c964500aa9439efb6761947393b70b17ef4e82d73277413f291260a4"}, - {file = "fonttools-4.53.1-cp310-cp310-win_amd64.whl", hash = "sha256:8959a59de5af6d2bec27489e98ef25a397cfa1774b375d5787509c06659b3671"}, - {file = "fonttools-4.53.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:da33440b1413bad53a8674393c5d29ce64d8c1a15ef8a77c642ffd900d07bfe1"}, - {file = "fonttools-4.53.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5ff7e5e9bad94e3a70c5cd2fa27f20b9bb9385e10cddab567b85ce5d306ea923"}, - {file = 
"fonttools-4.53.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c6e7170d675d12eac12ad1a981d90f118c06cf680b42a2d74c6c931e54b50719"}, - {file = "fonttools-4.53.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bee32ea8765e859670c4447b0817514ca79054463b6b79784b08a8df3a4d78e3"}, - {file = "fonttools-4.53.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6e08f572625a1ee682115223eabebc4c6a2035a6917eac6f60350aba297ccadb"}, - {file = "fonttools-4.53.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b21952c092ffd827504de7e66b62aba26fdb5f9d1e435c52477e6486e9d128b2"}, - {file = "fonttools-4.53.1-cp311-cp311-win32.whl", hash = "sha256:9dfdae43b7996af46ff9da520998a32b105c7f098aeea06b2226b30e74fbba88"}, - {file = "fonttools-4.53.1-cp311-cp311-win_amd64.whl", hash = "sha256:d4d0096cb1ac7a77b3b41cd78c9b6bc4a400550e21dc7a92f2b5ab53ed74eb02"}, - {file = "fonttools-4.53.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:d92d3c2a1b39631a6131c2fa25b5406855f97969b068e7e08413325bc0afba58"}, - {file = "fonttools-4.53.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3b3c8ebafbee8d9002bd8f1195d09ed2bd9ff134ddec37ee8f6a6375e6a4f0e8"}, - {file = "fonttools-4.53.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:32f029c095ad66c425b0ee85553d0dc326d45d7059dbc227330fc29b43e8ba60"}, - {file = "fonttools-4.53.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10f5e6c3510b79ea27bb1ebfcc67048cde9ec67afa87c7dd7efa5c700491ac7f"}, - {file = "fonttools-4.53.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:f677ce218976496a587ab17140da141557beb91d2a5c1a14212c994093f2eae2"}, - {file = "fonttools-4.53.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:9e6ceba2a01b448e36754983d376064730690401da1dd104ddb543519470a15f"}, - {file = "fonttools-4.53.1-cp312-cp312-win32.whl", hash = 
"sha256:791b31ebbc05197d7aa096bbc7bd76d591f05905d2fd908bf103af4488e60670"}, - {file = "fonttools-4.53.1-cp312-cp312-win_amd64.whl", hash = "sha256:6ed170b5e17da0264b9f6fae86073be3db15fa1bd74061c8331022bca6d09bab"}, - {file = "fonttools-4.53.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:c818c058404eb2bba05e728d38049438afd649e3c409796723dfc17cd3f08749"}, - {file = "fonttools-4.53.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:651390c3b26b0c7d1f4407cad281ee7a5a85a31a110cbac5269de72a51551ba2"}, - {file = "fonttools-4.53.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e54f1bba2f655924c1138bbc7fa91abd61f45c68bd65ab5ed985942712864bbb"}, - {file = "fonttools-4.53.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c9cd19cf4fe0595ebdd1d4915882b9440c3a6d30b008f3cc7587c1da7b95be5f"}, - {file = "fonttools-4.53.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:2af40ae9cdcb204fc1d8f26b190aa16534fcd4f0df756268df674a270eab575d"}, - {file = "fonttools-4.53.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:35250099b0cfb32d799fb5d6c651220a642fe2e3c7d2560490e6f1d3f9ae9169"}, - {file = "fonttools-4.53.1-cp38-cp38-win32.whl", hash = "sha256:f08df60fbd8d289152079a65da4e66a447efc1d5d5a4d3f299cdd39e3b2e4a7d"}, - {file = "fonttools-4.53.1-cp38-cp38-win_amd64.whl", hash = "sha256:7b6b35e52ddc8fb0db562133894e6ef5b4e54e1283dff606fda3eed938c36fc8"}, - {file = "fonttools-4.53.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:75a157d8d26c06e64ace9df037ee93a4938a4606a38cb7ffaf6635e60e253b7a"}, - {file = "fonttools-4.53.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4824c198f714ab5559c5be10fd1adf876712aa7989882a4ec887bf1ef3e00e31"}, - {file = "fonttools-4.53.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:becc5d7cb89c7b7afa8321b6bb3dbee0eec2b57855c90b3e9bf5fb816671fa7c"}, - {file = "fonttools-4.53.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:84ec3fb43befb54be490147b4a922b5314e16372a643004f182babee9f9c3407"}, - {file = "fonttools-4.53.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:73379d3ffdeecb376640cd8ed03e9d2d0e568c9d1a4e9b16504a834ebadc2dfb"}, - {file = "fonttools-4.53.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:02569e9a810f9d11f4ae82c391ebc6fb5730d95a0657d24d754ed7763fb2d122"}, - {file = "fonttools-4.53.1-cp39-cp39-win32.whl", hash = "sha256:aae7bd54187e8bf7fd69f8ab87b2885253d3575163ad4d669a262fe97f0136cb"}, - {file = "fonttools-4.53.1-cp39-cp39-win_amd64.whl", hash = "sha256:e5b708073ea3d684235648786f5f6153a48dc8762cdfe5563c57e80787c29fbb"}, - {file = "fonttools-4.53.1-py3-none-any.whl", hash = "sha256:f1f8758a2ad110bd6432203a344269f445a2907dc24ef6bccfd0ac4e14e0d71d"}, - {file = "fonttools-4.53.1.tar.gz", hash = "sha256:e128778a8e9bc11159ce5447f76766cefbd876f44bd79aff030287254e4752c4"}, + {file = "fonttools-4.54.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7ed7ee041ff7b34cc62f07545e55e1468808691dddfd315d51dd82a6b37ddef2"}, + {file = "fonttools-4.54.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:41bb0b250c8132b2fcac148e2e9198e62ff06f3cc472065dff839327945c5882"}, + {file = "fonttools-4.54.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7965af9b67dd546e52afcf2e38641b5be956d68c425bef2158e95af11d229f10"}, + {file = "fonttools-4.54.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:278913a168f90d53378c20c23b80f4e599dca62fbffae4cc620c8eed476b723e"}, + {file = "fonttools-4.54.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:0e88e3018ac809b9662615072dcd6b84dca4c2d991c6d66e1970a112503bba7e"}, + {file = "fonttools-4.54.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:4aa4817f0031206e637d1e685251ac61be64d1adef111060df84fdcbc6ab6c44"}, + {file = "fonttools-4.54.1-cp310-cp310-win32.whl", hash = "sha256:7e3b7d44e18c085fd8c16dcc6f1ad6c61b71ff463636fcb13df7b1b818bd0c02"}, + {file = 
"fonttools-4.54.1-cp310-cp310-win_amd64.whl", hash = "sha256:dd9cc95b8d6e27d01e1e1f1fae8559ef3c02c76317da650a19047f249acd519d"}, + {file = "fonttools-4.54.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:5419771b64248484299fa77689d4f3aeed643ea6630b2ea750eeab219588ba20"}, + {file = "fonttools-4.54.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:301540e89cf4ce89d462eb23a89464fef50915255ece765d10eee8b2bf9d75b2"}, + {file = "fonttools-4.54.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76ae5091547e74e7efecc3cbf8e75200bc92daaeb88e5433c5e3e95ea8ce5aa7"}, + {file = "fonttools-4.54.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:82834962b3d7c5ca98cb56001c33cf20eb110ecf442725dc5fdf36d16ed1ab07"}, + {file = "fonttools-4.54.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d26732ae002cc3d2ecab04897bb02ae3f11f06dd7575d1df46acd2f7c012a8d8"}, + {file = "fonttools-4.54.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:58974b4987b2a71ee08ade1e7f47f410c367cdfc5a94fabd599c88165f56213a"}, + {file = "fonttools-4.54.1-cp311-cp311-win32.whl", hash = "sha256:ab774fa225238986218a463f3fe151e04d8c25d7de09df7f0f5fce27b1243dbc"}, + {file = "fonttools-4.54.1-cp311-cp311-win_amd64.whl", hash = "sha256:07e005dc454eee1cc60105d6a29593459a06321c21897f769a281ff2d08939f6"}, + {file = "fonttools-4.54.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:54471032f7cb5fca694b5f1a0aaeba4af6e10ae989df408e0216f7fd6cdc405d"}, + {file = "fonttools-4.54.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8fa92cb248e573daab8d032919623cc309c005086d743afb014c836636166f08"}, + {file = "fonttools-4.54.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a911591200114969befa7f2cb74ac148bce5a91df5645443371aba6d222e263"}, + {file = "fonttools-4.54.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:93d458c8a6a354dc8b48fc78d66d2a8a90b941f7fec30e94c7ad9982b1fa6bab"}, + {file = "fonttools-4.54.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:5eb2474a7c5be8a5331146758debb2669bf5635c021aee00fd7c353558fc659d"}, + {file = "fonttools-4.54.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c9c563351ddc230725c4bdf7d9e1e92cbe6ae8553942bd1fb2b2ff0884e8b714"}, + {file = "fonttools-4.54.1-cp312-cp312-win32.whl", hash = "sha256:fdb062893fd6d47b527d39346e0c5578b7957dcea6d6a3b6794569370013d9ac"}, + {file = "fonttools-4.54.1-cp312-cp312-win_amd64.whl", hash = "sha256:e4564cf40cebcb53f3dc825e85910bf54835e8a8b6880d59e5159f0f325e637e"}, + {file = "fonttools-4.54.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:6e37561751b017cf5c40fce0d90fd9e8274716de327ec4ffb0df957160be3bff"}, + {file = "fonttools-4.54.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:357cacb988a18aace66e5e55fe1247f2ee706e01debc4b1a20d77400354cddeb"}, + {file = "fonttools-4.54.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8e953cc0bddc2beaf3a3c3b5dd9ab7554677da72dfaf46951e193c9653e515a"}, + {file = "fonttools-4.54.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:58d29b9a294573d8319f16f2f79e42428ba9b6480442fa1836e4eb89c4d9d61c"}, + {file = "fonttools-4.54.1-cp313-cp313-win32.whl", hash = "sha256:9ef1b167e22709b46bf8168368b7b5d3efeaaa746c6d39661c1b4405b6352e58"}, + {file = "fonttools-4.54.1-cp313-cp313-win_amd64.whl", hash = "sha256:262705b1663f18c04250bd1242b0515d3bbae177bee7752be67c979b7d47f43d"}, + {file = "fonttools-4.54.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ed2f80ca07025551636c555dec2b755dd005e2ea8fbeb99fc5cdff319b70b23b"}, + {file = "fonttools-4.54.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9dc080e5a1c3b2656caff2ac2633d009b3a9ff7b5e93d0452f40cd76d3da3b3c"}, + {file = "fonttools-4.54.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:1d152d1be65652fc65e695e5619e0aa0982295a95a9b29b52b85775243c06556"}, + {file = "fonttools-4.54.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8583e563df41fdecef31b793b4dd3af8a9caa03397be648945ad32717a92885b"}, + {file = "fonttools-4.54.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:0d1d353ef198c422515a3e974a1e8d5b304cd54a4c2eebcae708e37cd9eeffb1"}, + {file = "fonttools-4.54.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:fda582236fee135d4daeca056c8c88ec5f6f6d88a004a79b84a02547c8f57386"}, + {file = "fonttools-4.54.1-cp38-cp38-win32.whl", hash = "sha256:e7d82b9e56716ed32574ee106cabca80992e6bbdcf25a88d97d21f73a0aae664"}, + {file = "fonttools-4.54.1-cp38-cp38-win_amd64.whl", hash = "sha256:ada215fd079e23e060157aab12eba0d66704316547f334eee9ff26f8c0d7b8ab"}, + {file = "fonttools-4.54.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:f5b8a096e649768c2f4233f947cf9737f8dbf8728b90e2771e2497c6e3d21d13"}, + {file = "fonttools-4.54.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4e10d2e0a12e18f4e2dd031e1bf7c3d7017be5c8dbe524d07706179f355c5dac"}, + {file = "fonttools-4.54.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:31c32d7d4b0958600eac75eaf524b7b7cb68d3a8c196635252b7a2c30d80e986"}, + {file = "fonttools-4.54.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c39287f5c8f4a0c5a55daf9eaf9ccd223ea59eed3f6d467133cc727d7b943a55"}, + {file = "fonttools-4.54.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:a7a310c6e0471602fe3bf8efaf193d396ea561486aeaa7adc1f132e02d30c4b9"}, + {file = "fonttools-4.54.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:d3b659d1029946f4ff9b6183984578041b520ce0f8fb7078bb37ec7445806b33"}, + {file = "fonttools-4.54.1-cp39-cp39-win32.whl", hash = "sha256:e96bc94c8cda58f577277d4a71f51c8e2129b8b36fd05adece6320dd3d57de8a"}, + {file = "fonttools-4.54.1-cp39-cp39-win_amd64.whl", hash = 
"sha256:e8a4b261c1ef91e7188a30571be6ad98d1c6d9fa2427244c545e2fa0a2494dd7"}, + {file = "fonttools-4.54.1-py3-none-any.whl", hash = "sha256:37cddd62d83dc4f72f7c3f3c2bcf2697e89a30efb152079896544a93907733bd"}, + {file = "fonttools-4.54.1.tar.gz", hash = "sha256:957f669d4922f92c171ba01bef7f29410668db09f6c02111e22b2bce446f3285"}, ] [package.extras] @@ -3211,30 +3230,30 @@ xai = ["tensorflow (>=2.3.0,<3.0.0dev)"] [[package]] name = "google-cloud-bigquery" -version = "3.25.0" +version = "3.26.0" description = "Google BigQuery API client library" optional = false python-versions = ">=3.7" files = [ - {file = "google-cloud-bigquery-3.25.0.tar.gz", hash = "sha256:5b2aff3205a854481117436836ae1403f11f2594e6810a98886afd57eda28509"}, - {file = "google_cloud_bigquery-3.25.0-py2.py3-none-any.whl", hash = "sha256:7f0c371bc74d2a7fb74dacbc00ac0f90c8c2bec2289b51dd6685a275873b1ce9"}, + {file = "google_cloud_bigquery-3.26.0-py2.py3-none-any.whl", hash = "sha256:e0e9ad28afa67a18696e624cbccab284bf2c0a3f6eeb9eeb0426c69b943793a8"}, + {file = "google_cloud_bigquery-3.26.0.tar.gz", hash = "sha256:edbdc788beea659e04c0af7fe4dcd6d9155344b98951a0d5055bd2f15da4ba23"}, ] [package.dependencies] -google-api-core = {version = ">=1.34.1,<2.0.dev0 || >=2.11.dev0,<3.0.0dev", extras = ["grpc"]} +google-api-core = {version = ">=2.11.1,<3.0.0dev", extras = ["grpc"]} google-auth = ">=2.14.1,<3.0.0dev" -google-cloud-core = ">=1.6.0,<3.0.0dev" -google-resumable-media = ">=0.6.0,<3.0dev" +google-cloud-core = ">=2.4.1,<3.0.0dev" +google-resumable-media = ">=2.0.0,<3.0dev" packaging = ">=20.0.0" -python-dateutil = ">=2.7.2,<3.0dev" +python-dateutil = ">=2.7.3,<3.0dev" requests = ">=2.21.0,<3.0.0dev" [package.extras] -all = ["Shapely (>=1.8.4,<3.0.0dev)", "db-dtypes (>=0.3.0,<2.0.0dev)", "geopandas (>=0.9.0,<1.0dev)", "google-cloud-bigquery-storage (>=2.6.0,<3.0.0dev)", "grpcio (>=1.47.0,<2.0dev)", "grpcio (>=1.49.1,<2.0dev)", "importlib-metadata (>=1.0.0)", "ipykernel (>=6.0.0)", "ipython (>=7.23.1,!=8.1.0)", 
"ipywidgets (>=7.7.0)", "opentelemetry-api (>=1.1.0)", "opentelemetry-instrumentation (>=0.20b0)", "opentelemetry-sdk (>=1.1.0)", "pandas (>=1.1.0)", "proto-plus (>=1.15.0,<2.0.0dev)", "protobuf (>=3.19.5,!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<5.0.0dev)", "pyarrow (>=3.0.0)", "tqdm (>=4.7.4,<5.0.0dev)"] -bigquery-v2 = ["proto-plus (>=1.15.0,<2.0.0dev)", "protobuf (>=3.19.5,!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<5.0.0dev)"] +all = ["Shapely (>=1.8.4,<3.0.0dev)", "bigquery-magics (>=0.1.0)", "db-dtypes (>=0.3.0,<2.0.0dev)", "geopandas (>=0.9.0,<1.0dev)", "google-cloud-bigquery-storage (>=2.6.0,<3.0.0dev)", "grpcio (>=1.47.0,<2.0dev)", "grpcio (>=1.49.1,<2.0dev)", "importlib-metadata (>=1.0.0)", "ipykernel (>=6.0.0)", "ipywidgets (>=7.7.0)", "opentelemetry-api (>=1.1.0)", "opentelemetry-instrumentation (>=0.20b0)", "opentelemetry-sdk (>=1.1.0)", "pandas (>=1.1.0)", "proto-plus (>=1.22.3,<2.0.0dev)", "protobuf (>=3.20.2,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<6.0.0dev)", "pyarrow (>=3.0.0)", "tqdm (>=4.7.4,<5.0.0dev)"] +bigquery-v2 = ["proto-plus (>=1.22.3,<2.0.0dev)", "protobuf (>=3.20.2,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<6.0.0dev)"] bqstorage = ["google-cloud-bigquery-storage (>=2.6.0,<3.0.0dev)", "grpcio (>=1.47.0,<2.0dev)", "grpcio (>=1.49.1,<2.0dev)", "pyarrow (>=3.0.0)"] geopandas = ["Shapely (>=1.8.4,<3.0.0dev)", "geopandas (>=0.9.0,<1.0dev)"] -ipython = ["ipykernel (>=6.0.0)", "ipython (>=7.23.1,!=8.1.0)"] +ipython = ["bigquery-magics (>=0.1.0)"] ipywidgets = ["ipykernel (>=6.0.0)", "ipywidgets (>=7.7.0)"] opentelemetry = ["opentelemetry-api (>=1.1.0)", "opentelemetry-instrumentation (>=0.20b0)", "opentelemetry-sdk (>=1.1.0)"] pandas = ["db-dtypes (>=0.3.0,<2.0.0dev)", "importlib-metadata (>=1.0.0)", "pandas (>=1.1.0)", "pyarrow (>=3.0.0)"] @@ -3413,77 +3432,84 @@ grpc = ["grpcio (>=1.44.0,<2.0.0.dev0)"] [[package]] name = "greenlet" -version = "3.1.0" 
+version = "3.1.1" description = "Lightweight in-process concurrent programming" optional = false python-versions = ">=3.7" files = [ - {file = "greenlet-3.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a814dc3100e8a046ff48faeaa909e80cdb358411a3d6dd5293158425c684eda8"}, - {file = "greenlet-3.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a771dc64fa44ebe58d65768d869fcfb9060169d203446c1d446e844b62bdfdca"}, - {file = "greenlet-3.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0e49a65d25d7350cca2da15aac31b6f67a43d867448babf997fe83c7505f57bc"}, - {file = "greenlet-3.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2cd8518eade968bc52262d8c46727cfc0826ff4d552cf0430b8d65aaf50bb91d"}, - {file = "greenlet-3.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:76dc19e660baea5c38e949455c1181bc018893f25372d10ffe24b3ed7341fb25"}, - {file = "greenlet-3.1.0-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c0a5b1c22c82831f56f2f7ad9bbe4948879762fe0d59833a4a71f16e5fa0f682"}, - {file = "greenlet-3.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:2651dfb006f391bcb240635079a68a261b227a10a08af6349cba834a2141efa1"}, - {file = "greenlet-3.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:3e7e6ef1737a819819b1163116ad4b48d06cfdd40352d813bb14436024fcda99"}, - {file = "greenlet-3.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:ffb08f2a1e59d38c7b8b9ac8083c9c8b9875f0955b1e9b9b9a965607a51f8e54"}, - {file = "greenlet-3.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9730929375021ec90f6447bff4f7f5508faef1c02f399a1953870cdb78e0c345"}, - {file = "greenlet-3.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:713d450cf8e61854de9420fb7eea8ad228df4e27e7d4ed465de98c955d2b3fa6"}, - {file = "greenlet-3.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:4c3446937be153718250fe421da548f973124189f18fe4575a0510b5c928f0cc"}, - {file = "greenlet-3.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1ddc7bcedeb47187be74208bc652d63d6b20cb24f4e596bd356092d8000da6d6"}, - {file = "greenlet-3.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:44151d7b81b9391ed759a2f2865bbe623ef00d648fed59363be2bbbd5154656f"}, - {file = "greenlet-3.1.0-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6cea1cca3be76c9483282dc7760ea1cc08a6ecec1f0b6ca0a94ea0d17432da19"}, - {file = "greenlet-3.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:619935a44f414274a2c08c9e74611965650b730eb4efe4b2270f91df5e4adf9a"}, - {file = "greenlet-3.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:221169d31cada333a0c7fd087b957c8f431c1dba202c3a58cf5a3583ed973e9b"}, - {file = "greenlet-3.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:01059afb9b178606b4b6e92c3e710ea1635597c3537e44da69f4531e111dd5e9"}, - {file = "greenlet-3.1.0-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:24fc216ec7c8be9becba8b64a98a78f9cd057fd2dc75ae952ca94ed8a893bf27"}, - {file = "greenlet-3.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3d07c28b85b350564bdff9f51c1c5007dfb2f389385d1bc23288de51134ca303"}, - {file = "greenlet-3.1.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:243a223c96a4246f8a30ea470c440fe9db1f5e444941ee3c3cd79df119b8eebf"}, - {file = "greenlet-3.1.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:26811df4dc81271033a7836bc20d12cd30938e6bd2e9437f56fa03da81b0f8fc"}, - {file = "greenlet-3.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c9d86401550b09a55410f32ceb5fe7efcd998bd2dad9e82521713cb148a4a15f"}, - {file = "greenlet-3.1.0-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:26d9c1c4f1748ccac0bae1dbb465fb1a795a75aba8af8ca871503019f4285e2a"}, - {file = "greenlet-3.1.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:cd468ec62257bb4544989402b19d795d2305eccb06cde5da0eb739b63dc04665"}, - {file = "greenlet-3.1.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a53dfe8f82b715319e9953330fa5c8708b610d48b5c59f1316337302af5c0811"}, - {file = "greenlet-3.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:28fe80a3eb673b2d5cc3b12eea468a5e5f4603c26aa34d88bf61bba82ceb2f9b"}, - {file = "greenlet-3.1.0-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:76b3e3976d2a452cba7aa9e453498ac72240d43030fdc6d538a72b87eaff52fd"}, - {file = "greenlet-3.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:655b21ffd37a96b1e78cc48bf254f5ea4b5b85efaf9e9e2a526b3c9309d660ca"}, - {file = "greenlet-3.1.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c6f4c2027689093775fd58ca2388d58789009116844432d920e9147f91acbe64"}, - {file = "greenlet-3.1.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:76e5064fd8e94c3f74d9fd69b02d99e3cdb8fc286ed49a1f10b256e59d0d3a0b"}, - {file = "greenlet-3.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6a4bf607f690f7987ab3291406e012cd8591a4f77aa54f29b890f9c331e84989"}, - {file = "greenlet-3.1.0-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:037d9ac99540ace9424cb9ea89f0accfaff4316f149520b4ae293eebc5bded17"}, - {file = "greenlet-3.1.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:90b5bbf05fe3d3ef697103850c2ce3374558f6fe40fd57c9fac1bf14903f50a5"}, - {file = "greenlet-3.1.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:726377bd60081172685c0ff46afbc600d064f01053190e4450857483c4d44484"}, - {file = "greenlet-3.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:d46d5069e2eeda111d6f71970e341f4bd9aeeee92074e649ae263b834286ecc0"}, - {file = 
"greenlet-3.1.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:81eeec4403a7d7684b5812a8aaa626fa23b7d0848edb3a28d2eb3220daddcbd0"}, - {file = "greenlet-3.1.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4a3dae7492d16e85ea6045fd11cb8e782b63eac8c8d520c3a92c02ac4573b0a6"}, - {file = "greenlet-3.1.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4b5ea3664eed571779403858d7cd0a9b0ebf50d57d2cdeafc7748e09ef8cd81a"}, - {file = "greenlet-3.1.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a22f4e26400f7f48faef2d69c20dc055a1f3043d330923f9abe08ea0aecc44df"}, - {file = "greenlet-3.1.0-cp37-cp37m-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:13ff8c8e54a10472ce3b2a2da007f915175192f18e6495bad50486e87c7f6637"}, - {file = "greenlet-3.1.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:f9671e7282d8c6fcabc32c0fb8d7c0ea8894ae85cee89c9aadc2d7129e1a9954"}, - {file = "greenlet-3.1.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:184258372ae9e1e9bddce6f187967f2e08ecd16906557c4320e3ba88a93438c3"}, - {file = "greenlet-3.1.0-cp37-cp37m-win32.whl", hash = "sha256:a0409bc18a9f85321399c29baf93545152d74a49d92f2f55302f122007cfda00"}, - {file = "greenlet-3.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:9eb4a1d7399b9f3c7ac68ae6baa6be5f9195d1d08c9ddc45ad559aa6b556bce6"}, - {file = "greenlet-3.1.0-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:a8870983af660798dc1b529e1fd6f1cefd94e45135a32e58bd70edd694540f33"}, - {file = "greenlet-3.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cfcfb73aed40f550a57ea904629bdaf2e562c68fa1164fa4588e752af6efdc3f"}, - {file = "greenlet-3.1.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f9482c2ed414781c0af0b35d9d575226da6b728bd1a720668fa05837184965b7"}, - {file = "greenlet-3.1.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:d58ec349e0c2c0bc6669bf2cd4982d2f93bf067860d23a0ea1fe677b0f0b1e09"}, - {file = "greenlet-3.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd65695a8df1233309b701dec2539cc4b11e97d4fcc0f4185b4a12ce54db0491"}, - {file = "greenlet-3.1.0-cp38-cp38-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:665b21e95bc0fce5cab03b2e1d90ba9c66c510f1bb5fdc864f3a377d0f553f6b"}, - {file = "greenlet-3.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:d3c59a06c2c28a81a026ff11fbf012081ea34fb9b7052f2ed0366e14896f0a1d"}, - {file = "greenlet-3.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5415b9494ff6240b09af06b91a375731febe0090218e2898d2b85f9b92abcda0"}, - {file = "greenlet-3.1.0-cp38-cp38-win32.whl", hash = "sha256:1544b8dd090b494c55e60c4ff46e238be44fdc472d2589e943c241e0169bcea2"}, - {file = "greenlet-3.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:7f346d24d74c00b6730440f5eb8ec3fe5774ca8d1c9574e8e57c8671bb51b910"}, - {file = "greenlet-3.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:db1b3ccb93488328c74e97ff888604a8b95ae4f35f4f56677ca57a4fc3a4220b"}, - {file = "greenlet-3.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:44cd313629ded43bb3b98737bba2f3e2c2c8679b55ea29ed73daea6b755fe8e7"}, - {file = "greenlet-3.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fad7a051e07f64e297e6e8399b4d6a3bdcad3d7297409e9a06ef8cbccff4f501"}, - {file = "greenlet-3.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c3967dcc1cd2ea61b08b0b276659242cbce5caca39e7cbc02408222fb9e6ff39"}, - {file = "greenlet-3.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d45b75b0f3fd8d99f62eb7908cfa6d727b7ed190737dec7fe46d993da550b81a"}, - {file = "greenlet-3.1.0-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2d004db911ed7b6218ec5c5bfe4cf70ae8aa2223dffbb5b3c69e342bb253cb28"}, - {file = 
"greenlet-3.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b9505a0c8579899057cbefd4ec34d865ab99852baf1ff33a9481eb3924e2da0b"}, - {file = "greenlet-3.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5fd6e94593f6f9714dbad1aaba734b5ec04593374fa6638df61592055868f8b8"}, - {file = "greenlet-3.1.0-cp39-cp39-win32.whl", hash = "sha256:d0dd943282231480aad5f50f89bdf26690c995e8ff555f26d8a5b9887b559bcc"}, - {file = "greenlet-3.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:ac0adfdb3a21dc2a24ed728b61e72440d297d0fd3a577389df566651fcd08f97"}, - {file = "greenlet-3.1.0.tar.gz", hash = "sha256:b395121e9bbe8d02a750886f108d540abe66075e61e22f7353d9acb0b81be0f0"}, + {file = "greenlet-3.1.1-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:0bbae94a29c9e5c7e4a2b7f0aae5c17e8e90acbfd3bf6270eeba60c39fce3563"}, + {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0fde093fb93f35ca72a556cf72c92ea3ebfda3d79fc35bb19fbe685853869a83"}, + {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:36b89d13c49216cadb828db8dfa6ce86bbbc476a82d3a6c397f0efae0525bdd0"}, + {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:94b6150a85e1b33b40b1464a3f9988dcc5251d6ed06842abff82e42632fac120"}, + {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93147c513fac16385d1036b7e5b102c7fbbdb163d556b791f0f11eada7ba65dc"}, + {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:da7a9bff22ce038e19bf62c4dd1ec8391062878710ded0a845bcf47cc0200617"}, + {file = "greenlet-3.1.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b2795058c23988728eec1f36a4e5e4ebad22f8320c85f3587b539b9ac84128d7"}, + {file = "greenlet-3.1.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ed10eac5830befbdd0c32f83e8aa6288361597550ba669b04c48f0f9a2c843c6"}, + {file = 
"greenlet-3.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:77c386de38a60d1dfb8e55b8c1101d68c79dfdd25c7095d51fec2dd800892b80"}, + {file = "greenlet-3.1.1-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:e4d333e558953648ca09d64f13e6d8f0523fa705f51cae3f03b5983489958c70"}, + {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09fc016b73c94e98e29af67ab7b9a879c307c6731a2c9da0db5a7d9b7edd1159"}, + {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d5e975ca70269d66d17dd995dafc06f1b06e8cb1ec1e9ed54c1d1e4a7c4cf26e"}, + {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3b2813dc3de8c1ee3f924e4d4227999285fd335d1bcc0d2be6dc3f1f6a318ec1"}, + {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e347b3bfcf985a05e8c0b7d462ba6f15b1ee1c909e2dcad795e49e91b152c383"}, + {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9e8f8c9cb53cdac7ba9793c276acd90168f416b9ce36799b9b885790f8ad6c0a"}, + {file = "greenlet-3.1.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:62ee94988d6b4722ce0028644418d93a52429e977d742ca2ccbe1c4f4a792511"}, + {file = "greenlet-3.1.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1776fd7f989fc6b8d8c8cb8da1f6b82c5814957264d1f6cf818d475ec2bf6395"}, + {file = "greenlet-3.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:48ca08c771c268a768087b408658e216133aecd835c0ded47ce955381105ba39"}, + {file = "greenlet-3.1.1-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:4afe7ea89de619adc868e087b4d2359282058479d7cfb94970adf4b55284574d"}, + {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f406b22b7c9a9b4f8aa9d2ab13d6ae0ac3e85c9a809bd590ad53fed2bf70dc79"}, + {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:c3a701fe5a9695b238503ce5bbe8218e03c3bcccf7e204e455e7462d770268aa"}, + {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2846930c65b47d70b9d178e89c7e1a69c95c1f68ea5aa0a58646b7a96df12441"}, + {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99cfaa2110534e2cf3ba31a7abcac9d328d1d9f1b95beede58294a60348fba36"}, + {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1443279c19fca463fc33e65ef2a935a5b09bb90f978beab37729e1c3c6c25fe9"}, + {file = "greenlet-3.1.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b7cede291382a78f7bb5f04a529cb18e068dd29e0fb27376074b6d0317bf4dd0"}, + {file = "greenlet-3.1.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:23f20bb60ae298d7d8656c6ec6db134bca379ecefadb0b19ce6f19d1f232a942"}, + {file = "greenlet-3.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:7124e16b4c55d417577c2077be379514321916d5790fa287c9ed6f23bd2ffd01"}, + {file = "greenlet-3.1.1-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:05175c27cb459dcfc05d026c4232f9de8913ed006d42713cb8a5137bd49375f1"}, + {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:935e943ec47c4afab8965954bf49bfa639c05d4ccf9ef6e924188f762145c0ff"}, + {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:667a9706c970cb552ede35aee17339a18e8f2a87a51fba2ed39ceeeb1004798a"}, + {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b8a678974d1f3aa55f6cc34dc480169d58f2e6d8958895d68845fa4ab566509e"}, + {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:efc0f674aa41b92da8c49e0346318c6075d734994c3c4e4430b1c3f853e498e4"}, + {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:0153404a4bb921f0ff1abeb5ce8a5131da56b953eda6e14b88dc6bbc04d2049e"}, + {file = "greenlet-3.1.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:275f72decf9932639c1c6dd1013a1bc266438eb32710016a1c742df5da6e60a1"}, + {file = "greenlet-3.1.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c4aab7f6381f38a4b42f269057aee279ab0fc7bf2e929e3d4abfae97b682a12c"}, + {file = "greenlet-3.1.1-cp313-cp313-win_amd64.whl", hash = "sha256:b42703b1cf69f2aa1df7d1030b9d77d3e584a70755674d60e710f0af570f3761"}, + {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1695e76146579f8c06c1509c7ce4dfe0706f49c6831a817ac04eebb2fd02011"}, + {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7876452af029456b3f3549b696bb36a06db7c90747740c5302f74a9e9fa14b13"}, + {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4ead44c85f8ab905852d3de8d86f6f8baf77109f9da589cb4fa142bd3b57b475"}, + {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8320f64b777d00dd7ccdade271eaf0cad6636343293a25074cc5566160e4de7b"}, + {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6510bf84a6b643dabba74d3049ead221257603a253d0a9873f55f6a59a65f822"}, + {file = "greenlet-3.1.1-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:04b013dc07c96f83134b1e99888e7a79979f1a247e2a9f59697fa14b5862ed01"}, + {file = "greenlet-3.1.1-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:411f015496fec93c1c8cd4e5238da364e1da7a124bcb293f085bf2860c32c6f6"}, + {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:47da355d8687fd65240c364c90a31569a133b7b60de111c255ef5b606f2ae291"}, + {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:98884ecf2ffb7d7fe6bd517e8eb99d31ff7855a840fa6d0d63cd07c037f6a981"}, + {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f1d4aeb8891338e60d1ab6127af1fe45def5259def8094b9c7e34690c8858803"}, + {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db32b5348615a04b82240cc67983cb315309e88d444a288934ee6ceaebcad6cc"}, + {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dcc62f31eae24de7f8dce72134c8651c58000d3b1868e01392baea7c32c247de"}, + {file = "greenlet-3.1.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1d3755bcb2e02de341c55b4fca7a745a24a9e7212ac953f6b3a48d117d7257aa"}, + {file = "greenlet-3.1.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:b8da394b34370874b4572676f36acabac172602abf054cbc4ac910219f3340af"}, + {file = "greenlet-3.1.1-cp37-cp37m-win32.whl", hash = "sha256:a0dfc6c143b519113354e780a50381508139b07d2177cb6ad6a08278ec655798"}, + {file = "greenlet-3.1.1-cp37-cp37m-win_amd64.whl", hash = "sha256:54558ea205654b50c438029505def3834e80f0869a70fb15b871c29b4575ddef"}, + {file = "greenlet-3.1.1-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:346bed03fe47414091be4ad44786d1bd8bef0c3fcad6ed3dee074a032ab408a9"}, + {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dfc59d69fc48664bc693842bd57acfdd490acafda1ab52c7836e3fc75c90a111"}, + {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d21e10da6ec19b457b82636209cbe2331ff4306b54d06fa04b7c138ba18c8a81"}, + {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:37b9de5a96111fc15418819ab4c4432e4f3c2ede61e660b1e33971eba26ef9ba"}, + {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6ef9ea3f137e5711f0dbe5f9263e8c009b7069d8a1acea822bd5e9dae0ae49c8"}, + {file = 
"greenlet-3.1.1-cp38-cp38-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:85f3ff71e2e60bd4b4932a043fbbe0f499e263c628390b285cb599154a3b03b1"}, + {file = "greenlet-3.1.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:95ffcf719966dd7c453f908e208e14cde192e09fde6c7186c8f1896ef778d8cd"}, + {file = "greenlet-3.1.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:03a088b9de532cbfe2ba2034b2b85e82df37874681e8c470d6fb2f8c04d7e4b7"}, + {file = "greenlet-3.1.1-cp38-cp38-win32.whl", hash = "sha256:8b8b36671f10ba80e159378df9c4f15c14098c4fd73a36b9ad715f057272fbef"}, + {file = "greenlet-3.1.1-cp38-cp38-win_amd64.whl", hash = "sha256:7017b2be767b9d43cc31416aba48aab0d2309ee31b4dbf10a1d38fb7972bdf9d"}, + {file = "greenlet-3.1.1-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:396979749bd95f018296af156201d6211240e7a23090f50a8d5d18c370084dc3"}, + {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca9d0ff5ad43e785350894d97e13633a66e2b50000e8a183a50a88d834752d42"}, + {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f6ff3b14f2df4c41660a7dec01045a045653998784bf8cfcb5a525bdffffbc8f"}, + {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:94ebba31df2aa506d7b14866fed00ac141a867e63143fe5bca82a8e503b36437"}, + {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:73aaad12ac0ff500f62cebed98d8789198ea0e6f233421059fa68a5aa7220145"}, + {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:63e4844797b975b9af3a3fb8f7866ff08775f5426925e1e0bbcfe7932059a12c"}, + {file = "greenlet-3.1.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7939aa3ca7d2a1593596e7ac6d59391ff30281ef280d8632fa03d81f7c5f955e"}, + {file = "greenlet-3.1.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = 
"sha256:d0028e725ee18175c6e422797c407874da24381ce0690d6b9396c204c7f7276e"}, + {file = "greenlet-3.1.1-cp39-cp39-win32.whl", hash = "sha256:5e06afd14cbaf9e00899fae69b24a32f2196c19de08fcb9f4779dd4f004e5e7c"}, + {file = "greenlet-3.1.1-cp39-cp39-win_amd64.whl", hash = "sha256:3319aa75e0e0639bc15ff54ca327e8dc7a6fe404003496e3c6925cd3142e0e22"}, + {file = "greenlet-3.1.1.tar.gz", hash = "sha256:4ce3ac6cdb6adf7946475d7ef31777c26d94bccc377e070a7986bd2d5c515467"}, ] [package.extras] @@ -3508,61 +3534,70 @@ protobuf = ">=3.20.2,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4 [[package]] name = "grpcio" -version = "1.66.1" +version = "1.66.2" description = "HTTP/2-based RPC framework" optional = false python-versions = ">=3.8" files = [ - {file = "grpcio-1.66.1-cp310-cp310-linux_armv7l.whl", hash = "sha256:4877ba180591acdf127afe21ec1c7ff8a5ecf0fe2600f0d3c50e8c4a1cbc6492"}, - {file = "grpcio-1.66.1-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:3750c5a00bd644c75f4507f77a804d0189d97a107eb1481945a0cf3af3e7a5ac"}, - {file = "grpcio-1.66.1-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:a013c5fbb12bfb5f927444b477a26f1080755a931d5d362e6a9a720ca7dbae60"}, - {file = "grpcio-1.66.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b1b24c23d51a1e8790b25514157d43f0a4dce1ac12b3f0b8e9f66a5e2c4c132f"}, - {file = "grpcio-1.66.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b7ffb8ea674d68de4cac6f57d2498fef477cef582f1fa849e9f844863af50083"}, - {file = "grpcio-1.66.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:307b1d538140f19ccbd3aed7a93d8f71103c5d525f3c96f8616111614b14bf2a"}, - {file = "grpcio-1.66.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:1c17ebcec157cfb8dd445890a03e20caf6209a5bd4ac5b040ae9dbc59eef091d"}, - {file = "grpcio-1.66.1-cp310-cp310-win32.whl", hash = "sha256:ef82d361ed5849d34cf09105d00b94b6728d289d6b9235513cb2fcc79f7c432c"}, - {file = "grpcio-1.66.1-cp310-cp310-win_amd64.whl", 
hash = "sha256:292a846b92cdcd40ecca46e694997dd6b9be6c4c01a94a0dfb3fcb75d20da858"}, - {file = "grpcio-1.66.1-cp311-cp311-linux_armv7l.whl", hash = "sha256:c30aeceeaff11cd5ddbc348f37c58bcb96da8d5aa93fed78ab329de5f37a0d7a"}, - {file = "grpcio-1.66.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8a1e224ce6f740dbb6b24c58f885422deebd7eb724aff0671a847f8951857c26"}, - {file = "grpcio-1.66.1-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:a66fe4dc35d2330c185cfbb42959f57ad36f257e0cc4557d11d9f0a3f14311df"}, - {file = "grpcio-1.66.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e3ba04659e4fce609de2658fe4dbf7d6ed21987a94460f5f92df7579fd5d0e22"}, - {file = "grpcio-1.66.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4573608e23f7e091acfbe3e84ac2045680b69751d8d67685ffa193a4429fedb1"}, - {file = "grpcio-1.66.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:7e06aa1f764ec8265b19d8f00140b8c4b6ca179a6dc67aa9413867c47e1fb04e"}, - {file = "grpcio-1.66.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3885f037eb11f1cacc41f207b705f38a44b69478086f40608959bf5ad85826dd"}, - {file = "grpcio-1.66.1-cp311-cp311-win32.whl", hash = "sha256:97ae7edd3f3f91480e48ede5d3e7d431ad6005bfdbd65c1b56913799ec79e791"}, - {file = "grpcio-1.66.1-cp311-cp311-win_amd64.whl", hash = "sha256:cfd349de4158d797db2bd82d2020554a121674e98fbe6b15328456b3bf2495bb"}, - {file = "grpcio-1.66.1-cp312-cp312-linux_armv7l.whl", hash = "sha256:a92c4f58c01c77205df6ff999faa008540475c39b835277fb8883b11cada127a"}, - {file = "grpcio-1.66.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:fdb14bad0835914f325349ed34a51940bc2ad965142eb3090081593c6e347be9"}, - {file = "grpcio-1.66.1-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:f03a5884c56256e08fd9e262e11b5cfacf1af96e2ce78dc095d2c41ccae2c80d"}, - {file = "grpcio-1.66.1-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:2ca2559692d8e7e245d456877a85ee41525f3ed425aa97eb7a70fc9a79df91a0"}, - {file = "grpcio-1.66.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:84ca1be089fb4446490dd1135828bd42a7c7f8421e74fa581611f7afdf7ab761"}, - {file = "grpcio-1.66.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:d639c939ad7c440c7b2819a28d559179a4508783f7e5b991166f8d7a34b52815"}, - {file = "grpcio-1.66.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:b9feb4e5ec8dc2d15709f4d5fc367794d69277f5d680baf1910fc9915c633524"}, - {file = "grpcio-1.66.1-cp312-cp312-win32.whl", hash = "sha256:7101db1bd4cd9b880294dec41a93fcdce465bdbb602cd8dc5bd2d6362b618759"}, - {file = "grpcio-1.66.1-cp312-cp312-win_amd64.whl", hash = "sha256:b0aa03d240b5539648d996cc60438f128c7f46050989e35b25f5c18286c86734"}, - {file = "grpcio-1.66.1-cp38-cp38-linux_armv7l.whl", hash = "sha256:ecfe735e7a59e5a98208447293ff8580e9db1e890e232b8b292dc8bd15afc0d2"}, - {file = "grpcio-1.66.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:4825a3aa5648010842e1c9d35a082187746aa0cdbf1b7a2a930595a94fb10fce"}, - {file = "grpcio-1.66.1-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:f517fd7259fe823ef3bd21e508b653d5492e706e9f0ef82c16ce3347a8a5620c"}, - {file = "grpcio-1.66.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f1fe60d0772831d96d263b53d83fb9a3d050a94b0e94b6d004a5ad111faa5b5b"}, - {file = "grpcio-1.66.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:31a049daa428f928f21090403e5d18ea02670e3d5d172581670be006100db9ef"}, - {file = "grpcio-1.66.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:6f914386e52cbdeb5d2a7ce3bf1fdfacbe9d818dd81b6099a05b741aaf3848bb"}, - {file = "grpcio-1.66.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:bff2096bdba686019fb32d2dde45b95981f0d1490e054400f70fc9a8af34b49d"}, - {file = "grpcio-1.66.1-cp38-cp38-win32.whl", hash = "sha256:aa8ba945c96e73de29d25331b26f3e416e0c0f621e984a3ebdb2d0d0b596a3b3"}, - {file = 
"grpcio-1.66.1-cp38-cp38-win_amd64.whl", hash = "sha256:161d5c535c2bdf61b95080e7f0f017a1dfcb812bf54093e71e5562b16225b4ce"}, - {file = "grpcio-1.66.1-cp39-cp39-linux_armv7l.whl", hash = "sha256:d0cd7050397b3609ea51727b1811e663ffda8bda39c6a5bb69525ef12414b503"}, - {file = "grpcio-1.66.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:0e6c9b42ded5d02b6b1fea3a25f036a2236eeb75d0579bfd43c0018c88bf0a3e"}, - {file = "grpcio-1.66.1-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:c9f80f9fad93a8cf71c7f161778ba47fd730d13a343a46258065c4deb4b550c0"}, - {file = "grpcio-1.66.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5dd67ed9da78e5121efc5c510f0122a972216808d6de70953a740560c572eb44"}, - {file = "grpcio-1.66.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:48b0d92d45ce3be2084b92fb5bae2f64c208fea8ceed7fccf6a7b524d3c4942e"}, - {file = "grpcio-1.66.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:4d813316d1a752be6f5c4360c49f55b06d4fe212d7df03253dfdae90c8a402bb"}, - {file = "grpcio-1.66.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9c9bebc6627873ec27a70fc800f6083a13c70b23a5564788754b9ee52c5aef6c"}, - {file = "grpcio-1.66.1-cp39-cp39-win32.whl", hash = "sha256:30a1c2cf9390c894c90bbc70147f2372130ad189cffef161f0432d0157973f45"}, - {file = "grpcio-1.66.1-cp39-cp39-win_amd64.whl", hash = "sha256:17663598aadbedc3cacd7bbde432f541c8e07d2496564e22b214b22c7523dac8"}, - {file = "grpcio-1.66.1.tar.gz", hash = "sha256:35334f9c9745add3e357e3372756fd32d925bd52c41da97f4dfdafbde0bf0ee2"}, -] - -[package.extras] -protobuf = ["grpcio-tools (>=1.66.1)"] + {file = "grpcio-1.66.2-cp310-cp310-linux_armv7l.whl", hash = "sha256:fe96281713168a3270878255983d2cb1a97e034325c8c2c25169a69289d3ecfa"}, + {file = "grpcio-1.66.2-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:73fc8f8b9b5c4a03e802b3cd0c18b2b06b410d3c1dcbef989fdeb943bd44aff7"}, + {file = "grpcio-1.66.2-cp310-cp310-manylinux_2_17_aarch64.whl", hash = 
"sha256:03b0b307ba26fae695e067b94cbb014e27390f8bc5ac7a3a39b7723fed085604"}, + {file = "grpcio-1.66.2-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7d69ce1f324dc2d71e40c9261d3fdbe7d4c9d60f332069ff9b2a4d8a257c7b2b"}, + {file = "grpcio-1.66.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:05bc2ceadc2529ab0b227b1310d249d95d9001cd106aa4d31e8871ad3c428d73"}, + {file = "grpcio-1.66.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:8ac475e8da31484efa25abb774674d837b343afb78bb3bcdef10f81a93e3d6bf"}, + {file = "grpcio-1.66.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0be4e0490c28da5377283861bed2941d1d20ec017ca397a5df4394d1c31a9b50"}, + {file = "grpcio-1.66.2-cp310-cp310-win32.whl", hash = "sha256:4e504572433f4e72b12394977679161d495c4c9581ba34a88d843eaf0f2fbd39"}, + {file = "grpcio-1.66.2-cp310-cp310-win_amd64.whl", hash = "sha256:2018b053aa15782db2541ca01a7edb56a0bf18c77efed975392583725974b249"}, + {file = "grpcio-1.66.2-cp311-cp311-linux_armv7l.whl", hash = "sha256:2335c58560a9e92ac58ff2bc5649952f9b37d0735608242973c7a8b94a6437d8"}, + {file = "grpcio-1.66.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:45a3d462826f4868b442a6b8fdbe8b87b45eb4f5b5308168c156b21eca43f61c"}, + {file = "grpcio-1.66.2-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:a9539f01cb04950fd4b5ab458e64a15f84c2acc273670072abe49a3f29bbad54"}, + {file = "grpcio-1.66.2-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ce89f5876662f146d4c1f695dda29d4433a5d01c8681fbd2539afff535da14d4"}, + {file = "grpcio-1.66.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d25a14af966438cddf498b2e338f88d1c9706f3493b1d73b93f695c99c5f0e2a"}, + {file = "grpcio-1.66.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:6001e575b8bbd89eee11960bb640b6da6ae110cf08113a075f1e2051cc596cae"}, + {file = "grpcio-1.66.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = 
"sha256:4ea1d062c9230278793820146c95d038dc0f468cbdd172eec3363e42ff1c7d01"}, + {file = "grpcio-1.66.2-cp311-cp311-win32.whl", hash = "sha256:38b68498ff579a3b1ee8f93a05eb48dc2595795f2f62716e797dc24774c1aaa8"}, + {file = "grpcio-1.66.2-cp311-cp311-win_amd64.whl", hash = "sha256:6851de821249340bdb100df5eacfecfc4e6075fa85c6df7ee0eb213170ec8e5d"}, + {file = "grpcio-1.66.2-cp312-cp312-linux_armv7l.whl", hash = "sha256:802d84fd3d50614170649853d121baaaa305de7b65b3e01759247e768d691ddf"}, + {file = "grpcio-1.66.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:80fd702ba7e432994df208f27514280b4b5c6843e12a48759c9255679ad38db8"}, + {file = "grpcio-1.66.2-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:12fda97ffae55e6526825daf25ad0fa37483685952b5d0f910d6405c87e3adb6"}, + {file = "grpcio-1.66.2-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:950da58d7d80abd0ea68757769c9db0a95b31163e53e5bb60438d263f4bed7b7"}, + {file = "grpcio-1.66.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e636ce23273683b00410f1971d209bf3689238cf5538d960adc3cdfe80dd0dbd"}, + {file = "grpcio-1.66.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:a917d26e0fe980b0ac7bfcc1a3c4ad6a9a4612c911d33efb55ed7833c749b0ee"}, + {file = "grpcio-1.66.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:49f0ca7ae850f59f828a723a9064cadbed90f1ece179d375966546499b8a2c9c"}, + {file = "grpcio-1.66.2-cp312-cp312-win32.whl", hash = "sha256:31fd163105464797a72d901a06472860845ac157389e10f12631025b3e4d0453"}, + {file = "grpcio-1.66.2-cp312-cp312-win_amd64.whl", hash = "sha256:ff1f7882e56c40b0d33c4922c15dfa30612f05fb785074a012f7cda74d1c3679"}, + {file = "grpcio-1.66.2-cp313-cp313-linux_armv7l.whl", hash = "sha256:3b00efc473b20d8bf83e0e1ae661b98951ca56111feb9b9611df8efc4fe5d55d"}, + {file = "grpcio-1.66.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:1caa38fb22a8578ab8393da99d4b8641e3a80abc8fd52646f1ecc92bcb8dee34"}, + {file = 
"grpcio-1.66.2-cp313-cp313-manylinux_2_17_aarch64.whl", hash = "sha256:c408f5ef75cfffa113cacd8b0c0e3611cbfd47701ca3cdc090594109b9fcbaed"}, + {file = "grpcio-1.66.2-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c806852deaedee9ce8280fe98955c9103f62912a5b2d5ee7e3eaa284a6d8d8e7"}, + {file = "grpcio-1.66.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f145cc21836c332c67baa6fc81099d1d27e266401565bf481948010d6ea32d46"}, + {file = "grpcio-1.66.2-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:73e3b425c1e155730273f73e419de3074aa5c5e936771ee0e4af0814631fb30a"}, + {file = "grpcio-1.66.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:9c509a4f78114cbc5f0740eb3d7a74985fd2eff022971bc9bc31f8bc93e66a3b"}, + {file = "grpcio-1.66.2-cp313-cp313-win32.whl", hash = "sha256:20657d6b8cfed7db5e11b62ff7dfe2e12064ea78e93f1434d61888834bc86d75"}, + {file = "grpcio-1.66.2-cp313-cp313-win_amd64.whl", hash = "sha256:fb70487c95786e345af5e854ffec8cb8cc781bcc5df7930c4fbb7feaa72e1cdf"}, + {file = "grpcio-1.66.2-cp38-cp38-linux_armv7l.whl", hash = "sha256:a18e20d8321c6400185b4263e27982488cb5cdd62da69147087a76a24ef4e7e3"}, + {file = "grpcio-1.66.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:02697eb4a5cbe5a9639f57323b4c37bcb3ab2d48cec5da3dc2f13334d72790dd"}, + {file = "grpcio-1.66.2-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:99a641995a6bc4287a6315989ee591ff58507aa1cbe4c2e70d88411c4dcc0839"}, + {file = "grpcio-1.66.2-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3ed71e81782966ffead60268bbda31ea3f725ebf8aa73634d5dda44f2cf3fb9c"}, + {file = "grpcio-1.66.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bbd27c24a4cc5e195a7f56cfd9312e366d5d61b86e36d46bbe538457ea6eb8dd"}, + {file = "grpcio-1.66.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:d9a9724a156c8ec6a379869b23ba3323b7ea3600851c91489b871e375f710bc8"}, + {file = "grpcio-1.66.2-cp38-cp38-musllinux_1_1_x86_64.whl", 
hash = "sha256:d8d4732cc5052e92cea2f78b233c2e2a52998ac40cd651f40e398893ad0d06ec"}, + {file = "grpcio-1.66.2-cp38-cp38-win32.whl", hash = "sha256:7b2c86457145ce14c38e5bf6bdc19ef88e66c5fee2c3d83285c5aef026ba93b3"}, + {file = "grpcio-1.66.2-cp38-cp38-win_amd64.whl", hash = "sha256:e88264caad6d8d00e7913996030bac8ad5f26b7411495848cc218bd3a9040b6c"}, + {file = "grpcio-1.66.2-cp39-cp39-linux_armv7l.whl", hash = "sha256:c400ba5675b67025c8a9f48aa846f12a39cf0c44df5cd060e23fda5b30e9359d"}, + {file = "grpcio-1.66.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:66a0cd8ba6512b401d7ed46bb03f4ee455839957f28b8d61e7708056a806ba6a"}, + {file = "grpcio-1.66.2-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:06de8ec0bd71be123eec15b0e0d457474931c2c407869b6c349bd9bed4adbac3"}, + {file = "grpcio-1.66.2-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fb57870449dfcfac428afbb5a877829fcb0d6db9d9baa1148705739e9083880e"}, + {file = "grpcio-1.66.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b672abf90a964bfde2d0ecbce30f2329a47498ba75ce6f4da35a2f4532b7acbc"}, + {file = "grpcio-1.66.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:ad2efdbe90c73b0434cbe64ed372e12414ad03c06262279b104a029d1889d13e"}, + {file = "grpcio-1.66.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9c3a99c519f4638e700e9e3f83952e27e2ea10873eecd7935823dab0c1c9250e"}, + {file = "grpcio-1.66.2-cp39-cp39-win32.whl", hash = "sha256:78fa51ebc2d9242c0fc5db0feecc57a9943303b46664ad89921f5079e2e4ada7"}, + {file = "grpcio-1.66.2-cp39-cp39-win_amd64.whl", hash = "sha256:728bdf36a186e7f51da73be7f8d09457a03061be848718d0edf000e709418987"}, + {file = "grpcio-1.66.2.tar.gz", hash = "sha256:563588c587b75c34b928bc428548e5b00ea38c46972181a4d8b75ba7e3f24231"}, +] + +[package.extras] +protobuf = ["grpcio-tools (>=1.66.2)"] [[package]] name = "grpcio-status" @@ -3993,13 +4028,13 @@ files = [ [[package]] name = "idna" -version = "3.9" +version = "3.10" description = 
"Internationalized Domain Names in Applications (IDNA)" optional = false python-versions = ">=3.6" files = [ - {file = "idna-3.9-py3-none-any.whl", hash = "sha256:69297d5da0cc9281c77efffb4e730254dd45943f45bbfb461de5991713989b1e"}, - {file = "idna-3.9.tar.gz", hash = "sha256:e5c5dafde284f26e9e0f28f6ea2d6400abd5ca099864a67f576f3981c6476124"}, + {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, + {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, ] [package.extras] @@ -4341,17 +4376,18 @@ files = [ [[package]] name = "kombu" -version = "5.4.1" +version = "5.4.2" description = "Messaging library for Python." optional = false python-versions = ">=3.8" files = [ - {file = "kombu-5.4.1-py3-none-any.whl", hash = "sha256:621d365f234e4c089596f3a2510f1ade07026efc28caca426161d8f458786cab"}, - {file = "kombu-5.4.1.tar.gz", hash = "sha256:1c05178826dab811f8cab5b0a154d42a7a33d8bcdde9fa3d7b4582e43c3c03db"}, + {file = "kombu-5.4.2-py3-none-any.whl", hash = "sha256:14212f5ccf022fc0a70453bb025a1dcc32782a588c49ea866884047d66e14763"}, + {file = "kombu-5.4.2.tar.gz", hash = "sha256:eef572dd2fd9fc614b37580e3caeafdd5af46c1eff31e7fba89138cdb406f2cf"}, ] [package.dependencies] amqp = ">=5.1.1,<6.0.0" +tzdata = {version = "*", markers = "python_version >= \"3.9\""} vine = "5.1.0" [package.extras] @@ -4373,17 +4409,18 @@ zookeeper = ["kazoo (>=2.8.0)"] [[package]] name = "kubernetes" -version = "30.1.0" +version = "31.0.0" description = "Kubernetes python client" optional = false python-versions = ">=3.6" files = [ - {file = "kubernetes-30.1.0-py2.py3-none-any.whl", hash = "sha256:e212e8b7579031dd2e512168b617373bc1e03888d41ac4e04039240a292d478d"}, - {file = "kubernetes-30.1.0.tar.gz", hash = "sha256:41e4c77af9f28e7a6c314e3bd06a8c6229ddd787cad684e0ab9f69b498e98ebc"}, + {file = "kubernetes-31.0.0-py2.py3-none-any.whl", hash = 
"sha256:bf141e2d380c8520eada8b351f4e319ffee9636328c137aa432bc486ca1200e1"}, + {file = "kubernetes-31.0.0.tar.gz", hash = "sha256:28945de906c8c259c1ebe62703b56a03b714049372196f854105afe4e6d014c0"}, ] [package.dependencies] certifi = ">=14.05.14" +durationpy = ">=0.7" google-auth = ">=1.0.1" oauthlib = ">=3.2.2" python-dateutil = ">=2.5.3" @@ -4413,13 +4450,13 @@ six = "*" [[package]] name = "langfuse" -version = "2.48.1" +version = "2.51.2" description = "A client library for accessing langfuse" optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "langfuse-2.48.1-py3-none-any.whl", hash = "sha256:8661070b6d94ba1d7da92c054f3110b6ecf4489d6e8204a4080f934f3f49ebf2"}, - {file = "langfuse-2.48.1.tar.gz", hash = "sha256:b8117d90babec6be1bc3303b42e0b71848531eae44118e6e0123d03e7961d0fc"}, + {file = "langfuse-2.51.2-py3-none-any.whl", hash = "sha256:7aab94a9452cda4587a2cd4917e455da1afd7f8a2696688742130e2f2d23ca59"}, + {file = "langfuse-2.51.2.tar.gz", hash = "sha256:0982b108ab4c02947f682e442b0796b7a73825d31eeace1771575f6454b8f79a"}, ] [package.dependencies] @@ -4438,13 +4475,13 @@ openai = ["openai (>=0.27.8)"] [[package]] name = "langsmith" -version = "0.1.120" +version = "0.1.129" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." 
optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "langsmith-0.1.120-py3-none-any.whl", hash = "sha256:54d2785e301646c0988e0a69ebe4d976488c87b41928b358cb153b6ddd8db62b"}, - {file = "langsmith-0.1.120.tar.gz", hash = "sha256:25499ca187b41bd89d784b272b97a8d76f60e0e21bdf20336e8a2aa6a9b23ac9"}, + {file = "langsmith-0.1.129-py3-none-any.whl", hash = "sha256:31393fbbb17d6be5b99b9b22d530450094fab23c6c37281a6a6efb2143d05347"}, + {file = "langsmith-0.1.129.tar.gz", hash = "sha256:6c3ba66471bef41b9f87da247cc0b493268b3f54656f73648a256a205261b6a0"}, ] [package.dependencies] @@ -4948,95 +4985,116 @@ tqdm = "*" [[package]] name = "mmh3" -version = "4.1.0" +version = "5.0.1" description = "Python extension for MurmurHash (MurmurHash3), a set of fast and robust hash functions." optional = false -python-versions = "*" +python-versions = ">=3.8" files = [ - {file = "mmh3-4.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:be5ac76a8b0cd8095784e51e4c1c9c318c19edcd1709a06eb14979c8d850c31a"}, - {file = "mmh3-4.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:98a49121afdfab67cd80e912b36404139d7deceb6773a83620137aaa0da5714c"}, - {file = "mmh3-4.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5259ac0535874366e7d1a5423ef746e0d36a9e3c14509ce6511614bdc5a7ef5b"}, - {file = "mmh3-4.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5950827ca0453a2be357696da509ab39646044e3fa15cad364eb65d78797437"}, - {file = "mmh3-4.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1dd0f652ae99585b9dd26de458e5f08571522f0402155809fd1dc8852a613a39"}, - {file = "mmh3-4.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:99d25548070942fab1e4a6f04d1626d67e66d0b81ed6571ecfca511f3edf07e6"}, - {file = "mmh3-4.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:53db8d9bad3cb66c8f35cbc894f336273f63489ce4ac416634932e3cbe79eb5b"}, - 
{file = "mmh3-4.1.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75da0f615eb55295a437264cc0b736753f830b09d102aa4c2a7d719bc445ec05"}, - {file = "mmh3-4.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b926b07fd678ea84b3a2afc1fa22ce50aeb627839c44382f3d0291e945621e1a"}, - {file = "mmh3-4.1.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:c5b053334f9b0af8559d6da9dc72cef0a65b325ebb3e630c680012323c950bb6"}, - {file = "mmh3-4.1.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:5bf33dc43cd6de2cb86e0aa73a1cc6530f557854bbbe5d59f41ef6de2e353d7b"}, - {file = "mmh3-4.1.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:fa7eacd2b830727ba3dd65a365bed8a5c992ecd0c8348cf39a05cc77d22f4970"}, - {file = "mmh3-4.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:42dfd6742b9e3eec599f85270617debfa0bbb913c545bb980c8a4fa7b2d047da"}, - {file = "mmh3-4.1.0-cp310-cp310-win32.whl", hash = "sha256:2974ad343f0d39dcc88e93ee6afa96cedc35a9883bc067febd7ff736e207fa47"}, - {file = "mmh3-4.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:74699a8984ded645c1a24d6078351a056f5a5f1fe5838870412a68ac5e28d865"}, - {file = "mmh3-4.1.0-cp310-cp310-win_arm64.whl", hash = "sha256:f0dc874cedc23d46fc488a987faa6ad08ffa79e44fb08e3cd4d4cf2877c00a00"}, - {file = "mmh3-4.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:3280a463855b0eae64b681cd5b9ddd9464b73f81151e87bb7c91a811d25619e6"}, - {file = "mmh3-4.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:97ac57c6c3301769e757d444fa7c973ceb002cb66534b39cbab5e38de61cd896"}, - {file = "mmh3-4.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a7b6502cdb4dbd880244818ab363c8770a48cdccecf6d729ade0241b736b5ec0"}, - {file = "mmh3-4.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:52ba2da04671a9621580ddabf72f06f0e72c1c9c3b7b608849b58b11080d8f14"}, - {file = "mmh3-4.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", 
hash = "sha256:5a5fef4c4ecc782e6e43fbeab09cff1bac82c998a1773d3a5ee6a3605cde343e"}, - {file = "mmh3-4.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5135358a7e00991f73b88cdc8eda5203bf9de22120d10a834c5761dbeb07dd13"}, - {file = "mmh3-4.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cff9ae76a54f7c6fe0167c9c4028c12c1f6de52d68a31d11b6790bb2ae685560"}, - {file = "mmh3-4.1.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6f02576a4d106d7830ca90278868bf0983554dd69183b7bbe09f2fcd51cf54f"}, - {file = "mmh3-4.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:073d57425a23721730d3ff5485e2da489dd3c90b04e86243dd7211f889898106"}, - {file = "mmh3-4.1.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:71e32ddec7f573a1a0feb8d2cf2af474c50ec21e7a8263026e8d3b4b629805db"}, - {file = "mmh3-4.1.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:7cbb20b29d57e76a58b40fd8b13a9130db495a12d678d651b459bf61c0714cea"}, - {file = "mmh3-4.1.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:a42ad267e131d7847076bb7e31050f6c4378cd38e8f1bf7a0edd32f30224d5c9"}, - {file = "mmh3-4.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4a013979fc9390abadc445ea2527426a0e7a4495c19b74589204f9b71bcaafeb"}, - {file = "mmh3-4.1.0-cp311-cp311-win32.whl", hash = "sha256:1d3b1cdad7c71b7b88966301789a478af142bddcb3a2bee563f7a7d40519a00f"}, - {file = "mmh3-4.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:0dc6dc32eb03727467da8e17deffe004fbb65e8b5ee2b502d36250d7a3f4e2ec"}, - {file = "mmh3-4.1.0-cp311-cp311-win_arm64.whl", hash = "sha256:9ae3a5c1b32dda121c7dc26f9597ef7b01b4c56a98319a7fe86c35b8bc459ae6"}, - {file = "mmh3-4.1.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0033d60c7939168ef65ddc396611077a7268bde024f2c23bdc283a19123f9e9c"}, - {file = "mmh3-4.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = 
"sha256:d6af3e2287644b2b08b5924ed3a88c97b87b44ad08e79ca9f93d3470a54a41c5"}, - {file = "mmh3-4.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d82eb4defa245e02bb0b0dc4f1e7ee284f8d212633389c91f7fba99ba993f0a2"}, - {file = "mmh3-4.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ba245e94b8d54765e14c2d7b6214e832557e7856d5183bc522e17884cab2f45d"}, - {file = "mmh3-4.1.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bb04e2feeabaad6231e89cd43b3d01a4403579aa792c9ab6fdeef45cc58d4ec0"}, - {file = "mmh3-4.1.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1e3b1a27def545ce11e36158ba5d5390cdbc300cfe456a942cc89d649cf7e3b2"}, - {file = "mmh3-4.1.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ce0ab79ff736d7044e5e9b3bfe73958a55f79a4ae672e6213e92492ad5e734d5"}, - {file = "mmh3-4.1.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b02268be6e0a8eeb8a924d7db85f28e47344f35c438c1e149878bb1c47b1cd3"}, - {file = "mmh3-4.1.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:deb887f5fcdaf57cf646b1e062d56b06ef2f23421c80885fce18b37143cba828"}, - {file = "mmh3-4.1.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:99dd564e9e2b512eb117bd0cbf0f79a50c45d961c2a02402787d581cec5448d5"}, - {file = "mmh3-4.1.0-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:08373082dfaa38fe97aa78753d1efd21a1969e51079056ff552e687764eafdfe"}, - {file = "mmh3-4.1.0-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:54b9c6a2ea571b714e4fe28d3e4e2db37abfd03c787a58074ea21ee9a8fd1740"}, - {file = "mmh3-4.1.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a7b1edf24c69e3513f879722b97ca85e52f9032f24a52284746877f6a7304086"}, - {file = "mmh3-4.1.0-cp312-cp312-win32.whl", hash = "sha256:411da64b951f635e1e2284b71d81a5a83580cea24994b328f8910d40bed67276"}, - {file = 
"mmh3-4.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:bebc3ecb6ba18292e3d40c8712482b4477abd6981c2ebf0e60869bd90f8ac3a9"}, - {file = "mmh3-4.1.0-cp312-cp312-win_arm64.whl", hash = "sha256:168473dd608ade6a8d2ba069600b35199a9af837d96177d3088ca91f2b3798e3"}, - {file = "mmh3-4.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:372f4b7e1dcde175507640679a2a8790185bb71f3640fc28a4690f73da986a3b"}, - {file = "mmh3-4.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:438584b97f6fe13e944faf590c90fc127682b57ae969f73334040d9fa1c7ffa5"}, - {file = "mmh3-4.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6e27931b232fc676675fac8641c6ec6b596daa64d82170e8597f5a5b8bdcd3b6"}, - {file = "mmh3-4.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:571a92bad859d7b0330e47cfd1850b76c39b615a8d8e7aa5853c1f971fd0c4b1"}, - {file = "mmh3-4.1.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4a69d6afe3190fa08f9e3a58e5145549f71f1f3fff27bd0800313426929c7068"}, - {file = "mmh3-4.1.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:afb127be0be946b7630220908dbea0cee0d9d3c583fa9114a07156f98566dc28"}, - {file = "mmh3-4.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:940d86522f36348ef1a494cbf7248ab3f4a1638b84b59e6c9e90408bd11ad729"}, - {file = "mmh3-4.1.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b3dcccc4935686619a8e3d1f7b6e97e3bd89a4a796247930ee97d35ea1a39341"}, - {file = "mmh3-4.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:01bb9b90d61854dfc2407c5e5192bfb47222d74f29d140cb2dd2a69f2353f7cc"}, - {file = "mmh3-4.1.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:bcb1b8b951a2c0b0fb8a5426c62a22557e2ffc52539e0a7cc46eb667b5d606a9"}, - {file = "mmh3-4.1.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:6477a05d5e5ab3168e82e8b106e316210ac954134f46ec529356607900aea82a"}, - 
{file = "mmh3-4.1.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:da5892287e5bea6977364b15712a2573c16d134bc5fdcdd4cf460006cf849278"}, - {file = "mmh3-4.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:99180d7fd2327a6fffbaff270f760576839dc6ee66d045fa3a450f3490fda7f5"}, - {file = "mmh3-4.1.0-cp38-cp38-win32.whl", hash = "sha256:9b0d4f3949913a9f9a8fb1bb4cc6ecd52879730aab5ff8c5a3d8f5b593594b73"}, - {file = "mmh3-4.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:598c352da1d945108aee0c3c3cfdd0e9b3edef74108f53b49d481d3990402169"}, - {file = "mmh3-4.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:475d6d1445dd080f18f0f766277e1237fa2914e5fe3307a3b2a3044f30892103"}, - {file = "mmh3-4.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5ca07c41e6a2880991431ac717c2a049056fff497651a76e26fc22224e8b5732"}, - {file = "mmh3-4.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0ebe052fef4bbe30c0548d12ee46d09f1b69035ca5208a7075e55adfe091be44"}, - {file = "mmh3-4.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eaefd42e85afb70f2b855a011f7b4d8a3c7e19c3f2681fa13118e4d8627378c5"}, - {file = "mmh3-4.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ac0ae43caae5a47afe1b63a1ae3f0986dde54b5fb2d6c29786adbfb8edc9edfb"}, - {file = "mmh3-4.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6218666f74c8c013c221e7f5f8a693ac9cf68e5ac9a03f2373b32d77c48904de"}, - {file = "mmh3-4.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ac59294a536ba447b5037f62d8367d7d93b696f80671c2c45645fa9f1109413c"}, - {file = "mmh3-4.1.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:086844830fcd1e5c84fec7017ea1ee8491487cfc877847d96f86f68881569d2e"}, - {file = "mmh3-4.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:e42b38fad664f56f77f6fbca22d08450f2464baa68acdbf24841bf900eb98e87"}, - 
{file = "mmh3-4.1.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:d08b790a63a9a1cde3b5d7d733ed97d4eb884bfbc92f075a091652d6bfd7709a"}, - {file = "mmh3-4.1.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:73ea4cc55e8aea28c86799ecacebca09e5f86500414870a8abaedfcbaf74d288"}, - {file = "mmh3-4.1.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:f90938ff137130e47bcec8dc1f4ceb02f10178c766e2ef58a9f657ff1f62d124"}, - {file = "mmh3-4.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:aa1f13e94b8631c8cd53259250556edcf1de71738936b60febba95750d9632bd"}, - {file = "mmh3-4.1.0-cp39-cp39-win32.whl", hash = "sha256:a3b680b471c181490cf82da2142029edb4298e1bdfcb67c76922dedef789868d"}, - {file = "mmh3-4.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:fefef92e9c544a8dbc08f77a8d1b6d48006a750c4375bbcd5ff8199d761e263b"}, - {file = "mmh3-4.1.0-cp39-cp39-win_arm64.whl", hash = "sha256:8e2c1f6a2b41723a4f82bd5a762a777836d29d664fc0095f17910bea0adfd4a6"}, - {file = "mmh3-4.1.0.tar.gz", hash = "sha256:a1cf25348b9acd229dda464a094d6170f47d2850a1fcb762a3b6172d2ce6ca4a"}, -] - -[package.extras] -test = ["mypy (>=1.0)", "pytest (>=7.0.0)"] + {file = "mmh3-5.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f0a4b4bf05778ed77d820d6e7d0e9bd6beb0c01af10e1ce9233f5d2f814fcafa"}, + {file = "mmh3-5.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ac7a391039aeab95810c2d020b69a94eb6b4b37d4e2374831e92db3a0cdf71c6"}, + {file = "mmh3-5.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3a2583b5521ca49756d8d8bceba80627a9cc295f255dcab4e3df7ccc2f09679a"}, + {file = "mmh3-5.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:081a8423fe53c1ac94f87165f3e4c500125d343410c1a0c5f1703e898a3ef038"}, + {file = "mmh3-5.0.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b8b4d72713799755dc8954a7d36d5c20a6c8de7b233c82404d122c7c7c1707cc"}, + {file = "mmh3-5.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:389a6fd51efc76d3182d36ec306448559c1244f11227d2bb771bdd0e6cc91321"}, + {file = "mmh3-5.0.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:39f4128edaa074bff721b1d31a72508cba4d2887ee7867f22082e1fe9d4edea0"}, + {file = "mmh3-5.0.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d5d23a94d91aabba3386b3769048d5f4210fdfef80393fece2f34ba5a7b466c"}, + {file = "mmh3-5.0.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:16347d038361f8b8f24fd2b7ef378c9b68ddee9f7706e46269b6e0d322814713"}, + {file = "mmh3-5.0.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:6e299408565af7d61f2d20a5ffdd77cf2ed902460fe4e6726839d59ba4b72316"}, + {file = "mmh3-5.0.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:42050af21ddfc5445ee5a66e73a8fc758c71790305e3ee9e4a85a8e69e810f94"}, + {file = "mmh3-5.0.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:2ae9b1f5ef27ec54659920f0404b7ceb39966e28867c461bfe83a05e8d18ddb0"}, + {file = "mmh3-5.0.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:50c2495a02045f3047d71d4ae9cdd7a15efc0bcbb7ff17a18346834a8e2d1d19"}, + {file = "mmh3-5.0.1-cp310-cp310-win32.whl", hash = "sha256:c028fa77cddf351ca13b4a56d43c1775652cde0764cadb39120b68f02a23ecf6"}, + {file = "mmh3-5.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:c5e741e421ec14400c4aae30890515c201f518403bdef29ae1e00d375bb4bbb5"}, + {file = "mmh3-5.0.1-cp310-cp310-win_arm64.whl", hash = "sha256:b17156d56fabc73dbf41bca677ceb6faed435cc8544f6566d72ea77d8a17e9d0"}, + {file = "mmh3-5.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9a6d5a9b1b923f1643559ba1fc0bf7a5076c90cbb558878d3bf3641ce458f25d"}, + {file = "mmh3-5.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3349b968be555f7334bbcce839da98f50e1e80b1c615d8e2aa847ea4a964a012"}, + {file = "mmh3-5.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:1bd3c94b110e55db02ab9b605029f48a2f7f677c6e58c09d44e42402d438b7e1"}, + {file = "mmh3-5.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d47ba84d48608f79adbb10bb09986b6dc33eeda5c2d1bd75d00820081b73bde9"}, + {file = "mmh3-5.0.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c0217987a8b8525c8d9170f66d036dec4ab45cfbd53d47e8d76125791ceb155e"}, + {file = "mmh3-5.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2797063a34e78d1b61639a98b0edec1c856fa86ab80c7ec859f1796d10ba429"}, + {file = "mmh3-5.0.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8bba16340adcbd47853a2fbe5afdb397549e8f2e79324ff1dced69a3f8afe7c3"}, + {file = "mmh3-5.0.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:282797957c9f60b51b9d768a602c25f579420cc9af46feb77d457a27823d270a"}, + {file = "mmh3-5.0.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:e4fb670c29e63f954f9e7a2cdcd57b36a854c2538f579ef62681ccbaa1de2b69"}, + {file = "mmh3-5.0.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8ee7d85438dc6aff328e19ab052086a3c29e8a9b632998a49e5c4b0034e9e8d6"}, + {file = "mmh3-5.0.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:b7fb5db231f3092444bc13901e6a8d299667126b00636ffbad4a7b45e1051e2f"}, + {file = "mmh3-5.0.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:c100dd441703da5ec136b1d9003ed4a041d8a1136234c9acd887499796df6ad8"}, + {file = "mmh3-5.0.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:71f3b765138260fd7a7a2dba0ea5727dabcd18c1f80323c9cfef97a7e86e01d0"}, + {file = "mmh3-5.0.1-cp311-cp311-win32.whl", hash = "sha256:9a76518336247fd17689ce3ae5b16883fd86a490947d46a0193d47fb913e26e3"}, + {file = "mmh3-5.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:336bc4df2e44271f1c302d289cc3d78bd52d3eed8d306c7e4bff8361a12bf148"}, + {file = 
"mmh3-5.0.1-cp311-cp311-win_arm64.whl", hash = "sha256:af6522722fbbc5999aa66f7244d0986767a46f1fb05accc5200f75b72428a508"}, + {file = "mmh3-5.0.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:f2730bb263ed9c388e8860438b057a53e3cc701134a6ea140f90443c4c11aa40"}, + {file = "mmh3-5.0.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:6246927bc293f6d56724536400b85fb85f5be26101fa77d5f97dd5e2a4c69bf2"}, + {file = "mmh3-5.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:fbca322519a6e6e25b6abf43e940e1667cf8ea12510e07fb4919b48a0cd1c411"}, + {file = "mmh3-5.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eae8c19903ed8a1724ad9e67e86f15d198a7a1271a4f9be83d47e38f312ed672"}, + {file = "mmh3-5.0.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a09fd6cc72c07c0c07c3357714234b646d78052487c4a3bd5f7f6e08408cff60"}, + {file = "mmh3-5.0.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2ff8551fee7ae3b11c5d986b6347ade0dccaadd4670ffdb2b944dee120ffcc84"}, + {file = "mmh3-5.0.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e39694c73a5a20c8bf36dfd8676ed351e5234d55751ba4f7562d85449b21ef3f"}, + {file = "mmh3-5.0.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eba6001989a92f72a89c7cf382fda831678bd780707a66b4f8ca90239fdf2123"}, + {file = "mmh3-5.0.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:0771f90c9911811cc606a5c7b7b58f33501c9ee896ed68a6ac22c7d55878ecc0"}, + {file = "mmh3-5.0.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:09b31ed0c0c0920363e96641fac4efde65b1ab62b8df86293142f35a254e72b4"}, + {file = "mmh3-5.0.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:5cf4a8deda0235312db12075331cb417c4ba163770edfe789bde71d08a24b692"}, + {file = "mmh3-5.0.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = 
"sha256:41f7090a95185ef20ac018581a99337f0cbc84a2135171ee3290a9c0d9519585"}, + {file = "mmh3-5.0.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b97b5b368fb7ff22194ec5854f5b12d8de9ab67a0f304728c7f16e5d12135b76"}, + {file = "mmh3-5.0.1-cp312-cp312-win32.whl", hash = "sha256:842516acf04da546f94fad52db125ee619ccbdcada179da51c326a22c4578cb9"}, + {file = "mmh3-5.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:d963be0dbfd9fca209c17172f6110787ebf78934af25e3694fe2ba40e55c1e2b"}, + {file = "mmh3-5.0.1-cp312-cp312-win_arm64.whl", hash = "sha256:a5da292ceeed8ce8e32b68847261a462d30fd7b478c3f55daae841404f433c15"}, + {file = "mmh3-5.0.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:673e3f1c8d4231d6fb0271484ee34cb7146a6499fc0df80788adb56fd76842da"}, + {file = "mmh3-5.0.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f795a306bd16a52ad578b663462cc8e95500b3925d64118ae63453485d67282b"}, + {file = "mmh3-5.0.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5ed57a5e28e502a1d60436cc25c76c3a5ba57545f250f2969af231dc1221e0a5"}, + {file = "mmh3-5.0.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:632c28e7612e909dbb6cbe2fe496201ada4695b7715584005689c5dc038e59ad"}, + {file = "mmh3-5.0.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:53fd6bd525a5985e391c43384672d9d6b317fcb36726447347c7fc75bfed34ec"}, + {file = "mmh3-5.0.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dceacf6b0b961a0e499836af3aa62d60633265607aef551b2a3e3c48cdaa5edd"}, + {file = "mmh3-5.0.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8f0738d478fdfb5d920f6aff5452c78f2c35b0eff72caa2a97dfe38e82f93da2"}, + {file = "mmh3-5.0.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e70285e7391ab88b872e5bef632bad16b9d99a6d3ca0590656a4753d55988af"}, + {file = 
"mmh3-5.0.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:27e5fc6360aa6b828546a4318da1a7da6bf6e5474ccb053c3a6aa8ef19ff97bd"}, + {file = "mmh3-5.0.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:7989530c3c1e2c17bf5a0ec2bba09fd19819078ba90beedabb1c3885f5040b0d"}, + {file = "mmh3-5.0.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:cdad7bee649950da7ecd3cbbbd12fb81f1161072ecbdb5acfa0018338c5cb9cf"}, + {file = "mmh3-5.0.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:e143b8f184c1bb58cecd85ab4a4fd6dc65a2d71aee74157392c3fddac2a4a331"}, + {file = "mmh3-5.0.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e5eb12e886f3646dd636f16b76eb23fc0c27e8ff3c1ae73d4391e50ef60b40f6"}, + {file = "mmh3-5.0.1-cp313-cp313-win32.whl", hash = "sha256:16e6dddfa98e1c2d021268e72c78951234186deb4df6630e984ac82df63d0a5d"}, + {file = "mmh3-5.0.1-cp313-cp313-win_amd64.whl", hash = "sha256:d3ffb792d70b8c4a2382af3598dad6ae0c5bd9cee5b7ffcc99aa2f5fd2c1bf70"}, + {file = "mmh3-5.0.1-cp313-cp313-win_arm64.whl", hash = "sha256:122fa9ec148383f9124292962bda745f192b47bfd470b2af5fe7bb3982b17896"}, + {file = "mmh3-5.0.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:b12bad8c75e6ff5d67319794fb6a5e8c713826c818d47f850ad08b4aa06960c6"}, + {file = "mmh3-5.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e5bbb066538c1048d542246fc347bb7994bdda29a3aea61c22f9f8b57111ce69"}, + {file = "mmh3-5.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:eee6134273f64e2a106827cc8fd77e70cc7239a285006fc6ab4977d59b015af2"}, + {file = "mmh3-5.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d04d9aa19d48e4c7bbec9cabc2c4dccc6ff3b2402f856d5bf0de03e10f167b5b"}, + {file = "mmh3-5.0.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:79f37da1eed034d06567a69a7988456345c7f29e49192831c3975b464493b16e"}, + {file = "mmh3-5.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:242f77666743337aa828a2bf2da71b6ba79623ee7f93edb11e009f69237c8561"}, + {file = "mmh3-5.0.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffd943fff690463945f6441a2465555b3146deaadf6a5e88f2590d14c655d71b"}, + {file = "mmh3-5.0.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:565b15f8d7df43acb791ff5a360795c20bfa68bca8b352509e0fbabd06cc48cd"}, + {file = "mmh3-5.0.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:fc6aafb867c2030df98ac7760ff76b500359252867985f357bd387739f3d5287"}, + {file = "mmh3-5.0.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:32898170644d45aa27c974ab0d067809c066205110f5c6d09f47d9ece6978bfe"}, + {file = "mmh3-5.0.1-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:42865567838d2193eb64e0ef571f678bf361a254fcdef0c5c8e73243217829bd"}, + {file = "mmh3-5.0.1-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:5ff5c1f301c4a8b6916498969c0fcc7e3dbc56b4bfce5cfe3fe31f3f4609e5ae"}, + {file = "mmh3-5.0.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:be74c2dda8a6f44a504450aa2c3507f8067a159201586fc01dd41ab80efc350f"}, + {file = "mmh3-5.0.1-cp38-cp38-win32.whl", hash = "sha256:5610a842621ff76c04b20b29cf5f809b131f241a19d4937971ba77dc99a7f330"}, + {file = "mmh3-5.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:de15739ac50776fe8aa1ef13f1be46a6ee1fbd45f6d0651084097eb2be0a5aa4"}, + {file = "mmh3-5.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:48e84cf3cc7e8c41bc07de72299a73b92d9e3cde51d97851420055b1484995f7"}, + {file = "mmh3-5.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6dd9dc28c2d168c49928195c2e29b96f9582a5d07bd690a28aede4cc07b0e696"}, + {file = "mmh3-5.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2771a1c56a3d4bdad990309cff5d0a8051f29c8ec752d001f97d6392194ae880"}, + {file = "mmh3-5.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:c5ff2a8322ba40951a84411550352fba1073ce1c1d1213bb7530f09aed7f8caf"}, + {file = "mmh3-5.0.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a16bd3ec90682c9e0a343e6bd4c778c09947c8c5395cdb9e5d9b82b2559efbca"}, + {file = "mmh3-5.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d45733a78d68b5b05ff4a823aea51fa664df1d3bf4929b152ff4fd6dea2dd69b"}, + {file = "mmh3-5.0.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:904285e83cedebc8873b0838ed54c20f7344120be26e2ca5a907ab007a18a7a0"}, + {file = "mmh3-5.0.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac4aeb1784e43df728034d0ed72e4b2648db1a69fef48fa58e810e13230ae5ff"}, + {file = "mmh3-5.0.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:cb3d4f751a0b8b4c8d06ef1c085216c8fddcc8b8c8d72445976b5167a40c6d1e"}, + {file = "mmh3-5.0.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:8021851935600e60c42122ed1176399d7692df338d606195cd599d228a04c1c6"}, + {file = "mmh3-5.0.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:6182d5924a5efc451900f864cbb021d7e8ad5d524816ca17304a0f663bc09bb5"}, + {file = "mmh3-5.0.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:5f30b834552a4f79c92e3d266336fb87fd92ce1d36dc6813d3e151035890abbd"}, + {file = "mmh3-5.0.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:cd4383f35e915e06d077df27e04ffd3be7513ec6a9de2d31f430393f67e192a7"}, + {file = "mmh3-5.0.1-cp39-cp39-win32.whl", hash = "sha256:1455fb6b42665a97db8fc66e89a861e52b567bce27ed054c47877183f86ea6e3"}, + {file = "mmh3-5.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:9e26a0f4eb9855a143f5938a53592fa14c2d3b25801c2106886ab6c173982780"}, + {file = "mmh3-5.0.1-cp39-cp39-win_arm64.whl", hash = "sha256:0d0a35a69abdad7549c4030a714bb4ad07902edb3bbe61e1bbc403ded5d678be"}, + {file = "mmh3-5.0.1.tar.gz", hash = 
"sha256:7dab080061aeb31a6069a181f27c473a1f67933854e36a3464931f2716508896"}, +] + +[package.extras] +benchmark = ["pymmh3 (==0.0.5)", "pyperf (==2.7.0)", "xxhash (==3.5.0)"] +docs = ["myst-parser (==4.0.0)", "shibuya (==2024.8.30)", "sphinx (==8.0.2)", "sphinx-copybutton (==0.5.2)"] +lint = ["black (==24.8.0)", "clang-format (==18.1.8)", "isort (==5.13.2)", "pylint (==3.2.7)"] +plot = ["matplotlib (==3.9.2)", "pandas (==2.2.2)"] +test = ["pytest (==8.3.3)", "pytest-sugar (==1.0.0)"] +type = ["mypy (==1.11.2)"] [[package]] name = "mock" @@ -5539,13 +5597,13 @@ signedtoken = ["cryptography (>=3.0.0)", "pyjwt (>=2.0.0,<3)"] [[package]] name = "oci" -version = "2.133.0" +version = "2.135.0" description = "Oracle Cloud Infrastructure Python SDK" optional = false python-versions = "*" files = [ - {file = "oci-2.133.0-py3-none-any.whl", hash = "sha256:9706365481ca538c89b3a15e6b5c246801eccb06be831a7f21c40f2a2ee310a7"}, - {file = "oci-2.133.0.tar.gz", hash = "sha256:800418025bb98f587c65bbf89c6b6d61ef0f2249e0698d73439baf3251640b7f"}, + {file = "oci-2.135.0-py3-none-any.whl", hash = "sha256:c01f1d103ed034fa7ca2bceb297bf00e6f6c456d14a46b35ee9007b25f3ea397"}, + {file = "oci-2.135.0.tar.gz", hash = "sha256:6e28e6595264705d8fd0719045ffc4b23170e7fd2cd76a1c3aa25e4cdaa5883a"}, ] [package.dependencies] @@ -6059,40 +6117,53 @@ files = [ [[package]] name = "pandas" -version = "2.2.2" +version = "2.2.3" description = "Powerful data structures for data analysis, time series, and statistics" optional = false python-versions = ">=3.9" files = [ - {file = "pandas-2.2.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:90c6fca2acf139569e74e8781709dccb6fe25940488755716d1d354d6bc58bce"}, - {file = "pandas-2.2.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c7adfc142dac335d8c1e0dcbd37eb8617eac386596eb9e1a1b77791cf2498238"}, - {file = "pandas-2.2.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:4abfe0be0d7221be4f12552995e58723c7422c80a659da13ca382697de830c08"}, - {file = "pandas-2.2.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8635c16bf3d99040fdf3ca3db669a7250ddf49c55dc4aa8fe0ae0fa8d6dcc1f0"}, - {file = "pandas-2.2.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:40ae1dffb3967a52203105a077415a86044a2bea011b5f321c6aa64b379a3f51"}, - {file = "pandas-2.2.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8e5a0b00e1e56a842f922e7fae8ae4077aee4af0acb5ae3622bd4b4c30aedf99"}, - {file = "pandas-2.2.2-cp310-cp310-win_amd64.whl", hash = "sha256:ddf818e4e6c7c6f4f7c8a12709696d193976b591cc7dc50588d3d1a6b5dc8772"}, - {file = "pandas-2.2.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:696039430f7a562b74fa45f540aca068ea85fa34c244d0deee539cb6d70aa288"}, - {file = "pandas-2.2.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8e90497254aacacbc4ea6ae5e7a8cd75629d6ad2b30025a4a8b09aa4faf55151"}, - {file = "pandas-2.2.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:58b84b91b0b9f4bafac2a0ac55002280c094dfc6402402332c0913a59654ab2b"}, - {file = "pandas-2.2.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d2123dc9ad6a814bcdea0f099885276b31b24f7edf40f6cdbc0912672e22eee"}, - {file = "pandas-2.2.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:2925720037f06e89af896c70bca73459d7e6a4be96f9de79e2d440bd499fe0db"}, - {file = "pandas-2.2.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:0cace394b6ea70c01ca1595f839cf193df35d1575986e484ad35c4aeae7266c1"}, - {file = "pandas-2.2.2-cp311-cp311-win_amd64.whl", hash = "sha256:873d13d177501a28b2756375d59816c365e42ed8417b41665f346289adc68d24"}, - {file = "pandas-2.2.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:9dfde2a0ddef507a631dc9dc4af6a9489d5e2e740e226ad426a05cabfbd7c8ef"}, - {file = "pandas-2.2.2-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:e9b79011ff7a0f4b1d6da6a61aa1aa604fb312d6647de5bad20013682d1429ce"}, - {file = "pandas-2.2.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1cb51fe389360f3b5a4d57dbd2848a5f033350336ca3b340d1c53a1fad33bcad"}, - {file = "pandas-2.2.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eee3a87076c0756de40b05c5e9a6069c035ba43e8dd71c379e68cab2c20f16ad"}, - {file = "pandas-2.2.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:3e374f59e440d4ab45ca2fffde54b81ac3834cf5ae2cdfa69c90bc03bde04d76"}, - {file = "pandas-2.2.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:43498c0bdb43d55cb162cdc8c06fac328ccb5d2eabe3cadeb3529ae6f0517c32"}, - {file = "pandas-2.2.2-cp312-cp312-win_amd64.whl", hash = "sha256:d187d355ecec3629624fccb01d104da7d7f391db0311145817525281e2804d23"}, - {file = "pandas-2.2.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:0ca6377b8fca51815f382bd0b697a0814c8bda55115678cbc94c30aacbb6eff2"}, - {file = "pandas-2.2.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9057e6aa78a584bc93a13f0a9bf7e753a5e9770a30b4d758b8d5f2a62a9433cd"}, - {file = "pandas-2.2.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:001910ad31abc7bf06f49dcc903755d2f7f3a9186c0c040b827e522e9cef0863"}, - {file = "pandas-2.2.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:66b479b0bd07204e37583c191535505410daa8df638fd8e75ae1b383851fe921"}, - {file = "pandas-2.2.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:a77e9d1c386196879aa5eb712e77461aaee433e54c68cf253053a73b7e49c33a"}, - {file = "pandas-2.2.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:92fd6b027924a7e178ac202cfbe25e53368db90d56872d20ffae94b96c7acc57"}, - {file = "pandas-2.2.2-cp39-cp39-win_amd64.whl", hash = "sha256:640cef9aa381b60e296db324337a554aeeb883ead99dc8f6c18e81a93942f5f4"}, - {file = "pandas-2.2.2.tar.gz", hash = "sha256:9e79019aba43cb4fda9e4d983f8e88ca0373adbb697ae9c6c43093218de28b54"}, + 
{file = "pandas-2.2.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1948ddde24197a0f7add2bdc4ca83bf2b1ef84a1bc8ccffd95eda17fd836ecb5"}, + {file = "pandas-2.2.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:381175499d3802cde0eabbaf6324cce0c4f5d52ca6f8c377c29ad442f50f6348"}, + {file = "pandas-2.2.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d9c45366def9a3dd85a6454c0e7908f2b3b8e9c138f5dc38fed7ce720d8453ed"}, + {file = "pandas-2.2.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86976a1c5b25ae3f8ccae3a5306e443569ee3c3faf444dfd0f41cda24667ad57"}, + {file = "pandas-2.2.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:b8661b0238a69d7aafe156b7fa86c44b881387509653fdf857bebc5e4008ad42"}, + {file = "pandas-2.2.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:37e0aced3e8f539eccf2e099f65cdb9c8aa85109b0be6e93e2baff94264bdc6f"}, + {file = "pandas-2.2.3-cp310-cp310-win_amd64.whl", hash = "sha256:56534ce0746a58afaf7942ba4863e0ef81c9c50d3f0ae93e9497d6a41a057645"}, + {file = "pandas-2.2.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:66108071e1b935240e74525006034333f98bcdb87ea116de573a6a0dccb6c039"}, + {file = "pandas-2.2.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7c2875855b0ff77b2a64a0365e24455d9990730d6431b9e0ee18ad8acee13dbd"}, + {file = "pandas-2.2.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cd8d0c3be0515c12fed0bdbae072551c8b54b7192c7b1fda0ba56059a0179698"}, + {file = "pandas-2.2.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c124333816c3a9b03fbeef3a9f230ba9a737e9e5bb4060aa2107a86cc0a497fc"}, + {file = "pandas-2.2.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:63cc132e40a2e084cf01adf0775b15ac515ba905d7dcca47e9a251819c575ef3"}, + {file = "pandas-2.2.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:29401dbfa9ad77319367d36940cd8a0b3a11aba16063e39632d98b0e931ddf32"}, + {file = 
"pandas-2.2.3-cp311-cp311-win_amd64.whl", hash = "sha256:3fc6873a41186404dad67245896a6e440baacc92f5b716ccd1bc9ed2995ab2c5"}, + {file = "pandas-2.2.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b1d432e8d08679a40e2a6d8b2f9770a5c21793a6f9f47fdd52c5ce1948a5a8a9"}, + {file = "pandas-2.2.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a5a1595fe639f5988ba6a8e5bc9649af3baf26df3998a0abe56c02609392e0a4"}, + {file = "pandas-2.2.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5de54125a92bb4d1c051c0659e6fcb75256bf799a732a87184e5ea503965bce3"}, + {file = "pandas-2.2.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fffb8ae78d8af97f849404f21411c95062db1496aeb3e56f146f0355c9989319"}, + {file = "pandas-2.2.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6dfcb5ee8d4d50c06a51c2fffa6cff6272098ad6540aed1a76d15fb9318194d8"}, + {file = "pandas-2.2.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:062309c1b9ea12a50e8ce661145c6aab431b1e99530d3cd60640e255778bd43a"}, + {file = "pandas-2.2.3-cp312-cp312-win_amd64.whl", hash = "sha256:59ef3764d0fe818125a5097d2ae867ca3fa64df032331b7e0917cf5d7bf66b13"}, + {file = "pandas-2.2.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f00d1345d84d8c86a63e476bb4955e46458b304b9575dcf71102b5c705320015"}, + {file = "pandas-2.2.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3508d914817e153ad359d7e069d752cdd736a247c322d932eb89e6bc84217f28"}, + {file = "pandas-2.2.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:22a9d949bfc9a502d320aa04e5d02feab689d61da4e7764b62c30b991c42c5f0"}, + {file = "pandas-2.2.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3a255b2c19987fbbe62a9dfd6cff7ff2aa9ccab3fc75218fd4b7530f01efa24"}, + {file = "pandas-2.2.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:800250ecdadb6d9c78eae4990da62743b857b470883fa27f652db8bdde7f6659"}, + {file = 
"pandas-2.2.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6374c452ff3ec675a8f46fd9ab25c4ad0ba590b71cf0656f8b6daa5202bca3fb"}, + {file = "pandas-2.2.3-cp313-cp313-win_amd64.whl", hash = "sha256:61c5ad4043f791b61dd4752191d9f07f0ae412515d59ba8f005832a532f8736d"}, + {file = "pandas-2.2.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:3b71f27954685ee685317063bf13c7709a7ba74fc996b84fc6821c59b0f06468"}, + {file = "pandas-2.2.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:38cf8125c40dae9d5acc10fa66af8ea6fdf760b2714ee482ca691fc66e6fcb18"}, + {file = "pandas-2.2.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ba96630bc17c875161df3818780af30e43be9b166ce51c9a18c1feae342906c2"}, + {file = "pandas-2.2.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1db71525a1538b30142094edb9adc10be3f3e176748cd7acc2240c2f2e5aa3a4"}, + {file = "pandas-2.2.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:15c0e1e02e93116177d29ff83e8b1619c93ddc9c49083f237d4312337a61165d"}, + {file = "pandas-2.2.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:ad5b65698ab28ed8d7f18790a0dc58005c7629f227be9ecc1072aa74c0c1d43a"}, + {file = "pandas-2.2.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bc6b93f9b966093cb0fd62ff1a7e4c09e6d546ad7c1de191767baffc57628f39"}, + {file = "pandas-2.2.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5dbca4c1acd72e8eeef4753eeca07de9b1db4f398669d5994086f788a5d7cc30"}, + {file = "pandas-2.2.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8cd6d7cc958a3910f934ea8dbdf17b2364827bb4dafc38ce6eef6bb3d65ff09c"}, + {file = "pandas-2.2.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99df71520d25fade9db7c1076ac94eb994f4d2673ef2aa2e86ee039b6746d20c"}, + {file = "pandas-2.2.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:31d0ced62d4ea3e231a9f228366919a5ea0b07440d9d4dac345376fd8e1477ea"}, + {file = 
"pandas-2.2.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7eee9e7cea6adf3e3d24e304ac6b8300646e2a5d1cd3a3c2abed9101b0846761"}, + {file = "pandas-2.2.3-cp39-cp39-win_amd64.whl", hash = "sha256:4850ba03528b6dd51d6c5d273c46f183f39a9baf3f0143e566b89450965b105e"}, + {file = "pandas-2.2.3.tar.gz", hash = "sha256:4f18ba62b61d7e192368b84517265a99b4d7ee8912f8708660fb4a366cc82667"}, ] [package.dependencies] @@ -6300,13 +6371,13 @@ xmp = ["defusedxml"] [[package]] name = "platformdirs" -version = "4.3.3" +version = "4.3.6" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." optional = false python-versions = ">=3.8" files = [ - {file = "platformdirs-4.3.3-py3-none-any.whl", hash = "sha256:50a5450e2e84f44539718293cbb1da0a0885c9d14adf21b77bae4e66fc99d9b5"}, - {file = "platformdirs-4.3.3.tar.gz", hash = "sha256:d4e0b7d8ec176b341fb03cb11ca12d0276faa8c485f9cd218f613840463fc2c0"}, + {file = "platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb"}, + {file = "platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907"}, ] [package.extras] @@ -6376,13 +6447,13 @@ tests = ["pytest (>=5.4.1)", "pytest-cov (>=2.8.1)", "pytest-mypy (>=0.8.0)", "p [[package]] name = "posthog" -version = "3.6.5" +version = "3.6.6" description = "Integrate PostHog into any python application." 
optional = false python-versions = "*" files = [ - {file = "posthog-3.6.5-py2.py3-none-any.whl", hash = "sha256:f8b7c573826b061a1d22c9495169c38ebe83a1df2729f49c7129a9c23a02acf6"}, - {file = "posthog-3.6.5.tar.gz", hash = "sha256:7fd3ca809e15476c35f75d18cd6bba31395daf0a17b75242965c469fb6292510"}, + {file = "posthog-3.6.6-py2.py3-none-any.whl", hash = "sha256:38834fd7f0732582a20d4eb4674c8d5c088e464d14d1b3f8c176e389aecaa4ef"}, + {file = "posthog-3.6.6.tar.gz", hash = "sha256:1e04783293117109189ad7048f3eedbe21caff0e39bee5e2d47a93dd790fefac"}, ] [package.dependencies] @@ -6399,44 +6470,44 @@ test = ["coverage", "django", "flake8", "freezegun (==0.3.15)", "mock (>=2.0.0)" [[package]] name = "pox" -version = "0.3.4" +version = "0.3.5" description = "utilities for filesystem exploration and automated builds" optional = false python-versions = ">=3.8" files = [ - {file = "pox-0.3.4-py3-none-any.whl", hash = "sha256:651b8ae8a7b341b7bfd267f67f63106daeb9805f1ac11f323d5280d2da93fdb6"}, - {file = "pox-0.3.4.tar.gz", hash = "sha256:16e6eca84f1bec3828210b06b052adf04cf2ab20c22fd6fbef5f78320c9a6fed"}, + {file = "pox-0.3.5-py3-none-any.whl", hash = "sha256:9e82bcc9e578b43e80a99cad80f0d8f44f4d424f0ee4ee8d4db27260a6aa365a"}, + {file = "pox-0.3.5.tar.gz", hash = "sha256:8120ee4c94e950e6e0483e050a4f0e56076e590ba0a9add19524c254bd23c2d1"}, ] [[package]] name = "ppft" -version = "1.7.6.8" +version = "1.7.6.9" description = "distributed and parallel Python" optional = false python-versions = ">=3.8" files = [ - {file = "ppft-1.7.6.8-py3-none-any.whl", hash = "sha256:de2dd4b1b080923dd9627fbdea52649fd741c752fce4f3cf37e26f785df23d9b"}, - {file = "ppft-1.7.6.8.tar.gz", hash = "sha256:76a429a7d7b74c4d743f6dba8351e58d62b6432ed65df9fe204790160dab996d"}, + {file = "ppft-1.7.6.9-py3-none-any.whl", hash = "sha256:dab36548db5ca3055067fbe6b1a17db5fee29f3c366c579a9a27cebb52ed96f0"}, + {file = "ppft-1.7.6.9.tar.gz", hash = "sha256:73161c67474ea9d81d04bcdad166d399cff3f084d5d2dc21ebdd46c075bbc265"}, ] 
[package.extras] -dill = ["dill (>=0.3.8)"] +dill = ["dill (>=0.3.9)"] [[package]] name = "primp" -version = "0.6.2" +version = "0.6.3" description = "HTTP client that can impersonate web browsers, mimicking their headers and `TLS/JA3/JA4/HTTP2` fingerprints" optional = false python-versions = ">=3.8" files = [ - {file = "primp-0.6.2-cp38-abi3-macosx_10_12_x86_64.whl", hash = "sha256:4a35d441462a55d9a9525bf170e2ffd2fcb3db6039b23e802859fa22c18cdd51"}, - {file = "primp-0.6.2-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:f67ccade95bdbca3cf9b96b93aa53f9617d85ddbf988da4e9c523aa785fd2d54"}, - {file = "primp-0.6.2-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8074b93befaf36567e4cf3d4a1a8cd6ab9cc6e4dd4ff710650678daa405aee71"}, - {file = "primp-0.6.2-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:7d3e2a3f8c6262e9b883651b79c4ff2b7677a76f47293a139f541c9ea333ce3b"}, - {file = "primp-0.6.2-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:a460ea389371c6d04839b4b50b5805d99da8ebe281a2e8b534d27377c6d44f0e"}, - {file = "primp-0.6.2-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:5b6b27e89d3c05c811aff0e4fde7a36d6957b15b3112f4ce28b6b99e8ca1e725"}, - {file = "primp-0.6.2-cp38-abi3-win_amd64.whl", hash = "sha256:1006a40a85f88a4c5222094813a1ebc01f85a63e9a33d2c443288c0720bed321"}, - {file = "primp-0.6.2.tar.gz", hash = "sha256:5a96a6b65195a8a989157e67d23bd171c49be238654e02bdf1b1fda36cbcc068"}, + {file = "primp-0.6.3-cp38-abi3-macosx_10_12_x86_64.whl", hash = "sha256:bdbe6a7cdaaf5c9ed863432a941f4a75bd4c6ff626cbc8d32fc232793c70ba06"}, + {file = "primp-0.6.3-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:eeb53eb987bdcbcd85740633470255cab887d921df713ffa12a36a13366c9cdb"}, + {file = "primp-0.6.3-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78da53d3c92a8e3f05bd3286ac76c291f1b6fe5e08ea63b7ba92b0f9141800bb"}, + {file = "primp-0.6.3-cp38-abi3-manylinux_2_34_aarch64.whl", hash = 
"sha256:86337b44deecdac752bd8112909987fc9fa9b894f30191c80a164dc8f895da53"}, + {file = "primp-0.6.3-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:d3cd9a22b97f3eae42b2a5fb99f00480daf4cd6d9b139e05b0ffb03f7cc037f3"}, + {file = "primp-0.6.3-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:7732bec917e2d3c48a31cdb92e1250f4ad6203a1aa4f802bd9abd84f2286a1e0"}, + {file = "primp-0.6.3-cp38-abi3-win_amd64.whl", hash = "sha256:1e4113c34b86c676ae321af185f03a372caef3ee009f1682c2d62e30ec87348c"}, + {file = "primp-0.6.3.tar.gz", hash = "sha256:17d30ebe26864defad5232dbbe1372e80483940012356e1f68846bb182282039"}, ] [package.extras] @@ -6444,13 +6515,13 @@ dev = ["certifi", "pytest (>=8.1.1)"] [[package]] name = "prompt-toolkit" -version = "3.0.47" +version = "3.0.48" description = "Library for building powerful interactive command lines in Python" optional = false python-versions = ">=3.7.0" files = [ - {file = "prompt_toolkit-3.0.47-py3-none-any.whl", hash = "sha256:0d7bfa67001d5e39d02c224b663abc33687405033a8c422d0d675a5a13361d10"}, - {file = "prompt_toolkit-3.0.47.tar.gz", hash = "sha256:1e1b29cb58080b1e69f207c893a1a7bf16d127a5c30c9d17a25a5d77792e5360"}, + {file = "prompt_toolkit-3.0.48-py3-none-any.whl", hash = "sha256:f49a827f90062e411f1ce1f854f2aedb3c23353244f8108b89283587397ac10e"}, + {file = "prompt_toolkit-3.0.48.tar.gz", hash = "sha256:d6623ab0477a80df74e646bdbc93621143f5caf104206aa29294d53de1a03d90"}, ] [package.dependencies] @@ -6475,22 +6546,22 @@ testing = ["google-api-core (>=1.31.5)"] [[package]] name = "protobuf" -version = "4.25.4" +version = "4.25.5" description = "" optional = false python-versions = ">=3.8" files = [ - {file = "protobuf-4.25.4-cp310-abi3-win32.whl", hash = "sha256:db9fd45183e1a67722cafa5c1da3e85c6492a5383f127c86c4c4aa4845867dc4"}, - {file = "protobuf-4.25.4-cp310-abi3-win_amd64.whl", hash = "sha256:ba3d8504116a921af46499471c63a85260c1a5fc23333154a427a310e015d26d"}, - {file = "protobuf-4.25.4-cp37-abi3-macosx_10_9_universal2.whl", hash = 
"sha256:eecd41bfc0e4b1bd3fa7909ed93dd14dd5567b98c941d6c1ad08fdcab3d6884b"}, - {file = "protobuf-4.25.4-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:4c8a70fdcb995dcf6c8966cfa3a29101916f7225e9afe3ced4395359955d3835"}, - {file = "protobuf-4.25.4-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:3319e073562e2515c6ddc643eb92ce20809f5d8f10fead3332f71c63be6a7040"}, - {file = "protobuf-4.25.4-cp38-cp38-win32.whl", hash = "sha256:7e372cbbda66a63ebca18f8ffaa6948455dfecc4e9c1029312f6c2edcd86c4e1"}, - {file = "protobuf-4.25.4-cp38-cp38-win_amd64.whl", hash = "sha256:051e97ce9fa6067a4546e75cb14f90cf0232dcb3e3d508c448b8d0e4265b61c1"}, - {file = "protobuf-4.25.4-cp39-cp39-win32.whl", hash = "sha256:90bf6fd378494eb698805bbbe7afe6c5d12c8e17fca817a646cd6a1818c696ca"}, - {file = "protobuf-4.25.4-cp39-cp39-win_amd64.whl", hash = "sha256:ac79a48d6b99dfed2729ccccee547b34a1d3d63289c71cef056653a846a2240f"}, - {file = "protobuf-4.25.4-py3-none-any.whl", hash = "sha256:bfbebc1c8e4793cfd58589acfb8a1026be0003e852b9da7db5a4285bde996978"}, - {file = "protobuf-4.25.4.tar.gz", hash = "sha256:0dc4a62cc4052a036ee2204d26fe4d835c62827c855c8a03f29fe6da146b380d"}, + {file = "protobuf-4.25.5-cp310-abi3-win32.whl", hash = "sha256:5e61fd921603f58d2f5acb2806a929b4675f8874ff5f330b7d6f7e2e784bbcd8"}, + {file = "protobuf-4.25.5-cp310-abi3-win_amd64.whl", hash = "sha256:4be0571adcbe712b282a330c6e89eae24281344429ae95c6d85e79e84780f5ea"}, + {file = "protobuf-4.25.5-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:b2fde3d805354df675ea4c7c6338c1aecd254dfc9925e88c6d31a2bcb97eb173"}, + {file = "protobuf-4.25.5-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:919ad92d9b0310070f8356c24b855c98df2b8bd207ebc1c0c6fcc9ab1e007f3d"}, + {file = "protobuf-4.25.5-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:fe14e16c22be926d3abfcb500e60cab068baf10b542b8c858fa27e098123e331"}, + {file = "protobuf-4.25.5-cp38-cp38-win32.whl", hash = 
"sha256:98d8d8aa50de6a2747efd9cceba361c9034050ecce3e09136f90de37ddba66e1"}, + {file = "protobuf-4.25.5-cp38-cp38-win_amd64.whl", hash = "sha256:b0234dd5a03049e4ddd94b93400b67803c823cfc405689688f59b34e0742381a"}, + {file = "protobuf-4.25.5-cp39-cp39-win32.whl", hash = "sha256:abe32aad8561aa7cc94fc7ba4fdef646e576983edb94a73381b03c53728a626f"}, + {file = "protobuf-4.25.5-cp39-cp39-win_amd64.whl", hash = "sha256:7a183f592dc80aa7c8da7ad9e55091c4ffc9497b3054452d629bb85fa27c2a45"}, + {file = "protobuf-4.25.5-py3-none-any.whl", hash = "sha256:0aebecb809cae990f8129ada5ca273d9d670b76d9bfc9b1809f0a9c02b7dbf41"}, + {file = "protobuf-4.25.5.tar.gz", hash = "sha256:7f8249476b4a9473645db7f8ab42b02fe1488cbe5fb72fddd445e0665afd8584"}, ] [[package]] @@ -6644,19 +6715,6 @@ files = [ {file = "pyarrow-17.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:392bc9feabc647338e6c89267635e111d71edad5fcffba204425a7c8d13610d7"}, {file = "pyarrow-17.0.0-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:af5ff82a04b2171415f1410cff7ebb79861afc5dae50be73ce06d6e870615204"}, {file = "pyarrow-17.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:edca18eaca89cd6382dfbcff3dd2d87633433043650c07375d095cd3517561d8"}, - {file = "pyarrow-17.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7c7916bff914ac5d4a8fe25b7a25e432ff921e72f6f2b7547d1e325c1ad9d155"}, - {file = "pyarrow-17.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f553ca691b9e94b202ff741bdd40f6ccb70cdd5fbf65c187af132f1317de6145"}, - {file = "pyarrow-17.0.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:0cdb0e627c86c373205a2f94a510ac4376fdc523f8bb36beab2e7f204416163c"}, - {file = "pyarrow-17.0.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:d7d192305d9d8bc9082d10f361fc70a73590a4c65cf31c3e6926cd72b76bc35c"}, - {file = "pyarrow-17.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:02dae06ce212d8b3244dd3e7d12d9c4d3046945a5933d28026598e9dbbda1fca"}, - {file = 
"pyarrow-17.0.0-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:13d7a460b412f31e4c0efa1148e1d29bdf18ad1411eb6757d38f8fbdcc8645fb"}, - {file = "pyarrow-17.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9b564a51fbccfab5a04a80453e5ac6c9954a9c5ef2890d1bcf63741909c3f8df"}, - {file = "pyarrow-17.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:32503827abbc5aadedfa235f5ece8c4f8f8b0a3cf01066bc8d29de7539532687"}, - {file = "pyarrow-17.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a155acc7f154b9ffcc85497509bcd0d43efb80d6f733b0dc3bb14e281f131c8b"}, - {file = "pyarrow-17.0.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:dec8d129254d0188a49f8a1fc99e0560dc1b85f60af729f47de4046015f9b0a5"}, - {file = "pyarrow-17.0.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:a48ddf5c3c6a6c505904545c25a4ae13646ae1f8ba703c4df4a1bfe4f4006bda"}, - {file = "pyarrow-17.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:42bf93249a083aca230ba7e2786c5f673507fa97bbd9725a1e2754715151a204"}, - {file = "pyarrow-17.0.0.tar.gz", hash = "sha256:4beca9521ed2c0921c1023e68d097d0299b62c362639ea315572a58f3f50fd28"}, ] [package.dependencies] @@ -6957,13 +7015,13 @@ tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"] [[package]] name = "pymilvus" -version = "2.4.6" +version = "2.4.7" description = "Python Sdk for Milvus" optional = false python-versions = ">=3.8" files = [ - {file = "pymilvus-2.4.6-py3-none-any.whl", hash = "sha256:b4c43472edc313b845d313be50610e19054e6954b2c5c3b515565c596c2d3d97"}, - {file = "pymilvus-2.4.6.tar.gz", hash = "sha256:6ac3eb91c92cc01bbe444fe83f895f02d7b2546d96ac67998630bf31ac074d66"}, + {file = "pymilvus-2.4.7-py3-none-any.whl", hash = "sha256:1e5d377bd40fa7eb459d3958dbd96201758f5cf997d41eb3d2d169d0b7fa462e"}, + {file = "pymilvus-2.4.7.tar.gz", hash = "sha256:9ef460b940782a42e1b7b8ae0da03d8cc02d9d80044d13f4b689a7c935ec7aa7"}, ] [package.dependencies] @@ -7082,28 +7140,28 @@ files = [ [[package]] 
name = "pyproject-hooks" -version = "1.1.0" +version = "1.2.0" description = "Wrappers to call pyproject.toml-based build backend hooks." optional = false python-versions = ">=3.7" files = [ - {file = "pyproject_hooks-1.1.0-py3-none-any.whl", hash = "sha256:7ceeefe9aec63a1064c18d939bdc3adf2d8aa1988a510afec15151578b232aa2"}, - {file = "pyproject_hooks-1.1.0.tar.gz", hash = "sha256:4b37730834edbd6bd37f26ece6b44802fb1c1ee2ece0e54ddff8bfc06db86965"}, + {file = "pyproject_hooks-1.2.0-py3-none-any.whl", hash = "sha256:9e5c6bfa8dcc30091c74b0cf803c81fdd29d94f01992a7707bc97babb1141913"}, + {file = "pyproject_hooks-1.2.0.tar.gz", hash = "sha256:1e859bd5c40fae9448642dd871adf459e5e2084186e8d2c2a79a824c970da1f8"}, ] [[package]] name = "pyreadline3" -version = "3.5.2" +version = "3.5.4" description = "A python implementation of GNU readline." optional = false python-versions = ">=3.8" files = [ - {file = "pyreadline3-3.5.2-py3-none-any.whl", hash = "sha256:a87d56791e2965b2b187e2ea33dcf664600842c997c0623c95cf8ef07db83de9"}, - {file = "pyreadline3-3.5.2.tar.gz", hash = "sha256:ba82292e52c5a3bb256b291af0c40b457c1e8699cac9a873abbcaac8aef3a1bb"}, + {file = "pyreadline3-3.5.4-py3-none-any.whl", hash = "sha256:eaf8e6cc3c49bcccf145fc6067ba8643d1df34d604a1ec0eccbf7a18e6d3fae6"}, + {file = "pyreadline3-3.5.4.tar.gz", hash = "sha256:8d57d53039a1c75adba8e50dd3d992b28143480816187ea5efbd5c78e6c885b7"}, ] [package.extras] -dev = ["build", "flake8", "pytest", "twine"] +dev = ["build", "flake8", "mypy", "pytest", "twine"] [[package]] name = "pytest" @@ -7149,21 +7207,21 @@ histogram = ["pygal", "pygaljs"] [[package]] name = "pytest-env" -version = "1.1.4" +version = "1.1.5" description = "pytest plugin that allows you to add environment variables." 
optional = false python-versions = ">=3.8" files = [ - {file = "pytest_env-1.1.4-py3-none-any.whl", hash = "sha256:a4212056d4d440febef311a98fdca56c31256d58fb453d103cba4e8a532b721d"}, - {file = "pytest_env-1.1.4.tar.gz", hash = "sha256:86653658da8f11c6844975db955746c458a9c09f1e64957603161e2ff93f5133"}, + {file = "pytest_env-1.1.5-py3-none-any.whl", hash = "sha256:ce90cf8772878515c24b31cd97c7fa1f4481cd68d588419fd45f10ecaee6bc30"}, + {file = "pytest_env-1.1.5.tar.gz", hash = "sha256:91209840aa0e43385073ac464a554ad2947cc2fd663a9debf88d03b01e0cc1cf"}, ] [package.dependencies] -pytest = ">=8.3.2" +pytest = ">=8.3.3" tomli = {version = ">=2.0.1", markers = "python_version < \"3.11\""} [package.extras] -test = ["covdefaults (>=2.3)", "coverage (>=7.6.1)", "pytest-mock (>=3.14)"] +testing = ["covdefaults (>=2.3)", "coverage (>=7.6.1)", "pytest-mock (>=3.14)"] [[package]] name = "pytest-mock" @@ -7550,123 +7608,103 @@ dev = ["pytest"] [[package]] name = "rapidfuzz" -version = "3.9.7" +version = "3.10.0" description = "rapid fuzzy string matching" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "rapidfuzz-3.9.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ccf68e30b80e903f2309f90a438dbd640dd98e878eeb5ad361a288051ee5b75c"}, - {file = "rapidfuzz-3.9.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:696a79018ef989bf1c9abd9005841cee18005ccad4748bad8a4c274c47b6241a"}, - {file = "rapidfuzz-3.9.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c4eebf6c93af0ae866c22b403a84747580bb5c10f0d7b51c82a87f25405d4dcb"}, - {file = "rapidfuzz-3.9.7-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0e9125377fa3d21a8abd4fbdbcf1c27be73e8b1850f0b61b5b711364bf3b59db"}, - {file = "rapidfuzz-3.9.7-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c12d180b17a22d107c8747de9c68d0b9c1d15dcda5445ff9bf9f4ccfb67c3e16"}, - {file = 
"rapidfuzz-3.9.7-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c1318d42610c26dcd68bd3279a1bf9e3605377260867c9a8ed22eafc1bd93a7c"}, - {file = "rapidfuzz-3.9.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd5fa6e3c6e0333051c1f3a49f0807b3366f4131c8d6ac8c3e05fd0d0ce3755c"}, - {file = "rapidfuzz-3.9.7-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:fcf79b686962d7bec458a0babc904cb4fa319808805e036b9d5a531ee6b9b835"}, - {file = "rapidfuzz-3.9.7-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:8b01153c7466d0bad48fba77a303d5a768e66f24b763853469f47220b3de4661"}, - {file = "rapidfuzz-3.9.7-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:94baaeea0b4f8632a6da69348b1e741043eba18d4e3088d674d3f76586b6223d"}, - {file = "rapidfuzz-3.9.7-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:6c5b32875646cb7f60c193ade99b2e4b124f19583492115293cd00f6fb198b17"}, - {file = "rapidfuzz-3.9.7-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:110b6294396bc0a447648627479c9320f095c2034c0537f687592e0f58622638"}, - {file = "rapidfuzz-3.9.7-cp310-cp310-win32.whl", hash = "sha256:3445a35c4c8d288f2b2011eb61bce1227c633ce85a3154e727170f37c0266bb2"}, - {file = "rapidfuzz-3.9.7-cp310-cp310-win_amd64.whl", hash = "sha256:0d1415a732ee75e74a90af12020b77a0b396b36c60afae1bde3208a78cd2c9fc"}, - {file = "rapidfuzz-3.9.7-cp310-cp310-win_arm64.whl", hash = "sha256:836f4d88b8bd0fff2ebe815dcaab8aa6c8d07d1d566a7e21dd137cf6fe11ed5b"}, - {file = "rapidfuzz-3.9.7-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d098ce6162eb5e48fceb0745455bc950af059df6113eec83e916c129fca11408"}, - {file = "rapidfuzz-3.9.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:048d55d36c02c6685a2b2741688503c3d15149694506655b6169dcfd3b6c2585"}, - {file = "rapidfuzz-3.9.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c33211cfff9aec425bb1bfedaf94afcf337063aa273754f22779d6dadebef4c2"}, - {file = 
"rapidfuzz-3.9.7-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e6d9db2fa4e9be171e9bb31cf2d2575574774966b43f5b951062bb2e67885852"}, - {file = "rapidfuzz-3.9.7-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d4e049d5ad61448c9a020d1061eba20944c4887d720c4069724beb6ea1692507"}, - {file = "rapidfuzz-3.9.7-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cfa74aac64c85898b93d9c80bb935a96bf64985e28d4ee0f1a3d1f3bf11a5106"}, - {file = "rapidfuzz-3.9.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:965693c2e9efd425b0f059f5be50ef830129f82892fa1858e220e424d9d0160f"}, - {file = "rapidfuzz-3.9.7-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8501000a5eb8037c4b56857724797fe5a8b01853c363de91c8d0d0ad56bef319"}, - {file = "rapidfuzz-3.9.7-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8d92c552c6b7577402afdd547dcf5d31ea6c8ae31ad03f78226e055cfa37f3c6"}, - {file = "rapidfuzz-3.9.7-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:1ee2086f490cb501d86b7e386c1eb4e3a0ccbb0c99067089efaa8c79012c8952"}, - {file = "rapidfuzz-3.9.7-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:1de91e7fd7f525e10ea79a6e62c559d1b0278ec097ad83d9da378b6fab65a265"}, - {file = "rapidfuzz-3.9.7-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a4da514d13f4433e16960a17f05b67e0af30ac771719c9a9fb877e5004f74477"}, - {file = "rapidfuzz-3.9.7-cp311-cp311-win32.whl", hash = "sha256:a40184c67db8252593ec518e17fb8a6e86d7259dc9f2d6c0bf4ff4db8cf1ad4b"}, - {file = "rapidfuzz-3.9.7-cp311-cp311-win_amd64.whl", hash = "sha256:c4f28f1930b09a2c300357d8465b388cecb7e8b2f454a5d5425561710b7fd07f"}, - {file = "rapidfuzz-3.9.7-cp311-cp311-win_arm64.whl", hash = "sha256:675b75412a943bb83f1f53e2e54fd18c80ef15ed642dc6eb0382d1949419d904"}, - {file = "rapidfuzz-3.9.7-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:1ef6a1a8f0b12f8722f595f15c62950c9a02d5abc64742561299ffd49f6c6944"}, - {file = 
"rapidfuzz-3.9.7-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:32532af1d70c6ec02ea5ac7ee2766dfff7c8ae8c761abfe8da9e527314e634e8"}, - {file = "rapidfuzz-3.9.7-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ae1a38bade755aa9dd95a81cda949e1bf9cd92b79341ccc5e2189c9e7bdfc5ec"}, - {file = "rapidfuzz-3.9.7-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d73ee2df41224c87336448d279b5b6a3a75f36e41dd3dcf538c0c9cce36360d8"}, - {file = "rapidfuzz-3.9.7-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:be3a1fc3e2ab3bdf93dc0c83c00acca8afd2a80602297d96cf4a0ba028333cdf"}, - {file = "rapidfuzz-3.9.7-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:603f48f621272a448ff58bb556feb4371252a02156593303391f5c3281dfaeac"}, - {file = "rapidfuzz-3.9.7-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:268f8e1ca50fc61c0736f3fe9d47891424adf62d96ed30196f30f4bd8216b41f"}, - {file = "rapidfuzz-3.9.7-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:5f8bf3f0d02935751d8660abda6044821a861f6229f7d359f98bcdcc7e66c39b"}, - {file = "rapidfuzz-3.9.7-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b997ff3b39d4cee9fb025d6c46b0a24bd67595ce5a5b652a97fb3a9d60beb651"}, - {file = "rapidfuzz-3.9.7-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:ca66676c8ef6557f9b81c5b2b519097817a7c776a6599b8d6fcc3e16edd216fe"}, - {file = "rapidfuzz-3.9.7-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:35d3044cb635ca6b1b2b7b67b3597bd19f34f1753b129eb6d2ae04cf98cd3945"}, - {file = "rapidfuzz-3.9.7-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:5a93c9e60904cb76e7aefef67afffb8b37c4894f81415ed513db090f29d01101"}, - {file = "rapidfuzz-3.9.7-cp312-cp312-win32.whl", hash = "sha256:579d107102c0725f7c79b4e79f16d3cf4d7c9208f29c66b064fa1fd4641d5155"}, - {file = "rapidfuzz-3.9.7-cp312-cp312-win_amd64.whl", hash = 
"sha256:953b3780765c8846866faf891ee4290f6a41a6dacf4fbcd3926f78c9de412ca6"}, - {file = "rapidfuzz-3.9.7-cp312-cp312-win_arm64.whl", hash = "sha256:7c20c1474b068c4bd45bf2fd0ad548df284f74e9a14a68b06746c56e3aa8eb70"}, - {file = "rapidfuzz-3.9.7-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:fde81b1da9a947f931711febe2e2bee694e891f6d3e6aa6bc02c1884702aea19"}, - {file = "rapidfuzz-3.9.7-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:47e92c155a14f44511ea8ebcc6bc1535a1fe8d0a7d67ad3cc47ba61606df7bcf"}, - {file = "rapidfuzz-3.9.7-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8772b745668260c5c4d069c678bbaa68812e6c69830f3771eaad521af7bc17f8"}, - {file = "rapidfuzz-3.9.7-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:578302828dd97ee2ba507d2f71d62164e28d2fc7bc73aad0d2d1d2afc021a5d5"}, - {file = "rapidfuzz-3.9.7-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fc3e6081069eea61593f1d6839029da53d00c8c9b205c5534853eaa3f031085c"}, - {file = "rapidfuzz-3.9.7-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0b1c2d504eddf97bc0f2eba422c8915576dbf025062ceaca2d68aecd66324ad9"}, - {file = "rapidfuzz-3.9.7-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fb76e5a21034f0307c51c5a2fc08856f698c53a4c593b17d291f7d6e9d09ca3"}, - {file = "rapidfuzz-3.9.7-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:d4ba2318ef670ce505f42881a5d2af70f948124646947341a3c6ccb33cd70369"}, - {file = "rapidfuzz-3.9.7-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:057bb03f39e285047d7e9412e01ecf31bb2d42b9466a5409d715d587460dd59b"}, - {file = "rapidfuzz-3.9.7-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:a8feac9006d5c9758438906f093befffc4290de75663dbb2098461df7c7d28dd"}, - {file = "rapidfuzz-3.9.7-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:95b8292383e717e10455f2c917df45032b611141e43d1adf70f71b1566136b11"}, - {file = 
"rapidfuzz-3.9.7-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e9fbf659537d246086d0297628b3795dc3e4a384101ecc01e5791c827b8d7345"}, - {file = "rapidfuzz-3.9.7-cp313-cp313-win32.whl", hash = "sha256:1dc516ac6d32027be2b0196bedf6d977ac26debd09ca182376322ad620460feb"}, - {file = "rapidfuzz-3.9.7-cp313-cp313-win_amd64.whl", hash = "sha256:b4f86e09d3064dca0b014cd48688964036a904a2d28048f00c8f4640796d06a8"}, - {file = "rapidfuzz-3.9.7-cp313-cp313-win_arm64.whl", hash = "sha256:19c64d8ddb2940b42a4567b23f1681af77f50a5ff6c9b8e85daba079c210716e"}, - {file = "rapidfuzz-3.9.7-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:fbda3dd68d8b28ccb20ffb6f756fefd9b5ba570a772bedd7643ed441f5793308"}, - {file = "rapidfuzz-3.9.7-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2379e0b2578ad3ac7004f223251550f08bca873ff76c169b09410ec562ad78d8"}, - {file = "rapidfuzz-3.9.7-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d1eff95362f993b0276fd3839aee48625b09aac8938bb0c23b40d219cba5dc5"}, - {file = "rapidfuzz-3.9.7-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cd9360e30041690912525a210e48a897b49b230768cc8af1c702e5395690464f"}, - {file = "rapidfuzz-3.9.7-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a93cd834b3c315ab437f0565ee3a2f42dd33768dc885ccbabf9710b131cf70d2"}, - {file = "rapidfuzz-3.9.7-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4ff196996240db7075f62c7bc4506f40a3c80cd4ae3ab0e79ac6892283a90859"}, - {file = "rapidfuzz-3.9.7-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:948dcee7aaa1cd14358b2a7ef08bf0be42bf89049c3a906669874a715fc2c937"}, - {file = "rapidfuzz-3.9.7-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:d95751f505a301af1aaf086c19f34536056d6c8efa91b2240de532a3db57b543"}, - {file = "rapidfuzz-3.9.7-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:90db86fa196eecf96cb6db09f1083912ea945c50c57188039392d810d0b784e1"}, - {file = 
"rapidfuzz-3.9.7-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:3171653212218a162540a3c8eb8ae7d3dcc8548540b69eaecaf3b47c14d89c90"}, - {file = "rapidfuzz-3.9.7-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:36dd6e820379c37a1ffefc8a52b648758e867cd9d78ee5b5dc0c9a6a10145378"}, - {file = "rapidfuzz-3.9.7-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:7b702de95666a1f7d5c6b47eacadfe2d2794af3742d63d2134767d13e5d1c713"}, - {file = "rapidfuzz-3.9.7-cp38-cp38-win32.whl", hash = "sha256:9030e7238c0df51aed5c9c5ed8eee2bdd47a2ae788e562c1454af2851c3d1906"}, - {file = "rapidfuzz-3.9.7-cp38-cp38-win_amd64.whl", hash = "sha256:f847fb0fbfb72482b1c05c59cbb275c58a55b73708a7f77a83f8035ee3c86497"}, - {file = "rapidfuzz-3.9.7-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:97f2ce529d2a70a60c290f6ab269a2bbf1d3b47b9724dccc84339b85f7afb044"}, - {file = "rapidfuzz-3.9.7-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e2957fdad10bb83b1982b02deb3604a3f6911a5e545f518b59c741086f92d152"}, - {file = "rapidfuzz-3.9.7-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d5262383634626eb45c536017204b8163a03bc43bda880cf1bdd7885db9a163"}, - {file = "rapidfuzz-3.9.7-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:364587827d7cbd41afa0782adc2d2d19e3f07d355b0750a02a8e33ad27a9c368"}, - {file = "rapidfuzz-3.9.7-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ecc24af7f905f3d6efb371a01680116ffea8d64e266618fb9ad1602a9b4f7934"}, - {file = "rapidfuzz-3.9.7-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9dc86aa6b29d174713c5f4caac35ffb7f232e3e649113e8d13812b35ab078228"}, - {file = "rapidfuzz-3.9.7-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e3dcfbe7266e74a707173a12a7b355a531f2dcfbdb32f09468e664330da14874"}, - {file = "rapidfuzz-3.9.7-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:b23806fbdd6b510ba9ac93bb72d503066263b0fba44b71b835be9f063a84025f"}, - {file = 
"rapidfuzz-3.9.7-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:5551d68264c1bb6943f542da83a4dc8940ede52c5847ef158698799cc28d14f5"}, - {file = "rapidfuzz-3.9.7-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:13d8675a1fa7e2b19650ca7ef9a6ec01391d4bb12ab9e0793e8eb024538b4a34"}, - {file = "rapidfuzz-3.9.7-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:9b6a5de507b9be6de688dae40143b656f7a93b10995fb8bd90deb555e7875c60"}, - {file = "rapidfuzz-3.9.7-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:111a20a3c090cf244d9406e60500b6c34b2375ba3a5009e2b38fd806fe38e337"}, - {file = "rapidfuzz-3.9.7-cp39-cp39-win32.whl", hash = "sha256:22589c0b8ccc6c391ce7f776c93a8c92c96ab8d34e1a19f1bd2b12a235332632"}, - {file = "rapidfuzz-3.9.7-cp39-cp39-win_amd64.whl", hash = "sha256:6f83221db5755b8f34222e40607d87f1176a8d5d4dbda4a55a0f0b67d588a69c"}, - {file = "rapidfuzz-3.9.7-cp39-cp39-win_arm64.whl", hash = "sha256:3665b92e788578c3bb334bd5b5fa7ee1a84bafd68be438e3110861d1578c63a0"}, - {file = "rapidfuzz-3.9.7-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:d7df9c2194c7ec930b33c991c55dbd0c10951bd25800c0b7a7b571994ebbced5"}, - {file = "rapidfuzz-3.9.7-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:68bd888eafd07b09585dcc8bc2716c5ecdb7eed62827470664d25588982b2873"}, - {file = "rapidfuzz-3.9.7-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d1230e0f9026851a6a432beaa0ce575dda7b39fe689b576f99a0704fbb81fc9c"}, - {file = "rapidfuzz-3.9.7-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a3b36e1c61b796ae1777f3e9e11fd39898b09d351c9384baf6e3b7e6191d8ced"}, - {file = "rapidfuzz-3.9.7-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9dba13d86806fcf3fe9c9919f58575e0090eadfb89c058bde02bcc7ab24e4548"}, - {file = "rapidfuzz-3.9.7-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:1f1a33e84056b7892c721d84475d3bde49a145126bc4c6efe0d6d0d59cb31c29"}, - {file = 
"rapidfuzz-3.9.7-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:3492c7a42b7fa9f0051d7fcce9893e95ed91c97c9ec7fb64346f3e070dd318ed"}, - {file = "rapidfuzz-3.9.7-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:ece45eb2af8b00f90d10f7419322e8804bd42fb1129026f9bfe712c37508b514"}, - {file = "rapidfuzz-3.9.7-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9dcd14cf4876f04b488f6e54a7abd3e9b31db5f5a6aba0ce90659917aaa8c088"}, - {file = "rapidfuzz-3.9.7-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:521c58c72ed8a612b25cda378ff10dee17e6deb4ee99a070b723519a345527b9"}, - {file = "rapidfuzz-3.9.7-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18669bb6cdf7d40738526d37e550df09ba065b5a7560f3d802287988b6cb63cf"}, - {file = "rapidfuzz-3.9.7-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:7abe2dbae81120a64bb4f8d3fcafe9122f328c9f86d7f327f174187a5af4ed86"}, - {file = "rapidfuzz-3.9.7-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:a3c0783910911f4f24655826d007c9f4360f08107410952c01ee3df98c713eb2"}, - {file = "rapidfuzz-3.9.7-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:03126f9a040ff21d2a110610bfd6b93b79377ce8b4121edcb791d61b7df6eec5"}, - {file = "rapidfuzz-3.9.7-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:591908240f4085e2ade5b685c6e8346e2ed44932cffeaac2fb32ddac95b55c7f"}, - {file = "rapidfuzz-3.9.7-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e9012d86c6397edbc9da4ac0132de7f8ee9d6ce857f4194d5684c4ddbcdd1c5c"}, - {file = "rapidfuzz-3.9.7-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:df596ddd3db38aa513d4c0995611267b3946e7cbe5a8761b50e9306dfec720ee"}, - {file = "rapidfuzz-3.9.7-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:3ed5adb752f4308fcc8f4fb6f8eb7aa4082f9d12676fda0a74fa5564242a8107"}, - {file = "rapidfuzz-3.9.7.tar.gz", hash = 
"sha256:f1c7296534c1afb6f495aa95871f14ccdc197c6db42965854e483100df313030"}, -] - -[package.extras] -full = ["numpy"] + {file = "rapidfuzz-3.10.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:884453860de029380dded8f3c1918af2d8eb5adf8010261645c7e5c88c2b5428"}, + {file = "rapidfuzz-3.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:718c9bd369288aca5fa929df6dbf66fdbe9768d90940a940c0b5cdc96ade4309"}, + {file = "rapidfuzz-3.10.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a68e3724b7dab761c01816aaa64b0903734d999d5589daf97c14ef5cc0629a8e"}, + {file = "rapidfuzz-3.10.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1af60988d47534246d9525f77288fdd9de652608a4842815d9018570b959acc6"}, + {file = "rapidfuzz-3.10.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3084161fc3e963056232ef8d937449a2943852e07101f5a136c8f3cfa4119217"}, + {file = "rapidfuzz-3.10.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6cd67d3d017296d98ff505529104299f78433e4b8af31b55003d901a62bbebe9"}, + {file = "rapidfuzz-3.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b11a127ac590fc991e8a02c2d7e1ac86e8141c92f78546f18b5c904064a0552c"}, + {file = "rapidfuzz-3.10.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:aadce42147fc09dcef1afa892485311e824c050352e1aa6e47f56b9b27af4cf0"}, + {file = "rapidfuzz-3.10.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:b54853c2371bf0e38d67da379519deb6fbe70055efb32f6607081641af3dc752"}, + {file = "rapidfuzz-3.10.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:ce19887268e90ee81a3957eef5e46a70ecc000713796639f83828b950343f49e"}, + {file = "rapidfuzz-3.10.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:f39a2a5ded23b9b9194ec45740dce57177b80f86c6d8eba953d3ff1a25c97766"}, + {file = "rapidfuzz-3.10.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = 
"sha256:0ec338d5f4ad8d9339a88a08db5c23e7f7a52c2b2a10510c48a0cef1fb3f0ddc"}, + {file = "rapidfuzz-3.10.0-cp310-cp310-win32.whl", hash = "sha256:56fd15ea8f4c948864fa5ebd9261c67cf7b89a1c517a0caef4df75446a7af18c"}, + {file = "rapidfuzz-3.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:43dfc5e733808962a822ff6d9c29f3039a3cfb3620706f5953e17cfe4496724c"}, + {file = "rapidfuzz-3.10.0-cp310-cp310-win_arm64.whl", hash = "sha256:ae7966f205b5a7fde93b44ca8fed37c1c8539328d7f179b1197de34eceaceb5f"}, + {file = "rapidfuzz-3.10.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:bb0013795b40db5cf361e6f21ee7cda09627cf294977149b50e217d7fe9a2f03"}, + {file = "rapidfuzz-3.10.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:69ef5b363afff7150a1fbe788007e307b9802a2eb6ad92ed51ab94e6ad2674c6"}, + {file = "rapidfuzz-3.10.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c582c46b1bb0b19f1a5f4c1312f1b640c21d78c371a6615c34025b16ee56369b"}, + {file = "rapidfuzz-3.10.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:288f6f6e7410cacb115fb851f3f18bf0e4231eb3f6cb5bd1cec0e7b25c4d039d"}, + {file = "rapidfuzz-3.10.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c9e29a13d2fd9be3e7d8c26c7ef4ba60b5bc7efbc9dbdf24454c7e9ebba31768"}, + {file = "rapidfuzz-3.10.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ea2da0459b951ee461bd4e02b8904890bd1c4263999d291c5cd01e6620177ad4"}, + {file = "rapidfuzz-3.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:457827ba82261aa2ae6ac06a46d0043ab12ba7216b82d87ae1434ec0f29736d6"}, + {file = "rapidfuzz-3.10.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:5d350864269d56f51ab81ab750c9259ae5cad3152c0680baef143dcec92206a1"}, + {file = "rapidfuzz-3.10.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:a9b8f51e08c3f983d857c3889930af9ddecc768453822076683664772d87e374"}, + {file = 
"rapidfuzz-3.10.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:7f3a6aa6e70fc27e4ff5c479f13cc9fc26a56347610f5f8b50396a0d344c5f55"}, + {file = "rapidfuzz-3.10.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:803f255f10d63420979b1909ef976e7d30dec42025c9b067fc1d2040cc365a7e"}, + {file = "rapidfuzz-3.10.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:2026651761bf83a0f31495cc0f70840d5c0d54388f41316e3f9cb51bd85e49a5"}, + {file = "rapidfuzz-3.10.0-cp311-cp311-win32.whl", hash = "sha256:4df75b3ebbb8cfdb9bf8b213b168620b88fd92d0c16a8bc9f9234630b282db59"}, + {file = "rapidfuzz-3.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:f9f0bbfb6787b97c51516f3ccf97737d504db5d239ad44527673b81f598b84ab"}, + {file = "rapidfuzz-3.10.0-cp311-cp311-win_arm64.whl", hash = "sha256:10fdad800441b9c97d471a937ba7d42625f1b530db05e572f1cb7d401d95c893"}, + {file = "rapidfuzz-3.10.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7dc87073ba3a40dd65591a2100aa71602107443bf10770579ff9c8a3242edb94"}, + {file = "rapidfuzz-3.10.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a425a0a868cf8e9c6e93e1cda4b758cdfd314bb9a4fc916c5742c934e3613480"}, + {file = "rapidfuzz-3.10.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a86d5d1d75e61df060c1e56596b6b0a4422a929dff19cc3dbfd5eee762c86b61"}, + {file = "rapidfuzz-3.10.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:34f213d59219a9c3ca14e94a825f585811a68ac56b4118b4dc388b5b14afc108"}, + {file = "rapidfuzz-3.10.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:96ad46f5f56f70fab2be9e5f3165a21be58d633b90bf6e67fc52a856695e4bcf"}, + {file = "rapidfuzz-3.10.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9178277f72d144a6c7704d7ae7fa15b7b86f0f0796f0e1049c7b4ef748a662ef"}, + {file = "rapidfuzz-3.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:76a35e9e19a7c883c422ffa378e9a04bc98cb3b29648c5831596401298ee51e6"}, + {file = "rapidfuzz-3.10.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8a6405d34c394c65e4f73a1d300c001f304f08e529d2ed6413b46ee3037956eb"}, + {file = "rapidfuzz-3.10.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:bd393683129f446a75d8634306aed7e377627098a1286ff3af2a4f1736742820"}, + {file = "rapidfuzz-3.10.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:b0445fa9880ead81f5a7d0efc0b9c977a947d8052c43519aceeaf56eabaf6843"}, + {file = "rapidfuzz-3.10.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:c50bc308fa29767ed8f53a8d33b7633a9e14718ced038ed89d41b886e301da32"}, + {file = "rapidfuzz-3.10.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e89605afebbd2d4b045bccfdc12a14b16fe8ccbae05f64b4b4c64a97dad1c891"}, + {file = "rapidfuzz-3.10.0-cp312-cp312-win32.whl", hash = "sha256:2db9187f3acf3cd33424ecdbaad75414c298ecd1513470df7bda885dcb68cc15"}, + {file = "rapidfuzz-3.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:50e3d0c72ea15391ba9531ead7f2068a67c5b18a6a365fef3127583aaadd1725"}, + {file = "rapidfuzz-3.10.0-cp312-cp312-win_arm64.whl", hash = "sha256:9eac95b4278bd53115903d89118a2c908398ee8bdfd977ae844f1bd2b02b917c"}, + {file = "rapidfuzz-3.10.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:fe5231e8afd069c742ac5b4f96344a0fe4aff52df8e53ef87faebf77f827822c"}, + {file = "rapidfuzz-3.10.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:886882367dbc985f5736356105798f2ae6e794e671fc605476cbe2e73838a9bb"}, + {file = "rapidfuzz-3.10.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b33e13e537e3afd1627d421a142a12bbbe601543558a391a6fae593356842f6e"}, + {file = "rapidfuzz-3.10.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:094c26116d55bf9c53abd840d08422f20da78ec4c4723e5024322321caedca48"}, + {file = "rapidfuzz-3.10.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:545fc04f2d592e4350f59deb0818886c1b444ffba3bec535b4fbb97191aaf769"}, + {file = "rapidfuzz-3.10.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:916a6abf3632e592b937c3d04c00a6efadd8fd30539cdcd4e6e4d92be7ca5d90"}, + {file = "rapidfuzz-3.10.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fb6ec40cef63b1922083d33bfef2f91fc0b0bc07b5b09bfee0b0f1717d558292"}, + {file = "rapidfuzz-3.10.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c77a7330dd15c7eb5fd3631dc646fc96327f98db8181138766bd14d3e905f0ba"}, + {file = "rapidfuzz-3.10.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:949b5e9eeaa4ecb4c7e9c2a4689dddce60929dd1ff9c76a889cdbabe8bbf2171"}, + {file = "rapidfuzz-3.10.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:b5363932a5aab67010ae1a6205c567d1ef256fb333bc23c27582481606be480c"}, + {file = "rapidfuzz-3.10.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:5dd6eec15b13329abe66cc241b484002ecb0e17d694491c944a22410a6a9e5e2"}, + {file = "rapidfuzz-3.10.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:79e7f98525b60b3c14524e0a4e1fedf7654657b6e02eb25f1be897ab097706f3"}, + {file = "rapidfuzz-3.10.0-cp313-cp313-win32.whl", hash = "sha256:d29d1b9857c65f8cb3a29270732e1591b9bacf89de9d13fa764f79f07d8f1fd2"}, + {file = "rapidfuzz-3.10.0-cp313-cp313-win_amd64.whl", hash = "sha256:fa9720e56663cc3649d62b4b5f3145e94b8f5611e8a8e1b46507777249d46aad"}, + {file = "rapidfuzz-3.10.0-cp313-cp313-win_arm64.whl", hash = "sha256:eda4c661e68dddd56c8fbfe1ca35e40dd2afd973f7ebb1605f4d151edc63dff8"}, + {file = "rapidfuzz-3.10.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:cffbc50e0767396ed483900900dd58ce4351bc0d40e64bced8694bd41864cc71"}, + {file = "rapidfuzz-3.10.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c038b9939da3035afb6cb2f465f18163e8f070aba0482923ecff9443def67178"}, + {file = "rapidfuzz-3.10.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:ca366c2e2a54e2f663f4529b189fdeb6e14d419b1c78b754ec1744f3c01070d4"}, + {file = "rapidfuzz-3.10.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7c4c82b1689b23b1b5e6a603164ed2be41b6f6de292a698b98ba2381e889eb9d"}, + {file = "rapidfuzz-3.10.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:98f6ebe28831a482981ecfeedc8237047878424ad0c1add2c7f366ba44a20452"}, + {file = "rapidfuzz-3.10.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4bd1a7676ee2a4c8e2f7f2550bece994f9f89e58afb96088964145a83af7408b"}, + {file = "rapidfuzz-3.10.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec9139baa3f85b65adc700eafa03ed04995ca8533dd56c924f0e458ffec044ab"}, + {file = "rapidfuzz-3.10.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:26de93e6495078b6af4c4d93a42ca067b16cc0e95699526c82ab7d1025b4d3bf"}, + {file = "rapidfuzz-3.10.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:f3a0bda83c18195c361b5500377d0767749f128564ca95b42c8849fd475bb327"}, + {file = "rapidfuzz-3.10.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:63e4c175cbce8c3adc22dca5e6154588ae673f6c55374d156f3dac732c88d7de"}, + {file = "rapidfuzz-3.10.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:4dd3d8443970eaa02ab5ae45ce584b061f2799cd9f7e875190e2617440c1f9d4"}, + {file = "rapidfuzz-3.10.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:e5ddb2388610799fc46abe389600625058f2a73867e63e20107c5ad5ffa57c47"}, + {file = "rapidfuzz-3.10.0-cp39-cp39-win32.whl", hash = "sha256:2e9be5d05cd960914024412b5406fb75a82f8562f45912ff86255acbfdbfb78e"}, + {file = "rapidfuzz-3.10.0-cp39-cp39-win_amd64.whl", hash = "sha256:47aca565a39c9a6067927871973ca827023e8b65ba6c5747f4c228c8d7ddc04f"}, + {file = "rapidfuzz-3.10.0-cp39-cp39-win_arm64.whl", hash = "sha256:b0732343cdc4273b5921268026dd7266f75466eb21873cb7635a200d9d9c3fac"}, + {file = "rapidfuzz-3.10.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = 
"sha256:f744b5eb1469bf92dd143d36570d2bdbbdc88fe5cb0b5405e53dd34f479cbd8a"}, + {file = "rapidfuzz-3.10.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b67cc21a14327a0eb0f47bc3d7e59ec08031c7c55220ece672f9476e7a8068d3"}, + {file = "rapidfuzz-3.10.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2fe5783676f0afba4a522c80b15e99dbf4e393c149ab610308a8ef1f04c6bcc8"}, + {file = "rapidfuzz-3.10.0-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d4688862f957c8629d557d084f20b2d803f8738b6c4066802a0b1cc472e088d9"}, + {file = "rapidfuzz-3.10.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:20bd153aacc244e4c907d772c703fea82754c4db14f8aa64d75ff81b7b8ab92d"}, + {file = "rapidfuzz-3.10.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:50484d563f8bfa723c74c944b0bb15b9e054db9c889348c8c307abcbee75ab92"}, + {file = "rapidfuzz-3.10.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:5897242d455461f2c5b82d7397b29341fd11e85bf3608a522177071044784ee8"}, + {file = "rapidfuzz-3.10.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:116c71a81e046ba56551d8ab68067ca7034d94b617545316d460a452c5c3c289"}, + {file = "rapidfuzz-3.10.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f0a547e4350d1fa32624d3eab51eff8cf329f4cae110b4ea0402486b1da8be40"}, + {file = "rapidfuzz-3.10.0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:399b9b79ccfcf50ca3bad7692bc098bb8eade88d7d5e15773b7f866c91156d0c"}, + {file = "rapidfuzz-3.10.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7947a425d1be3e744707ee58c6cb318b93a56e08f080722dcc0347e0b7a1bb9a"}, + {file = "rapidfuzz-3.10.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:94c48b4a2a4b1d22246f48e2b11cae01ec7d23f0c9123f8bb822839ad79d0a88"}, + {file = "rapidfuzz-3.10.0.tar.gz", hash = 
"sha256:6b62af27e65bb39276a66533655a2fa3c60a487b03935721c45b7809527979be"}, +] + +[package.extras] +all = ["numpy"] [[package]] name = "readabilipy" @@ -8293,13 +8331,13 @@ test = ["accelerate (>=0.24.1,<=0.27.0)", "apache-airflow (==2.9.3)", "apache-ai [[package]] name = "sagemaker-core" -version = "1.0.4" +version = "1.0.9" description = "An python package for sagemaker core functionalities" optional = false python-versions = ">=3.8" files = [ - {file = "sagemaker_core-1.0.4-py3-none-any.whl", hash = "sha256:bf71d988dbda03a3cd1557524f2fab4f19d89e54bd38fc7f05bbbcf580715f95"}, - {file = "sagemaker_core-1.0.4.tar.gz", hash = "sha256:203f4eb9d0d2a0e6ba80d79ba8c28b8ea27c94d04f6d9ff01c2fd55b95615c78"}, + {file = "sagemaker_core-1.0.9-py3-none-any.whl", hash = "sha256:7a22c46cf93594f8d44e3523d4ba98407911f3530af68a8ffdde5082d3b26fa3"}, + {file = "sagemaker_core-1.0.9.tar.gz", hash = "sha256:664115faf797412553fb81b97a4777e78e51dfd4454c32edb2c8371bf203c535"}, ] [package.dependencies] @@ -8473,18 +8511,18 @@ tornado = ["tornado (>=5)"] [[package]] name = "setuptools" -version = "74.1.2" +version = "75.1.0" description = "Easily download, build, install, upgrade, and uninstall Python packages" optional = false python-versions = ">=3.8" files = [ - {file = "setuptools-74.1.2-py3-none-any.whl", hash = "sha256:5f4c08aa4d3ebcb57a50c33b1b07e94315d7fc7230f7115e47fc99776c8ce308"}, - {file = "setuptools-74.1.2.tar.gz", hash = "sha256:95b40ed940a1c67eb70fc099094bd6e99c6ee7c23aa2306f4d2697ba7916f9c6"}, + {file = "setuptools-75.1.0-py3-none-any.whl", hash = "sha256:35ab7fd3bcd95e6b7fd704e4a1539513edad446c097797f2985e0e4b960772f2"}, + {file = "setuptools-75.1.0.tar.gz", hash = "sha256:d59a21b17a275fb872a9c3dae73963160ae079f1049ed956880cd7c09b120538"}, ] [package.extras] check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)", "ruff (>=0.5.2)"] -core = ["importlib-metadata (>=6)", "importlib-resources (>=5.10.2)", "jaraco.text (>=3.7)", "more-itertools (>=8.8)", "packaging (>=24)", 
"platformdirs (>=2.6.2)", "tomli (>=2.0.1)", "wheel (>=0.43.0)"] +core = ["importlib-metadata (>=6)", "importlib-resources (>=5.10.2)", "jaraco.collections", "jaraco.functools", "jaraco.text (>=3.7)", "more-itertools", "more-itertools (>=8.8)", "packaging", "packaging (>=24)", "platformdirs (>=2.6.2)", "tomli (>=2.0.1)", "wheel (>=0.43.0)"] cover = ["pytest-cov"] doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier", "towncrier (<24.7)"] enabler = ["pytest-enabler (>=2.2)"] @@ -8644,60 +8682,60 @@ files = [ [[package]] name = "sqlalchemy" -version = "2.0.34" +version = "2.0.35" description = "Database Abstraction Library" optional = false python-versions = ">=3.7" files = [ - {file = "SQLAlchemy-2.0.34-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:95d0b2cf8791ab5fb9e3aa3d9a79a0d5d51f55b6357eecf532a120ba3b5524db"}, - {file = "SQLAlchemy-2.0.34-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:243f92596f4fd4c8bd30ab8e8dd5965afe226363d75cab2468f2c707f64cd83b"}, - {file = "SQLAlchemy-2.0.34-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9ea54f7300553af0a2a7235e9b85f4204e1fc21848f917a3213b0e0818de9a24"}, - {file = "SQLAlchemy-2.0.34-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:173f5f122d2e1bff8fbd9f7811b7942bead1f5e9f371cdf9e670b327e6703ebd"}, - {file = "SQLAlchemy-2.0.34-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:196958cde924a00488e3e83ff917be3b73cd4ed8352bbc0f2989333176d1c54d"}, - {file = "SQLAlchemy-2.0.34-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:bd90c221ed4e60ac9d476db967f436cfcecbd4ef744537c0f2d5291439848768"}, - {file = "SQLAlchemy-2.0.34-cp310-cp310-win32.whl", hash = 
"sha256:3166dfff2d16fe9be3241ee60ece6fcb01cf8e74dd7c5e0b64f8e19fab44911b"}, - {file = "SQLAlchemy-2.0.34-cp310-cp310-win_amd64.whl", hash = "sha256:6831a78bbd3c40f909b3e5233f87341f12d0b34a58f14115c9e94b4cdaf726d3"}, - {file = "SQLAlchemy-2.0.34-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c7db3db284a0edaebe87f8f6642c2b2c27ed85c3e70064b84d1c9e4ec06d5d84"}, - {file = "SQLAlchemy-2.0.34-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:430093fce0efc7941d911d34f75a70084f12f6ca5c15d19595c18753edb7c33b"}, - {file = "SQLAlchemy-2.0.34-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79cb400c360c7c210097b147c16a9e4c14688a6402445ac848f296ade6283bbc"}, - {file = "SQLAlchemy-2.0.34-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fb1b30f31a36c7f3fee848391ff77eebdd3af5750bf95fbf9b8b5323edfdb4ec"}, - {file = "SQLAlchemy-2.0.34-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8fddde2368e777ea2a4891a3fb4341e910a056be0bb15303bf1b92f073b80c02"}, - {file = "SQLAlchemy-2.0.34-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:80bd73ea335203b125cf1d8e50fef06be709619eb6ab9e7b891ea34b5baa2287"}, - {file = "SQLAlchemy-2.0.34-cp311-cp311-win32.whl", hash = "sha256:6daeb8382d0df526372abd9cb795c992e18eed25ef2c43afe518c73f8cccb721"}, - {file = "SQLAlchemy-2.0.34-cp311-cp311-win_amd64.whl", hash = "sha256:5bc08e75ed11693ecb648b7a0a4ed80da6d10845e44be0c98c03f2f880b68ff4"}, - {file = "SQLAlchemy-2.0.34-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:53e68b091492c8ed2bd0141e00ad3089bcc6bf0e6ec4142ad6505b4afe64163e"}, - {file = "SQLAlchemy-2.0.34-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:bcd18441a49499bf5528deaa9dee1f5c01ca491fc2791b13604e8f972877f812"}, - {file = "SQLAlchemy-2.0.34-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:165bbe0b376541092bf49542bd9827b048357f4623486096fc9aaa6d4e7c59a2"}, - {file = 
"SQLAlchemy-2.0.34-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3330415cd387d2b88600e8e26b510d0370db9b7eaf984354a43e19c40df2e2b"}, - {file = "SQLAlchemy-2.0.34-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:97b850f73f8abbffb66ccbab6e55a195a0eb655e5dc74624d15cff4bfb35bd74"}, - {file = "SQLAlchemy-2.0.34-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7cee4c6917857fd6121ed84f56d1dc78eb1d0e87f845ab5a568aba73e78adf83"}, - {file = "SQLAlchemy-2.0.34-cp312-cp312-win32.whl", hash = "sha256:fbb034f565ecbe6c530dff948239377ba859420d146d5f62f0271407ffb8c580"}, - {file = "SQLAlchemy-2.0.34-cp312-cp312-win_amd64.whl", hash = "sha256:707c8f44931a4facd4149b52b75b80544a8d824162602b8cd2fe788207307f9a"}, - {file = "SQLAlchemy-2.0.34-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:24af3dc43568f3780b7e1e57c49b41d98b2d940c1fd2e62d65d3928b6f95f021"}, - {file = "SQLAlchemy-2.0.34-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e60ed6ef0a35c6b76b7640fe452d0e47acc832ccbb8475de549a5cc5f90c2c06"}, - {file = "SQLAlchemy-2.0.34-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:413c85cd0177c23e32dee6898c67a5f49296640041d98fddb2c40888fe4daa2e"}, - {file = "SQLAlchemy-2.0.34-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:25691f4adfb9d5e796fd48bf1432272f95f4bbe5f89c475a788f31232ea6afba"}, - {file = "SQLAlchemy-2.0.34-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:526ce723265643dbc4c7efb54f56648cc30e7abe20f387d763364b3ce7506c82"}, - {file = "SQLAlchemy-2.0.34-cp37-cp37m-win32.whl", hash = "sha256:13be2cc683b76977a700948411a94c67ad8faf542fa7da2a4b167f2244781cf3"}, - {file = "SQLAlchemy-2.0.34-cp37-cp37m-win_amd64.whl", hash = "sha256:e54ef33ea80d464c3dcfe881eb00ad5921b60f8115ea1a30d781653edc2fd6a2"}, - {file = "SQLAlchemy-2.0.34-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:43f28005141165edd11fbbf1541c920bd29e167b8bbc1fb410d4fe2269c1667a"}, - {file = 
"SQLAlchemy-2.0.34-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b68094b165a9e930aedef90725a8fcfafe9ef95370cbb54abc0464062dbf808f"}, - {file = "SQLAlchemy-2.0.34-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a1e03db964e9d32f112bae36f0cc1dcd1988d096cfd75d6a588a3c3def9ab2b"}, - {file = "SQLAlchemy-2.0.34-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:203d46bddeaa7982f9c3cc693e5bc93db476ab5de9d4b4640d5c99ff219bee8c"}, - {file = "SQLAlchemy-2.0.34-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:ae92bebca3b1e6bd203494e5ef919a60fb6dfe4d9a47ed2453211d3bd451b9f5"}, - {file = "SQLAlchemy-2.0.34-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:9661268415f450c95f72f0ac1217cc6f10256f860eed85c2ae32e75b60278ad8"}, - {file = "SQLAlchemy-2.0.34-cp38-cp38-win32.whl", hash = "sha256:895184dfef8708e15f7516bd930bda7e50ead069280d2ce09ba11781b630a434"}, - {file = "SQLAlchemy-2.0.34-cp38-cp38-win_amd64.whl", hash = "sha256:6e7cde3a2221aa89247944cafb1b26616380e30c63e37ed19ff0bba5e968688d"}, - {file = "SQLAlchemy-2.0.34-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:dbcdf987f3aceef9763b6d7b1fd3e4ee210ddd26cac421d78b3c206d07b2700b"}, - {file = "SQLAlchemy-2.0.34-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ce119fc4ce0d64124d37f66a6f2a584fddc3c5001755f8a49f1ca0a177ef9796"}, - {file = "SQLAlchemy-2.0.34-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a17d8fac6df9835d8e2b4c5523666e7051d0897a93756518a1fe101c7f47f2f0"}, - {file = "SQLAlchemy-2.0.34-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ebc11c54c6ecdd07bb4efbfa1554538982f5432dfb8456958b6d46b9f834bb7"}, - {file = "SQLAlchemy-2.0.34-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2e6965346fc1491a566e019a4a1d3dfc081ce7ac1a736536367ca305da6472a8"}, - {file = "SQLAlchemy-2.0.34-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:220574e78ad986aea8e81ac68821e47ea9202b7e44f251b7ed8c66d9ae3f4278"}, - {file = 
"SQLAlchemy-2.0.34-cp39-cp39-win32.whl", hash = "sha256:b75b00083e7fe6621ce13cfce9d4469c4774e55e8e9d38c305b37f13cf1e874c"}, - {file = "SQLAlchemy-2.0.34-cp39-cp39-win_amd64.whl", hash = "sha256:c29d03e0adf3cc1a8c3ec62d176824972ae29b67a66cbb18daff3062acc6faa8"}, - {file = "SQLAlchemy-2.0.34-py3-none-any.whl", hash = "sha256:7286c353ee6475613d8beff83167374006c6b3e3f0e6491bfe8ca610eb1dec0f"}, - {file = "sqlalchemy-2.0.34.tar.gz", hash = "sha256:10d8f36990dd929690666679b0f42235c159a7051534adb135728ee52828dd22"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:67219632be22f14750f0d1c70e62f204ba69d28f62fd6432ba05ab295853de9b"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4668bd8faf7e5b71c0319407b608f278f279668f358857dbfd10ef1954ac9f90"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb8bea573863762bbf45d1e13f87c2d2fd32cee2dbd50d050f83f87429c9e1ea"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f552023710d4b93d8fb29a91fadf97de89c5926c6bd758897875435f2a939f33"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:016b2e665f778f13d3c438651dd4de244214b527a275e0acf1d44c05bc6026a9"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7befc148de64b6060937231cbff8d01ccf0bfd75aa26383ffdf8d82b12ec04ff"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-win32.whl", hash = "sha256:22b83aed390e3099584b839b93f80a0f4a95ee7f48270c97c90acd40ee646f0b"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-win_amd64.whl", hash = "sha256:a29762cd3d116585278ffb2e5b8cc311fb095ea278b96feef28d0b423154858e"}, + {file = "SQLAlchemy-2.0.35-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e21f66748ab725ade40fa7af8ec8b5019c68ab00b929f6643e1b1af461eddb60"}, + {file = "SQLAlchemy-2.0.35-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:8a6219108a15fc6d24de499d0d515c7235c617b2540d97116b663dade1a54d62"}, + {file = "SQLAlchemy-2.0.35-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:042622a5306c23b972192283f4e22372da3b8ddf5f7aac1cc5d9c9b222ab3ff6"}, + {file = "SQLAlchemy-2.0.35-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:627dee0c280eea91aed87b20a1f849e9ae2fe719d52cbf847c0e0ea34464b3f7"}, + {file = "SQLAlchemy-2.0.35-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:4fdcd72a789c1c31ed242fd8c1bcd9ea186a98ee8e5408a50e610edfef980d71"}, + {file = "SQLAlchemy-2.0.35-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:89b64cd8898a3a6f642db4eb7b26d1b28a497d4022eccd7717ca066823e9fb01"}, + {file = "SQLAlchemy-2.0.35-cp311-cp311-win32.whl", hash = "sha256:6a93c5a0dfe8d34951e8a6f499a9479ffb9258123551fa007fc708ae2ac2bc5e"}, + {file = "SQLAlchemy-2.0.35-cp311-cp311-win_amd64.whl", hash = "sha256:c68fe3fcde03920c46697585620135b4ecfdfc1ed23e75cc2c2ae9f8502c10b8"}, + {file = "SQLAlchemy-2.0.35-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:eb60b026d8ad0c97917cb81d3662d0b39b8ff1335e3fabb24984c6acd0c900a2"}, + {file = "SQLAlchemy-2.0.35-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6921ee01caf375363be5e9ae70d08ce7ca9d7e0e8983183080211a062d299468"}, + {file = "SQLAlchemy-2.0.35-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8cdf1a0dbe5ced887a9b127da4ffd7354e9c1a3b9bb330dce84df6b70ccb3a8d"}, + {file = "SQLAlchemy-2.0.35-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93a71c8601e823236ac0e5d087e4f397874a421017b3318fd92c0b14acf2b6db"}, + {file = "SQLAlchemy-2.0.35-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e04b622bb8a88f10e439084486f2f6349bf4d50605ac3e445869c7ea5cf0fa8c"}, + {file = "SQLAlchemy-2.0.35-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1b56961e2d31389aaadf4906d453859f35302b4eb818d34a26fab72596076bb8"}, + {file = 
"SQLAlchemy-2.0.35-cp312-cp312-win32.whl", hash = "sha256:0f9f3f9a3763b9c4deb8c5d09c4cc52ffe49f9876af41cc1b2ad0138878453cf"}, + {file = "SQLAlchemy-2.0.35-cp312-cp312-win_amd64.whl", hash = "sha256:25b0f63e7fcc2a6290cb5f7f5b4fc4047843504983a28856ce9b35d8f7de03cc"}, + {file = "SQLAlchemy-2.0.35-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:f021d334f2ca692523aaf7bbf7592ceff70c8594fad853416a81d66b35e3abf9"}, + {file = "SQLAlchemy-2.0.35-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05c3f58cf91683102f2f0265c0db3bd3892e9eedabe059720492dbaa4f922da1"}, + {file = "SQLAlchemy-2.0.35-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:032d979ce77a6c2432653322ba4cbeabf5a6837f704d16fa38b5a05d8e21fa00"}, + {file = "SQLAlchemy-2.0.35-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:2e795c2f7d7249b75bb5f479b432a51b59041580d20599d4e112b5f2046437a3"}, + {file = "SQLAlchemy-2.0.35-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:cc32b2990fc34380ec2f6195f33a76b6cdaa9eecf09f0c9404b74fc120aef36f"}, + {file = "SQLAlchemy-2.0.35-cp37-cp37m-win32.whl", hash = "sha256:9509c4123491d0e63fb5e16199e09f8e262066e58903e84615c301dde8fa2e87"}, + {file = "SQLAlchemy-2.0.35-cp37-cp37m-win_amd64.whl", hash = "sha256:3655af10ebcc0f1e4e06c5900bb33e080d6a1fa4228f502121f28a3b1753cde5"}, + {file = "SQLAlchemy-2.0.35-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4c31943b61ed8fdd63dfd12ccc919f2bf95eefca133767db6fbbd15da62078ec"}, + {file = "SQLAlchemy-2.0.35-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a62dd5d7cc8626a3634208df458c5fe4f21200d96a74d122c83bc2015b333bc1"}, + {file = "SQLAlchemy-2.0.35-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0630774b0977804fba4b6bbea6852ab56c14965a2b0c7fc7282c5f7d90a1ae72"}, + {file = "SQLAlchemy-2.0.35-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d625eddf7efeba2abfd9c014a22c0f6b3796e0ffb48f5d5ab106568ef01ff5a"}, + {file = 
"SQLAlchemy-2.0.35-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:ada603db10bb865bbe591939de854faf2c60f43c9b763e90f653224138f910d9"}, + {file = "SQLAlchemy-2.0.35-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c41411e192f8d3ea39ea70e0fae48762cd11a2244e03751a98bd3c0ca9a4e936"}, + {file = "SQLAlchemy-2.0.35-cp38-cp38-win32.whl", hash = "sha256:d299797d75cd747e7797b1b41817111406b8b10a4f88b6e8fe5b5e59598b43b0"}, + {file = "SQLAlchemy-2.0.35-cp38-cp38-win_amd64.whl", hash = "sha256:0375a141e1c0878103eb3d719eb6d5aa444b490c96f3fedab8471c7f6ffe70ee"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ccae5de2a0140d8be6838c331604f91d6fafd0735dbdcee1ac78fc8fbaba76b4"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2a275a806f73e849e1c309ac11108ea1a14cd7058577aba962cd7190e27c9e3c"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:732e026240cdd1c1b2e3ac515c7a23820430ed94292ce33806a95869c46bd139"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:890da8cd1941fa3dab28c5bac3b9da8502e7e366f895b3b8e500896f12f94d11"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:c0d8326269dbf944b9201911b0d9f3dc524d64779a07518199a58384c3d37a44"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:b76d63495b0508ab9fc23f8152bac63205d2a704cd009a2b0722f4c8e0cba8e0"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-win32.whl", hash = "sha256:69683e02e8a9de37f17985905a5eca18ad651bf592314b4d3d799029797d0eb3"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-win_amd64.whl", hash = "sha256:aee110e4ef3c528f3abbc3c2018c121e708938adeeff9006428dd7c8555e9b3f"}, + {file = "SQLAlchemy-2.0.35-py3-none-any.whl", hash = "sha256:2ab3f0336c0387662ce6221ad30ab3a5e6499aab01b9790879b6578fd9b8faa1"}, + {file = "sqlalchemy-2.0.35.tar.gz", hash = 
"sha256:e11d7ea4d24f0a262bccf9a7cd6284c976c5369dac21db237cff59586045ab9f"}, ] [package.dependencies] @@ -8746,13 +8784,13 @@ doc = ["sphinx"] [[package]] name = "starlette" -version = "0.38.5" +version = "0.38.6" description = "The little ASGI library that shines." optional = false python-versions = ">=3.8" files = [ - {file = "starlette-0.38.5-py3-none-any.whl", hash = "sha256:632f420a9d13e3ee2a6f18f437b0a9f1faecb0bc42e1942aa2ea0e379a4c4206"}, - {file = "starlette-0.38.5.tar.gz", hash = "sha256:04a92830a9b6eb1442c766199d62260c3d4dc9c4f9188360626b1e0273cb7077"}, + {file = "starlette-0.38.6-py3-none-any.whl", hash = "sha256:4517a1409e2e73ee4951214ba012052b9e16f60e90d73cfb06192c19203bbb05"}, + {file = "starlette-0.38.6.tar.gz", hash = "sha256:863a1588f5574e70a821dadefb41e4881ea451a47a3cd1b4df359d4ffefe5ead"}, ] [package.dependencies] @@ -8777,13 +8815,13 @@ python-dateutil = ">=2.6.0" [[package]] name = "sympy" -version = "1.13.2" +version = "1.13.3" description = "Computer algebra system (CAS) in Python" optional = false python-versions = ">=3.8" files = [ - {file = "sympy-1.13.2-py3-none-any.whl", hash = "sha256:c51d75517712f1aed280d4ce58506a4a88d635d6b5dd48b39102a7ae1f3fcfe9"}, - {file = "sympy-1.13.2.tar.gz", hash = "sha256:401449d84d07be9d0c7a46a64bd54fe097667d5e7181bfe67ec777be9e01cb13"}, + {file = "sympy-1.13.3-py3-none-any.whl", hash = "sha256:54612cf55a62755ee71824ce692986f23c88ffa77207b30c1368eda4a7060f73"}, + {file = "sympy-1.13.3.tar.gz", hash = "sha256:b27fd2c6530e0ab39e275fc9b683895367e51d5da91baa8d3d64db2565fec4d9"}, ] [package.dependencies] @@ -8849,13 +8887,13 @@ test = ["pytest", "tornado (>=4.5)", "typeguard"] [[package]] name = "tencentcloud-sdk-python-common" -version = "3.0.1231" +version = "3.0.1243" description = "Tencent Cloud Common SDK for Python" optional = false python-versions = "*" files = [ - {file = "tencentcloud-sdk-python-common-3.0.1231.tar.gz", hash = "sha256:22aa281ca2eac511e1615b2953da7c4a0bec87cf93a05a7a15dbb61b23a215ee"}, - 
{file = "tencentcloud_sdk_python_common-3.0.1231-py2.py3-none-any.whl", hash = "sha256:bd0f7c4df4b156ec35c8731afa1f498043c7e1cd5d2feb595ee441fdb45a061e"}, + {file = "tencentcloud-sdk-python-common-3.0.1243.tar.gz", hash = "sha256:4008126b5506543ddc33ea53ef25abe19b4fa025006ec7751b3d5b3c046438b6"}, + {file = "tencentcloud_sdk_python_common-3.0.1243-py2.py3-none-any.whl", hash = "sha256:ed5d2040627aeed2df1fe5f9d65b9987ef842222d79fb13136cc5866aa5ac50c"}, ] [package.dependencies] @@ -8863,17 +8901,17 @@ requests = ">=2.16.0" [[package]] name = "tencentcloud-sdk-python-hunyuan" -version = "3.0.1231" +version = "3.0.1243" description = "Tencent Cloud Hunyuan SDK for Python" optional = false python-versions = "*" files = [ - {file = "tencentcloud-sdk-python-hunyuan-3.0.1231.tar.gz", hash = "sha256:6da12f418f14305b3a6b7bb29b6d95bf4038a6b66b81c0e03b8dafc4f6df99ca"}, - {file = "tencentcloud_sdk_python_hunyuan-3.0.1231-py2.py3-none-any.whl", hash = "sha256:21ba28f69c34c15e20900be3f2c06376fcaf7e58265f939833c55631f2348792"}, + {file = "tencentcloud-sdk-python-hunyuan-3.0.1243.tar.gz", hash = "sha256:2caad419f24106574f05faf69d162060689e03452832fda890a28eb8814199bc"}, + {file = "tencentcloud_sdk_python_hunyuan-3.0.1243-py2.py3-none-any.whl", hash = "sha256:b5a5c0dfa8b6de68c2c8747bf34b880c7fd2a1cbd59efc372e076260228ce231"}, ] [package.dependencies] -tencentcloud-sdk-python-common = "3.0.1231" +tencentcloud-sdk-python-common = "3.0.1243" [[package]] name = "threadpoolctl" @@ -9316,13 +9354,13 @@ typing-extensions = ">=3.7.4" [[package]] name = "tzdata" -version = "2024.1" +version = "2024.2" description = "Provider of IANA time zone data" optional = false python-versions = ">=2" files = [ - {file = "tzdata-2024.1-py2.py3-none-any.whl", hash = "sha256:9068bc196136463f5245e51efda838afa15aaeca9903f49050dfa2679db4d252"}, - {file = "tzdata-2024.1.tar.gz", hash = "sha256:2674120f8d891909751c38abcdfd386ac0a5a1127954fbc332af6b5ceae07efd"}, + {file = "tzdata-2024.2-py2.py3-none-any.whl", 
hash = "sha256:a48093786cdcde33cad18c2555e8532f34422074448fbc874186f0abd79565cd"}, + {file = "tzdata-2024.2.tar.gz", hash = "sha256:7d85cc416e9382e69095b7bdf4afd9e3880418a2413feec7069d533d6b4e31cc"}, ] [[package]] @@ -9542,13 +9580,13 @@ zstd = ["zstandard (>=0.18.0)"] [[package]] name = "uvicorn" -version = "0.30.6" +version = "0.31.0" description = "The lightning-fast ASGI server." optional = false python-versions = ">=3.8" files = [ - {file = "uvicorn-0.30.6-py3-none-any.whl", hash = "sha256:65fd46fe3fda5bdc1b03b94eb634923ff18cd35b2f084813ea79d1f103f711b5"}, - {file = "uvicorn-0.30.6.tar.gz", hash = "sha256:4b15decdda1e72be08209e860a1e10e92439ad5b97cf44cc945fcbee66fc5788"}, + {file = "uvicorn-0.31.0-py3-none-any.whl", hash = "sha256:cac7be4dd4d891c363cd942160a7b02e69150dcbc7a36be04d5f4af4b17c8ced"}, + {file = "uvicorn-0.31.0.tar.gz", hash = "sha256:13bc21373d103859f68fe739608e2eb054a816dea79189bc3ca08ea89a275906"}, ] [package.dependencies] @@ -9685,12 +9723,12 @@ files = [ [[package]] name = "volcengine-python-sdk" -version = "1.0.101" +version = "1.0.103" description = "Volcengine SDK for Python" optional = false python-versions = "*" files = [ - {file = "volcengine-python-sdk-1.0.101.tar.gz", hash = "sha256:1b76e71a6dcf3d5be1b2c058e7d281359e6cca2cc920ffe2567d3115beea1d02"}, + {file = "volcengine-python-sdk-1.0.103.tar.gz", hash = "sha256:49fa8572802724972e1cb47a7e692b184b055f41b09099358c1a0fad1d146af5"}, ] [package.dependencies] @@ -9860,97 +9898,97 @@ test = ["websockets"] [[package]] name = "websockets" -version = "13.0.1" +version = "13.1" description = "An implementation of the WebSocket Protocol (RFC 6455 & 7692)" optional = false python-versions = ">=3.8" files = [ - {file = "websockets-13.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1841c9082a3ba4a05ea824cf6d99570a6a2d8849ef0db16e9c826acb28089e8f"}, - {file = "websockets-13.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = 
"sha256:c5870b4a11b77e4caa3937142b650fbbc0914a3e07a0cf3131f35c0587489c1c"}, - {file = "websockets-13.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f1d3d1f2eb79fe7b0fb02e599b2bf76a7619c79300fc55f0b5e2d382881d4f7f"}, - {file = "websockets-13.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:15c7d62ee071fa94a2fc52c2b472fed4af258d43f9030479d9c4a2de885fd543"}, - {file = "websockets-13.0.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6724b554b70d6195ba19650fef5759ef11346f946c07dbbe390e039bcaa7cc3d"}, - {file = "websockets-13.0.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:56a952fa2ae57a42ba7951e6b2605e08a24801a4931b5644dfc68939e041bc7f"}, - {file = "websockets-13.0.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:17118647c0ea14796364299e942c330d72acc4b248e07e639d34b75067b3cdd8"}, - {file = "websockets-13.0.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:64a11aae1de4c178fa653b07d90f2fb1a2ed31919a5ea2361a38760192e1858b"}, - {file = "websockets-13.0.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:0617fd0b1d14309c7eab6ba5deae8a7179959861846cbc5cb528a7531c249448"}, - {file = "websockets-13.0.1-cp310-cp310-win32.whl", hash = "sha256:11f9976ecbc530248cf162e359a92f37b7b282de88d1d194f2167b5e7ad80ce3"}, - {file = "websockets-13.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:c3c493d0e5141ec055a7d6809a28ac2b88d5b878bb22df8c621ebe79a61123d0"}, - {file = "websockets-13.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:699ba9dd6a926f82a277063603fc8d586b89f4cb128efc353b749b641fcddda7"}, - {file = "websockets-13.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cf2fae6d85e5dc384bf846f8243ddaa9197f3a1a70044f59399af001fd1f51d4"}, - {file = "websockets-13.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:52aed6ef21a0f1a2a5e310fb5c42d7555e9c5855476bbd7173c3aa3d8a0302f2"}, - 
{file = "websockets-13.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8eb2b9a318542153674c6e377eb8cb9ca0fc011c04475110d3477862f15d29f0"}, - {file = "websockets-13.0.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5df891c86fe68b2c38da55b7aea7095beca105933c697d719f3f45f4220a5e0e"}, - {file = "websockets-13.0.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fac2d146ff30d9dd2fcf917e5d147db037a5c573f0446c564f16f1f94cf87462"}, - {file = "websockets-13.0.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:b8ac5b46fd798bbbf2ac6620e0437c36a202b08e1f827832c4bf050da081b501"}, - {file = "websockets-13.0.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:46af561eba6f9b0848b2c9d2427086cabadf14e0abdd9fde9d72d447df268418"}, - {file = "websockets-13.0.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b5a06d7f60bc2fc378a333978470dfc4e1415ee52f5f0fce4f7853eb10c1e9df"}, - {file = "websockets-13.0.1-cp311-cp311-win32.whl", hash = "sha256:556e70e4f69be1082e6ef26dcb70efcd08d1850f5d6c5f4f2bcb4e397e68f01f"}, - {file = "websockets-13.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:67494e95d6565bf395476e9d040037ff69c8b3fa356a886b21d8422ad86ae075"}, - {file = "websockets-13.0.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:f9c9e258e3d5efe199ec23903f5da0eeaad58cf6fccb3547b74fd4750e5ac47a"}, - {file = "websockets-13.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:6b41a1b3b561f1cba8321fb32987552a024a8f67f0d05f06fcf29f0090a1b956"}, - {file = "websockets-13.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f73e676a46b0fe9426612ce8caeca54c9073191a77c3e9d5c94697aef99296af"}, - {file = "websockets-13.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f613289f4a94142f914aafad6c6c87903de78eae1e140fa769a7385fb232fdf"}, - {file = 
"websockets-13.0.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0f52504023b1480d458adf496dc1c9e9811df4ba4752f0bc1f89ae92f4f07d0c"}, - {file = "websockets-13.0.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:139add0f98206cb74109faf3611b7783ceafc928529c62b389917a037d4cfdf4"}, - {file = "websockets-13.0.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:47236c13be337ef36546004ce8c5580f4b1150d9538b27bf8a5ad8edf23ccfab"}, - {file = "websockets-13.0.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:c44ca9ade59b2e376612df34e837013e2b273e6c92d7ed6636d0556b6f4db93d"}, - {file = "websockets-13.0.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:9bbc525f4be3e51b89b2a700f5746c2a6907d2e2ef4513a8daafc98198b92237"}, - {file = "websockets-13.0.1-cp312-cp312-win32.whl", hash = "sha256:3624fd8664f2577cf8de996db3250662e259bfbc870dd8ebdcf5d7c6ac0b5185"}, - {file = "websockets-13.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0513c727fb8adffa6d9bf4a4463b2bade0186cbd8c3604ae5540fae18a90cb99"}, - {file = "websockets-13.0.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:1ee4cc030a4bdab482a37462dbf3ffb7e09334d01dd37d1063be1136a0d825fa"}, - {file = "websockets-13.0.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:dbb0b697cc0655719522406c059eae233abaa3243821cfdfab1215d02ac10231"}, - {file = "websockets-13.0.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:acbebec8cb3d4df6e2488fbf34702cbc37fc39ac7abf9449392cefb3305562e9"}, - {file = "websockets-13.0.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63848cdb6fcc0bf09d4a155464c46c64ffdb5807ede4fb251da2c2692559ce75"}, - {file = "websockets-13.0.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:872afa52a9f4c414d6955c365b6588bc4401272c629ff8321a55f44e3f62b553"}, - {file = 
"websockets-13.0.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:05e70fec7c54aad4d71eae8e8cab50525e899791fc389ec6f77b95312e4e9920"}, - {file = "websockets-13.0.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:e82db3756ccb66266504f5a3de05ac6b32f287faacff72462612120074103329"}, - {file = "websockets-13.0.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:4e85f46ce287f5c52438bb3703d86162263afccf034a5ef13dbe4318e98d86e7"}, - {file = "websockets-13.0.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f3fea72e4e6edb983908f0db373ae0732b275628901d909c382aae3b592589f2"}, - {file = "websockets-13.0.1-cp313-cp313-win32.whl", hash = "sha256:254ecf35572fca01a9f789a1d0f543898e222f7b69ecd7d5381d8d8047627bdb"}, - {file = "websockets-13.0.1-cp313-cp313-win_amd64.whl", hash = "sha256:ca48914cdd9f2ccd94deab5bcb5ac98025a5ddce98881e5cce762854a5de330b"}, - {file = "websockets-13.0.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:b74593e9acf18ea5469c3edaa6b27fa7ecf97b30e9dabd5a94c4c940637ab96e"}, - {file = "websockets-13.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:132511bfd42e77d152c919147078460c88a795af16b50e42a0bd14f0ad71ddd2"}, - {file = "websockets-13.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:165bedf13556f985a2aa064309baa01462aa79bf6112fbd068ae38993a0e1f1b"}, - {file = "websockets-13.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e801ca2f448850685417d723ec70298feff3ce4ff687c6f20922c7474b4746ae"}, - {file = "websockets-13.0.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:30d3a1f041360f029765d8704eae606781e673e8918e6b2c792e0775de51352f"}, - {file = "websockets-13.0.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67648f5e50231b5a7f6d83b32f9c525e319f0ddc841be0de64f24928cd75a603"}, - {file = 
"websockets-13.0.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:4f0426d51c8f0926a4879390f53c7f5a855e42d68df95fff6032c82c888b5f36"}, - {file = "websockets-13.0.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:ef48e4137e8799998a343706531e656fdec6797b80efd029117edacb74b0a10a"}, - {file = "websockets-13.0.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:249aab278810bee585cd0d4de2f08cfd67eed4fc75bde623be163798ed4db2eb"}, - {file = "websockets-13.0.1-cp38-cp38-win32.whl", hash = "sha256:06c0a667e466fcb56a0886d924b5f29a7f0886199102f0a0e1c60a02a3751cb4"}, - {file = "websockets-13.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1f3cf6d6ec1142412d4535adabc6bd72a63f5f148c43fe559f06298bc21953c9"}, - {file = "websockets-13.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:1fa082ea38d5de51dd409434edc27c0dcbd5fed2b09b9be982deb6f0508d25bc"}, - {file = "websockets-13.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4a365bcb7be554e6e1f9f3ed64016e67e2fa03d7b027a33e436aecf194febb63"}, - {file = "websockets-13.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:10a0dc7242215d794fb1918f69c6bb235f1f627aaf19e77f05336d147fce7c37"}, - {file = "websockets-13.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59197afd478545b1f73367620407b0083303569c5f2d043afe5363676f2697c9"}, - {file = "websockets-13.0.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7d20516990d8ad557b5abeb48127b8b779b0b7e6771a265fa3e91767596d7d97"}, - {file = "websockets-13.0.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a1a2e272d067030048e1fe41aa1ec8cfbbaabce733b3d634304fa2b19e5c897f"}, - {file = "websockets-13.0.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:ad327ac80ba7ee61da85383ca8822ff808ab5ada0e4a030d66703cc025b021c4"}, - {file = "websockets-13.0.1-cp39-cp39-musllinux_1_2_i686.whl", hash = 
"sha256:518f90e6dd089d34eaade01101fd8a990921c3ba18ebbe9b0165b46ebff947f0"}, - {file = "websockets-13.0.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:68264802399aed6fe9652e89761031acc734fc4c653137a5911c2bfa995d6d6d"}, - {file = "websockets-13.0.1-cp39-cp39-win32.whl", hash = "sha256:a5dc0c42ded1557cc7c3f0240b24129aefbad88af4f09346164349391dea8e58"}, - {file = "websockets-13.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:b448a0690ef43db5ef31b3a0d9aea79043882b4632cfc3eaab20105edecf6097"}, - {file = "websockets-13.0.1-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:faef9ec6354fe4f9a2c0bbb52fb1ff852effc897e2a4501e25eb3a47cb0a4f89"}, - {file = "websockets-13.0.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:03d3f9ba172e0a53e37fa4e636b86cc60c3ab2cfee4935e66ed1d7acaa4625ad"}, - {file = "websockets-13.0.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d450f5a7a35662a9b91a64aefa852f0c0308ee256122f5218a42f1d13577d71e"}, - {file = "websockets-13.0.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3f55b36d17ac50aa8a171b771e15fbe1561217510c8768af3d546f56c7576cdc"}, - {file = "websockets-13.0.1-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14b9c006cac63772b31abbcd3e3abb6228233eec966bf062e89e7fa7ae0b7333"}, - {file = "websockets-13.0.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:b79915a1179a91f6c5f04ece1e592e2e8a6bd245a0e45d12fd56b2b59e559a32"}, - {file = "websockets-13.0.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:f40de079779acbcdbb6ed4c65af9f018f8b77c5ec4e17a4b737c05c2db554491"}, - {file = "websockets-13.0.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:80e4ba642fc87fa532bac07e5ed7e19d56940b6af6a8c61d4429be48718a380f"}, - {file = "websockets-13.0.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:2a02b0161c43cc9e0232711eff846569fad6ec836a7acab16b3cf97b2344c060"}, - {file = "websockets-13.0.1-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6aa74a45d4cdc028561a7d6ab3272c8b3018e23723100b12e58be9dfa5a24491"}, - {file = "websockets-13.0.1-pp38-pypy38_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:00fd961943b6c10ee6f0b1130753e50ac5dcd906130dcd77b0003c3ab797d026"}, - {file = "websockets-13.0.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:d93572720d781331fb10d3da9ca1067817d84ad1e7c31466e9f5e59965618096"}, - {file = "websockets-13.0.1-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:71e6e5a3a3728886caee9ab8752e8113670936a193284be9d6ad2176a137f376"}, - {file = "websockets-13.0.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:c4a6343e3b0714e80da0b0893543bf9a5b5fa71b846ae640e56e9abc6fbc4c83"}, - {file = "websockets-13.0.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a678532018e435396e37422a95e3ab87f75028ac79570ad11f5bf23cd2a7d8c"}, - {file = "websockets-13.0.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6716c087e4aa0b9260c4e579bb82e068f84faddb9bfba9906cb87726fa2e870"}, - {file = "websockets-13.0.1-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e33505534f3f673270dd67f81e73550b11de5b538c56fe04435d63c02c3f26b5"}, - {file = "websockets-13.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:acab3539a027a85d568c2573291e864333ec9d912675107d6efceb7e2be5d980"}, - {file = "websockets-13.0.1-py3-none-any.whl", hash = "sha256:b80f0c51681c517604152eb6a572f5a9378f877763231fddb883ba2f968e8817"}, - {file = "websockets-13.0.1.tar.gz", hash = "sha256:4d6ece65099411cfd9a48d13701d7438d9c34f479046b34c50ff60bb8834e43e"}, + {file = 
"websockets-13.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f48c749857f8fb598fb890a75f540e3221d0976ed0bf879cf3c7eef34151acee"}, + {file = "websockets-13.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c7e72ce6bda6fb9409cc1e8164dd41d7c91466fb599eb047cfda72fe758a34a7"}, + {file = "websockets-13.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f779498eeec470295a2b1a5d97aa1bc9814ecd25e1eb637bd9d1c73a327387f6"}, + {file = "websockets-13.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4676df3fe46956fbb0437d8800cd5f2b6d41143b6e7e842e60554398432cf29b"}, + {file = "websockets-13.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a7affedeb43a70351bb811dadf49493c9cfd1ed94c9c70095fd177e9cc1541fa"}, + {file = "websockets-13.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1971e62d2caa443e57588e1d82d15f663b29ff9dfe7446d9964a4b6f12c1e700"}, + {file = "websockets-13.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:5f2e75431f8dc4a47f31565a6e1355fb4f2ecaa99d6b89737527ea917066e26c"}, + {file = "websockets-13.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:58cf7e75dbf7e566088b07e36ea2e3e2bd5676e22216e4cad108d4df4a7402a0"}, + {file = "websockets-13.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:c90d6dec6be2c7d03378a574de87af9b1efea77d0c52a8301dd831ece938452f"}, + {file = "websockets-13.1-cp310-cp310-win32.whl", hash = "sha256:730f42125ccb14602f455155084f978bd9e8e57e89b569b4d7f0f0c17a448ffe"}, + {file = "websockets-13.1-cp310-cp310-win_amd64.whl", hash = "sha256:5993260f483d05a9737073be197371940c01b257cc45ae3f1d5d7adb371b266a"}, + {file = "websockets-13.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:61fc0dfcda609cda0fc9fe7977694c0c59cf9d749fbb17f4e9483929e3c48a19"}, + {file = "websockets-13.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:ceec59f59d092c5007e815def4ebb80c2de330e9588e101cf8bd94c143ec78a5"}, + {file = "websockets-13.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c1dca61c6db1166c48b95198c0b7d9c990b30c756fc2923cc66f68d17dc558fd"}, + {file = "websockets-13.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:308e20f22c2c77f3f39caca508e765f8725020b84aa963474e18c59accbf4c02"}, + {file = "websockets-13.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62d516c325e6540e8a57b94abefc3459d7dab8ce52ac75c96cad5549e187e3a7"}, + {file = "websockets-13.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87c6e35319b46b99e168eb98472d6c7d8634ee37750d7693656dc766395df096"}, + {file = "websockets-13.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:5f9fee94ebafbc3117c30be1844ed01a3b177bb6e39088bc6b2fa1dc15572084"}, + {file = "websockets-13.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:7c1e90228c2f5cdde263253fa5db63e6653f1c00e7ec64108065a0b9713fa1b3"}, + {file = "websockets-13.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6548f29b0e401eea2b967b2fdc1c7c7b5ebb3eeb470ed23a54cd45ef078a0db9"}, + {file = "websockets-13.1-cp311-cp311-win32.whl", hash = "sha256:c11d4d16e133f6df8916cc5b7e3e96ee4c44c936717d684a94f48f82edb7c92f"}, + {file = "websockets-13.1-cp311-cp311-win_amd64.whl", hash = "sha256:d04f13a1d75cb2b8382bdc16ae6fa58c97337253826dfe136195b7f89f661557"}, + {file = "websockets-13.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:9d75baf00138f80b48f1eac72ad1535aac0b6461265a0bcad391fc5aba875cfc"}, + {file = "websockets-13.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:9b6f347deb3dcfbfde1c20baa21c2ac0751afaa73e64e5b693bb2b848efeaa49"}, + {file = "websockets-13.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:de58647e3f9c42f13f90ac7e5f58900c80a39019848c5547bc691693098ae1bd"}, + {file = 
"websockets-13.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1b54689e38d1279a51d11e3467dd2f3a50f5f2e879012ce8f2d6943f00e83f0"}, + {file = "websockets-13.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cf1781ef73c073e6b0f90af841aaf98501f975d306bbf6221683dd594ccc52b6"}, + {file = "websockets-13.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d23b88b9388ed85c6faf0e74d8dec4f4d3baf3ecf20a65a47b836d56260d4b9"}, + {file = "websockets-13.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3c78383585f47ccb0fcf186dcb8a43f5438bd7d8f47d69e0b56f71bf431a0a68"}, + {file = "websockets-13.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:d6d300f8ec35c24025ceb9b9019ae9040c1ab2f01cddc2bcc0b518af31c75c14"}, + {file = "websockets-13.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a9dcaf8b0cc72a392760bb8755922c03e17a5a54e08cca58e8b74f6902b433cf"}, + {file = "websockets-13.1-cp312-cp312-win32.whl", hash = "sha256:2f85cf4f2a1ba8f602298a853cec8526c2ca42a9a4b947ec236eaedb8f2dc80c"}, + {file = "websockets-13.1-cp312-cp312-win_amd64.whl", hash = "sha256:38377f8b0cdeee97c552d20cf1865695fcd56aba155ad1b4ca8779a5b6ef4ac3"}, + {file = "websockets-13.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a9ab1e71d3d2e54a0aa646ab6d4eebfaa5f416fe78dfe4da2839525dc5d765c6"}, + {file = "websockets-13.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b9d7439d7fab4dce00570bb906875734df13d9faa4b48e261c440a5fec6d9708"}, + {file = "websockets-13.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:327b74e915cf13c5931334c61e1a41040e365d380f812513a255aa804b183418"}, + {file = "websockets-13.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:325b1ccdbf5e5725fdcb1b0e9ad4d2545056479d0eee392c291c1bf76206435a"}, + {file = 
"websockets-13.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:346bee67a65f189e0e33f520f253d5147ab76ae42493804319b5716e46dddf0f"}, + {file = "websockets-13.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:91a0fa841646320ec0d3accdff5b757b06e2e5c86ba32af2e0815c96c7a603c5"}, + {file = "websockets-13.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:18503d2c5f3943e93819238bf20df71982d193f73dcecd26c94514f417f6b135"}, + {file = "websockets-13.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:a9cd1af7e18e5221d2878378fbc287a14cd527fdd5939ed56a18df8a31136bb2"}, + {file = "websockets-13.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:70c5be9f416aa72aab7a2a76c90ae0a4fe2755c1816c153c1a2bcc3333ce4ce6"}, + {file = "websockets-13.1-cp313-cp313-win32.whl", hash = "sha256:624459daabeb310d3815b276c1adef475b3e6804abaf2d9d2c061c319f7f187d"}, + {file = "websockets-13.1-cp313-cp313-win_amd64.whl", hash = "sha256:c518e84bb59c2baae725accd355c8dc517b4a3ed8db88b4bc93c78dae2974bf2"}, + {file = "websockets-13.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:c7934fd0e920e70468e676fe7f1b7261c1efa0d6c037c6722278ca0228ad9d0d"}, + {file = "websockets-13.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:149e622dc48c10ccc3d2760e5f36753db9cacf3ad7bc7bbbfd7d9c819e286f23"}, + {file = "websockets-13.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a569eb1b05d72f9bce2ebd28a1ce2054311b66677fcd46cf36204ad23acead8c"}, + {file = "websockets-13.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:95df24ca1e1bd93bbca51d94dd049a984609687cb2fb08a7f2c56ac84e9816ea"}, + {file = "websockets-13.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d8dbb1bf0c0a4ae8b40bdc9be7f644e2f3fb4e8a9aca7145bfa510d4a374eeb7"}, + {file = 
"websockets-13.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:035233b7531fb92a76beefcbf479504db8c72eb3bff41da55aecce3a0f729e54"}, + {file = "websockets-13.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:e4450fc83a3df53dec45922b576e91e94f5578d06436871dce3a6be38e40f5db"}, + {file = "websockets-13.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:463e1c6ec853202dd3657f156123d6b4dad0c546ea2e2e38be2b3f7c5b8e7295"}, + {file = "websockets-13.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:6d6855bbe70119872c05107e38fbc7f96b1d8cb047d95c2c50869a46c65a8e96"}, + {file = "websockets-13.1-cp38-cp38-win32.whl", hash = "sha256:204e5107f43095012b00f1451374693267adbb832d29966a01ecc4ce1db26faf"}, + {file = "websockets-13.1-cp38-cp38-win_amd64.whl", hash = "sha256:485307243237328c022bc908b90e4457d0daa8b5cf4b3723fd3c4a8012fce4c6"}, + {file = "websockets-13.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:9b37c184f8b976f0c0a231a5f3d6efe10807d41ccbe4488df8c74174805eea7d"}, + {file = "websockets-13.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:163e7277e1a0bd9fb3c8842a71661ad19c6aa7bb3d6678dc7f89b17fbcc4aeb7"}, + {file = "websockets-13.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4b889dbd1342820cc210ba44307cf75ae5f2f96226c0038094455a96e64fb07a"}, + {file = "websockets-13.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:586a356928692c1fed0eca68b4d1c2cbbd1ca2acf2ac7e7ebd3b9052582deefa"}, + {file = "websockets-13.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7bd6abf1e070a6b72bfeb71049d6ad286852e285f146682bf30d0296f5fbadfa"}, + {file = "websockets-13.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d2aad13a200e5934f5a6767492fb07151e1de1d6079c003ab31e1823733ae79"}, + {file = "websockets-13.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = 
"sha256:df01aea34b6e9e33572c35cd16bae5a47785e7d5c8cb2b54b2acdb9678315a17"}, + {file = "websockets-13.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:e54affdeb21026329fb0744ad187cf812f7d3c2aa702a5edb562b325191fcab6"}, + {file = "websockets-13.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:9ef8aa8bdbac47f4968a5d66462a2a0935d044bf35c0e5a8af152d58516dbeb5"}, + {file = "websockets-13.1-cp39-cp39-win32.whl", hash = "sha256:deeb929efe52bed518f6eb2ddc00cc496366a14c726005726ad62c2dd9017a3c"}, + {file = "websockets-13.1-cp39-cp39-win_amd64.whl", hash = "sha256:7c65ffa900e7cc958cd088b9a9157a8141c991f8c53d11087e6fb7277a03f81d"}, + {file = "websockets-13.1-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:5dd6da9bec02735931fccec99d97c29f47cc61f644264eb995ad6c0c27667238"}, + {file = "websockets-13.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:2510c09d8e8df777177ee3d40cd35450dc169a81e747455cc4197e63f7e7bfe5"}, + {file = "websockets-13.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1c3cf67185543730888b20682fb186fc8d0fa6f07ccc3ef4390831ab4b388d9"}, + {file = "websockets-13.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bcc03c8b72267e97b49149e4863d57c2d77f13fae12066622dc78fe322490fe6"}, + {file = "websockets-13.1-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:004280a140f220c812e65f36944a9ca92d766b6cc4560be652a0a3883a79ed8a"}, + {file = "websockets-13.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:e2620453c075abeb0daa949a292e19f56de518988e079c36478bacf9546ced23"}, + {file = "websockets-13.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:9156c45750b37337f7b0b00e6248991a047be4aa44554c9886fe6bdd605aab3b"}, + {file = "websockets-13.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:80c421e07973a89fbdd93e6f2003c17d20b69010458d3a8e37fb47874bd67d51"}, + 
{file = "websockets-13.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82d0ba76371769d6a4e56f7e83bb8e81846d17a6190971e38b5de108bde9b0d7"}, + {file = "websockets-13.1-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e9875a0143f07d74dc5e1ded1c4581f0d9f7ab86c78994e2ed9e95050073c94d"}, + {file = "websockets-13.1-pp38-pypy38_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a11e38ad8922c7961447f35c7b17bffa15de4d17c70abd07bfbe12d6faa3e027"}, + {file = "websockets-13.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:4059f790b6ae8768471cddb65d3c4fe4792b0ab48e154c9f0a04cefaabcd5978"}, + {file = "websockets-13.1-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:25c35bf84bf7c7369d247f0b8cfa157f989862c49104c5cf85cb5436a641d93e"}, + {file = "websockets-13.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:83f91d8a9bb404b8c2c41a707ac7f7f75b9442a0a876df295de27251a856ad09"}, + {file = "websockets-13.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7a43cfdcddd07f4ca2b1afb459824dd3c6d53a51410636a2c7fc97b9a8cf4842"}, + {file = "websockets-13.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:48a2ef1381632a2f0cb4efeff34efa97901c9fbc118e01951ad7cfc10601a9bb"}, + {file = "websockets-13.1-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:459bf774c754c35dbb487360b12c5727adab887f1622b8aed5755880a21c4a20"}, + {file = "websockets-13.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:95858ca14a9f6fa8413d29e0a585b31b278388aa775b8a81fa24830123874678"}, + {file = "websockets-13.1-py3-none-any.whl", hash = "sha256:a9a396a6ad26130cdae92ae10c36af09d9bfe6cafe69670fd3b6da9b07b4044f"}, + {file = "websockets-13.1.tar.gz", hash = 
"sha256:a3b3366087c1bc0a2795111edcadddb8b3b59509d5db5d7ea3fdd69f954a8878"}, ] [[package]] @@ -10501,4 +10539,4 @@ cffi = ["cffi (>=1.11)"] [metadata] lock-version = "2.0" python-versions = ">=3.10,<3.13" -content-hash = "c4580c22e2b220c8c80dbc3f765060a09e14874ed29b690c13a533bf0365e789" +content-hash = "74a22b763f5f6f1414e843f1a508ff48210d32f9523b696e5dd443489deaad46" diff --git a/api/pyproject.toml b/api/pyproject.toml index e737761f3b2c0..fe9e64907b4c3 100644 --- a/api/pyproject.toml +++ b/api/pyproject.toml @@ -221,6 +221,7 @@ volcengine-python-sdk = {extras = ["ark"], version = "^1.0.98"} oci = "^2.133.0" tos = "^2.7.1" nomic = "^3.1.2" +validators = "0.21.0" [tool.poetry.group.indriect.dependencies] kaleido = "0.2.1" rank-bm25 = "~0.2.2" diff --git a/api/schedule/clean_unused_messages_task.py b/api/schedule/clean_unused_messages_task.py new file mode 100644 index 0000000000000..85e6a58a0e90c --- /dev/null +++ b/api/schedule/clean_unused_messages_task.py @@ -0,0 +1,92 @@ +import datetime +import time + +import click +from sqlalchemy import func +from werkzeug.exceptions import NotFound + +import app +from configs import dify_config +from core.rag.index_processor.index_processor_factory import IndexProcessorFactory +from extensions.ext_database import db +from models.dataset import Dataset, DatasetQuery, Document + + +@app.celery.task(queue="dataset") +def clean_unused_message_task(): + click.echo(click.style("Start clean unused messages .", fg="green")) + clean_days = int(dify_config.CLEAN_DAY_SETTING) + start_at = time.perf_counter() + thirty_days_ago = datetime.datetime.now() - datetime.timedelta(days=clean_days) + page = 1 + while True: + try: + # Subquery for counting new documents + document_subquery_new = ( + db.session.query(Document.dataset_id, func.count(Document.id).label("document_count")) + .filter( + Document.indexing_status == "completed", + Document.enabled == True, + Document.archived == False, + Document.updated_at > thirty_days_ago, + ) + 
.group_by(Document.dataset_id) + .subquery() + ) + + # Subquery for counting old documents + document_subquery_old = ( + db.session.query(Document.dataset_id, func.count(Document.id).label("document_count")) + .filter( + Document.indexing_status == "completed", + Document.enabled == True, + Document.archived == False, + Document.updated_at < thirty_days_ago, + ) + .group_by(Document.dataset_id) + .subquery() + ) + + # Main query with join and filter + datasets = ( + db.session.query(Dataset) + .outerjoin(document_subquery_new, Dataset.id == document_subquery_new.c.dataset_id) + .outerjoin(document_subquery_old, Dataset.id == document_subquery_old.c.dataset_id) + .filter( + Dataset.created_at < thirty_days_ago, + func.coalesce(document_subquery_new.c.document_count, 0) == 0, + func.coalesce(document_subquery_old.c.document_count, 0) > 0, + ) + .order_by(Dataset.created_at.desc()) + .paginate(page=page, per_page=50) + ) + + except NotFound: + break + if datasets.items is None or len(datasets.items) == 0: + break + page += 1 + for dataset in datasets: + dataset_query = ( + db.session.query(DatasetQuery) + .filter(DatasetQuery.created_at > thirty_days_ago, DatasetQuery.dataset_id == dataset.id) + .all() + ) + if not dataset_query or len(dataset_query) == 0: + try: + # remove index + index_processor = IndexProcessorFactory(dataset.doc_form).init_index_processor() + index_processor.clean(dataset, None) + + # update document + update_params = {Document.enabled: False} + + Document.query.filter_by(dataset_id=dataset.id).update(update_params) + db.session.commit() + click.echo(click.style("Cleaned unused dataset {} from db success!".format(dataset.id), fg="green")) + except Exception as e: + click.echo( + click.style("clean dataset index error: {} {}".format(e.__class__.__name__, str(e)), fg="red") + ) + end_at = time.perf_counter() + click.echo(click.style("Cleaned unused dataset from db success latency: {}".format(end_at - start_at), fg="green")) diff --git 
a/api/services/dataset_service.py b/api/services/dataset_service.py index e96f06ed40fa4..c165def6d5c1e 100644 --- a/api/services/dataset_service.py +++ b/api/services/dataset_service.py @@ -32,6 +32,7 @@ DatasetQuery, Document, DocumentSegment, + ExternalKnowledgeBindings, ) from models.model import UploadFile from models.source import DataSourceOauthBinding @@ -39,6 +40,7 @@ from services.errors.dataset import DatasetNameDuplicateError from services.errors.document import DocumentIndexingError from services.errors.file import FileNotExistsError +from services.external_knowledge_service import ExternalDatasetService from services.feature_service import FeatureModel, FeatureService from services.tag_service import TagService from services.vector_service import VectorService @@ -56,10 +58,8 @@ class DatasetService: @staticmethod - def get_datasets(page, per_page, provider="vendor", tenant_id=None, user=None, search=None, tag_ids=None): - query = Dataset.query.filter(Dataset.provider == provider, Dataset.tenant_id == tenant_id).order_by( - Dataset.created_at.desc() - ) + def get_datasets(page, per_page, tenant_id=None, user=None, search=None, tag_ids=None): + query = Dataset.query.filter(Dataset.tenant_id == tenant_id).order_by(Dataset.created_at.desc()) if user: # get permitted dataset ids @@ -137,7 +137,14 @@ def get_datasets_by_ids(ids, tenant_id): @staticmethod def create_empty_dataset( - tenant_id: str, name: str, indexing_technique: Optional[str], account: Account, permission: Optional[str] = None + tenant_id: str, + name: str, + indexing_technique: Optional[str], + account: Account, + permission: Optional[str] = None, + provider: str = "vendor", + external_knowledge_api_id: Optional[str] = None, + external_knowledge_id: Optional[str] = None, ): # check if dataset name already exists if Dataset.query.filter_by(name=name, tenant_id=tenant_id).first(): @@ -156,12 +163,28 @@ def create_empty_dataset( dataset.embedding_model_provider = embedding_model.provider if 
embedding_model else None dataset.embedding_model = embedding_model.model if embedding_model else None dataset.permission = permission or DatasetPermissionEnum.ONLY_ME + dataset.provider = provider db.session.add(dataset) + db.session.flush() + + if provider == "external" and external_knowledge_api_id: + external_knowledge_api = ExternalDatasetService.get_external_knowledge_api(external_knowledge_api_id) + if not external_knowledge_api: + raise ValueError("External API template not found.") + external_knowledge_binding = ExternalKnowledgeBindings( + tenant_id=tenant_id, + dataset_id=dataset.id, + external_knowledge_api_id=external_knowledge_api_id, + external_knowledge_id=external_knowledge_id, + created_by=account.id, + ) + db.session.add(external_knowledge_binding) + db.session.commit() return dataset @staticmethod - def get_dataset(dataset_id): + def get_dataset(dataset_id) -> Dataset: return Dataset.query.filter_by(id=dataset_id).first() @staticmethod @@ -202,81 +225,103 @@ def check_embedding_model_setting(tenant_id: str, embedding_model_provider: str, @staticmethod def update_dataset(dataset_id, data, user): - data.pop("partial_member_list", None) - filtered_data = {k: v for k, v in data.items() if v is not None or k == "description"} dataset = DatasetService.get_dataset(dataset_id) + DatasetService.check_dataset_permission(dataset, user) - action = None - if dataset.indexing_technique != data["indexing_technique"]: - # if update indexing_technique - if data["indexing_technique"] == "economy": - action = "remove" - filtered_data["embedding_model"] = None - filtered_data["embedding_model_provider"] = None - filtered_data["collection_binding_id"] = None - elif data["indexing_technique"] == "high_quality": - action = "add" - # get embedding model setting - try: - model_manager = ModelManager() - embedding_model = model_manager.get_model_instance( - tenant_id=current_user.current_tenant_id, - provider=data["embedding_model_provider"], - 
model_type=ModelType.TEXT_EMBEDDING, - model=data["embedding_model"], - ) - filtered_data["embedding_model"] = embedding_model.model - filtered_data["embedding_model_provider"] = embedding_model.provider - dataset_collection_binding = DatasetCollectionBindingService.get_dataset_collection_binding( - embedding_model.provider, embedding_model.model - ) - filtered_data["collection_binding_id"] = dataset_collection_binding.id - except LLMBadRequestError: - raise ValueError( - "No Embedding Model available. Please configure a valid provider " - "in the Settings -> Model Provider." - ) - except ProviderTokenNotInitError as ex: - raise ValueError(ex.description) - else: + if dataset.provider == "external": + dataset.retrieval_model = data.get("external_retrieval_model", None) + dataset.name = data.get("name", dataset.name) + dataset.description = data.get("description", "") + external_knowledge_id = data.get("external_knowledge_id", None) + db.session.add(dataset) + if not external_knowledge_id: + raise ValueError("External knowledge id is required.") + external_knowledge_api_id = data.get("external_knowledge_api_id", None) + if not external_knowledge_api_id: + raise ValueError("External knowledge api id is required.") + external_knowledge_binding = ExternalKnowledgeBindings.query.filter_by(dataset_id=dataset_id).first() if ( - data["embedding_model_provider"] != dataset.embedding_model_provider - or data["embedding_model"] != dataset.embedding_model + external_knowledge_binding.external_knowledge_id != external_knowledge_id + or external_knowledge_binding.external_knowledge_api_id != external_knowledge_api_id ): - action = "update" - try: - model_manager = ModelManager() - embedding_model = model_manager.get_model_instance( - tenant_id=current_user.current_tenant_id, - provider=data["embedding_model_provider"], - model_type=ModelType.TEXT_EMBEDDING, - model=data["embedding_model"], - ) - filtered_data["embedding_model"] = embedding_model.model - 
filtered_data["embedding_model_provider"] = embedding_model.provider - dataset_collection_binding = DatasetCollectionBindingService.get_dataset_collection_binding( - embedding_model.provider, embedding_model.model - ) - filtered_data["collection_binding_id"] = dataset_collection_binding.id - except LLMBadRequestError: - raise ValueError( - "No Embedding Model available. Please configure a valid provider " - "in the Settings -> Model Provider." - ) - except ProviderTokenNotInitError as ex: - raise ValueError(ex.description) + external_knowledge_binding.external_knowledge_id = external_knowledge_id + external_knowledge_binding.external_knowledge_api_id = external_knowledge_api_id + db.session.add(external_knowledge_binding) + db.session.commit() + else: + data.pop("partial_member_list", None) + filtered_data = {k: v for k, v in data.items() if v is not None or k == "description"} + action = None + if dataset.indexing_technique != data["indexing_technique"]: + # if update indexing_technique + if data["indexing_technique"] == "economy": + action = "remove" + filtered_data["embedding_model"] = None + filtered_data["embedding_model_provider"] = None + filtered_data["collection_binding_id"] = None + elif data["indexing_technique"] == "high_quality": + action = "add" + # get embedding model setting + try: + model_manager = ModelManager() + embedding_model = model_manager.get_model_instance( + tenant_id=current_user.current_tenant_id, + provider=data["embedding_model_provider"], + model_type=ModelType.TEXT_EMBEDDING, + model=data["embedding_model"], + ) + filtered_data["embedding_model"] = embedding_model.model + filtered_data["embedding_model_provider"] = embedding_model.provider + dataset_collection_binding = DatasetCollectionBindingService.get_dataset_collection_binding( + embedding_model.provider, embedding_model.model + ) + filtered_data["collection_binding_id"] = dataset_collection_binding.id + except LLMBadRequestError: + raise ValueError( + "No Embedding Model 
available. Please configure a valid provider " + "in the Settings -> Model Provider." + ) + except ProviderTokenNotInitError as ex: + raise ValueError(ex.description) + else: + if ( + data["embedding_model_provider"] != dataset.embedding_model_provider + or data["embedding_model"] != dataset.embedding_model + ): + action = "update" + try: + model_manager = ModelManager() + embedding_model = model_manager.get_model_instance( + tenant_id=current_user.current_tenant_id, + provider=data["embedding_model_provider"], + model_type=ModelType.TEXT_EMBEDDING, + model=data["embedding_model"], + ) + filtered_data["embedding_model"] = embedding_model.model + filtered_data["embedding_model_provider"] = embedding_model.provider + dataset_collection_binding = DatasetCollectionBindingService.get_dataset_collection_binding( + embedding_model.provider, embedding_model.model + ) + filtered_data["collection_binding_id"] = dataset_collection_binding.id + except LLMBadRequestError: + raise ValueError( + "No Embedding Model available. Please configure a valid provider " + "in the Settings -> Model Provider." 
+ ) + except ProviderTokenNotInitError as ex: + raise ValueError(ex.description) - filtered_data["updated_by"] = user.id - filtered_data["updated_at"] = datetime.datetime.now() + filtered_data["updated_by"] = user.id + filtered_data["updated_at"] = datetime.datetime.now() - # update Retrieval model - filtered_data["retrieval_model"] = data["retrieval_model"] + # update Retrieval model + filtered_data["retrieval_model"] = data["retrieval_model"] - dataset.query.filter_by(id=dataset_id).update(filtered_data) + dataset.query.filter_by(id=dataset_id).update(filtered_data) - db.session.commit() - if action: - deal_dataset_vector_index_task.delay(dataset_id, action) + db.session.commit() + if action: + deal_dataset_vector_index_task.delay(dataset_id, action) return dataset @staticmethod diff --git a/api/services/entities/external_knowledge_entities/external_knowledge_entities.py b/api/services/entities/external_knowledge_entities/external_knowledge_entities.py new file mode 100644 index 0000000000000..4545f385eb989 --- /dev/null +++ b/api/services/entities/external_knowledge_entities/external_knowledge_entities.py @@ -0,0 +1,26 @@ +from typing import Literal, Optional, Union + +from pydantic import BaseModel + + +class AuthorizationConfig(BaseModel): + type: Literal[None, "basic", "bearer", "custom"] + api_key: Union[None, str] = None + header: Union[None, str] = None + + +class Authorization(BaseModel): + type: Literal["no-auth", "api-key"] + config: Optional[AuthorizationConfig] = None + + +class ProcessStatusSetting(BaseModel): + request_method: str + url: str + + +class ExternalKnowledgeApiSetting(BaseModel): + url: str + request_method: str + headers: Optional[dict] = None + params: Optional[dict] = None diff --git a/api/services/external_knowledge_service.py b/api/services/external_knowledge_service.py new file mode 100644 index 0000000000000..4efdf8d7dbe75 --- /dev/null +++ b/api/services/external_knowledge_service.py @@ -0,0 +1,274 @@ +import json +from copy 
import deepcopy +from datetime import datetime, timezone +from typing import Any, Optional, Union + +import httpx +import validators + +# from tasks.external_document_indexing_task import external_document_indexing_task +from core.helper import ssrf_proxy +from extensions.ext_database import db +from models.dataset import ( + Dataset, + ExternalKnowledgeApis, + ExternalKnowledgeBindings, +) +from services.entities.external_knowledge_entities.external_knowledge_entities import ( + Authorization, + ExternalKnowledgeApiSetting, +) +from services.errors.dataset import DatasetNameDuplicateError + + +class ExternalDatasetService: + @staticmethod + def get_external_knowledge_apis(page, per_page, tenant_id, search=None) -> tuple[list[ExternalKnowledgeApis], int]: + query = ExternalKnowledgeApis.query.filter(ExternalKnowledgeApis.tenant_id == tenant_id).order_by( + ExternalKnowledgeApis.created_at.desc() + ) + if search: + query = query.filter(ExternalKnowledgeApis.name.ilike(f"%{search}%")) + + external_knowledge_apis = query.paginate(page=page, per_page=per_page, max_per_page=100, error_out=False) + + return external_knowledge_apis.items, external_knowledge_apis.total + + @classmethod + def validate_api_list(cls, api_settings: dict): + if not api_settings: + raise ValueError("api list is empty") + if "endpoint" not in api_settings and not api_settings["endpoint"]: + raise ValueError("endpoint is required") + if "api_key" not in api_settings and not api_settings["api_key"]: + raise ValueError("api_key is required") + + @staticmethod + def create_external_knowledge_api(tenant_id: str, user_id: str, args: dict) -> ExternalKnowledgeApis: + ExternalDatasetService.check_endpoint_and_api_key(args.get("settings")) + external_knowledge_api = ExternalKnowledgeApis( + tenant_id=tenant_id, + created_by=user_id, + updated_by=user_id, + name=args.get("name"), + description=args.get("description", ""), + settings=json.dumps(args.get("settings"), ensure_ascii=False), + ) + + 
db.session.add(external_knowledge_api) + db.session.commit() + return external_knowledge_api + + @staticmethod + def check_endpoint_and_api_key(settings: dict): + if "endpoint" not in settings or not settings["endpoint"]: + raise ValueError("endpoint is required") + if "api_key" not in settings or not settings["api_key"]: + raise ValueError("api_key is required") + + endpoint = f"{settings['endpoint']}/retrieval" + api_key = settings["api_key"] + if not validators.url(endpoint): + raise ValueError(f"invalid endpoint: {endpoint}") + try: + response = httpx.post(endpoint, headers={"Authorization": f"Bearer {api_key}"}) + except Exception as e: + raise ValueError(f"failed to connect to the endpoint: {endpoint}") + if response.status_code == 502: + raise ValueError(f"Bad Gateway: failed to connect to the endpoint: {endpoint}") + if response.status_code == 404: + raise ValueError(f"Not Found: failed to connect to the endpoint: {endpoint}") + if response.status_code == 403: + raise ValueError(f"Forbidden: Authorization failed with api_key: {api_key}") + + @staticmethod + def get_external_knowledge_api(external_knowledge_api_id: str) -> ExternalKnowledgeApis: + return ExternalKnowledgeApis.query.filter_by(id=external_knowledge_api_id).first() + + @staticmethod + def update_external_knowledge_api(tenant_id, user_id, external_knowledge_api_id, args) -> ExternalKnowledgeApis: + external_knowledge_api = ExternalKnowledgeApis.query.filter_by( + id=external_knowledge_api_id, tenant_id=tenant_id + ).first() + if external_knowledge_api is None: + raise ValueError("api template not found") + + external_knowledge_api.name = args.get("name") + external_knowledge_api.description = args.get("description", "") + external_knowledge_api.settings = json.dumps(args.get("settings"), ensure_ascii=False) + external_knowledge_api.updated_by = user_id + external_knowledge_api.updated_at = datetime.now(timezone.utc).replace(tzinfo=None) + db.session.commit() + + return external_knowledge_api + + 
@staticmethod + def delete_external_knowledge_api(tenant_id: str, external_knowledge_api_id: str): + external_knowledge_api = ExternalKnowledgeApis.query.filter_by( + id=external_knowledge_api_id, tenant_id=tenant_id + ).first() + if external_knowledge_api is None: + raise ValueError("api template not found") + + db.session.delete(external_knowledge_api) + db.session.commit() + + @staticmethod + def external_knowledge_api_use_check(external_knowledge_api_id: str) -> tuple[bool, int]: + count = ExternalKnowledgeBindings.query.filter_by(external_knowledge_api_id=external_knowledge_api_id).count() + if count > 0: + return True, count + return False, 0 + + @staticmethod + def get_external_knowledge_binding_with_dataset_id(tenant_id: str, dataset_id: str) -> ExternalKnowledgeBindings: + external_knowledge_binding = ExternalKnowledgeBindings.query.filter_by( + dataset_id=dataset_id, tenant_id=tenant_id + ).first() + if not external_knowledge_binding: + raise ValueError("external knowledge binding not found") + return external_knowledge_binding + + @staticmethod + def document_create_args_validate(tenant_id: str, external_knowledge_api_id: str, process_parameter: dict): + external_knowledge_api = ExternalKnowledgeApis.query.filter_by( + id=external_knowledge_api_id, tenant_id=tenant_id + ).first() + if external_knowledge_api is None: + raise ValueError("api template not found") + settings = json.loads(external_knowledge_api.settings) + for setting in settings: + custom_parameters = setting.get("document_process_setting") + if custom_parameters: + for parameter in custom_parameters: + if parameter.get("required", False) and not process_parameter.get(parameter.get("name")): + raise ValueError(f'{parameter.get("name")} is required') + + @staticmethod + def process_external_api( + settings: ExternalKnowledgeApiSetting, files: Union[None, dict[str, Any]] + ) -> httpx.Response: + """ + do http request depending on api bundle + """ + + kwargs = { + "url": settings.url, + 
"headers": settings.headers, + "follow_redirects": True, + } + + response = getattr(ssrf_proxy, settings.request_method)(data=json.dumps(settings.params), files=files, **kwargs) + + return response + + @staticmethod + def assembling_headers(authorization: Authorization, headers: Optional[dict] = None) -> dict[str, Any]: + authorization = deepcopy(authorization) + if headers: + headers = deepcopy(headers) + else: + headers = {} + if authorization.type == "api-key": + if authorization.config is None: + raise ValueError("authorization config is required") + + if authorization.config.api_key is None: + raise ValueError("api_key is required") + + if not authorization.config.header: + authorization.config.header = "Authorization" + + if authorization.config.type == "bearer": + headers[authorization.config.header] = f"Bearer {authorization.config.api_key}" + elif authorization.config.type == "basic": + headers[authorization.config.header] = f"Basic {authorization.config.api_key}" + elif authorization.config.type == "custom": + headers[authorization.config.header] = authorization.config.api_key + + return headers + + @staticmethod + def get_external_knowledge_api_settings(settings: dict) -> ExternalKnowledgeApiSetting: + return ExternalKnowledgeApiSetting.parse_obj(settings) + + @staticmethod + def create_external_dataset(tenant_id: str, user_id: str, args: dict) -> Dataset: + # check if dataset name already exists + if Dataset.query.filter_by(name=args.get("name"), tenant_id=tenant_id).first(): + raise DatasetNameDuplicateError(f"Dataset with name {args.get('name')} already exists.") + external_knowledge_api = ExternalKnowledgeApis.query.filter_by( + id=args.get("external_knowledge_api_id"), tenant_id=tenant_id + ).first() + + if external_knowledge_api is None: + raise ValueError("api template not found") + + dataset = Dataset( + tenant_id=tenant_id, + name=args.get("name"), + description=args.get("description", ""), + provider="external", + 
retrieval_model=args.get("external_retrieval_model"), + created_by=user_id, + ) + + db.session.add(dataset) + db.session.flush() + + external_knowledge_binding = ExternalKnowledgeBindings( + tenant_id=tenant_id, + dataset_id=dataset.id, + external_knowledge_api_id=args.get("external_knowledge_api_id"), + external_knowledge_id=args.get("external_knowledge_id"), + created_by=user_id, + ) + db.session.add(external_knowledge_binding) + + db.session.commit() + + return dataset + + @staticmethod + def fetch_external_knowledge_retrieval( + tenant_id: str, dataset_id: str, query: str, external_retrieval_parameters: dict + ) -> list: + external_knowledge_binding = ExternalKnowledgeBindings.query.filter_by( + dataset_id=dataset_id, tenant_id=tenant_id + ).first() + if not external_knowledge_binding: + raise ValueError("external knowledge binding not found") + + external_knowledge_api = ExternalKnowledgeApis.query.filter_by( + id=external_knowledge_binding.external_knowledge_api_id + ).first() + if not external_knowledge_api: + raise ValueError("external api template not found") + + settings = json.loads(external_knowledge_api.settings) + headers = {"Content-Type": "application/json"} + if settings.get("api_key"): + headers["Authorization"] = f"Bearer {settings.get('api_key')}" + score_threshold_enabled = external_retrieval_parameters.get("score_threshold_enabled") or False + score_threshold = external_retrieval_parameters.get("score_threshold", 0.0) if score_threshold_enabled else 0.0 + request_params = { + "retrieval_setting": { + "top_k": external_retrieval_parameters.get("top_k"), + "score_threshold": score_threshold, + }, + "query": query, + "knowledge_id": external_knowledge_binding.external_knowledge_id, + } + + external_knowledge_api_setting = { + "url": f"{settings.get('endpoint')}/retrieval", + "request_method": "post", + "headers": headers, + "params": request_params, + } + response = ExternalDatasetService.process_external_api( + 
ExternalKnowledgeApiSetting(**external_knowledge_api_setting), None + ) + if response.status_code == 200: + return response.json().get("records", []) + return [] diff --git a/api/services/hit_testing_service.py b/api/services/hit_testing_service.py index 3dafafd5b46f6..7957b4dc82dfd 100644 --- a/api/services/hit_testing_service.py +++ b/api/services/hit_testing_service.py @@ -19,7 +19,15 @@ class HitTestingService: @classmethod - def retrieve(cls, dataset: Dataset, query: str, account: Account, retrieval_model: dict, limit: int = 10) -> dict: + def retrieve( + cls, + dataset: Dataset, + query: str, + account: Account, + retrieval_model: dict, + external_retrieval_model: dict, + limit: int = 10, + ) -> dict: if dataset.available_document_count == 0 or dataset.available_segment_count == 0: return { "query": { @@ -62,10 +70,44 @@ def retrieve(cls, dataset: Dataset, query: str, account: Account, retrieval_mode return cls.compact_retrieve_response(dataset, query, all_documents) + @classmethod + def external_retrieve( + cls, + dataset: Dataset, + query: str, + account: Account, + external_retrieval_model: dict, + ) -> dict: + if dataset.provider != "external": + return { + "query": {"content": query}, + "records": [], + } + + start = time.perf_counter() + + all_documents = RetrievalService.external_retrieve( + dataset_id=dataset.id, + query=cls.escape_query_for_search(query), + external_retrieval_model=external_retrieval_model, + ) + + end = time.perf_counter() + logging.debug(f"External knowledge hit testing retrieve in {end - start:0.4f} seconds") + + dataset_query = DatasetQuery( + dataset_id=dataset.id, content=query, source="hit_testing", created_by_role="account", created_by=account.id + ) + + db.session.add(dataset_query) + db.session.commit() + + return cls.compact_external_retrieve_response(dataset, query, all_documents) + @classmethod def compact_retrieve_response(cls, dataset: Dataset, query: str, documents: list[Document]): - i = 0 records = [] + for document 
in documents: index_node_id = document.metadata["doc_id"] @@ -81,7 +123,6 @@ def compact_retrieve_response(cls, dataset: Dataset, query: str, documents: list ) if not segment: - i += 1 continue record = { @@ -91,8 +132,6 @@ def compact_retrieve_response(cls, dataset: Dataset, query: str, documents: list records.append(record) - i += 1 - return { "query": { "content": query, @@ -100,6 +139,25 @@ def compact_retrieve_response(cls, dataset: Dataset, query: str, documents: list "records": records, } + @classmethod + def compact_external_retrieve_response(cls, dataset: Dataset, query: str, documents: list): + records = [] + if dataset.provider == "external": + for document in documents: + record = { + "content": document.get("content", None), + "title": document.get("title", None), + "score": document.get("score", None), + "metadata": document.get("metadata", None), + } + records.append(record) + return { + "query": { + "content": query, + }, + "records": records, + } + @classmethod def hit_testing_args_check(cls, args): query = args["query"] diff --git a/api/tasks/external_document_indexing_task.py b/api/tasks/external_document_indexing_task.py new file mode 100644 index 0000000000000..6fc719ae8d085 --- /dev/null +++ b/api/tasks/external_document_indexing_task.py @@ -0,0 +1,93 @@ +import json +import logging +import time + +import click +from celery import shared_task + +from core.indexing_runner import DocumentIsPausedException +from extensions.ext_database import db +from extensions.ext_storage import storage +from models.dataset import Dataset, ExternalKnowledgeApis +from models.model import UploadFile +from services.external_knowledge_service import ExternalDatasetService + + +@shared_task(queue="dataset") +def external_document_indexing_task( + dataset_id: str, external_knowledge_api_id: str, data_source: dict, process_parameter: dict +): + """ + Async process document + :param dataset_id: + :param external_knowledge_api_id: + :param data_source: + :param 
process_parameter: + Usage: external_document_indexing_task.delay(dataset_id, document_id) + """ + start_at = time.perf_counter() + + dataset = db.session.query(Dataset).filter(Dataset.id == dataset_id).first() + if not dataset: + logging.info( + click.style("Processed external dataset: {} failed, dataset not exit.".format(dataset_id), fg="red") + ) + return + + # get external api template + external_knowledge_api = ( + db.session.query(ExternalKnowledgeApis) + .filter( + ExternalKnowledgeApis.id == external_knowledge_api_id, ExternalKnowledgeApis.tenant_id == dataset.tenant_id + ) + .first() + ) + + if not external_knowledge_api: + logging.info( + click.style( + "Processed external dataset: {} failed, api template: {} not exit.".format( + dataset_id, external_knowledge_api_id + ), + fg="red", + ) + ) + return + files = {} + if data_source["type"] == "upload_file": + upload_file_list = data_source["info_list"]["file_info_list"]["file_ids"] + for file_id in upload_file_list: + file = ( + db.session.query(UploadFile) + .filter(UploadFile.tenant_id == dataset.tenant_id, UploadFile.id == file_id) + .first() + ) + if file: + files[file.id] = (file.name, storage.load_once(file.key), file.mime_type) + try: + settings = ExternalDatasetService.get_external_knowledge_api_settings( + json.loads(external_knowledge_api.settings) + ) + # assemble headers + headers = ExternalDatasetService.assembling_headers(settings.authorization, settings.headers) + + # do http request + response = ExternalDatasetService.process_external_api(settings, headers, process_parameter, files) + job_id = response.json().get("job_id") + if job_id: + # save job_id to dataset + dataset.job_id = job_id + db.session.commit() + + end_at = time.perf_counter() + logging.info( + click.style( + "Processed external dataset: {} successful, latency: {}".format(dataset.id, end_at - start_at), + fg="green", + ) + ) + except DocumentIsPausedException as ex: + logging.info(click.style(str(ex), fg="yellow")) + + except 
Exception: + pass diff --git a/web/app/(commonLayout)/datasets/(datasetDetailLayout)/[datasetId]/layout.tsx b/web/app/(commonLayout)/datasets/(datasetDetailLayout)/[datasetId]/layout.tsx index e691cc05f6326..a58027bcd12e0 100644 --- a/web/app/(commonLayout)/datasets/(datasetDetailLayout)/[datasetId]/layout.tsx +++ b/web/app/(commonLayout)/datasets/(datasetDetailLayout)/[datasetId]/layout.tsx @@ -1,6 +1,6 @@ 'use client' import type { FC, SVGProps } from 'react' -import React, { useEffect } from 'react' +import React, { useEffect, useMemo } from 'react' import { usePathname } from 'next/navigation' import useSWR from 'swr' import { useTranslation } from 'react-i18next' @@ -203,12 +203,23 @@ const DatasetDetailLayout: FC = (props) => { datasetId, }, apiParams => fetchDatasetRelatedApps(apiParams.datasetId)) - const navigation = [ - { name: t('common.datasetMenus.documents'), href: `/datasets/${datasetId}/documents`, icon: DocumentTextIcon, selectedIcon: DocumentTextSolidIcon }, - { name: t('common.datasetMenus.hitTesting'), href: `/datasets/${datasetId}/hitTesting`, icon: TargetIcon, selectedIcon: TargetSolidIcon }, - // { name: 'api & webhook', href: `/datasets/${datasetId}/api`, icon: CommandLineIcon, selectedIcon: CommandLineSolidIcon }, - { name: t('common.datasetMenus.settings'), href: `/datasets/${datasetId}/settings`, icon: Cog8ToothIcon, selectedIcon: Cog8ToothSolidIcon }, - ] + const navigation = useMemo(() => { + const baseNavigation = [ + { name: t('common.datasetMenus.hitTesting'), href: `/datasets/${datasetId}/hitTesting`, icon: TargetIcon, selectedIcon: TargetSolidIcon }, + // { name: 'api & webhook', href: `/datasets/${datasetId}/api`, icon: CommandLineIcon, selectedIcon: CommandLineSolidIcon }, + { name: t('common.datasetMenus.settings'), href: `/datasets/${datasetId}/settings`, icon: Cog8ToothIcon, selectedIcon: Cog8ToothSolidIcon }, + ] + + if (datasetRes?.provider !== 'external') { + baseNavigation.unshift({ + name: 
t('common.datasetMenus.documents'), + href: `/datasets/${datasetId}/documents`, + icon: DocumentTextIcon, + selectedIcon: DocumentTextSolidIcon, + }) + } + return baseNavigation + }, [datasetRes?.provider, datasetId, t]) useEffect(() => { if (datasetRes) @@ -233,6 +244,7 @@ const DatasetDetailLayout: FC = (props) => { icon={datasetRes?.icon || 'https://static.dify.ai/images/dataset-default-icon.png'} icon_background={datasetRes?.icon_background || '#F5F5F5'} desc={datasetRes?.description || '--'} + isExternal={datasetRes?.provider === 'external'} navigation={navigation} extraInfo={!isCurrentWorkspaceDatasetOperator ? mode => : undefined} iconType={datasetRes?.data_source_type === DataSourceType.NOTION ? 'notion' : 'dataset'} diff --git a/web/app/(commonLayout)/datasets/Container.tsx b/web/app/(commonLayout)/datasets/Container.tsx index f532ca416fc24..e350a85354373 100644 --- a/web/app/(commonLayout)/datasets/Container.tsx +++ b/web/app/(commonLayout)/datasets/Container.tsx @@ -8,6 +8,7 @@ import { useDebounceFn } from 'ahooks' import useSWR from 'swr' // Components +import ExternalAPIPanel from '../../components/datasets/external-api/external-api-panel' import Datasets from './Datasets' import DatasetFooter from './DatasetFooter' import ApiServer from './ApiServer' @@ -16,6 +17,8 @@ import TabSliderNew from '@/app/components/base/tab-slider-new' import SearchInput from '@/app/components/base/search-input' import TagManagementModal from '@/app/components/base/tag-management' import TagFilter from '@/app/components/base/tag-management/filter' +import Button from '@/app/components/base/button' +import { ApiConnectionMod } from '@/app/components/base/icons/src/vender/solid/development' // Services import { fetchDatasetApiBaseUrl } from '@/service/datasets' @@ -24,12 +27,14 @@ import { fetchDatasetApiBaseUrl } from '@/service/datasets' import { useTabSearchParams } from '@/hooks/use-tab-searchparams' import { useStore as useTagStore } from 
'@/app/components/base/tag-management/store' import { useAppContext } from '@/context/app-context' +import { useExternalApiPanel } from '@/context/external-api-panel-context' const Container = () => { const { t } = useTranslation() const router = useRouter() const { currentWorkspace } = useAppContext() const showTagManagementModal = useTagStore(s => s.showTagManagementModal) + const { showExternalApiPanel, setShowExternalApiPanel } = useExternalApiPanel() const options = useMemo(() => { return [ @@ -66,7 +71,7 @@ const Container = () => { useEffect(() => { if (currentWorkspace.role === 'normal') return router.replace('/apps') - }, [currentWorkspace]) + }, [currentWorkspace, router]) return (
@@ -80,11 +85,18 @@ const Container = () => {
+
+
)} {activeTab === 'api' && data && }
- {activeTab === 'dataset' && ( <> @@ -94,10 +106,10 @@ const Container = () => { )} )} - {activeTab === 'api' && data && } -
+ {showExternalApiPanel && setShowExternalApiPanel(false)} />} +
) } diff --git a/web/app/(commonLayout)/datasets/DatasetCard.tsx b/web/app/(commonLayout)/datasets/DatasetCard.tsx index f66c6bccf421f..e8ccddbcb78d3 100644 --- a/web/app/(commonLayout)/datasets/DatasetCard.tsx +++ b/web/app/(commonLayout)/datasets/DatasetCard.tsx @@ -18,6 +18,7 @@ import Divider from '@/app/components/base/divider' import RenameDatasetModal from '@/app/components/datasets/rename-modal' import type { Tag } from '@/app/components/base/tag-management/constant' import TagSelector from '@/app/components/base/tag-management/selector' +import CornerLabel from '@/app/components/base/corner-label' import { useAppContext } from '@/context/app-context' export type DatasetCardProps = { @@ -32,6 +33,7 @@ const DatasetCard = ({ const { t } = useTranslation() const { notify } = useContext(ToastContext) const { push } = useRouter() + const EXTERNAL_PROVIDER = 'external' as const const { isCurrentWorkspaceDatasetOperator } = useAppContext() const [tags, setTags] = useState(dataset.tags) @@ -39,6 +41,7 @@ const DatasetCard = ({ const [showRenameModal, setShowRenameModal] = useState(false) const [showConfirmDelete, setShowConfirmDelete] = useState(false) const [confirmMessage, setConfirmMessage] = useState('') + const isExternalProvider = (provider: string): boolean => provider === EXTERNAL_PROVIDER const detectIsUsedByApp = useCallback(async () => { try { const { is_using: isUsedByApp } = await checkIsUsedInApp(dataset.id) @@ -108,13 +111,16 @@ const DatasetCard = ({ return ( <>
{ e.preventDefault() - push(`/datasets/${dataset.id}/documents`) + isExternalProvider(dataset.provider) + ? push(`/datasets/${dataset.id}/hitTesting`) + : push(`/datasets/${dataset.id}/documents`) }} > + {isExternalProvider(dataset.provider) && }
- {dataset.document_count}{t('dataset.documentCount')} - · - {Math.round(dataset.word_count / 1000)}{t('dataset.wordCount')} - · - {dataset.app_count}{t('dataset.appCount')} + {dataset.provider === 'external' + ? <> + {dataset.app_count}{t('dataset.appCount')} + + : <> + {dataset.document_count}{t('dataset.documentCount')} + · + {Math.round(dataset.word_count / 1000)}{t('dataset.wordCount')} + · + {dataset.app_count}{t('dataset.appCount')} + + }
diff --git a/web/app/(commonLayout)/datasets/NewDatasetCard.tsx b/web/app/(commonLayout)/datasets/NewDatasetCard.tsx index f76efa57696d3..5dd244ad41006 100644 --- a/web/app/(commonLayout)/datasets/NewDatasetCard.tsx +++ b/web/app/(commonLayout)/datasets/NewDatasetCard.tsx @@ -4,21 +4,32 @@ import { forwardRef } from 'react' import { useTranslation } from 'react-i18next' import { RiAddLine, + RiArrowRightLine, } from '@remixicon/react' const CreateAppCard = forwardRef((_, ref) => { const { t } = useTranslation() return ( - -
- ) }) diff --git a/web/app/(commonLayout)/datasets/connect/page.tsx b/web/app/(commonLayout)/datasets/connect/page.tsx new file mode 100644 index 0000000000000..724c506a7f4eb --- /dev/null +++ b/web/app/(commonLayout)/datasets/connect/page.tsx @@ -0,0 +1,8 @@ +import React from 'react' +import ExternalKnowledgeBaseConnector from '@/app/components/datasets/external-knowledge-base/connector' + +const ExternalKnowledgeBaseCreation = () => { + return +} + +export default ExternalKnowledgeBaseCreation diff --git a/web/app/(commonLayout)/datasets/layout.tsx b/web/app/(commonLayout)/datasets/layout.tsx new file mode 100644 index 0000000000000..aecb537aa6d63 --- /dev/null +++ b/web/app/(commonLayout)/datasets/layout.tsx @@ -0,0 +1,14 @@ +'use client' + +import { ExternalApiPanelProvider } from '@/context/external-api-panel-context' +import { ExternalKnowledgeApiProvider } from '@/context/external-knowledge-api-context' + +export default function DatasetsLayout({ children }: { children: React.ReactNode }) { + return ( + + + {children} + + + ) +} diff --git a/web/app/(commonLayout)/datasets/page.tsx b/web/app/(commonLayout)/datasets/page.tsx index 5aa11aa275070..096a1b8979210 100644 --- a/web/app/(commonLayout)/datasets/page.tsx +++ b/web/app/(commonLayout)/datasets/page.tsx @@ -1,9 +1,7 @@ import Container from './Container' const AppList = async () => { - return ( - - ) + return } export const metadata = { diff --git a/web/app/(commonLayout)/datasets/store.ts b/web/app/(commonLayout)/datasets/store.ts new file mode 100644 index 0000000000000..40b7b15594809 --- /dev/null +++ b/web/app/(commonLayout)/datasets/store.ts @@ -0,0 +1,11 @@ +import { create } from 'zustand' + +type DatasetStore = { + showExternalApiPanel: boolean + setShowExternalApiPanel: (show: boolean) => void +} + +export const useDatasetStore = create(set => ({ + showExternalApiPanel: false, + setShowExternalApiPanel: show => set({ showExternalApiPanel: show }), +})) diff --git 
a/web/app/components/app-sidebar/basic.tsx b/web/app/components/app-sidebar/basic.tsx index c939cb7bb3a5a..51fc10721eb7a 100644 --- a/web/app/components/app-sidebar/basic.tsx +++ b/web/app/components/app-sidebar/basic.tsx @@ -1,4 +1,5 @@ import React from 'react' +import { useTranslation } from 'react-i18next' import AppIcon from '../base/app-icon' import Tooltip from '@/app/components/base/tooltip' @@ -6,6 +7,7 @@ export type IAppBasicProps = { iconType?: 'app' | 'api' | 'dataset' | 'webapp' | 'notion' icon?: string icon_background?: string | null + isExternal?: boolean name: string type: string | React.ReactNode hoverTip?: string @@ -52,7 +54,9 @@ const ICON_MAP = { notion: , } -export default function AppBasic({ icon, icon_background, name, type, hoverTip, textStyle, mode = 'expand', iconType = 'app' }: IAppBasicProps) { +export default function AppBasic({ icon, icon_background, name, isExternal, type, hoverTip, textStyle, mode = 'expand', iconType = 'app' }: IAppBasicProps) { + const { t } = useTranslation() + return (
{icon && icon_background && iconType === 'app' && ( @@ -83,6 +87,7 @@ export default function AppBasic({ icon, icon_background, name, type, hoverTip, }
{type}
+
{isExternal ? t('dataset.externalTag') : ''}
}
) diff --git a/web/app/components/app-sidebar/index.tsx b/web/app/components/app-sidebar/index.tsx index 5d5d407dc0f90..5ee063ad646f3 100644 --- a/web/app/components/app-sidebar/index.tsx +++ b/web/app/components/app-sidebar/index.tsx @@ -15,6 +15,7 @@ export type IAppDetailNavProps = { iconType?: 'app' | 'dataset' | 'notion' title: string desc: string + isExternal?: boolean icon: string icon_background: string navigation: Array<{ @@ -26,7 +27,7 @@ export type IAppDetailNavProps = { extraInfo?: (modeState: string) => React.ReactNode } -const AppDetailNav = ({ title, desc, icon, icon_background, navigation, extraInfo, iconType = 'app' }: IAppDetailNavProps) => { +const AppDetailNav = ({ title, desc, isExternal, icon, icon_background, navigation, extraInfo, iconType = 'app' }: IAppDetailNavProps) => { const { appSidebarExpand, setAppSiderbarExpand } = useAppStore(useShallow(state => ({ appSidebarExpand: state.appSidebarExpand, setAppSiderbarExpand: state.setAppSiderbarExpand, @@ -70,6 +71,7 @@ const AppDetailNav = ({ title, desc, icon, icon_background, navigation, extraInf icon_background={icon_background} name={title} type={desc} + isExternal={isExternal} /> )} diff --git a/web/app/components/app/configuration/dataset-config/card-item/item.tsx b/web/app/components/app/configuration/dataset-config/card-item/item.tsx index 184a598e1081c..9c5e6fa785ee4 100644 --- a/web/app/components/app/configuration/dataset-config/card-item/item.tsx +++ b/web/app/components/app/configuration/dataset-config/card-item/item.tsx @@ -5,6 +5,7 @@ import { RiDeleteBinLine, RiEditLine, } from '@remixicon/react' +import { useTranslation } from 'react-i18next' import SettingsModal from '../settings-modal' import type { DataSet } from '@/models/datasets' import { DataSourceType } from '@/models/datasets' @@ -33,6 +34,7 @@ const Item: FC = ({ const isMobile = media === MediaType.mobile const [showSettingsModal, setShowSettingsModal] = useState(false) const { formatIndexingTechniqueAndMethod } = 
useKnowledge() + const { t } = useTranslation() const handleSave = (newDataset: DataSet) => { onSave(newDataset) @@ -65,9 +67,11 @@ const Item: FC = ({
{config.name}
- + {config.provider === 'external' + ? + : }
diff --git a/web/app/components/app/configuration/dataset-config/params-config/config-content.tsx b/web/app/components/app/configuration/dataset-config/params-config/config-content.tsx index 91cae54bb8e7e..f99496b14fa8f 100644 --- a/web/app/components/app/configuration/dataset-config/params-config/config-content.tsx +++ b/web/app/components/app/configuration/dataset-config/params-config/config-content.tsx @@ -174,6 +174,20 @@ const ConfigContent: FC = ({
) } + { + selectedDatasetsMode.mixtureInternalAndExternal && ( +
+ {t('dataset.mixtureInternalAndExternalTip')} +
+ ) + } + { + selectedDatasetsMode.allExternal && ( +
+ {t('dataset.allExternalTip')} +
+ ) + } { selectedDatasetsMode.mixtureHighQualityAndEconomic && ( @@ -229,15 +243,15 @@ const ConfigContent: FC = ({ /> ) } -
{t('common.modelProvider.rerankModel.key')}
+
{t('common.modelProvider.rerankModel.key')}
{t('common.modelProvider.rerankModel.tip')} } - popupClassName='ml-0.5' - triggerClassName='ml-0.5 w-3.5 h-3.5' + popupClassName='ml-1' + triggerClassName='ml-1 w-4 h-4' />
diff --git a/web/app/components/app/configuration/dataset-config/params-config/index.tsx b/web/app/components/app/configuration/dataset-config/params-config/index.tsx index 656cbfea6564f..2d3df0b03990a 100644 --- a/web/app/components/app/configuration/dataset-config/params-config/index.tsx +++ b/web/app/components/app/configuration/dataset-config/params-config/index.tsx @@ -39,13 +39,26 @@ const ParamsConfig = ({ useEffect(() => { const { allEconomic, + allHighQuality, + allHighQualityFullTextSearch, + allHighQualityVectorSearch, + allExternal, + mixtureHighQualityAndEconomic, + inconsistentEmbeddingModel, + mixtureInternalAndExternal, } = getSelectedDatasetsMode(selectedDatasets) const { datasets, retrieval_model, score_threshold_enabled, ...restConfigs } = datasetConfigs let rerankEnable = restConfigs.reranking_enable - if (allEconomic && !restConfigs.reranking_model?.reranking_provider_name && rerankEnable === undefined) + if ((allEconomic && !restConfigs.reranking_model?.reranking_provider_name && rerankEnable === undefined) || allExternal) rerankEnable = false + if (allEconomic || allHighQuality || allHighQualityFullTextSearch || allHighQualityVectorSearch || (allExternal && selectedDatasets.length === 1)) + setRerankSettingModalOpen(false) + + if (mixtureHighQualityAndEconomic || inconsistentEmbeddingModel || mixtureInternalAndExternal || (allExternal && selectedDatasets.length > 1)) + setRerankSettingModalOpen(true) + setTempDataSetConfigs({ ...getMultipleRetrievalConfig({ top_k: restConfigs.top_k, diff --git a/web/app/components/app/configuration/dataset-config/select-dataset/index.tsx b/web/app/components/app/configuration/dataset-config/select-dataset/index.tsx index 4493755ba0138..0d94e599b4f2c 100644 --- a/web/app/components/app/configuration/dataset-config/select-dataset/index.tsx +++ b/web/app/components/app/configuration/dataset-config/select-dataset/index.tsx @@ -47,7 +47,7 @@ const SelectDataSet: FC = ({ const { data, has_more } = await 
fetchDatasets({ url: '/datasets', params: { page } }) setPage(getPage() + 1) setIsNoMore(!has_more) - const newList = [...(datasets || []), ...data.filter(item => item.indexing_technique)] + const newList = [...(datasets || []), ...data.filter(item => item.indexing_technique || item.provider === 'external')] setDataSets(newList) setLoaded(true) if (!selected.find(item => !item.name)) @@ -145,6 +145,11 @@ const SelectDataSet: FC = ({ /> ) } + { + item.provider === 'external' && ( + + ) + }
))} diff --git a/web/app/components/app/configuration/dataset-config/settings-modal/index.tsx b/web/app/components/app/configuration/dataset-config/settings-modal/index.tsx index e538c347d95c9..7b3b4abc0fb1e 100644 --- a/web/app/components/app/configuration/dataset-config/settings-modal/index.tsx +++ b/web/app/components/app/configuration/dataset-config/settings-modal/index.tsx @@ -5,8 +5,10 @@ import { useTranslation } from 'react-i18next' import { isEqual } from 'lodash-es' import { RiCloseLine } from '@remixicon/react' import { BookOpenIcon } from '@heroicons/react/24/outline' +import { ApiConnectionMod } from '@/app/components/base/icons/src/vender/solid/development' import cn from '@/utils/classnames' import IndexMethodRadio from '@/app/components/datasets/settings/index-method-radio' +import Divider from '@/app/components/base/divider' import Button from '@/app/components/base/button' import type { DataSet } from '@/models/datasets' import { useToastContext } from '@/app/components/base/toast' @@ -14,6 +16,7 @@ import { updateDatasetSetting } from '@/service/datasets' import { useAppContext } from '@/context/app-context' import { useModalContext } from '@/context/modal-context' import type { RetrievalConfig } from '@/types/app' +import RetrievalSettings from '@/app/components/datasets/external-knowledge-base/create/RetrievalSettings' import RetrievalMethodConfig from '@/app/components/datasets/common/retrieval-method-config' import EconomicalRetrievalMethodConfig from '@/app/components/datasets/common/economical-retrieval-method-config' import { ensureRerankModelSelected, isReRankModelSelected } from '@/app/components/datasets/common/check-rerank-model' @@ -56,6 +59,9 @@ const SettingsModal: FC = ({ const { t } = useTranslation() const { notify } = useToastContext() const ref = useRef(null) + const [topK, setTopK] = useState(currentDataset?.external_retrieval_model.top_k ?? 
2) + const [scoreThreshold, setScoreThreshold] = useState(currentDataset?.external_retrieval_model.score_threshold ?? 0.5) + const [scoreThresholdEnabled, setScoreThresholdEnabled] = useState(currentDataset?.external_retrieval_model.score_threshold_enabled ?? false) const { setShowAccountSettingModal } = useModalContext() const [loading, setLoading] = useState(false) @@ -73,6 +79,15 @@ const SettingsModal: FC = ({ const [isHideChangedTip, setIsHideChangedTip] = useState(false) const isRetrievalChanged = !isEqual(retrievalConfig, localeCurrentDataset?.retrieval_model_dict) || indexMethod !== localeCurrentDataset?.indexing_technique + const handleSettingsChange = (data: { top_k?: number; score_threshold?: number; score_threshold_enabled?: boolean }) => { + if (data.top_k !== undefined) + setTopK(data.top_k) + if (data.score_threshold !== undefined) + setScoreThreshold(data.score_threshold) + if (data.score_threshold_enabled !== undefined) + setScoreThresholdEnabled(data.score_threshold_enabled) + } + const handleSave = async () => { if (loading) return @@ -107,10 +122,17 @@ const SettingsModal: FC = ({ description, permission, indexing_technique: indexMethod, + external_retrieval_model: { + top_k: topK, + score_threshold: scoreThreshold, + score_threshold_enabled: scoreThresholdEnabled, + }, retrieval_model: { ...postRetrievalConfig, score_threshold: postRetrievalConfig.score_threshold_enabled ? postRetrievalConfig.score_threshold : 0, }, + external_knowledge_id: currentDataset!.external_knowledge_info.external_knowledge_id, + external_knowledge_api_id: currentDataset!.external_knowledge_info.external_knowledge_api_id, embedding_model: localeCurrentDataset.embedding_model, embedding_model_provider: localeCurrentDataset.embedding_model_provider, }, @@ -178,7 +200,7 @@ const SettingsModal: FC = ({ }}>
- {t('datasetSettings.form.name')} +
{t('datasetSettings.form.name')}
= ({
- {t('datasetSettings.form.desc')} +
{t('datasetSettings.form.desc')}