From 06008553bdae0d2c0768d41a1d1878523e5bd810 Mon Sep 17 00:00:00 2001 From: Angelo Paparazzi Date: Thu, 20 May 2021 12:47:08 -0400 Subject: [PATCH] feat(stt-tts): generation release changes --- ibm_watson/speech_to_text_v1.py | 407 ++++-- ibm_watson/speech_to_text_v1_adapter.py | 403 +++--- ibm_watson/text_to_speech_v1.py | 1523 +++++++++++++++++++- resources/tts_audio.wav | Bin 0 -> 75726 bytes test/integration/test_text_to_speech_v1.py | 29 + test/unit/test_speech_to_text_v1.py | 383 ++--- test/unit/test_text_to_speech_v1.py | 1123 +++++++++++++-- 7 files changed, 3231 insertions(+), 637 deletions(-) create mode 100644 resources/tts_audio.wav diff --git a/ibm_watson/speech_to_text_v1.py b/ibm_watson/speech_to_text_v1.py index 97716c530..b29557c08 100644 --- a/ibm_watson/speech_to_text_v1.py +++ b/ibm_watson/speech_to_text_v1.py @@ -1,6 +1,6 @@ # coding: utf-8 -# (C) Copyright IBM Corp. 2015, 2020. +# (C) Copyright IBM Corp. 2015, 2021. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,14 +14,19 @@ # See the License for the specific language governing permissions and # limitations under the License. -# IBM OpenAPI SDK Code Generator Version: 99-SNAPSHOT-a45d89ef-20201209-192237 +# IBM OpenAPI SDK Code Generator Version: 99-SNAPSHOT-902c9336-20210507-162723 """ The IBM Watson™ Speech to Text service provides APIs that use IBM's speech-recognition capabilities to produce transcripts of spoken audio. The service can transcribe speech from various languages and audio formats. In addition to basic transcription, the service can produce detailed information about many different aspects -of the audio. For most languages, the service supports two sampling rates, broadband and -narrowband. It returns all JSON response content in the UTF-8 character set. +of the audio. It returns all JSON response content in the UTF-8 character set. 
+The service supports two types of models: previous-generation models that include the +terms `Broadband` and `Narrowband` in their names, and beta next-generation models that +include the terms `Multimedia` and `Telephony` in their names. Broadband and multimedia +models have minimum sampling rates of 16 kHz. Narrowband and telephony models have minimum +sampling rates of 8 kHz. The beta next-generation models currently support fewer languages +and features, but they offer high throughput and greater transcription accuracy. For speech recognition, the service supports synchronous and asynchronous HTTP Representational State Transfer (REST) interfaces. It also supports a WebSocket interface that provides a full-duplex, low-latency communication channel: Clients send requests and @@ -32,8 +37,9 @@ language model customization, the service also supports grammars. A grammar is a formal language specification that lets you restrict the phrases that the service can recognize. Language model customization and acoustic model customization are generally available for -production use with all language models that are generally available. Grammars are beta -functionality for all language models that support language model customization. +production use with all previous-generation models that are generally available. Grammars +are beta functionality for all previous-generation models that support language model +customization. Next-generation models do not support customization at this time. """ from enum import Enum @@ -89,8 +95,8 @@ def list_models(self, **kwargs) -> DetailedResponse: information includes the name of the model and its minimum sampling rate in Hertz, among other things. The ordering of the list of models can change from call to call; do not rely on an alphabetized or static list of models. - **See also:** [Languages and - models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models#models). 
+ **See also:** [Listing + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-list). :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. @@ -120,11 +126,12 @@ def get_model(self, model_id: str, **kwargs) -> DetailedResponse: Gets information for a single specified language model that is available for use with the service. The information includes the name of the model and its minimum sampling rate in Hertz, among other things. - **See also:** [Languages and - models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models#models). + **See also:** [Listing + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-list). :param str model_id: The identifier of the model in the form of its name - from the output of the **Get a model** method. + from the output of the **Get a model** method. (**Note:** The model + `ar-AR_BroadbandModel` is deprecated; use `ar-MS_BroadbandModel` instead.). :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse with `dict` result representing a `SpeechModel` object @@ -182,6 +189,7 @@ def recognize(self, split_transcript_at_phrase_end: bool = None, speech_detector_sensitivity: float = None, background_audio_suppression: float = None, + low_latency: bool = None, **kwargs) -> DetailedResponse: """ Recognize audio. @@ -240,8 +248,33 @@ def recognize(self, required rate, the service down-samples the audio to the appropriate rate. If the sampling rate of the audio is lower than the minimum required rate, the request fails. - **See also:** [Audio - formats](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-audio-formats#audio-formats). + **See also:** [Supported audio + formats](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-audio-formats). 
+ ### Next-generation models + **Note:** The next-generation language models are beta functionality. They + support a limited number of languages and features at this time. The supported + languages, models, and features will increase with future releases. + The service supports next-generation `Multimedia` (16 kHz) and `Telephony` (8 kHz) + models for many languages. Next-generation models have higher throughput than the + service's previous generation of `Broadband` and `Narrowband` models. When you use + next-generation models, the service can return transcriptions more quickly and + also provide noticeably better transcription accuracy. + You specify a next-generation model by using the `model` query parameter, as you + do a previous-generation model. Next-generation models support the same request + headers as previous-generation models, but they support only the following + additional query parameters: + * `background_audio_suppression` + * `inactivity_timeout` + * `profanity_filter` + * `redaction` + * `smart_formatting` + * `speaker_labels` + * `speech_detector_sensitivity` + * `timestamps` + Many next-generation models also support the beta `low_latency` parameter, which + is not available with previous-generation models. + **See also:** [Next-generation languages and + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-ng). ### Multipart speech recognition **Note:** The Watson SDKs do not support multipart speech recognition. The HTTP `POST` method of the service also supports multipart speech recognition. @@ -261,15 +294,19 @@ def recognize(self, For more information about specifying an audio format, see **Audio formats (content types)** in the method description. :param str model: (optional) The identifier of the model that is to be used - for the recognition request. See [Languages and - models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models#models). + for the recognition request. 
(**Note:** The model `ar-AR_BroadbandModel` is + deprecated; use `ar-MS_BroadbandModel` instead.) See [Languages and + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models) + and [Next-generation languages and + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-ng). :param str language_customization_id: (optional) The customization ID (GUID) of a custom language model that is to be used with the recognition request. The base model of the specified custom language model must match the model specified with the `model` parameter. You must make the request with credentials for the instance of the service that owns the custom - model. By default, no custom language model is used. See [Custom - models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-input#custom-input). + model. By default, no custom language model is used. See [Using a custom + language model for speech + recognition](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-languageUse). **Note:** Use this parameter instead of the deprecated `customization_id` parameter. :param str acoustic_customization_id: (optional) The customization ID @@ -277,15 +314,17 @@ def recognize(self, request. The base model of the specified custom acoustic model must match the model specified with the `model` parameter. You must make the request with credentials for the instance of the service that owns the custom - model. By default, no custom acoustic model is used. See [Custom - models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-input#custom-input). + model. By default, no custom acoustic model is used. See [Using a custom + acoustic model for speech + recognition](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-acousticUse). :param str base_model_version: (optional) The version of the specified base model that is to be used with the recognition request. 
Multiple versions of a base model can exist when a model is updated for internal improvements. The parameter is intended primarily for use with custom models that have been upgraded for a new base model. The default value depends on whether - the parameter is used with or without a custom model. See [Base model - version](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-input#version). + the parameter is used with or without a custom model. See [Making speech + recognition requests with upgraded custom + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-custom-upgrade-use#custom-upgrade-use-recognition). :param float customization_weight: (optional) If you specify the customization ID (GUID) of a custom language model with the recognition request, the customization weight tells the service how much weight to give @@ -300,8 +339,8 @@ def recognize(self, Use caution when setting the weight: a higher value can improve the accuracy of phrases from the custom model's domain, but it can negatively affect performance on non-domain phrases. - See [Custom - models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-input#custom-input). + See [Using customization + weight](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-languageUse#weight). :param int inactivity_timeout: (optional) The time in seconds after which, if only silence (no speech) is detected in streaming audio, the connection is closed with a 400 error. The parameter is useful for stopping audio @@ -319,39 +358,39 @@ def recognize(self, effective length for double-byte languages might be shorter. Keywords are case-insensitive. See [Keyword - spotting](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#keyword_spotting). + spotting](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-spotting#keyword-spotting). :param float keywords_threshold: (optional) A confidence value that is the lower bound for spotting a keyword. 
A word is considered to match a keyword if its confidence is greater than or equal to the threshold. Specify a probability between 0.0 and 1.0. If you specify a threshold, you must also specify one or more keywords. The service performs no keyword spotting if you omit either parameter. See [Keyword - spotting](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#keyword_spotting). + spotting](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-spotting#keyword-spotting). :param int max_alternatives: (optional) The maximum number of alternative transcripts that the service is to return. By default, the service returns a single transcript. If you specify a value of `0`, the service uses the default value, `1`. See [Maximum - alternatives](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#max_alternatives). + alternatives](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metadata#max-alternatives). :param float word_alternatives_threshold: (optional) A confidence value that is the lower bound for identifying a hypothesis as a possible word alternative (also known as "Confusion Networks"). An alternative word is considered if its confidence is greater than or equal to the threshold. Specify a probability between 0.0 and 1.0. By default, the service computes no alternative words. See [Word - alternatives](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#word_alternatives). + alternatives](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-spotting#word-alternatives). :param bool word_confidence: (optional) If `true`, the service returns a confidence measure in the range of 0.0 to 1.0 for each word. By default, the service returns no word confidence scores. See [Word - confidence](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#word_confidence). + confidence](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metadata#word-confidence). 
:param bool timestamps: (optional) If `true`, the service returns time alignment for each word. By default, no timestamps are returned. See [Word - timestamps](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#word_timestamps). + timestamps](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metadata#word-timestamps). :param bool profanity_filter: (optional) If `true`, the service filters profanity from all output except for keyword results by replacing inappropriate words with a series of asterisks. Set the parameter to - `false` to return results with no censoring. Applies to US English - transcription only. See [Profanity - filtering](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#profanity_filter). + `false` to return results with no censoring. Applies to US English and + Japanese transcription only. See [Profanity + filtering](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-formatting#profanity-filtering). :param bool smart_formatting: (optional) If `true`, the service converts dates, times, series of digits and numbers, phone numbers, currency values, and internet addresses into more readable, conventional representations in @@ -360,17 +399,20 @@ def recognize(self, the service performs no smart formatting. **Note:** Applies to US English, Japanese, and Spanish transcription only. See [Smart - formatting](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#smart_formatting). + formatting](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-formatting#smart-formatting). :param bool speaker_labels: (optional) If `true`, the response includes labels that identify which words were spoken by which participants in a multi-person exchange. By default, the service returns no speaker labels. Setting `speaker_labels` to `true` forces the `timestamps` parameter to be `true`, regardless of whether you specify `false` for the parameter. 
- **Note:** Applies to US English, Australian English, German, Japanese, - Korean, and Spanish (both broadband and narrowband models) and UK English - (narrowband model) transcription only. - See [Speaker - labels](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#speaker_labels). + * For previous-generation models, can be used for US English, Australian + English, German, Japanese, Korean, and Spanish (both broadband and + narrowband models) and UK English (narrowband model) transcription only. + * For next-generation models, can be used for English (Australian, UK, and + US), German, and Spanish transcription only. + Restrictions and limitations apply to the use of speaker labels for both + types of models. See [Speaker + labels](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-speaker-labels). :param str customization_id: (optional) **Deprecated.** Use the `language_customization_id` parameter to specify the customization ID (GUID) of a custom language model that is to be used with the recognition @@ -381,7 +423,8 @@ def recognize(self, custom language model for which the grammar is defined. The service recognizes only strings that are recognized by the specified grammar; it does not recognize other custom words from the model's words resource. See - [Grammars](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-input#grammars-input). + [Using a grammar for speech + recognition](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-grammarUse). :param bool redaction: (optional) If `true`, the service redacts, or masks, numeric data from final transcripts. The feature redacts any number that has three or more consecutive digits by replacing each digit with an `X` @@ -395,13 +438,13 @@ def recognize(self, be `1`). **Note:** Applies to US English, Japanese, and Korean transcription only. See [Numeric - redaction](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#redaction). 
+ redaction](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-formatting#numeric-redaction). :param bool audio_metrics: (optional) If `true`, requests detailed information about the signal characteristics of the input audio. The service returns audio metrics with the final transcription results. By default, the service returns no audio metrics. See [Audio - metrics](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metrics#audio_metrics). + metrics](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metrics#audio-metrics). :param float end_of_phrase_silence_time: (optional) If `true`, specifies the duration of the pause interval at which the service splits a transcript into multiple final results. If the service detects pauses or extended @@ -416,7 +459,7 @@ def recognize(self, The default pause interval for most languages is 0.8 seconds; the default for Chinese is 0.6 seconds. See [End of phrase silence - time](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#silence_time). + time](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-parsing#silence-time). :param bool split_transcript_at_phrase_end: (optional) If `true`, directs the service to split the transcript into multiple final results based on semantic features of the input, for example, at the conclusion of @@ -426,7 +469,7 @@ def recognize(self, where the service splits a transcript. By default, the service splits transcripts based solely on the pause interval. See [Split transcript at phrase - end](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#split_transcript). + end](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-parsing#split-transcript). :param float speech_detector_sensitivity: (optional) The sensitivity of speech activity detection that the service is to perform. 
Use the parameter to suppress word insertions from music, coughing, and other non-speech @@ -438,8 +481,8 @@ def recognize(self, * 0.5 (the default) provides a reasonable compromise for the level of sensitivity. * 1.0 suppresses no audio (speech detection sensitivity is disabled). - The values increase on a monotonic curve. See [Speech Activity - Detection](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-input#detection). + The values increase on a monotonic curve. See [Speech detector + sensitivity](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-detection#detection-parameters-sensitivity). :param float background_audio_suppression: (optional) The level to which the service is to suppress background audio based on its volume to prevent it from being transcribed as speech. Use the parameter to suppress side @@ -449,8 +492,24 @@ def recognize(self, is disabled). * 0.5 provides a reasonable level of audio suppression for general usage. * 1.0 suppresses all audio (no audio is transcribed). - The values increase on a monotonic curve. See [Speech Activity - Detection](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-input#detection). + The values increase on a monotonic curve. See [Background audio + suppression](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-detection#detection-parameters-suppression). + :param bool low_latency: (optional) If `true` for next-generation + `Multimedia` and `Telephony` models that support low latency, directs the + service to produce results even more quickly than it usually does. + Next-generation models produce transcription results faster than + previous-generation models. The `low_latency` parameter causes the models + to produce results even more quickly, though the results might be less + accurate when the parameter is used. + **Note:** The parameter is beta functionality. It is not available for + previous-generation `Broadband` and `Narrowband` models. 
It is available + only for some next-generation models. + * For a list of next-generation models that support low latency, see + [Supported language + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-ng#models-ng-supported) + for next-generation models. + * For more information about the `low_latency` parameter, see [Low + latency](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-interim#low-latency). :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse with `dict` result representing a `SpeechRecognitionResults` object @@ -487,7 +546,8 @@ def recognize(self, 'end_of_phrase_silence_time': end_of_phrase_silence_time, 'split_transcript_at_phrase_end': split_transcript_at_phrase_end, 'speech_detector_sensitivity': speech_detector_sensitivity, - 'background_audio_suppression': background_audio_suppression + 'background_audio_suppression': background_audio_suppression, + 'low_latency': low_latency } data = audio @@ -659,6 +719,7 @@ def create_job(self, split_transcript_at_phrase_end: bool = None, speech_detector_sensitivity: float = None, background_audio_suppression: float = None, + low_latency: bool = None, **kwargs) -> DetailedResponse: """ Create a job. @@ -743,16 +804,44 @@ def create_job(self, required rate, the service down-samples the audio to the appropriate rate. If the sampling rate of the audio is lower than the minimum required rate, the request fails. - **See also:** [Audio - formats](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-audio-formats#audio-formats). + **See also:** [Supported audio + formats](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-audio-formats). + ### Next-generation models + **Note:** The next-generation language models are beta functionality. They + support a limited number of languages and features at this time. 
The supported + languages, models, and features will increase with future releases. + The service supports next-generation `Multimedia` (16 kHz) and `Telephony` (8 kHz) + models for many languages. Next-generation models have higher throughput than the + service's previous generation of `Broadband` and `Narrowband` models. When you use + next-generation models, the service can return transcriptions more quickly and + also provide noticeably better transcription accuracy. + You specify a next-generation model by using the `model` query parameter, as you + do a previous-generation model. Next-generation models support the same request + headers as previous-generation models, but they support only the following + additional query parameters: + * `background_audio_suppression` + * `inactivity_timeout` + * `profanity_filter` + * `redaction` + * `smart_formatting` + * `speaker_labels` + * `speech_detector_sensitivity` + * `timestamps` + Many next-generation models also support the beta `low_latency` parameter, which + is not available with previous-generation models. + **See also:** [Next-generation languages and + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-ng). :param BinaryIO audio: The audio to transcribe. :param str content_type: (optional) The format (MIME type) of the audio. For more information about specifying an audio format, see **Audio formats (content types)** in the method description. :param str model: (optional) The identifier of the model that is to be used - for the recognition request. See [Languages and - models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models#models). + for the recognition request. (**Note:** The model `ar-AR_BroadbandModel` is + deprecated; use `ar-MS_BroadbandModel` instead.) 
See [Languages and + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models) + and [Next-generation languages and + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-ng). :param str callback_url: (optional) A URL to which callback notifications are to be sent. The URL must already be successfully allowlisted by using the **Register a callback** method. You can include the same callback URL @@ -794,8 +883,9 @@ def create_job(self, request. The base model of the specified custom language model must match the model specified with the `model` parameter. You must make the request with credentials for the instance of the service that owns the custom - model. By default, no custom language model is used. See [Custom - models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-input#custom-input). + model. By default, no custom language model is used. See [Using a custom + language model for speech + recognition](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-languageUse). **Note:** Use this parameter instead of the deprecated `customization_id` parameter. :param str acoustic_customization_id: (optional) The customization ID @@ -803,15 +893,17 @@ def create_job(self, request. The base model of the specified custom acoustic model must match the model specified with the `model` parameter. You must make the request with credentials for the instance of the service that owns the custom - model. By default, no custom acoustic model is used. See [Custom - models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-input#custom-input). + model. By default, no custom acoustic model is used. See [Using a custom + acoustic model for speech + recognition](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-acousticUse). :param str base_model_version: (optional) The version of the specified base model that is to be used with the recognition request. 
Multiple versions of a base model can exist when a model is updated for internal improvements. The parameter is intended primarily for use with custom models that have been upgraded for a new base model. The default value depends on whether - the parameter is used with or without a custom model. See [Base model - version](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-input#version). + the parameter is used with or without a custom model. See [Making speech + recognition requests with upgraded custom + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-custom-upgrade-use#custom-upgrade-use-recognition). :param float customization_weight: (optional) If you specify the customization ID (GUID) of a custom language model with the recognition request, the customization weight tells the service how much weight to give @@ -826,8 +918,8 @@ def create_job(self, Use caution when setting the weight: a higher value can improve the accuracy of phrases from the custom model's domain, but it can negatively affect performance on non-domain phrases. - See [Custom - models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-input#custom-input). + See [Using customization + weight](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-languageUse#weight). :param int inactivity_timeout: (optional) The time in seconds after which, if only silence (no speech) is detected in streaming audio, the connection is closed with a 400 error. The parameter is useful for stopping audio @@ -845,39 +937,39 @@ def create_job(self, effective length for double-byte languages might be shorter. Keywords are case-insensitive. See [Keyword - spotting](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#keyword_spotting). + spotting](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-spotting#keyword-spotting). :param float keywords_threshold: (optional) A confidence value that is the lower bound for spotting a keyword. 
A word is considered to match a keyword if its confidence is greater than or equal to the threshold. Specify a probability between 0.0 and 1.0. If you specify a threshold, you must also specify one or more keywords. The service performs no keyword spotting if you omit either parameter. See [Keyword - spotting](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#keyword_spotting). + spotting](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-spotting#keyword-spotting). :param int max_alternatives: (optional) The maximum number of alternative transcripts that the service is to return. By default, the service returns a single transcript. If you specify a value of `0`, the service uses the default value, `1`. See [Maximum - alternatives](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#max_alternatives). + alternatives](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metadata#max-alternatives). :param float word_alternatives_threshold: (optional) A confidence value that is the lower bound for identifying a hypothesis as a possible word alternative (also known as "Confusion Networks"). An alternative word is considered if its confidence is greater than or equal to the threshold. Specify a probability between 0.0 and 1.0. By default, the service computes no alternative words. See [Word - alternatives](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#word_alternatives). + alternatives](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-spotting#word-alternatives). :param bool word_confidence: (optional) If `true`, the service returns a confidence measure in the range of 0.0 to 1.0 for each word. By default, the service returns no word confidence scores. See [Word - confidence](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#word_confidence). + confidence](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metadata#word-confidence). 
:param bool timestamps: (optional) If `true`, the service returns time alignment for each word. By default, no timestamps are returned. See [Word - timestamps](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#word_timestamps). + timestamps](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metadata#word-timestamps). :param bool profanity_filter: (optional) If `true`, the service filters profanity from all output except for keyword results by replacing inappropriate words with a series of asterisks. Set the parameter to - `false` to return results with no censoring. Applies to US English - transcription only. See [Profanity - filtering](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#profanity_filter). + `false` to return results with no censoring. Applies to US English and + Japanese transcription only. See [Profanity + filtering](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-formatting#profanity-filtering). :param bool smart_formatting: (optional) If `true`, the service converts dates, times, series of digits and numbers, phone numbers, currency values, and internet addresses into more readable, conventional representations in @@ -886,17 +978,20 @@ def create_job(self, the service performs no smart formatting. **Note:** Applies to US English, Japanese, and Spanish transcription only. See [Smart - formatting](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#smart_formatting). + formatting](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-formatting#smart-formatting). :param bool speaker_labels: (optional) If `true`, the response includes labels that identify which words were spoken by which participants in a multi-person exchange. By default, the service returns no speaker labels. Setting `speaker_labels` to `true` forces the `timestamps` parameter to be `true`, regardless of whether you specify `false` for the parameter. 
- **Note:** Applies to US English, Australian English, German, Japanese, - Korean, and Spanish (both broadband and narrowband models) and UK English - (narrowband model) transcription only. - See [Speaker - labels](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#speaker_labels). + * For previous-generation models, can be used for US English, Australian + English, German, Japanese, Korean, and Spanish (both broadband and + narrowband models) and UK English (narrowband model) transcription only. + * For next-generation models, can be used for English (Australian, UK, and + US), German, and Spanish transcription only. + Restrictions and limitations apply to the use of speaker labels for both + types of models. See [Speaker + labels](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-speaker-labels). :param str customization_id: (optional) **Deprecated.** Use the `language_customization_id` parameter to specify the customization ID (GUID) of a custom language model that is to be used with the recognition @@ -907,7 +1002,8 @@ def create_job(self, custom language model for which the grammar is defined. The service recognizes only strings that are recognized by the specified grammar; it does not recognize other custom words from the model's words resource. See - [Grammars](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-input#grammars-input). + [Using a grammar for speech + recognition](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-grammarUse). :param bool redaction: (optional) If `true`, the service redacts, or masks, numeric data from final transcripts. The feature redacts any number that has three or more consecutive digits by replacing each digit with an `X` @@ -921,7 +1017,7 @@ def create_job(self, be `1`). **Note:** Applies to US English, Japanese, and Korean transcription only. See [Numeric - redaction](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#redaction). 
+ redaction](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-formatting#numeric-redaction). :param bool processing_metrics: (optional) If `true`, requests processing metrics about the service's transcription of the input audio. The service returns processing metrics at the interval specified by the @@ -929,7 +1025,7 @@ def create_job(self, for transcription events, for example, for final and interim results. By default, the service returns no processing metrics. See [Processing - metrics](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metrics#processing_metrics). + metrics](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metrics#processing-metrics). :param float processing_metrics_interval: (optional) Specifies the interval in real wall-clock seconds at which the service is to return processing metrics. The parameter is ignored unless the `processing_metrics` parameter @@ -943,13 +1039,13 @@ def create_job(self, duration of the audio, the service returns processing metrics only for transcription events. See [Processing - metrics](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metrics#processing_metrics). + metrics](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metrics#processing-metrics). :param bool audio_metrics: (optional) If `true`, requests detailed information about the signal characteristics of the input audio. The service returns audio metrics with the final transcription results. By default, the service returns no audio metrics. See [Audio - metrics](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metrics#audio_metrics). + metrics](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metrics#audio-metrics). :param float end_of_phrase_silence_time: (optional) If `true`, specifies the duration of the pause interval at which the service splits a transcript into multiple final results. 
If the service detects pauses or extended @@ -964,7 +1060,7 @@ def create_job(self, The default pause interval for most languages is 0.8 seconds; the default for Chinese is 0.6 seconds. See [End of phrase silence - time](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#silence_time). + time](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-parsing#silence-time). :param bool split_transcript_at_phrase_end: (optional) If `true`, directs the service to split the transcript into multiple final results based on semantic features of the input, for example, at the conclusion of @@ -974,7 +1070,7 @@ def create_job(self, where the service splits a transcript. By default, the service splits transcripts based solely on the pause interval. See [Split transcript at phrase - end](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#split_transcript). + end](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-parsing#split-transcript). :param float speech_detector_sensitivity: (optional) The sensitivity of speech activity detection that the service is to perform. Use the parameter to suppress word insertions from music, coughing, and other non-speech @@ -986,8 +1082,8 @@ def create_job(self, * 0.5 (the default) provides a reasonable compromise for the level of sensitivity. * 1.0 suppresses no audio (speech detection sensitivity is disabled). - The values increase on a monotonic curve. See [Speech Activity - Detection](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-input#detection). + The values increase on a monotonic curve. See [Speech detector + sensitivity](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-detection#detection-parameters-sensitivity). :param float background_audio_suppression: (optional) The level to which the service is to suppress background audio based on its volume to prevent it from being transcribed as speech. 
Use the parameter to suppress side @@ -997,8 +1093,24 @@ def create_job(self, is disabled). * 0.5 provides a reasonable level of audio suppression for general usage. * 1.0 suppresses all audio (no audio is transcribed). - The values increase on a monotonic curve. See [Speech Activity - Detection](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-input#detection). + The values increase on a monotonic curve. See [Background audio + suppression](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-detection#detection-parameters-suppression). + :param bool low_latency: (optional) If `true` for next-generation + `Multimedia` and `Telephony` models that support low latency, directs the + service to produce results even more quickly than it usually does. + Next-generation models produce transcription results faster than + previous-generation models. The `low_latency` parameter causes the models + to produce results even more quickly, though the results might be less + accurate when the parameter is used. + **Note:** The parameter is beta functionality. It is not available for + previous-generation `Broadband` and `Narrowband` models. It is available + only for some next-generation models. + * For a list of next-generation models that support low latency, see + [Supported language + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-ng#models-ng-supported) + for next-generation models. + * For more information about the `low_latency` parameter, see [Low + latency](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-interim#low-latency). :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
:rtype: DetailedResponse with `dict` result representing a `RecognitionJob` object @@ -1041,7 +1153,8 @@ def create_job(self, 'end_of_phrase_silence_time': end_of_phrase_silence_time, 'split_transcript_at_phrase_end': split_transcript_at_phrase_end, 'speech_detector_sensitivity': speech_detector_sensitivity, - 'background_audio_suppression': background_audio_suppression + 'background_audio_suppression': background_audio_suppression, + 'low_latency': low_latency } data = audio @@ -1299,7 +1412,8 @@ def list_language_models(self, :param str language: (optional) The identifier of the language for which custom language or custom acoustic models are to be returned. Omit the parameter to see all custom language or custom acoustic models that are - owned by the requesting credentials. + owned by the requesting credentials. (**Note:** The identifier `ar-AR` is + deprecated; use `ar-MS` instead.) To determine the languages for which customization is available, see [Language support for customization](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-customization#languageSupport). @@ -1477,6 +1591,8 @@ def train_language_model(self, The value that you assign is used for all recognition requests that use the model. You can override it for any recognition request by specifying a customization weight for that request. + See [Using customization + weight](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-languageUse#weight). :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse with `dict` result representing a `TrainingResponse` object @@ -1577,7 +1693,7 @@ def upgrade_language_model(self, customization_id: str, resumes the status that it had prior to upgrade. The service cannot accept subsequent requests for the model until the upgrade completes. 
**See also:** [Upgrading a custom language - model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-customUpgrade#upgradeLanguage). + model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-custom-upgrade#custom-upgrade-language). :param str customization_id: The customization ID (GUID) of the custom language model that is to be used for the request. You must make the @@ -2587,7 +2703,8 @@ def create_acoustic_model(self, `Mobile custom model` or `Noisy car custom model`. :param str base_model_name: The name of the base language model that is to be customized by the new custom acoustic model. The new custom model can be - used only with the base model that it customizes. + used only with the base model that it customizes. (**Note:** The model + `ar-AR_BroadbandModel` is deprecated; use `ar-MS_BroadbandModel` instead.) To determine whether a base model supports acoustic model customization, refer to [Language support for customization](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-customization#languageSupport). @@ -2649,7 +2766,8 @@ def list_acoustic_models(self, :param str language: (optional) The identifier of the language for which custom language or custom acoustic models are to be returned. Omit the parameter to see all custom language or custom acoustic models that are - owned by the requesting credentials. + owned by the requesting credentials. (**Note:** The identifier `ar-AR` is + deprecated; use `ar-MS` instead.) To determine the languages for which customization is available, see [Language support for customization](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-customization#languageSupport). @@ -2949,7 +3067,7 @@ def upgrade_acoustic_model(self, the custom acoustic model can be upgraded. Omit the parameter if the custom acoustic model was not trained with a custom language model. 
**See also:** [Upgrading a custom acoustic - model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-customUpgrade#upgradeAcoustic). + model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-custom-upgrade#custom-upgrade-acoustic). :param str customization_id: The customization ID (GUID) of the custom acoustic model that is to be used for the request. You must make the @@ -2967,7 +3085,7 @@ def upgrade_acoustic_model(self, model that is trained with a custom language model, and only if you receive a 400 response code and the message `No input data modified since last training`. See [Upgrading a custom acoustic - model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-customUpgrade#upgradeAcoustic). + model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-custom-upgrade#custom-upgrade-acoustic). :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse @@ -3123,8 +3241,8 @@ def add_audio(self, minimum required rate, the service down-samples the audio to the appropriate rate. If the sampling rate of the audio is lower than the minimum required rate, the service labels the audio file as `invalid`. - **See also:** [Audio - formats](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-audio-formats#audio-formats). + **See also:** [Supported audio + formats](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-audio-formats). ### Content types for archive-type resources You can add an archive file (**.zip** or **.tar.gz** file) that contains audio files in any format that the service supports for speech recognition. For an @@ -3409,18 +3527,26 @@ class GetModelEnums: class ModelId(str, Enum): """ The identifier of the model in the form of its name from the output of the **Get a - model** method. + model** method. 
(**Note:** The model `ar-AR_BroadbandModel` is deprecated; use + `ar-MS_BroadbandModel` instead.). """ AR_AR_BROADBANDMODEL = 'ar-AR_BroadbandModel' + AR_MS_BROADBANDMODEL = 'ar-MS_BroadbandModel' + AR_MS_TELEPHONY = 'ar-MS_Telephony' DE_DE_BROADBANDMODEL = 'de-DE_BroadbandModel' DE_DE_NARROWBANDMODEL = 'de-DE_NarrowbandModel' + DE_DE_TELEPHONY = 'de-DE_Telephony' EN_AU_BROADBANDMODEL = 'en-AU_BroadbandModel' EN_AU_NARROWBANDMODEL = 'en-AU_NarrowbandModel' + EN_AU_TELEPHONY = 'en-AU_Telephony' EN_GB_BROADBANDMODEL = 'en-GB_BroadbandModel' EN_GB_NARROWBANDMODEL = 'en-GB_NarrowbandModel' + EN_GB_TELEPHONY = 'en-GB_Telephony' EN_US_BROADBANDMODEL = 'en-US_BroadbandModel' + EN_US_MULTIMEDIA = 'en-US_Multimedia' EN_US_NARROWBANDMODEL = 'en-US_NarrowbandModel' EN_US_SHORTFORM_NARROWBANDMODEL = 'en-US_ShortForm_NarrowbandModel' + EN_US_TELEPHONY = 'en-US_Telephony' ES_AR_BROADBANDMODEL = 'es-AR_BroadbandModel' ES_AR_NARROWBANDMODEL = 'es-AR_NarrowbandModel' ES_CL_BROADBANDMODEL = 'es-CL_BroadbandModel' @@ -3429,16 +3555,20 @@ class ModelId(str, Enum): ES_CO_NARROWBANDMODEL = 'es-CO_NarrowbandModel' ES_ES_BROADBANDMODEL = 'es-ES_BroadbandModel' ES_ES_NARROWBANDMODEL = 'es-ES_NarrowbandModel' + ES_ES_TELEPHONY = 'es-ES_Telephony' ES_MX_BROADBANDMODEL = 'es-MX_BroadbandModel' ES_MX_NARROWBANDMODEL = 'es-MX_NarrowbandModel' ES_PE_BROADBANDMODEL = 'es-PE_BroadbandModel' ES_PE_NARROWBANDMODEL = 'es-PE_NarrowbandModel' FR_CA_BROADBANDMODEL = 'fr-CA_BroadbandModel' FR_CA_NARROWBANDMODEL = 'fr-CA_NarrowbandModel' + FR_CA_TELEPHONY = 'fr-CA_Telephony' FR_FR_BROADBANDMODEL = 'fr-FR_BroadbandModel' FR_FR_NARROWBANDMODEL = 'fr-FR_NarrowbandModel' + FR_FR_TELEPHONY = 'fr-FR_Telephony' IT_IT_BROADBANDMODEL = 'it-IT_BroadbandModel' IT_IT_NARROWBANDMODEL = 'it-IT_NarrowbandModel' + IT_IT_TELEPHONY = 'it-IT_Telephony' JA_JP_BROADBANDMODEL = 'ja-JP_BroadbandModel' JA_JP_NARROWBANDMODEL = 'ja-JP_NarrowbandModel' KO_KR_BROADBANDMODEL = 'ko-KR_BroadbandModel' @@ -3447,6 +3577,7 @@ class 
ModelId(str, Enum): NL_NL_NARROWBANDMODEL = 'nl-NL_NarrowbandModel' PT_BR_BROADBANDMODEL = 'pt-BR_BroadbandModel' PT_BR_NARROWBANDMODEL = 'pt-BR_NarrowbandModel' + PT_BR_TELEPHONY = 'pt-BR_Telephony' ZH_CN_BROADBANDMODEL = 'zh-CN_BroadbandModel' ZH_CN_NARROWBANDMODEL = 'zh-CN_NarrowbandModel' @@ -3480,20 +3611,30 @@ class ContentType(str, Enum): class Model(str, Enum): """ - The identifier of the model that is to be used for the recognition request. See - [Languages and - models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models#models). + The identifier of the model that is to be used for the recognition request. + (**Note:** The model `ar-AR_BroadbandModel` is deprecated; use + `ar-MS_BroadbandModel` instead.) See [Languages and + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models) and + [Next-generation languages and + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-ng). """ AR_AR_BROADBANDMODEL = 'ar-AR_BroadbandModel' + AR_MS_BROADBANDMODEL = 'ar-MS_BroadbandModel' + AR_MS_TELEPHONY = 'ar-MS_Telephony' DE_DE_BROADBANDMODEL = 'de-DE_BroadbandModel' DE_DE_NARROWBANDMODEL = 'de-DE_NarrowbandModel' + DE_DE_TELEPHONY = 'de-DE_Telephony' EN_AU_BROADBANDMODEL = 'en-AU_BroadbandModel' EN_AU_NARROWBANDMODEL = 'en-AU_NarrowbandModel' + EN_AU_TELEPHONY = 'en-AU_Telephony' EN_GB_BROADBANDMODEL = 'en-GB_BroadbandModel' EN_GB_NARROWBANDMODEL = 'en-GB_NarrowbandModel' + EN_GB_TELEPHONY = 'en-GB_Telephony' EN_US_BROADBANDMODEL = 'en-US_BroadbandModel' + EN_US_MULTIMEDIA = 'en-US_Multimedia' EN_US_NARROWBANDMODEL = 'en-US_NarrowbandModel' EN_US_SHORTFORM_NARROWBANDMODEL = 'en-US_ShortForm_NarrowbandModel' + EN_US_TELEPHONY = 'en-US_Telephony' ES_AR_BROADBANDMODEL = 'es-AR_BroadbandModel' ES_AR_NARROWBANDMODEL = 'es-AR_NarrowbandModel' ES_CL_BROADBANDMODEL = 'es-CL_BroadbandModel' @@ -3502,16 +3643,20 @@ class Model(str, Enum): ES_CO_NARROWBANDMODEL = 'es-CO_NarrowbandModel' ES_ES_BROADBANDMODEL = 
'es-ES_BroadbandModel' ES_ES_NARROWBANDMODEL = 'es-ES_NarrowbandModel' + ES_ES_TELEPHONY = 'es-ES_Telephony' ES_MX_BROADBANDMODEL = 'es-MX_BroadbandModel' ES_MX_NARROWBANDMODEL = 'es-MX_NarrowbandModel' ES_PE_BROADBANDMODEL = 'es-PE_BroadbandModel' ES_PE_NARROWBANDMODEL = 'es-PE_NarrowbandModel' FR_CA_BROADBANDMODEL = 'fr-CA_BroadbandModel' FR_CA_NARROWBANDMODEL = 'fr-CA_NarrowbandModel' + FR_CA_TELEPHONY = 'fr-CA_Telephony' FR_FR_BROADBANDMODEL = 'fr-FR_BroadbandModel' FR_FR_NARROWBANDMODEL = 'fr-FR_NarrowbandModel' + FR_FR_TELEPHONY = 'fr-FR_Telephony' IT_IT_BROADBANDMODEL = 'it-IT_BroadbandModel' IT_IT_NARROWBANDMODEL = 'it-IT_NarrowbandModel' + IT_IT_TELEPHONY = 'it-IT_Telephony' JA_JP_BROADBANDMODEL = 'ja-JP_BroadbandModel' JA_JP_NARROWBANDMODEL = 'ja-JP_NarrowbandModel' KO_KR_BROADBANDMODEL = 'ko-KR_BroadbandModel' @@ -3520,6 +3665,7 @@ class Model(str, Enum): NL_NL_NARROWBANDMODEL = 'nl-NL_NarrowbandModel' PT_BR_BROADBANDMODEL = 'pt-BR_BroadbandModel' PT_BR_NARROWBANDMODEL = 'pt-BR_NarrowbandModel' + PT_BR_TELEPHONY = 'pt-BR_Telephony' ZH_CN_BROADBANDMODEL = 'zh-CN_BroadbandModel' ZH_CN_NARROWBANDMODEL = 'zh-CN_NarrowbandModel' @@ -3553,20 +3699,30 @@ class ContentType(str, Enum): class Model(str, Enum): """ - The identifier of the model that is to be used for the recognition request. See - [Languages and - models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models#models). + The identifier of the model that is to be used for the recognition request. + (**Note:** The model `ar-AR_BroadbandModel` is deprecated; use + `ar-MS_BroadbandModel` instead.) See [Languages and + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models) and + [Next-generation languages and + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-ng). 
""" AR_AR_BROADBANDMODEL = 'ar-AR_BroadbandModel' + AR_MS_BROADBANDMODEL = 'ar-MS_BroadbandModel' + AR_MS_TELEPHONY = 'ar-MS_Telephony' DE_DE_BROADBANDMODEL = 'de-DE_BroadbandModel' DE_DE_NARROWBANDMODEL = 'de-DE_NarrowbandModel' + DE_DE_TELEPHONY = 'de-DE_Telephony' EN_AU_BROADBANDMODEL = 'en-AU_BroadbandModel' EN_AU_NARROWBANDMODEL = 'en-AU_NarrowbandModel' + EN_AU_TELEPHONY = 'en-AU_Telephony' EN_GB_BROADBANDMODEL = 'en-GB_BroadbandModel' EN_GB_NARROWBANDMODEL = 'en-GB_NarrowbandModel' + EN_GB_TELEPHONY = 'en-GB_Telephony' EN_US_BROADBANDMODEL = 'en-US_BroadbandModel' + EN_US_MULTIMEDIA = 'en-US_Multimedia' EN_US_NARROWBANDMODEL = 'en-US_NarrowbandModel' EN_US_SHORTFORM_NARROWBANDMODEL = 'en-US_ShortForm_NarrowbandModel' + EN_US_TELEPHONY = 'en-US_Telephony' ES_AR_BROADBANDMODEL = 'es-AR_BroadbandModel' ES_AR_NARROWBANDMODEL = 'es-AR_NarrowbandModel' ES_CL_BROADBANDMODEL = 'es-CL_BroadbandModel' @@ -3575,16 +3731,20 @@ class Model(str, Enum): ES_CO_NARROWBANDMODEL = 'es-CO_NarrowbandModel' ES_ES_BROADBANDMODEL = 'es-ES_BroadbandModel' ES_ES_NARROWBANDMODEL = 'es-ES_NarrowbandModel' + ES_ES_TELEPHONY = 'es-ES_Telephony' ES_MX_BROADBANDMODEL = 'es-MX_BroadbandModel' ES_MX_NARROWBANDMODEL = 'es-MX_NarrowbandModel' ES_PE_BROADBANDMODEL = 'es-PE_BroadbandModel' ES_PE_NARROWBANDMODEL = 'es-PE_NarrowbandModel' FR_CA_BROADBANDMODEL = 'fr-CA_BroadbandModel' FR_CA_NARROWBANDMODEL = 'fr-CA_NarrowbandModel' + FR_CA_TELEPHONY = 'fr-CA_Telephony' FR_FR_BROADBANDMODEL = 'fr-FR_BroadbandModel' FR_FR_NARROWBANDMODEL = 'fr-FR_NarrowbandModel' + FR_FR_TELEPHONY = 'fr-FR_Telephony' IT_IT_BROADBANDMODEL = 'it-IT_BroadbandModel' IT_IT_NARROWBANDMODEL = 'it-IT_NarrowbandModel' + IT_IT_TELEPHONY = 'it-IT_Telephony' JA_JP_BROADBANDMODEL = 'ja-JP_BroadbandModel' JA_JP_NARROWBANDMODEL = 'ja-JP_NarrowbandModel' KO_KR_BROADBANDMODEL = 'ko-KR_BroadbandModel' @@ -3593,6 +3753,7 @@ class Model(str, Enum): NL_NL_NARROWBANDMODEL = 'nl-NL_NarrowbandModel' PT_BR_BROADBANDMODEL = 
'pt-BR_BroadbandModel' PT_BR_NARROWBANDMODEL = 'pt-BR_NarrowbandModel' + PT_BR_TELEPHONY = 'pt-BR_Telephony' ZH_CN_BROADBANDMODEL = 'zh-CN_BroadbandModel' ZH_CN_NARROWBANDMODEL = 'zh-CN_NarrowbandModel' @@ -3631,12 +3792,14 @@ class Language(str, Enum): """ The identifier of the language for which custom language or custom acoustic models are to be returned. Omit the parameter to see all custom language or custom - acoustic models that are owned by the requesting credentials. + acoustic models that are owned by the requesting credentials. (**Note:** The + identifier `ar-AR` is deprecated; use `ar-MS` instead.) To determine the languages for which customization is available, see [Language support for customization](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-customization#languageSupport). """ AR_AR = 'ar-AR' + AR_MS = 'ar-MS' DE_DE = 'de-DE' EN_AU = 'en-AU' EN_GB = 'en-GB' @@ -3735,12 +3898,14 @@ class Language(str, Enum): """ The identifier of the language for which custom language or custom acoustic models are to be returned. Omit the parameter to see all custom language or custom - acoustic models that are owned by the requesting credentials. + acoustic models that are owned by the requesting credentials. (**Note:** The + identifier `ar-AR` is deprecated; use `ar-MS` instead.) To determine the languages for which customization is available, see [Language support for customization](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-customization#languageSupport). """ AR_AR = 'ar-AR' + AR_MS = 'ar-MS' DE_DE = 'de-DE' EN_AU = 'en-AU' EN_GB = 'en-GB' @@ -6888,8 +7053,10 @@ class SpeechRecognitionAlternative(): :attr str transcript: A transcription of the audio. :attr float confidence: (optional) A score that indicates the service's - confidence in the transcript in the range of 0.0 to 1.0. A confidence score is - returned only for the best alternative and only with results marked as final. 
+ confidence in the transcript in the range of 0.0 to 1.0. For speech recognition + with previous-generation models, a confidence score is returned only for the + best alternative and only with results marked as final. For speech recognition + with next-generation models, a confidence score is never returned. :attr List[str] timestamps: (optional) Time alignments for each word from the transcript as a list of lists. Each inner list consists of three elements: the word followed by its start and end time in seconds, for example: @@ -6913,9 +7080,11 @@ def __init__(self, :param str transcript: A transcription of the audio. :param float confidence: (optional) A score that indicates the service's - confidence in the transcript in the range of 0.0 to 1.0. A confidence score - is returned only for the best alternative and only with results marked as - final. + confidence in the transcript in the range of 0.0 to 1.0. For speech + recognition with previous-generation models, a confidence score is returned + only for the best alternative and only with results marked as final. For + speech recognition with next-generation models, a confidence score is never + returned. :param List[str] timestamps: (optional) Time alignments for each word from the transcript as a list of lists. Each inner list consists of three elements: the word followed by its start and end time in seconds, for @@ -7338,10 +7507,17 @@ class SupportedFeatures(): supported only for US English, Australian English, German, Japanese, Korean, and Spanish (both broadband and narrowband models) and UK English (narrowband model only). Speaker labels are not supported for any other models. + :attr bool low_latency: (optional) Indicates whether the `low_latency` parameter + can be used with a next-generation language model. The field is returned only + for next-generation models. Previous-generation models do not support the + `low_latency` parameter. 
""" - def __init__(self, custom_language_model: bool, - speaker_labels: bool) -> None: + def __init__(self, + custom_language_model: bool, + speaker_labels: bool, + *, + low_latency: bool = None) -> None: """ Initialize a SupportedFeatures object. @@ -7355,9 +7531,14 @@ def __init__(self, custom_language_model: bool, Korean, and Spanish (both broadband and narrowband models) and UK English (narrowband model only). Speaker labels are not supported for any other models. + :param bool low_latency: (optional) Indicates whether the `low_latency` + parameter can be used with a next-generation language model. The field is + returned only for next-generation models. Previous-generation models do not + support the `low_latency` parameter. """ self.custom_language_model = custom_language_model self.speaker_labels = speaker_labels + self.low_latency = low_latency @classmethod def from_dict(cls, _dict: Dict) -> 'SupportedFeatures': @@ -7375,6 +7556,8 @@ def from_dict(cls, _dict: Dict) -> 'SupportedFeatures': raise ValueError( 'Required property \'speaker_labels\' not present in SupportedFeatures JSON' ) + if 'low_latency' in _dict: + args['low_latency'] = _dict.get('low_latency') return cls(**args) @classmethod @@ -7390,6 +7573,8 @@ def to_dict(self) -> Dict: _dict['custom_language_model'] = self.custom_language_model if hasattr(self, 'speaker_labels') and self.speaker_labels is not None: _dict['speaker_labels'] = self.speaker_labels + if hasattr(self, 'low_latency') and self.low_latency is not None: + _dict['low_latency'] = self.low_latency return _dict def _to_dict(self): diff --git a/ibm_watson/speech_to_text_v1_adapter.py b/ibm_watson/speech_to_text_v1_adapter.py index 7cf5087f3..e9119ff72 100644 --- a/ibm_watson/speech_to_text_v1_adapter.py +++ b/ibm_watson/speech_to_text_v1_adapter.py @@ -1,6 +1,6 @@ # coding: utf-8 -# (C) Copyright IBM Corp. 2018, 2020. +# (C) Copyright IBM Corp. 2018, 2021. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -55,202 +55,238 @@ def recognize_using_websocket(self, split_transcript_at_phrase_end=None, speech_detector_sensitivity=None, background_audio_suppression=None, + low_latency=None, **kwargs): """ Sends audio for speech recognition using web sockets. + :param AudioSource audio: The audio to transcribe in the format specified by the - `Content-Type` header. + `Content-Type` header. :param str content_type: The type of the input: audio/basic, audio/flac, - audio/l16, audio/mp3, audio/mpeg, audio/mulaw, audio/ogg, audio/ogg;codecs=opus, - audio/ogg;codecs=vorbis, audio/wav, audio/webm, audio/webm;codecs=opus, or - audio/webm;codecs=vorbis. + audio/l16, audio/mp3, audio/mpeg, audio/mulaw, audio/ogg, audio/ogg;codecs=opus, + audio/ogg;codecs=vorbis, audio/wav, audio/webm, audio/webm;codecs=opus, or + audio/webm;codecs=vorbis. :param RecognizeCallback recognize_callback: The callback method for the websocket. - :param str model: The identifier of the model that is to be used for the - recognition request or, for the **Create a session** method, with the new session. - :param str language_customization_id: The customization ID (GUID) of a custom - language model that is to be used with the recognition request. The base model of - the specified custom language model must match the model specified with the - `model` parameter. You must make the request with service credentials created for - the instance of the service that owns the custom model. By default, no custom - language model is used. See [Custom - models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-input#custom). - **Note:** Use this parameter instead of the deprecated `customization_id` - parameter. 
- :param str acoustic_customization_id: The customization ID (GUID) of a custom - acoustic model that is to be used with the recognition request or, for the - **Create a session** method, with the new session. The base model of the specified - custom acoustic model must match the model specified with the `model` parameter. - You must make the request with service credentials created for the instance of the - service that owns the custom model. By default, no custom acoustic model is used. - :param float customization_weight: If you specify the customization ID (GUID) of a - custom language model with the recognition request or, for sessions, with the - **Create a session** method, the customization weight tells the service how much - weight to give to words from the custom language model compared to those from the - base model for the current request. - Specify a value between 0.0 and 1.0. Unless a different customization weight was - specified for the custom model when it was trained, the default value is 0.3. A - customization weight that you specify overrides a weight that was specified when - the custom model was trained. - The default value yields the best performance in general. Assign a higher value if - your audio makes frequent use of OOV words from the custom model. Use caution when - setting the weight: a higher value can improve the accuracy of phrases from the - custom model's domain, but it can negatively affect performance on non-domain - phrases. - :param str base_model_version: The version of the specified base model that is to - be used with recognition request or, for the **Create a session** method, with the - new session. Multiple versions of a base model can exist when a model is updated - for internal improvements. The parameter is intended primarily for use with custom - models that have been upgraded for a new base model. The default value depends on - whether the parameter is used with or without a custom model. 
For more - information, see [Base model - version](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-input#version). - :param int inactivity_timeout: The time in seconds after which, if only silence - (no speech) is detected in submitted audio, the connection is closed with a 400 - error. Useful for stopping audio submission from a live microphone when a user - simply walks away. Use `-1` for infinity. + :param str model: (optional) The identifier of the model that is to be used + for the recognition request. (**Note:** The model `ar-AR_BroadbandModel` is + deprecated; use `ar-MS_BroadbandModel` instead.) See [Languages and + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models) + and [Next-generation languages and + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-ng). + :param str language_customization_id: (optional) The customization ID + (GUID) of a custom language model that is to be used with the recognition + request. The base model of the specified custom language model must match + the model specified with the `model` parameter. You must make the request + with credentials for the instance of the service that owns the custom + model. By default, no custom language model is used. See [Using a custom + language model for speech + recognition](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-languageUse). + **Note:** Use this parameter instead of the deprecated `customization_id` + parameter. + :param str acoustic_customization_id: (optional) The customization ID + (GUID) of a custom acoustic model that is to be used with the recognition + request. The base model of the specified custom acoustic model must match + the model specified with the `model` parameter. You must make the request + with credentials for the instance of the service that owns the custom + model. By default, no custom acoustic model is used. 
See [Using a custom + acoustic model for speech + recognition](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-acousticUse). + :param str base_model_version: (optional) The version of the specified base + model that is to be used with the recognition request. Multiple versions of + a base model can exist when a model is updated for internal improvements. + The parameter is intended primarily for use with custom models that have + been upgraded for a new base model. The default value depends on whether + the parameter is used with or without a custom model. See [Making speech + recognition requests with upgraded custom + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-custom-upgrade-use#custom-upgrade-use-recognition). + :param float customization_weight: (optional) If you specify the + customization ID (GUID) of a custom language model with the recognition + request, the customization weight tells the service how much weight to give + to words from the custom language model compared to those from the base + model for the current request. + Specify a value between 0.0 and 1.0. Unless a different customization + weight was specified for the custom model when it was trained, the default + value is 0.3. A customization weight that you specify overrides a weight + that was specified when the custom model was trained. + The default value yields the best performance in general. Assign a higher + value if your audio makes frequent use of OOV words from the custom model. + Use caution when setting the weight: a higher value can improve the + accuracy of phrases from the custom model's domain, but it can negatively + affect performance on non-domain phrases. + See [Using customization + weight](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-languageUse#weight). 
+ :param int inactivity_timeout: (optional) The time in seconds after which, + if only silence (no speech) is detected in streaming audio, the connection + is closed with a 400 error. The parameter is useful for stopping audio + submission from a live microphone when a user simply walks away. Use `-1` + for infinity. See [Inactivity + timeout](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-input#timeouts-inactivity). :param List[str] keywords: (optional) An array of keyword strings to spot - in the audio. Each keyword string can include one or more string tokens. - Keywords are spotted only in the final results, not in interim hypotheses. - If you specify any keywords, you must also specify a keywords threshold. - Omit the parameter or specify an empty array if you do not need to spot - keywords. - You can spot a maximum of 1000 keywords with a single request. A single - keyword can have a maximum length of 1024 characters, though the maximum - effective length for double-byte languages might be shorter. Keywords are - case-insensitive. - See [Keyword - spotting](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#keyword_spotting). - :param float keywords_threshold: A confidence value that is the lower bound for - spotting a keyword. A word is considered to match a keyword if its confidence is - greater than or equal to the threshold. Specify a probability between 0 and 1 - inclusive. No keyword spotting is performed if you omit the parameter. If you - specify a threshold, you must also specify one or more keywords. - :param int max_alternatives: The maximum number of alternative transcripts to be - returned. By default, a single transcription is returned. - :param float word_alternatives_threshold: A confidence value that is the lower - bound for identifying a hypothesis as a possible word alternative (also known as - \"Confusion Networks\"). 
An alternative word is considered if its confidence is - greater than or equal to the threshold. Specify a probability between 0 and 1 - inclusive. No alternative words are computed if you omit the parameter. - :param bool word_confidence: If `true`, a confidence measure in the range of 0 to - 1 is returned for each word. By default, no word confidence measures are returned. - :param bool timestamps: If `true`, time alignment is returned for each word. By - default, no timestamps are returned. - :param bool profanity_filter: If `true` (the default), filters profanity from all - output except for keyword results by replacing inappropriate words with a series - of asterisks. Set the parameter to `false` to return results with no censoring. - Applies to US English transcription only. - :param bool smart_formatting: If `true`, converts dates, times, series of digits - and numbers, phone numbers, currency values, and internet addresses into more - readable, conventional representations in the final transcript of a recognition - request. For US English, also converts certain keyword strings to punctuation - symbols. By default, no smart formatting is performed. Applies to US English and - Spanish transcription only. + in the audio. Each keyword string can include one or more string tokens. + Keywords are spotted only in the final results, not in interim hypotheses. + If you specify any keywords, you must also specify a keywords threshold. + Omit the parameter or specify an empty array if you do not need to spot + keywords. + You can spot a maximum of 1000 keywords with a single request. A single + keyword can have a maximum length of 1024 characters, though the maximum + effective length for double-byte languages might be shorter. Keywords are + case-insensitive. + See [Keyword + spotting](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-spotting#keyword-spotting). 
+ :param float keywords_threshold: (optional) A confidence value that is the + lower bound for spotting a keyword. A word is considered to match a keyword + if its confidence is greater than or equal to the threshold. Specify a + probability between 0.0 and 1.0. If you specify a threshold, you must also + specify one or more keywords. The service performs no keyword spotting if + you omit either parameter. See [Keyword + spotting](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-spotting#keyword-spotting). + :param int max_alternatives: (optional) The maximum number of alternative + transcripts that the service is to return. By default, the service returns + a single transcript. If you specify a value of `0`, the service uses the + default value, `1`. See [Maximum + alternatives](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metadata#max-alternatives). + :param float word_alternatives_threshold: (optional) A confidence value + that is the lower bound for identifying a hypothesis as a possible word + alternative (also known as "Confusion Networks"). An alternative word is + considered if its confidence is greater than or equal to the threshold. + Specify a probability between 0.0 and 1.0. By default, the service computes + no alternative words. See [Word + alternatives](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-spotting#word-alternatives). + :param bool word_confidence: (optional) If `true`, the service returns a + confidence measure in the range of 0.0 to 1.0 for each word. By default, + the service returns no word confidence scores. See [Word + confidence](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metadata#word-confidence). + :param bool timestamps: (optional) If `true`, the service returns time + alignment for each word. By default, no timestamps are returned. See [Word + timestamps](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metadata#word-timestamps). 
+ :param bool profanity_filter: (optional) If `true`, the service filters + profanity from all output except for keyword results by replacing + inappropriate words with a series of asterisks. Set the parameter to + `false` to return results with no censoring. Applies to US English and + Japanese transcription only. See [Profanity + filtering](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-formatting#profanity-filtering). + :param bool smart_formatting: (optional) If `true`, the service converts + dates, times, series of digits and numbers, phone numbers, currency values, + and internet addresses into more readable, conventional representations in + the final transcript of a recognition request. For US English, the service + also converts certain keyword strings to punctuation symbols. By default, + the service performs no smart formatting. + **Note:** Applies to US English, Japanese, and Spanish transcription only. + See [Smart + formatting](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-formatting#smart-formatting). :param bool speaker_labels: (optional) If `true`, the response includes - labels that identify which words were spoken by which participants in a - multi-person exchange. By default, the service returns no speaker labels. - Setting `speaker_labels` to `true` forces the `timestamps` parameter to be - `true`, regardless of whether you specify `false` for the parameter. - **Note:** Applies to US English, German, Japanese, Korean, and Spanish - (both broadband and narrowband models) and UK English (narrowband model) - transcription only. To determine whether a language model supports speaker - labels, you can also use the **Get a model** method and check that the - attribute `speaker_labels` is set to `true`. - See [Speaker - labels](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#speaker_labels). + labels that identify which words were spoken by which participants in a + multi-person exchange. 
By default, the service returns no speaker labels. + Setting `speaker_labels` to `true` forces the `timestamps` parameter to be + `true`, regardless of whether you specify `false` for the parameter. + * For previous-generation models, can be used for US English, Australian + English, German, Japanese, Korean, and Spanish (both broadband and + narrowband models) and UK English (narrowband model) transcription only. + * For next-generation models, can be used for English (Australian, UK, and + US), German, and Spanish transcription only. + Restrictions and limitations apply to the use of speaker labels for both + types of models. See [Speaker + labels](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-speaker-labels). :param str http_proxy_host: http proxy host name. :param str http_proxy_port: http proxy port. If not set, set to 80. - :param str customization_id: **Deprecated.** Use the `language_customization_id` - parameter to specify the customization ID (GUID) of a custom language model that - is to be used with the recognition request. Do not specify both parameters with a - request. - :param str grammar_name: The name of a grammar that is to be used with the - recognition request. If you specify a grammar, you must also use the - `language_customization_id` parameter to specify the name of the custom language - model for which the grammar is defined. The service recognizes only strings that - are recognized by the specified grammar; it does not recognize other custom words - from the model's words resource. See - [Grammars](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output). - :param bool redaction: If `true`, the service redacts, or masks, numeric data from - final transcripts. The feature redacts any number that has three or more - consecutive digits by replacing each digit with an `X` character. It is intended - to redact sensitive numeric data, such as credit card numbers. By default, the - service performs no redaction. 
- When you enable redaction, the service automatically enables smart formatting, - regardless of whether you explicitly disable that feature. To ensure maximum - security, the service also disables keyword spotting (ignores the `keywords` and - `keywords_threshold` parameters) and returns only a single final transcript - (forces the `max_alternatives` parameter to be `1`). - **Note:** Applies to US English, Japanese, and Korean transcription only. - See [Numeric - redaction](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#redaction). - :param bool processing_metrics: If `true`, requests processing metrics about the - service's transcription of the input audio. The service returns processing metrics - at the interval specified by the `processing_metrics_interval` parameter. It also - returns processing metrics for transcription events, for example, for final and - interim results. By default, the service returns no processing metrics. - :param float processing_metrics_interval: Specifies the interval in real - wall-clock seconds at which the service is to return processing metrics. The - parameter is ignored unless the `processing_metrics` parameter is set to `true`. - The parameter accepts a minimum value of 0.1 seconds. The level of precision is - not restricted, so you can specify values such as 0.25 and 0.125. - The service does not impose a maximum value. If you want to receive processing - metrics only for transcription events instead of at periodic intervals, set the - value to a large number. If the value is larger than the duration of the audio, - the service returns processing metrics only for transcription events. - :param bool audio_metrics: If `true`, requests detailed information about the - signal characteristics of the input audio. The service returns audio metrics with - the final transcription results. By default, the service returns no audio metrics. 
+ :param str customization_id: (optional) **Deprecated.** Use the + `language_customization_id` parameter to specify the customization ID + (GUID) of a custom language model that is to be used with the recognition + request. Do not specify both parameters with a request. + :param str grammar_name: (optional) The name of a grammar that is to be + used with the recognition request. If you specify a grammar, you must also + use the `language_customization_id` parameter to specify the name of the + custom language model for which the grammar is defined. The service + recognizes only strings that are recognized by the specified grammar; it + does not recognize other custom words from the model's words resource. See + [Using a grammar for speech + recognition](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-grammarUse). + :param bool redaction: (optional) If `true`, the service redacts, or masks, + numeric data from final transcripts. The feature redacts any number that + has three or more consecutive digits by replacing each digit with an `X` + character. It is intended to redact sensitive numeric data, such as credit + card numbers. By default, the service performs no redaction. + When you enable redaction, the service automatically enables smart + formatting, regardless of whether you explicitly disable that feature. To + ensure maximum security, the service also disables keyword spotting + (ignores the `keywords` and `keywords_threshold` parameters) and returns + only a single final transcript (forces the `max_alternatives` parameter to + be `1`). + **Note:** Applies to US English, Japanese, and Korean transcription only. + See [Numeric + redaction](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-formatting#numeric-redaction). + :param bool audio_metrics: (optional) If `true`, requests detailed + information about the signal characteristics of the input audio. The + service returns audio metrics with the final transcription results. 
By + default, the service returns no audio metrics. + See [Audio + metrics](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metrics#audio-metrics). :param float end_of_phrase_silence_time: (optional) If `true`, specifies - the duration of the pause interval at which the service splits a transcript - into multiple final results. If the service detects pauses or extended - silence before it reaches the end of the audio stream, its response can - include multiple final results. Silence indicates a point at which the - speaker pauses between spoken words or phrases. - Specify a value for the pause interval in the range of 0.0 to 120.0. - * A value greater than 0 specifies the interval that the service is to use - for speech recognition. - * A value of 0 indicates that the service is to use the default interval. - It is equivalent to omitting the parameter. - The default pause interval for most languages is 0.8 seconds; the default - for Chinese is 0.6 seconds. - See [End of phrase silence - time](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#silence_time). + the duration of the pause interval at which the service splits a transcript + into multiple final results. If the service detects pauses or extended + silence before it reaches the end of the audio stream, its response can + include multiple final results. Silence indicates a point at which the + speaker pauses between spoken words or phrases. + Specify a value for the pause interval in the range of 0.0 to 120.0. + * A value greater than 0 specifies the interval that the service is to use + for speech recognition. + * A value of 0 indicates that the service is to use the default interval. + It is equivalent to omitting the parameter. + The default pause interval for most languages is 0.8 seconds; the default + for Chinese is 0.6 seconds. + See [End of phrase silence + time](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-parsing#silence-time). 
:param bool split_transcript_at_phrase_end: (optional) If `true`, directs - the service to split the transcript into multiple final results based on - semantic features of the input, for example, at the conclusion of - meaningful phrases such as sentences. The service bases its understanding - of semantic features on the base language model that you use with a - request. Custom language models and grammars can also influence how and - where the service splits a transcript. By default, the service splits - transcripts based solely on the pause interval. - See [Split transcript at phrase - end](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#split_transcript). + the service to split the transcript into multiple final results based on + semantic features of the input, for example, at the conclusion of + meaningful phrases such as sentences. The service bases its understanding + of semantic features on the base language model that you use with a + request. Custom language models and grammars can also influence how and + where the service splits a transcript. By default, the service splits + transcripts based solely on the pause interval. + See [Split transcript at phrase + end](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-parsing#split-transcript). :param float speech_detector_sensitivity: (optional) The sensitivity of - speech activity detection that the service is to perform. Use the parameter - to suppress word insertions from music, coughing, and other non-speech - events. The service biases the audio it passes for speech recognition by - evaluating the input audio against prior models of speech and non-speech - activity. - Specify a value between 0.0 and 1.0: - * 0.0 suppresses all audio (no speech is transcribed). - * 0.5 (the default) provides a reasonable compromise for the level of - sensitivity. - * 1.0 suppresses no audio (speech detection sensitivity is disabled). - The values increase on a monotonic curve. 
See [Speech Activity - Detection](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-input#detection). + speech activity detection that the service is to perform. Use the parameter + to suppress word insertions from music, coughing, and other non-speech + events. The service biases the audio it passes for speech recognition by + evaluating the input audio against prior models of speech and non-speech + activity. + Specify a value between 0.0 and 1.0: + * 0.0 suppresses all audio (no speech is transcribed). + * 0.5 (the default) provides a reasonable compromise for the level of + sensitivity. + * 1.0 suppresses no audio (speech detection sensitivity is disabled). + The values increase on a monotonic curve. See [Speech detector + sensitivity](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-detection#detection-parameters-sensitivity). :param float background_audio_suppression: (optional) The level to which - the service is to suppress background audio based on its volume to prevent - it from being transcribed as speech. Use the parameter to suppress side - conversations or background noise. - Specify a value in the range of 0.0 to 1.0: - * 0.0 (the default) provides no suppression (background audio suppression - is disabled). - * 0.5 provides a reasonable level of audio suppression for general usage. - * 1.0 suppresses all audio (no audio is transcribed). - The values increase on a monotonic curve. See [Speech Activity - Detection](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-input#detection). + the service is to suppress background audio based on its volume to prevent + it from being transcribed as speech. Use the parameter to suppress side + conversations or background noise. + Specify a value in the range of 0.0 to 1.0: + * 0.0 (the default) provides no suppression (background audio suppression + is disabled). + * 0.5 provides a reasonable level of audio suppression for general usage. 
+ * 1.0 suppresses all audio (no audio is transcribed). + The values increase on a monotonic curve. See [Background audio + suppression](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-detection#detection-parameters-suppression). + :param bool low_latency: (optional) If `true` for next-generation + `Multimedia` and `Telephony` models that support low latency, directs the + service to produce results even more quickly than it usually does. + Next-generation models produce transcription results faster than + previous-generation models. The `low_latency` parameter causes the models + to produce results even more quickly, though the results might be less + accurate when the parameter is used. + **Note:** The parameter is beta functionality. It is not available for + previous-generation `Broadband` and `Narrowband` models. It is available + only for some next-generation models. + * For a list of next-generation models that support low latency, see + [Supported language + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-ng#models-ng-supported) + for next-generation models. + * For more information about the `low_latency` parameter, see [Low + latency](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-interim#low-latency). :param dict headers: A `dict` containing the request headers :return: A `dict` containing the `SpeechRecognitionResults` response. 
:rtype: dict @@ -316,7 +352,8 @@ def recognize_using_websocket(self, 'end_of_phrase_silence_time': end_of_phrase_silence_time, 'split_transcript_at_phrase_end': split_transcript_at_phrase_end, 'speech_detector_sensitivity': speech_detector_sensitivity, - 'background_audio_suppression': background_audio_suppression + 'background_audio_suppression': background_audio_suppression, + 'low_latency': low_latency } options = {k: v for k, v in options.items() if v is not None} request['options'] = options diff --git a/ibm_watson/text_to_speech_v1.py b/ibm_watson/text_to_speech_v1.py index 3834a376d..9c8c78ad9 100644 --- a/ibm_watson/text_to_speech_v1.py +++ b/ibm_watson/text_to_speech_v1.py @@ -1,6 +1,6 @@ # coding: utf-8 -# (C) Copyright IBM Corp. 2015, 2020. +# (C) Copyright IBM Corp. 2015, 2021. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -# IBM OpenAPI SDK Code Generator Version: 99-SNAPSHOT-a45d89ef-20201209-192237 +# IBM OpenAPI SDK Code Generator Version: 99-SNAPSHOT-902c9336-20210507-162723 """ The IBM Watson™ Text to Speech service provides APIs that use IBM's speech-synthesis capabilities to synthesize text into natural-sounding speech in a variety of languages, @@ -30,12 +30,16 @@ that, when combined, sound like the word. A phonetic translation is based on the SSML phoneme format for representing a word. You can specify a phonetic translation in standard International Phonetic Alphabet (IPA) representation or in the proprietary IBM Symbolic -Phonetic Representation (SPR). The Arabic, Chinese, Dutch, and Korean languages support -only IPA. +Phonetic Representation (SPR). The Arabic, Chinese, Dutch, Australian English, and Korean +languages support only IPA. +The service also offers a Tune by Example feature that lets you define custom prompts. 
You
+can also define speaker models to improve the quality of your custom prompts. The service
+supports custom prompts only for US English custom models and voices.
 """
 
 from enum import Enum
-from typing import Dict, List
+from os.path import basename
+from typing import BinaryIO, Dict, List
 import json
 
 from ibm_cloud_sdk_core import BaseService, DetailedResponse
@@ -127,8 +131,31 @@ def get_voice(self,
         voices** method.
         **See also:** [Listing a specific
 voice](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-voices#listVoice).
-
-        :param str voice: The voice for which information is to be returned.
+        ### Important voice updates
+        The service's voices underwent significant change on 2 December 2020.
+        * The Arabic, Chinese, Dutch, Australian English, and Korean voices are now neural
+        instead of concatenative.
+        * The `ar-AR_OmarVoice` voice is deprecated. Use `ar-MS_OmarVoice` voice instead.
+        * The `ar-AR` language identifier cannot be used to create a custom model. Use the
+        `ar-MS` identifier instead.
+        * The standard concatenative voices for the following languages are now
+        deprecated: Brazilian Portuguese, United Kingdom and United States English,
+        French, German, Italian, Japanese, and Spanish (all dialects).
+        * The features expressive SSML, voice transformation SSML, and use of the `volume`
+        attribute of the `<prosody>` element are deprecated and are not supported with any
+        of the service's neural voices.
+        * All of the service's voices are now customizable and generally available (GA)
+        for production use.
+        The deprecated voices and features will continue to function for at least one year
+        but might be removed at a future date. You are encouraged to migrate to the
+        equivalent neural voices at your earliest convenience. For more information about
+        all voice updates, see the [2 December 2020 service
+        update](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-release-notes#December2020)
+        in the release notes.
+ + :param str voice: The voice for which information is to be returned. For + more information about specifying a voice, see **Important voice updates** + in the method description. :param str customization_id: (optional) The customization ID (GUID) of a custom model for which information is to be returned. You must make the request with credentials for the instance of the service that owns the @@ -230,6 +257,27 @@ def synthesize(self, For more information about specifying an audio format, including additional details about some of the formats, see [Audio formats](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-audioFormats#audioFormats). + ### Important voice updates + The service's voices underwent significant change on 2 December 2020. + * The Arabic, Chinese, Dutch, Australian English, and Korean voices are now neural + instead of concatenative. + * The `ar-AR_OmarVoice` voice is deprecated. Use `ar-MS_OmarVoice` voice instead. + * The `ar-AR` language identifier cannot be used to create a custom model. Use the + `ar-MS` identifier instead. + * The standard concatenative voices for the following languages are now + deprecated: Brazilian Portuguese, United Kingdom and United States English, + French, German, Italian, Japanese, and Spanish (all dialects). + * The features expressive SSML, voice transformation SSML, and use of the `volume` + attribute of the `` element are deprecated and are not supported with any + of the service's neural voices. + * All of the service's voices are now customizable and generally available (GA) + for production use. + The deprecated voices and features will continue to function for at least one year + but might be removed at a future date. You are encouraged to migrate to the + equivalent neural voices at your earliest convenience. 
For more information about + all voice updates, see the [2 December 2020 service + update](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-release-notes#December2020) + in the release notes. ### Warning messages If a request includes invalid query parameters, the service returns a `Warnings` response header that provides messages about the invalid parameters. The warning @@ -243,7 +291,9 @@ def synthesize(self, audio. You can use the `Accept` header or the `accept` parameter to specify the audio format. For more information about specifying an audio format, see **Audio formats (accept types)** in the method description. - :param str voice: (optional) The voice to use for synthesis. + :param str voice: (optional) The voice to use for synthesis. For more + information about specifying a voice, see **Important voice updates** in + the method description. :param str customization_id: (optional) The customization ID (GUID) of a custom model to use for the synthesis. If a custom model is specified, it works only if it matches the language of the indicated voice. You must make @@ -303,15 +353,38 @@ def get_pronunciation(self, for a specific custom model to see the translation for that model. **See also:** [Querying a word from a language](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-customWords#cuWordsQueryLanguage). + ### Important voice updates + The service's voices underwent significant change on 2 December 2020. + * The Arabic, Chinese, Dutch, Australian English, and Korean voices are now neural + instead of concatenative. + * The `ar-AR_OmarVoice` voice is deprecated. Use `ar-MS_OmarVoice` voice instead. + * The `ar-AR` language identifier cannot be used to create a custom model. Use the + `ar-MS` identifier instead. + * The standard concatenative voices for the following languages are now + deprecated: Brazilian Portuguese, United Kingdom and United States English, + French, German, Italian, Japanese, and Spanish (all dialects). 
+ * The features expressive SSML, voice transformation SSML, and use of the `volume` + attribute of the `` element are deprecated and are not supported with any + of the service's neural voices. + * All of the service's voices are now customizable and generally available (GA) + for production use. + The deprecated voices and features will continue to function for at least one year + but might be removed at a future date. You are encouraged to migrate to the + equivalent neural voices at your earliest convenience. For more information about + all voice updates, see the [2 December 2020 service + update](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-release-notes#December2020) + in the release notes. :param str text: The word for which the pronunciation is requested. :param str voice: (optional) A voice that specifies the language in which the pronunciation is to be returned. All voices for the same language (for - example, `en-US`) return the same translation. + example, `en-US`) return the same translation. For more information about + specifying a voice, see **Important voice updates** in the method + description. :param str format: (optional) The phoneme format in which to return the - pronunciation. The Arabic, Chinese, Dutch, and Korean languages support - only IPA. Omit the parameter to obtain the pronunciation in the default - format. + pronunciation. The Arabic, Chinese, Dutch, Australian English, and Korean + languages support only IPA. Omit the parameter to obtain the pronunciation + in the default format. :param str customization_id: (optional) The customization ID (GUID) of a custom model for which the pronunciation is to be returned. The language of a specified custom model must match the language of the specified voice. If @@ -372,13 +445,35 @@ def create_custom_model(self, used to create it. **See also:** [Creating a custom model](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-customModels#cuModelsCreate). 
+ ### Important voice updates
+ The service's voices underwent significant change on 2 December 2020.
+ * The Arabic, Chinese, Dutch, Australian English, and Korean voices are now neural
+ instead of concatenative.
+ * The `ar-AR_OmarVoice` voice is deprecated. Use `ar-MS_OmarVoice` voice instead.
+ * The `ar-AR` language identifier cannot be used to create a custom model. Use the
+ `ar-MS` identifier instead.
+ * The standard concatenative voices for the following languages are now
+ deprecated: Brazilian Portuguese, United Kingdom and United States English,
+ French, German, Italian, Japanese, and Spanish (all dialects).
+ * The features expressive SSML, voice transformation SSML, and use of the `volume`
+ attribute of the `<prosody>` element are deprecated and are not supported with any
+ of the service's neural voices.
+ * All of the service's voices are now customizable and generally available (GA)
+ for production use.
+ The deprecated voices and features will continue to function for at least one year
+ but might be removed at a future date. You are encouraged to migrate to the
+ equivalent neural voices at your earliest convenience. For more information about
+ all voice updates, see the [2 December 2020 service
+ update](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-release-notes#December2020)
+ in the release notes.
:param str name: The name of the new custom model.
:param str language: (optional) The language of the new custom model. You
create a custom model for a specific language, not for a specific voice. A
- custom model can be used with any voice, standard or neural, for its
- specified language. Omit the parameter to use the the default language,
- `en-US`.
+ custom model can be used with any voice for its specified language. Omit
+ the parameter to use the default language, `en-US`. **Note:** The
+ `ar-AR` language identifier cannot be used to create a custom model. Use
+ the `ar-MS` identifier instead.
:param str description: (optional) A description of the new custom model. Specifying a description is recommended. :param dict headers: A `dict` containing the request headers @@ -421,8 +516,8 @@ def list_custom_models(self, Lists metadata such as the name and description for all custom models that are owned by an instance of the service. Specify a language to list the custom models - for that language only. To see the words in addition to the metadata for a - specific custom model, use the **List a custom model** method. You must use + for that language only. To see the words and prompts in addition to the metadata + for a specific custom model, use the **Get a custom model** method. You must use credentials for the instance of the service that owns a model to list information about it. **See also:** [Querying all custom @@ -542,8 +637,9 @@ def get_custom_model(self, customization_id: str, Gets all information about a specified custom model. In addition to metadata such as the name and description of the custom model, the output includes the words and - their translations as defined in the model. To see just the metadata for a model, - use the **List custom models** method. + their translations that are defined for the model, as well as any prompts that are + defined for the model. To see just the metadata for a model, use the **List custom + models** method. **See also:** [Querying a custom model](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-customModels#cuModelsQuery). @@ -778,9 +874,9 @@ def add_word(self, :param str translation: The phonetic or sounds-like translation for the word. A phonetic translation is based on the SSML format for representing the phonetic string of a word either as an IPA translation or as an IBM SPR - translation. The Arabic, Chinese, Dutch, and Korean languages support only - IPA. A sounds-like is one or more words that, when combined, sound like the - word. + translation. 
The Arabic, Chinese, Dutch, Australian English, and Korean + languages support only IPA. A sounds-like is one or more words that, when + combined, sound like the word. :param str part_of_speech: (optional) **Japanese only.** The part of speech for the word. The service uses the value to produce the correct intonation for the word. You can create only a single entry, with or without a single @@ -914,6 +1010,536 @@ def delete_word(self, customization_id: str, word: str, response = self.send(request) return response + ######################### + # Custom prompts + ######################### + + def list_custom_prompts(self, customization_id: str, + **kwargs) -> DetailedResponse: + """ + List custom prompts. + + Lists information about all custom prompts that are defined for a custom model. + The information includes the prompt ID, prompt text, status, and optional speaker + ID for each prompt of the custom model. You must use credentials for the instance + of the service that owns the custom model. The same information about all of the + prompts for a custom model is also provided by the **Get a custom model** method. + That method provides complete details about a specified custom model, including + its language, owner, custom words, and more. + **Beta:** Custom prompts are beta functionality that is supported only for use + with US English custom models and voices. + **See also:** [Listing custom + prompts](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-tbe-custom-prompts#tbe-custom-prompts-list). + + :param str customization_id: The customization ID (GUID) of the custom + model. You must make the request with credentials for the instance of the + service that owns the custom model. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
+ :rtype: DetailedResponse with `dict` result representing a `Prompts` object + """ + + if customization_id is None: + raise ValueError('customization_id must be provided') + headers = {} + sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='list_custom_prompts') + headers.update(sdk_headers) + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + headers['Accept'] = 'application/json' + + path_param_keys = ['customization_id'] + path_param_values = self.encode_path_vars(customization_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/customizations/{customization_id}/prompts'.format( + **path_param_dict) + request = self.prepare_request(method='GET', url=url, headers=headers) + + response = self.send(request) + return response + + def add_custom_prompt(self, + customization_id: str, + prompt_id: str, + metadata: 'PromptMetadata', + file: BinaryIO, + *, + filename: str = None, + **kwargs) -> DetailedResponse: + """ + Add a custom prompt. + + Adds a custom prompt to a custom model. A prompt is defined by the text that is to + be spoken, the audio for that text, a unique user-specified ID for the prompt, and + an optional speaker ID. The information is used to generate prosodic data that is + not visible to the user. This data is used by the service to produce the + synthesized audio upon request. You must use credentials for the instance of the + service that owns a custom model to add a prompt to it. You can add a maximum of + 1000 custom prompts to a single custom model. + You are recommended to assign meaningful values for prompt IDs. For example, use + `goodbye` to identify a prompt that speaks a farewell message. Prompt IDs must be + unique within a given custom model. You cannot define two prompts with the same + name for the same custom model. If you provide the ID of an existing prompt, the + previously uploaded prompt is replaced by the new information. 
The existing prompt + is reprocessed by using the new text and audio and, if provided, new speaker + model, and the prosody data associated with the prompt is updated. + The quality of a prompt is undefined if the language of a prompt does not match + the language of its custom model. This is consistent with any text or SSML that is + specified for a speech synthesis request. The service makes a best-effort attempt + to render the specified text for the prompt; it does not validate that the + language of the text matches the language of the model. + Adding a prompt is an asynchronous operation. Although it accepts less audio than + speaker enrollment, the service must align the audio with the provided text. The + time that it takes to process a prompt depends on the prompt itself. The + processing time for a reasonably sized prompt generally matches the length of the + audio (for example, it takes 20 seconds to process a 20-second prompt). + For shorter prompts, you can wait for a reasonable amount of time and then check + the status of the prompt with the **Get a custom prompt** method. For longer + prompts, consider using that method to poll the service every few seconds to + determine when the prompt becomes available. No prompt can be used for speech + synthesis if it is in the `processing` or `failed` state. Only prompts that are in + the `available` state can be used for speech synthesis. + When it processes a request, the service attempts to align the text and the audio + that are provided for the prompt. The text that is passed with a prompt must match + the spoken audio as closely as possible. Optimally, the text and audio match + exactly. The service does its best to align the specified text with the audio, and + it can often compensate for mismatches between the two. But if the service cannot + effectively align the text and the audio, possibly because the magnitude of + mismatches between the two is too great, processing of the prompt fails. 
+ ### Evaluating a prompt
+ Always listen to and evaluate a prompt to determine its quality before using it
+ in production. To evaluate a prompt, include only the single prompt in a speech
+ synthesis request by using the following SSML extension, in this case for a prompt
+ whose ID is `goodbye`:
+ `<ibm:prompt id="goodbye"/>`
+ In some cases, you might need to rerecord and resubmit a prompt as many as five
+ times to address the following possible problems:
+ * The service might fail to detect a mismatch between the prompt’s text and audio.
+ The longer the prompt, the greater the chance for misalignment between its text
+ and audio. Therefore, multiple shorter prompts are preferable to a single long
+ prompt.
+ * The text of a prompt might include a word that the service does not recognize.
+ In this case, you can create a custom word and pronunciation pair to tell the
+ service how to pronounce the word. You must then re-create the prompt.
+ * The quality of the input audio might be insufficient or the service’s processing
+ of the audio might fail to detect the intended prosody. Submitting new audio for
+ the prompt can correct these issues.
+ If a prompt that is created without a speaker ID does not adequately reflect the
+ intended prosody, enrolling the speaker and providing a speaker ID for the prompt
+ is one recommended means of potentially improving the quality of the prompt. This
+ is especially important for shorter prompts such as "good-bye" or "thank you,"
+ where less audio data makes it more difficult to match the prosody of the speaker.
+ **Beta:** Custom prompts are beta functionality that is supported only for use
+ with US English custom models and voices.
+ **See also:** + * [Add a custom + prompt](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-tbe-create#tbe-create-add-prompt) + * [Evaluate a custom + prompt](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-tbe-create#tbe-create-evaluate-prompt) + * [Rules for creating custom + prompts](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-tbe-rules#tbe-rules-prompts). + + :param str customization_id: The customization ID (GUID) of the custom + model. You must make the request with credentials for the instance of the + service that owns the custom model. + :param str prompt_id: The identifier of the prompt that is to be added to + the custom model: + * Include a maximum of 49 characters in the ID. + * Include only alphanumeric characters and `_` (underscores) in the ID. + * Do not include XML sensitive characters (double quotes, single quotes, + ampersands, angle brackets, and slashes) in the ID. + * To add a new prompt, the ID must be unique for the specified custom + model. Otherwise, the new information for the prompt overwrites the + existing prompt that has that ID. + :param PromptMetadata metadata: Information about the prompt that is to be + added to a custom model. The following example of a `PromptMetadata` object + includes both the required prompt text and an optional speaker model ID: + `{ "prompt_text": "Thank you and good-bye!", "speaker_id": + "823068b2-ed4e-11ea-b6e0-7b6456aa95cc" }`. + :param BinaryIO file: An audio file that speaks the text of the prompt with + intonation and prosody that matches how you would like the prompt to be + spoken. + * The prompt audio must be in WAV format and must have a minimum sampling + rate of 16 kHz. The service accepts audio with higher sampling rates. The + service transcodes all audio to 16 kHz before processing it. + * The length of the prompt audio is limited to 30 seconds. + :param str filename: (optional) The filename for file. 
+ :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `dict` result representing a `Prompt` object + """ + + if customization_id is None: + raise ValueError('customization_id must be provided') + if prompt_id is None: + raise ValueError('prompt_id must be provided') + if metadata is None: + raise ValueError('metadata must be provided') + if file is None: + raise ValueError('file must be provided') + headers = {} + sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='add_custom_prompt') + headers.update(sdk_headers) + + form_data = [] + form_data.append( + ('metadata', (None, json.dumps(metadata), 'application/json'))) + if not filename and hasattr(file, 'name'): + filename = basename(file.name) + if not filename: + raise ValueError('filename must be provided') + form_data.append(('file', (filename, file, 'audio/wav'))) + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + headers['Accept'] = 'application/json' + + path_param_keys = ['customization_id', 'prompt_id'] + path_param_values = self.encode_path_vars(customization_id, prompt_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/customizations/{customization_id}/prompts/{prompt_id}'.format( + **path_param_dict) + request = self.prepare_request(method='POST', + url=url, + headers=headers, + files=form_data) + + response = self.send(request) + return response + + def get_custom_prompt(self, customization_id: str, prompt_id: str, + **kwargs) -> DetailedResponse: + """ + Get a custom prompt. + + Gets information about a specified custom prompt for a specified custom model. The + information includes the prompt ID, prompt text, status, and optional speaker ID + for each prompt of the custom model. You must use credentials for the instance of + the service that owns the custom model. 
+ **Beta:** Custom prompts are beta functionality that is supported only for use + with US English custom models and voices. + **See also:** [Listing custom + prompts](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-tbe-custom-prompts#tbe-custom-prompts-list). + + :param str customization_id: The customization ID (GUID) of the custom + model. You must make the request with credentials for the instance of the + service that owns the custom model. + :param str prompt_id: The identifier (name) of the prompt. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `dict` result representing a `Prompt` object + """ + + if customization_id is None: + raise ValueError('customization_id must be provided') + if prompt_id is None: + raise ValueError('prompt_id must be provided') + headers = {} + sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='get_custom_prompt') + headers.update(sdk_headers) + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + headers['Accept'] = 'application/json' + + path_param_keys = ['customization_id', 'prompt_id'] + path_param_values = self.encode_path_vars(customization_id, prompt_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/customizations/{customization_id}/prompts/{prompt_id}'.format( + **path_param_dict) + request = self.prepare_request(method='GET', url=url, headers=headers) + + response = self.send(request) + return response + + def delete_custom_prompt(self, customization_id: str, prompt_id: str, + **kwargs) -> DetailedResponse: + """ + Delete a custom prompt. + + Deletes an existing custom prompt from a custom model. The service deletes the + prompt with the specified ID. 
You must use credentials for the instance of the + service that owns the custom model from which the prompt is to be deleted. + **Caution:** Deleting a custom prompt elicits a 400 response code from synthesis + requests that attempt to use the prompt. Make sure that you do not attempt to use + a deleted prompt in a production application. + **Beta:** Custom prompts are beta functionality that is supported only for use + with US English custom models and voices. + **See also:** [Deleting a custom + prompt](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-tbe-custom-prompts#tbe-custom-prompts-delete). + + :param str customization_id: The customization ID (GUID) of the custom + model. You must make the request with credentials for the instance of the + service that owns the custom model. + :param str prompt_id: The identifier (name) of the prompt that is to be + deleted. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
+ :rtype: DetailedResponse + """ + + if customization_id is None: + raise ValueError('customization_id must be provided') + if prompt_id is None: + raise ValueError('prompt_id must be provided') + headers = {} + sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='delete_custom_prompt') + headers.update(sdk_headers) + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + + path_param_keys = ['customization_id', 'prompt_id'] + path_param_values = self.encode_path_vars(customization_id, prompt_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/customizations/{customization_id}/prompts/{prompt_id}'.format( + **path_param_dict) + request = self.prepare_request(method='DELETE', + url=url, + headers=headers) + + response = self.send(request) + return response + + ######################### + # Speaker models + ######################### + + def list_speaker_models(self, **kwargs) -> DetailedResponse: + """ + List speaker models. + + Lists information about all speaker models that are defined for a service + instance. The information includes the speaker ID and speaker name of each defined + speaker. You must use credentials for the instance of a service to list its + speakers. + **Beta:** Speaker models and the custom prompts with which they are used are beta + functionality that is supported only for use with US English custom models and + voices. + **See also:** [Listing speaker + models](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-tbe-speaker-models#tbe-speaker-models-list). + + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
+ :rtype: DetailedResponse with `dict` result representing a `Speakers` object + """ + + headers = {} + sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='list_speaker_models') + headers.update(sdk_headers) + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + headers['Accept'] = 'application/json' + + url = '/v1/speakers' + request = self.prepare_request(method='GET', url=url, headers=headers) + + response = self.send(request) + return response + + def create_speaker_model(self, speaker_name: str, audio: BinaryIO, + **kwargs) -> DetailedResponse: + """ + Create a speaker model. + + Creates a new speaker model, which is an optional enrollment token for users who + are to add prompts to custom models. A speaker model contains information about a + user's voice. The service extracts this information from a WAV audio sample that + you pass as the body of the request. Associating a speaker model with a prompt is + optional, but the information that is extracted from the speaker model helps the + service learn about the speaker's voice. + A speaker model can make an appreciable difference in the quality of prompts, + especially short prompts with relatively little audio, that are associated with + that speaker. A speaker model can help the service produce a prompt with more + confidence; the lack of a speaker model can potentially compromise the quality of + a prompt. + The gender of the speaker who creates a speaker model does not need to match the + gender of a voice that is used with prompts that are associated with that speaker + model. For example, a speaker model that is created by a male speaker can be + associated with prompts that are spoken by female voices. + You create a speaker model for a given instance of the service. The new speaker + model is owned by the service instance whose credentials are used to create it. 
+ That same speaker can then be used to create prompts for all custom models within
+ that service instance. No language is associated with a speaker model, but each
+ custom model has a single specified language. You can add prompts only to US
+ English models.
+ You specify a name for the speaker when you create it. The name must be unique
+ among all speaker names for the owning service instance. To re-create a speaker
+ model for an existing speaker name, you must first delete the existing speaker
+ model that has that name.
+ Speaker enrollment is a synchronous operation. Although it accepts more audio data
+ than a prompt, the process of adding a speaker is very fast. The service simply
+ extracts information about the speaker’s voice from the audio. Unlike prompts,
+ speaker models neither need nor accept a transcription of the audio. When the call
+ returns, the audio is fully processed and the speaker enrollment is complete.
+ The service returns a speaker ID with the request. A speaker ID is a globally unique
+ identifier (GUID) that you use to identify the speaker in subsequent requests to
+ the service.
+ **Beta:** Speaker models and the custom prompts with which they are used are beta
+ functionality that is supported only for use with US English custom models and
+ voices.
+ **See also:**
+ * [Create a speaker
+ model](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-tbe-create#tbe-create-speaker-model)
+ * [Rules for creating speaker
+ models](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-tbe-rules#tbe-rules-speakers).
+
+ :param str speaker_name: The name of the speaker that is to be added to the
+ service instance.
+ * Include a maximum of 49 characters in the name.
+ * Include only alphanumeric characters and `_` (underscores) in the name.
+ * Do not include XML sensitive characters (double quotes, single quotes,
+ ampersands, angle brackets, and slashes) in the name.
+ * Do not use the name of an existing speaker that is already defined for + the service instance. + :param BinaryIO audio: An enrollment audio file that contains a sample of + the speaker’s voice. + * The enrollment audio must be in WAV format and must have a minimum + sampling rate of 16 kHz. The service accepts audio with higher sampling + rates. It transcodes all audio to 16 kHz before processing it. + * The length of the enrollment audio is limited to 1 minute. Speaking one + or two paragraphs of text that include five to ten sentences is + recommended. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `dict` result representing a `SpeakerModel` object + """ + + if speaker_name is None: + raise ValueError('speaker_name must be provided') + if audio is None: + raise ValueError('audio must be provided') + headers = {} + sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='create_speaker_model') + headers.update(sdk_headers) + + params = {'speaker_name': speaker_name} + + data = audio + headers['content-type'] = 'audio/wav' + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + headers['Accept'] = 'application/json' + + url = '/v1/speakers' + request = self.prepare_request(method='POST', + url=url, + headers=headers, + params=params, + data=data) + + response = self.send(request) + return response + + def get_speaker_model(self, speaker_id: str, **kwargs) -> DetailedResponse: + """ + Get a speaker model. + + Gets information about all prompts that are defined by a specified speaker for all + custom models that are owned by a service instance. The information is grouped by + the customization IDs of the custom models. For each custom model, the information + lists information about each prompt that is defined for that custom model by the + speaker. 
You must use credentials for the instance of the service that owns a + speaker model to list its prompts. + **Beta:** Speaker models and the custom prompts with which they are used are beta + functionality that is supported only for use with US English custom models and + voices. + **See also:** [Listing the custom prompts for a speaker + model](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-tbe-speaker-models#tbe-speaker-models-list-prompts). + + :param str speaker_id: The speaker ID (GUID) of the speaker model. You must + make the request with service credentials for the instance of the service + that owns the speaker model. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `dict` result representing a `SpeakerCustomModels` object + """ + + if speaker_id is None: + raise ValueError('speaker_id must be provided') + headers = {} + sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='get_speaker_model') + headers.update(sdk_headers) + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + headers['Accept'] = 'application/json' + + path_param_keys = ['speaker_id'] + path_param_values = self.encode_path_vars(speaker_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/speakers/{speaker_id}'.format(**path_param_dict) + request = self.prepare_request(method='GET', url=url, headers=headers) + + response = self.send(request) + return response + + def delete_speaker_model(self, speaker_id: str, + **kwargs) -> DetailedResponse: + """ + Delete a speaker model. + + Deletes an existing speaker model from the service instance. The service deletes + the enrolled speaker with the specified speaker ID. You must use credentials for + the instance of the service that owns a speaker model to delete the speaker. 
+ Any prompts that are associated with the deleted speaker are not affected by the + speaker's deletion. The prosodic data that defines the quality of a prompt is + established when the prompt is created. A prompt is static and remains unaffected + by deletion of its associated speaker. However, the prompt cannot be resubmitted + or updated with its original speaker once that speaker is deleted. + **Beta:** Speaker models and the custom prompts with which they are used are beta + functionality that is supported only for use with US English custom models and + voices. + **See also:** [Deleting a speaker + model](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-tbe-speaker-models#tbe-speaker-models-delete). + + :param str speaker_id: The speaker ID (GUID) of the speaker model. You must + make the request with service credentials for the instance of the service + that owns the speaker model. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse + """ + + if speaker_id is None: + raise ValueError('speaker_id must be provided') + headers = {} + sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='delete_speaker_model') + headers.update(sdk_headers) + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + + path_param_keys = ['speaker_id'] + path_param_values = self.encode_path_vars(speaker_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/speakers/{speaker_id}'.format(**path_param_dict) + request = self.prepare_request(method='DELETE', + url=url, + headers=headers) + + response = self.send(request) + return response + ######################### # User data ######################### @@ -973,14 +1599,18 @@ class GetVoiceEnums: class Voice(str, Enum): """ - The voice for which information is to be returned. 
+ The voice for which information is to be returned. For more information about + specifying a voice, see **Important voice updates** in the method description. """ AR_AR_OMARVOICE = 'ar-AR_OmarVoice' + AR_MS_OMARVOICE = 'ar-MS_OmarVoice' DE_DE_BIRGITVOICE = 'de-DE_BirgitVoice' DE_DE_BIRGITV3VOICE = 'de-DE_BirgitV3Voice' DE_DE_DIETERVOICE = 'de-DE_DieterVoice' DE_DE_DIETERV3VOICE = 'de-DE_DieterV3Voice' DE_DE_ERIKAV3VOICE = 'de-DE_ErikaV3Voice' + EN_AU_CRAIGVOICE = 'en-AU-CraigVoice' + EN_AU_MADISONVOICE = 'en-AU-MadisonVoice' EN_GB_CHARLOTTEV3VOICE = 'en-GB_CharlotteV3Voice' EN_GB_JAMESV3VOICE = 'en-GB_JamesV3Voice' EN_GB_KATEVOICE = 'en-GB_KateVoice' @@ -1003,6 +1633,7 @@ class Voice(str, Enum): ES_LA_SOFIAV3VOICE = 'es-LA_SofiaV3Voice' ES_US_SOFIAVOICE = 'es-US_SofiaVoice' ES_US_SOFIAV3VOICE = 'es-US_SofiaV3Voice' + FR_CA_LOUISEV3VOICE = 'fr-CA_LouiseV3Voice' FR_FR_NICOLASV3VOICE = 'fr-FR_NicolasV3Voice' FR_FR_RENEEVOICE = 'fr-FR_ReneeVoice' FR_FR_RENEEV3VOICE = 'fr-FR_ReneeV3Voice' @@ -1010,6 +1641,8 @@ class Voice(str, Enum): IT_IT_FRANCESCAV3VOICE = 'it-IT_FrancescaV3Voice' JA_JP_EMIVOICE = 'ja-JP_EmiVoice' JA_JP_EMIV3VOICE = 'ja-JP_EmiV3Voice' + KO_KR_HYUNJUNVOICE = 'ko-KR_HyunjunVoice' + KO_KR_SIWOOVOICE = 'ko-KR_SiWooVoice' KO_KR_YOUNGMIVOICE = 'ko-KR_YoungmiVoice' KO_KR_YUNAVOICE = 'ko-KR_YunaVoice' NL_NL_EMMAVOICE = 'nl-NL_EmmaVoice' @@ -1049,14 +1682,18 @@ class Accept(str, Enum): class Voice(str, Enum): """ - The voice to use for synthesis. + The voice to use for synthesis. For more information about specifying a voice, see + **Important voice updates** in the method description. 
""" AR_AR_OMARVOICE = 'ar-AR_OmarVoice' + AR_MS_OMARVOICE = 'ar-MS_OmarVoice' DE_DE_BIRGITVOICE = 'de-DE_BirgitVoice' DE_DE_BIRGITV3VOICE = 'de-DE_BirgitV3Voice' DE_DE_DIETERVOICE = 'de-DE_DieterVoice' DE_DE_DIETERV3VOICE = 'de-DE_DieterV3Voice' DE_DE_ERIKAV3VOICE = 'de-DE_ErikaV3Voice' + EN_AU_CRAIGVOICE = 'en-AU-CraigVoice' + EN_AU_MADISONVOICE = 'en-AU-MadisonVoice' EN_GB_CHARLOTTEV3VOICE = 'en-GB_CharlotteV3Voice' EN_GB_JAMESV3VOICE = 'en-GB_JamesV3Voice' EN_GB_KATEVOICE = 'en-GB_KateVoice' @@ -1079,6 +1716,7 @@ class Voice(str, Enum): ES_LA_SOFIAV3VOICE = 'es-LA_SofiaV3Voice' ES_US_SOFIAVOICE = 'es-US_SofiaVoice' ES_US_SOFIAV3VOICE = 'es-US_SofiaV3Voice' + FR_CA_LOUISEV3VOICE = 'fr-CA_LouiseV3Voice' FR_FR_NICOLASV3VOICE = 'fr-FR_NicolasV3Voice' FR_FR_RENEEVOICE = 'fr-FR_ReneeVoice' FR_FR_RENEEV3VOICE = 'fr-FR_ReneeV3Voice' @@ -1086,6 +1724,8 @@ class Voice(str, Enum): IT_IT_FRANCESCAV3VOICE = 'it-IT_FrancescaV3Voice' JA_JP_EMIVOICE = 'ja-JP_EmiVoice' JA_JP_EMIV3VOICE = 'ja-JP_EmiV3Voice' + KO_KR_HYUNJUNVOICE = 'ko-KR_HyunjunVoice' + KO_KR_SIWOOVOICE = 'ko-KR_SiWooVoice' KO_KR_YOUNGMIVOICE = 'ko-KR_YoungmiVoice' KO_KR_YUNAVOICE = 'ko-KR_YunaVoice' NL_NL_EMMAVOICE = 'nl-NL_EmmaVoice' @@ -1106,14 +1746,18 @@ class Voice(str, Enum): """ A voice that specifies the language in which the pronunciation is to be returned. All voices for the same language (for example, `en-US`) return the same - translation. + translation. For more information about specifying a voice, see **Important voice + updates** in the method description. 
""" AR_AR_OMARVOICE = 'ar-AR_OmarVoice' + AR_MS_OMARVOICE = 'ar-MS_OmarVoice' DE_DE_BIRGITVOICE = 'de-DE_BirgitVoice' DE_DE_BIRGITV3VOICE = 'de-DE_BirgitV3Voice' DE_DE_DIETERVOICE = 'de-DE_DieterVoice' DE_DE_DIETERV3VOICE = 'de-DE_DieterV3Voice' DE_DE_ERIKAV3VOICE = 'de-DE_ErikaV3Voice' + EN_AU_CRAIGVOICE = 'en-AU-CraigVoice' + EN_AU_MADISONVOICE = 'en-AU-MadisonVoice' EN_GB_CHARLOTTEV3VOICE = 'en-GB_CharlotteV3Voice' EN_GB_JAMESV3VOICE = 'en-GB_JamesV3Voice' EN_GB_KATEVOICE = 'en-GB_KateVoice' @@ -1136,6 +1780,7 @@ class Voice(str, Enum): ES_LA_SOFIAV3VOICE = 'es-LA_SofiaV3Voice' ES_US_SOFIAVOICE = 'es-US_SofiaVoice' ES_US_SOFIAV3VOICE = 'es-US_SofiaV3Voice' + FR_CA_LOUISEV3VOICE = 'fr-CA_LouiseV3Voice' FR_FR_NICOLASV3VOICE = 'fr-FR_NicolasV3Voice' FR_FR_RENEEVOICE = 'fr-FR_ReneeVoice' FR_FR_RENEEV3VOICE = 'fr-FR_ReneeV3Voice' @@ -1143,6 +1788,8 @@ class Voice(str, Enum): IT_IT_FRANCESCAV3VOICE = 'it-IT_FrancescaV3Voice' JA_JP_EMIVOICE = 'ja-JP_EmiVoice' JA_JP_EMIV3VOICE = 'ja-JP_EmiV3Voice' + KO_KR_HYUNJUNVOICE = 'ko-KR_HyunjunVoice' + KO_KR_SIWOOVOICE = 'ko-KR_SiWooVoice' KO_KR_YOUNGMIVOICE = 'ko-KR_YoungmiVoice' KO_KR_YUNAVOICE = 'ko-KR_YunaVoice' NL_NL_EMMAVOICE = 'nl-NL_EmmaVoice' @@ -1156,8 +1803,8 @@ class Voice(str, Enum): class Format(str, Enum): """ The phoneme format in which to return the pronunciation. The Arabic, Chinese, - Dutch, and Korean languages support only IPA. Omit the parameter to obtain the - pronunciation in the default format. + Dutch, Australian English, and Korean languages support only IPA. Omit the + parameter to obtain the pronunciation in the default format. """ IBM = 'ibm' IPA = 'ipa' @@ -1174,12 +1821,15 @@ class Language(str, Enum): are to be returned. Omit the parameter to see all custom models that are owned by the requester. 
""" + AR_MS = 'ar-MS' DE_DE = 'de-DE' + EN_AU = 'en-AU' EN_GB = 'en-GB' EN_US = 'en-US' ES_ES = 'es-ES' ES_LA = 'es-LA' ES_US = 'es-US' + FR_CA = 'fr-CA' FR_FR = 'fr-FR' IT_IT = 'it-IT' JA_JP = 'ja-JP' @@ -1218,9 +1868,12 @@ class CustomModel(): :attr List[Word] words: (optional) An array of `Word` objects that lists the words and their translations from the custom model. The words are listed in alphabetical order, with uppercase letters listed before lowercase letters. The - array is empty if the custom model contains no words. This field is returned - only by the **Get a voice** method and only when you specify the customization - ID of a custom model. + array is empty if no words are defined for the custom model. This field is + returned only by the **Get a custom model** method. + :attr List[Prompt] prompts: (optional) An array of `Prompt` objects that + provides information about the prompts that are defined for the specified custom + model. The array is empty if no prompts are defined for the custom model. This + field is returned only by the **Get a custom model** method. """ def __init__(self, @@ -1232,7 +1885,8 @@ def __init__(self, created: str = None, last_modified: str = None, description: str = None, - words: List['Word'] = None) -> None: + words: List['Word'] = None, + prompts: List['Prompt'] = None) -> None: """ Initialize a CustomModel object. @@ -1256,9 +1910,13 @@ def __init__(self, :param List[Word] words: (optional) An array of `Word` objects that lists the words and their translations from the custom model. The words are listed in alphabetical order, with uppercase letters listed before - lowercase letters. The array is empty if the custom model contains no - words. This field is returned only by the **Get a voice** method and only - when you specify the customization ID of a custom model. + lowercase letters. The array is empty if no words are defined for the + custom model. This field is returned only by the **Get a custom model** + method. 
+ :param List[Prompt] prompts: (optional) An array of `Prompt` objects that + provides information about the prompts that are defined for the specified + custom model. The array is empty if no prompts are defined for the custom + model. This field is returned only by the **Get a custom model** method. """ self.customization_id = customization_id self.name = name @@ -1268,6 +1926,7 @@ def __init__(self, self.last_modified = last_modified self.description = description self.words = words + self.prompts = prompts @classmethod def from_dict(cls, _dict: Dict) -> 'CustomModel': @@ -1293,6 +1952,10 @@ def from_dict(cls, _dict: Dict) -> 'CustomModel': args['description'] = _dict.get('description') if 'words' in _dict: args['words'] = [Word.from_dict(x) for x in _dict.get('words')] + if 'prompts' in _dict: + args['prompts'] = [ + Prompt.from_dict(x) for x in _dict.get('prompts') + ] return cls(**args) @classmethod @@ -1320,6 +1983,8 @@ def to_dict(self) -> Dict: _dict['description'] = self.description if hasattr(self, 'words') and self.words is not None: _dict['words'] = [x.to_dict() for x in self.words] + if hasattr(self, 'prompts') and self.prompts is not None: + _dict['prompts'] = [x.to_dict() for x in self.prompts] return _dict def _to_dict(self): @@ -1407,47 +2072,102 @@ def __ne__(self, other: 'CustomModels') -> bool: return not self == other -class Pronunciation(): +class Prompt(): """ - The pronunciation of the specified text. - - :attr str pronunciation: The pronunciation of the specified text in the - requested voice and format. If a custom model is specified, the pronunciation - also reflects that custom model. + Information about a custom prompt. + + :attr str prompt: The user-specified text of the prompt. + :attr str prompt_id: The user-specified identifier (name) of the prompt. + :attr str status: The status of the prompt: + * `processing`: The service received the request to add the prompt and is + analyzing the validity of the prompt. 
+ * `available`: The service successfully validated the prompt, which is now ready + for use in a speech synthesis request. + * `failed`: The service's validation of the prompt failed. The status of the + prompt includes an `error` field that describes the reason for the failure. + :attr str error: (optional) If the status of the prompt is `failed`, an error + message that describes the reason for the failure. The field is omitted if no + error occurred. + :attr str speaker_id: (optional) The speaker ID (GUID) of the speaker for which + the prompt was defined. The field is omitted if no speaker ID was specified. """ - def __init__(self, pronunciation: str) -> None: - """ - Initialize a Pronunciation object. - - :param str pronunciation: The pronunciation of the specified text in the - requested voice and format. If a custom model is specified, the - pronunciation also reflects that custom model. - """ - self.pronunciation = pronunciation + def __init__(self, + prompt: str, + prompt_id: str, + status: str, + *, + error: str = None, + speaker_id: str = None) -> None: + """ + Initialize a Prompt object. + + :param str prompt: The user-specified text of the prompt. + :param str prompt_id: The user-specified identifier (name) of the prompt. + :param str status: The status of the prompt: + * `processing`: The service received the request to add the prompt and is + analyzing the validity of the prompt. + * `available`: The service successfully validated the prompt, which is now + ready for use in a speech synthesis request. + * `failed`: The service's validation of the prompt failed. The status of + the prompt includes an `error` field that describes the reason for the + failure. + :param str error: (optional) If the status of the prompt is `failed`, an + error message that describes the reason for the failure. The field is + omitted if no error occurred. + :param str speaker_id: (optional) The speaker ID (GUID) of the speaker for + which the prompt was defined. 
The field is omitted if no speaker ID was + specified. + """ + self.prompt = prompt + self.prompt_id = prompt_id + self.status = status + self.error = error + self.speaker_id = speaker_id @classmethod - def from_dict(cls, _dict: Dict) -> 'Pronunciation': - """Initialize a Pronunciation object from a json dictionary.""" + def from_dict(cls, _dict: Dict) -> 'Prompt': + """Initialize a Prompt object from a json dictionary.""" args = {} - if 'pronunciation' in _dict: - args['pronunciation'] = _dict.get('pronunciation') + if 'prompt' in _dict: + args['prompt'] = _dict.get('prompt') else: raise ValueError( - 'Required property \'pronunciation\' not present in Pronunciation JSON' - ) + 'Required property \'prompt\' not present in Prompt JSON') + if 'prompt_id' in _dict: + args['prompt_id'] = _dict.get('prompt_id') + else: + raise ValueError( + 'Required property \'prompt_id\' not present in Prompt JSON') + if 'status' in _dict: + args['status'] = _dict.get('status') + else: + raise ValueError( + 'Required property \'status\' not present in Prompt JSON') + if 'error' in _dict: + args['error'] = _dict.get('error') + if 'speaker_id' in _dict: + args['speaker_id'] = _dict.get('speaker_id') return cls(**args) @classmethod def _from_dict(cls, _dict): - """Initialize a Pronunciation object from a json dictionary.""" + """Initialize a Prompt object from a json dictionary.""" return cls.from_dict(_dict) def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'pronunciation') and self.pronunciation is not None: - _dict['pronunciation'] = self.pronunciation + if hasattr(self, 'prompt') and self.prompt is not None: + _dict['prompt'] = self.prompt + if hasattr(self, 'prompt_id') and self.prompt_id is not None: + _dict['prompt_id'] = self.prompt_id + if hasattr(self, 'status') and self.status is not None: + _dict['status'] = self.status + if hasattr(self, 'error') and self.error is not None: + _dict['error'] = self.error + if 
hasattr(self, 'speaker_id') and self.speaker_id is not None: + _dict['speaker_id'] = self.speaker_id return _dict def _to_dict(self): @@ -1455,16 +2175,666 @@ def _to_dict(self): return self.to_dict() def __str__(self) -> str: - """Return a `str` version of this Pronunciation object.""" + """Return a `str` version of this Prompt object.""" return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other: 'Pronunciation') -> bool: + def __eq__(self, other: 'Prompt') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other: 'Pronunciation') -> bool: + def __ne__(self, other: 'Prompt') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class PromptMetadata(): + """ + Information about the prompt that is to be added to a custom model. The following + example of a `PromptMetadata` object includes both the required prompt text and an + optional speaker model ID: + `{ "prompt_text": "Thank you and good-bye!", "speaker_id": + "823068b2-ed4e-11ea-b6e0-7b6456aa95cc" }`. + + :attr str prompt_text: The required written text of the spoken prompt. The + length of a prompt's text is limited to a few sentences. Speaking one or two + sentences of text is the recommended limit. A prompt cannot contain more than + 1000 characters of text. Escape any XML control characters (double quotes, + single quotes, ampersands, angle brackets, and slashes) that appear in the text + of the prompt. + :attr str speaker_id: (optional) The optional speaker ID (GUID) of a previously + defined speaker model that is to be associated with the prompt. + """ + + def __init__(self, prompt_text: str, *, speaker_id: str = None) -> None: + """ + Initialize a PromptMetadata object. + + :param str prompt_text: The required written text of the spoken prompt. 
The + length of a prompt's text is limited to a few sentences. Speaking one or + two sentences of text is the recommended limit. A prompt cannot contain + more than 1000 characters of text. Escape any XML control characters + (double quotes, single quotes, ampersands, angle brackets, and slashes) + that appear in the text of the prompt. + :param str speaker_id: (optional) The optional speaker ID (GUID) of a + previously defined speaker model that is to be associated with the prompt. + """ + self.prompt_text = prompt_text + self.speaker_id = speaker_id + + @classmethod + def from_dict(cls, _dict: Dict) -> 'PromptMetadata': + """Initialize a PromptMetadata object from a json dictionary.""" + args = {} + if 'prompt_text' in _dict: + args['prompt_text'] = _dict.get('prompt_text') + else: + raise ValueError( + 'Required property \'prompt_text\' not present in PromptMetadata JSON' + ) + if 'speaker_id' in _dict: + args['speaker_id'] = _dict.get('speaker_id') + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a PromptMetadata object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'prompt_text') and self.prompt_text is not None: + _dict['prompt_text'] = self.prompt_text + if hasattr(self, 'speaker_id') and self.speaker_id is not None: + _dict['speaker_id'] = self.speaker_id + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this PromptMetadata object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'PromptMetadata') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'PromptMetadata') -> bool: + 
"""Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class Prompts(): + """ + Information about the custom prompts that are defined for a custom model. + + :attr List[Prompt] prompts: An array of `Prompt` objects that provides + information about the prompts that are defined for the specified custom model. + The array is empty if no prompts are defined for the custom model. + """ + + def __init__(self, prompts: List['Prompt']) -> None: + """ + Initialize a Prompts object. + + :param List[Prompt] prompts: An array of `Prompt` objects that provides + information about the prompts that are defined for the specified custom + model. The array is empty if no prompts are defined for the custom model. + """ + self.prompts = prompts + + @classmethod + def from_dict(cls, _dict: Dict) -> 'Prompts': + """Initialize a Prompts object from a json dictionary.""" + args = {} + if 'prompts' in _dict: + args['prompts'] = [ + Prompt.from_dict(x) for x in _dict.get('prompts') + ] + else: + raise ValueError( + 'Required property \'prompts\' not present in Prompts JSON') + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a Prompts object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'prompts') and self.prompts is not None: + _dict['prompts'] = [x.to_dict() for x in self.prompts] + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this Prompts object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'Prompts') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 
'Prompts') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class Pronunciation(): + """ + The pronunciation of the specified text. + + :attr str pronunciation: The pronunciation of the specified text in the + requested voice and format. If a custom model is specified, the pronunciation + also reflects that custom model. + """ + + def __init__(self, pronunciation: str) -> None: + """ + Initialize a Pronunciation object. + + :param str pronunciation: The pronunciation of the specified text in the + requested voice and format. If a custom model is specified, the + pronunciation also reflects that custom model. + """ + self.pronunciation = pronunciation + + @classmethod + def from_dict(cls, _dict: Dict) -> 'Pronunciation': + """Initialize a Pronunciation object from a json dictionary.""" + args = {} + if 'pronunciation' in _dict: + args['pronunciation'] = _dict.get('pronunciation') + else: + raise ValueError( + 'Required property \'pronunciation\' not present in Pronunciation JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a Pronunciation object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'pronunciation') and self.pronunciation is not None: + _dict['pronunciation'] = self.pronunciation + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this Pronunciation object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'Pronunciation') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'Pronunciation') -> bool: + """Return 
`true` when self and other are not equal, false otherwise.""" + return not self == other + + +class Speaker(): + """ + Information about a speaker model. + + :attr str speaker_id: The speaker ID (GUID) of the speaker. + :attr str name: The user-defined name of the speaker. + """ + + def __init__(self, speaker_id: str, name: str) -> None: + """ + Initialize a Speaker object. + + :param str speaker_id: The speaker ID (GUID) of the speaker. + :param str name: The user-defined name of the speaker. + """ + self.speaker_id = speaker_id + self.name = name + + @classmethod + def from_dict(cls, _dict: Dict) -> 'Speaker': + """Initialize a Speaker object from a json dictionary.""" + args = {} + if 'speaker_id' in _dict: + args['speaker_id'] = _dict.get('speaker_id') + else: + raise ValueError( + 'Required property \'speaker_id\' not present in Speaker JSON') + if 'name' in _dict: + args['name'] = _dict.get('name') + else: + raise ValueError( + 'Required property \'name\' not present in Speaker JSON') + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a Speaker object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'speaker_id') and self.speaker_id is not None: + _dict['speaker_id'] = self.speaker_id + if hasattr(self, 'name') and self.name is not None: + _dict['name'] = self.name + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this Speaker object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'Speaker') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'Speaker') -> bool: + """Return `true` when 
self and other are not equal, false otherwise.""" + return not self == other + + +class SpeakerCustomModel(): + """ + A custom models for which the speaker has defined prompts. + + :attr str customization_id: The customization ID (GUID) of a custom model for + which the speaker has defined one or more prompts. + :attr List[SpeakerPrompt] prompts: An array of `SpeakerPrompt` objects that + provides information about each prompt that the user has defined for the custom + model. + """ + + def __init__(self, customization_id: str, + prompts: List['SpeakerPrompt']) -> None: + """ + Initialize a SpeakerCustomModel object. + + :param str customization_id: The customization ID (GUID) of a custom model + for which the speaker has defined one or more prompts. + :param List[SpeakerPrompt] prompts: An array of `SpeakerPrompt` objects + that provides information about each prompt that the user has defined for + the custom model. + """ + self.customization_id = customization_id + self.prompts = prompts + + @classmethod + def from_dict(cls, _dict: Dict) -> 'SpeakerCustomModel': + """Initialize a SpeakerCustomModel object from a json dictionary.""" + args = {} + if 'customization_id' in _dict: + args['customization_id'] = _dict.get('customization_id') + else: + raise ValueError( + 'Required property \'customization_id\' not present in SpeakerCustomModel JSON' + ) + if 'prompts' in _dict: + args['prompts'] = [ + SpeakerPrompt.from_dict(x) for x in _dict.get('prompts') + ] + else: + raise ValueError( + 'Required property \'prompts\' not present in SpeakerCustomModel JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a SpeakerCustomModel object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, + 'customization_id') and self.customization_id is not None: + _dict['customization_id'] = self.customization_id + if 
hasattr(self, 'prompts') and self.prompts is not None: + _dict['prompts'] = [x.to_dict() for x in self.prompts] + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this SpeakerCustomModel object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'SpeakerCustomModel') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'SpeakerCustomModel') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class SpeakerCustomModels(): + """ + Custom models for which the speaker has defined prompts. + + :attr List[SpeakerCustomModel] customizations: An array of `SpeakerCustomModel` + objects. Each object provides information about the prompts that are defined for + a specified speaker in the custom models that are owned by a specified service + instance. The array is empty if no prompts are defined for the speaker. + """ + + def __init__(self, customizations: List['SpeakerCustomModel']) -> None: + """ + Initialize a SpeakerCustomModels object. + + :param List[SpeakerCustomModel] customizations: An array of + `SpeakerCustomModel` objects. Each object provides information about the + prompts that are defined for a specified speaker in the custom models that + are owned by a specified service instance. The array is empty if no prompts + are defined for the speaker. 
+ """ + self.customizations = customizations + + @classmethod + def from_dict(cls, _dict: Dict) -> 'SpeakerCustomModels': + """Initialize a SpeakerCustomModels object from a json dictionary.""" + args = {} + if 'customizations' in _dict: + args['customizations'] = [ + SpeakerCustomModel.from_dict(x) + for x in _dict.get('customizations') + ] + else: + raise ValueError( + 'Required property \'customizations\' not present in SpeakerCustomModels JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a SpeakerCustomModels object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'customizations') and self.customizations is not None: + _dict['customizations'] = [x.to_dict() for x in self.customizations] + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this SpeakerCustomModels object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'SpeakerCustomModels') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'SpeakerCustomModels') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class SpeakerModel(): + """ + The speaker ID of the speaker model. + + :attr str speaker_id: The speaker ID (GUID) of the speaker model. + """ + + def __init__(self, speaker_id: str) -> None: + """ + Initialize a SpeakerModel object. + + :param str speaker_id: The speaker ID (GUID) of the speaker model. 
+ """ + self.speaker_id = speaker_id + + @classmethod + def from_dict(cls, _dict: Dict) -> 'SpeakerModel': + """Initialize a SpeakerModel object from a json dictionary.""" + args = {} + if 'speaker_id' in _dict: + args['speaker_id'] = _dict.get('speaker_id') + else: + raise ValueError( + 'Required property \'speaker_id\' not present in SpeakerModel JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a SpeakerModel object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'speaker_id') and self.speaker_id is not None: + _dict['speaker_id'] = self.speaker_id + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this SpeakerModel object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'SpeakerModel') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'SpeakerModel') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class SpeakerPrompt(): + """ + A prompt that a speaker has defined for a custom model. + + :attr str prompt: The user-specified text of the prompt. + :attr str prompt_id: The user-specified identifier (name) of the prompt. + :attr str status: The status of the prompt: + * `processing`: The service received the request to add the prompt and is + analyzing the validity of the prompt. + * `available`: The service successfully validated the prompt, which is now ready + for use in a speech synthesis request. + * `failed`: The service's validation of the prompt failed. 
The status of the + prompt includes an `error` field that describes the reason for the failure. + :attr str error: (optional) If the status of the prompt is `failed`, an error + message that describes the reason for the failure. The field is omitted if no + error occurred. + """ + + def __init__(self, + prompt: str, + prompt_id: str, + status: str, + *, + error: str = None) -> None: + """ + Initialize a SpeakerPrompt object. + + :param str prompt: The user-specified text of the prompt. + :param str prompt_id: The user-specified identifier (name) of the prompt. + :param str status: The status of the prompt: + * `processing`: The service received the request to add the prompt and is + analyzing the validity of the prompt. + * `available`: The service successfully validated the prompt, which is now + ready for use in a speech synthesis request. + * `failed`: The service's validation of the prompt failed. The status of + the prompt includes an `error` field that describes the reason for the + failure. + :param str error: (optional) If the status of the prompt is `failed`, an + error message that describes the reason for the failure. The field is + omitted if no error occurred. 
+ """ + self.prompt = prompt + self.prompt_id = prompt_id + self.status = status + self.error = error + + @classmethod + def from_dict(cls, _dict: Dict) -> 'SpeakerPrompt': + """Initialize a SpeakerPrompt object from a json dictionary.""" + args = {} + if 'prompt' in _dict: + args['prompt'] = _dict.get('prompt') + else: + raise ValueError( + 'Required property \'prompt\' not present in SpeakerPrompt JSON' + ) + if 'prompt_id' in _dict: + args['prompt_id'] = _dict.get('prompt_id') + else: + raise ValueError( + 'Required property \'prompt_id\' not present in SpeakerPrompt JSON' + ) + if 'status' in _dict: + args['status'] = _dict.get('status') + else: + raise ValueError( + 'Required property \'status\' not present in SpeakerPrompt JSON' + ) + if 'error' in _dict: + args['error'] = _dict.get('error') + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a SpeakerPrompt object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'prompt') and self.prompt is not None: + _dict['prompt'] = self.prompt + if hasattr(self, 'prompt_id') and self.prompt_id is not None: + _dict['prompt_id'] = self.prompt_id + if hasattr(self, 'status') and self.status is not None: + _dict['status'] = self.status + if hasattr(self, 'error') and self.error is not None: + _dict['error'] = self.error + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this SpeakerPrompt object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'SpeakerPrompt') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'SpeakerPrompt') -> bool: + """Return `true` 
when self and other are not equal, false otherwise.""" + return not self == other + + +class Speakers(): + """ + Information about all speaker models for the service instance. + + :attr List[Speaker] speakers: An array of `Speaker` objects that provides + information about the speakers for the service instance. The array is empty if + the service instance has no speakers. + """ + + def __init__(self, speakers: List['Speaker']) -> None: + """ + Initialize a Speakers object. + + :param List[Speaker] speakers: An array of `Speaker` objects that provides + information about the speakers for the service instance. The array is empty + if the service instance has no speakers. + """ + self.speakers = speakers + + @classmethod + def from_dict(cls, _dict: Dict) -> 'Speakers': + """Initialize a Speakers object from a json dictionary.""" + args = {} + if 'speakers' in _dict: + args['speakers'] = [ + Speaker.from_dict(x) for x in _dict.get('speakers') + ] + else: + raise ValueError( + 'Required property \'speakers\' not present in Speakers JSON') + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a Speakers object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'speakers') and self.speakers is not None: + _dict['speakers'] = [x.to_dict() for x in self.speakers] + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this Speakers object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'Speakers') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'Speakers') -> bool: """Return `true` when self and other are not 
equal, false otherwise.""" return not self == other @@ -1477,7 +2847,8 @@ class SupportedFeatures(): `false`, the voice cannot be customized. (Same as `customizable`.). :attr bool voice_transformation: If `true`, the voice can be transformed by using the SSML <voice-transformation> element; if `false`, the voice - cannot be transformed. + cannot be transformed. The feature was available only for the now-deprecated + standard voices. You cannot use the feature with neural voices. """ def __init__(self, custom_pronunciation: bool, @@ -1489,7 +2860,9 @@ def __init__(self, custom_pronunciation: bool, if `false`, the voice cannot be customized. (Same as `customizable`.). :param bool voice_transformation: If `true`, the voice can be transformed by using the SSML <voice-transformation> element; if `false`, the - voice cannot be transformed. + voice cannot be transformed. The feature was available only for the + now-deprecated standard voices. You cannot use the feature with neural + voices. """ self.custom_pronunciation = custom_pronunciation self.voice_transformation = voice_transformation @@ -1554,8 +2927,9 @@ class Translation(): :attr str translation: The phonetic or sounds-like translation for the word. A phonetic translation is based on the SSML format for representing the phonetic string of a word either as an IPA translation or as an IBM SPR translation. The - Arabic, Chinese, Dutch, and Korean languages support only IPA. A sounds-like is - one or more words that, when combined, sound like the word. + Arabic, Chinese, Dutch, Australian English, and Korean languages support only + IPA. A sounds-like is one or more words that, when combined, sound like the + word. :attr str part_of_speech: (optional) **Japanese only.** The part of speech for the word. The service uses the value to produce the correct intonation for the word. 
You can create only a single entry, with or without a single part of @@ -1571,9 +2945,9 @@ def __init__(self, translation: str, *, part_of_speech: str = None) -> None: :param str translation: The phonetic or sounds-like translation for the word. A phonetic translation is based on the SSML format for representing the phonetic string of a word either as an IPA translation or as an IBM SPR - translation. The Arabic, Chinese, Dutch, and Korean languages support only - IPA. A sounds-like is one or more words that, when combined, sound like the - word. + translation. The Arabic, Chinese, Dutch, Australian English, and Korean + languages support only IPA. A sounds-like is one or more words that, when + combined, sound like the word. :param str part_of_speech: (optional) **Japanese only.** The part of speech for the word. The service uses the value to produce the correct intonation for the word. You can create only a single entry, with or without a single @@ -1876,9 +3250,9 @@ class Word(): :attr str translation: The phonetic or sounds-like translation for the word. A phonetic translation is based on the SSML format for representing the phonetic string of a word either as an IPA or IBM SPR translation. The Arabic, Chinese, - Dutch, and Korean languages support only IPA. A sounds-like translation consists - of one or more words that, when combined, sound like the word. The maximum - length of a translation is 499 characters. + Dutch, Australian English, and Korean languages support only IPA. A sounds-like + translation consists of one or more words that, when combined, sound like the + word. The maximum length of a translation is 499 characters. :attr str part_of_speech: (optional) **Japanese only.** The part of speech for the word. The service uses the value to produce the correct intonation for the word. 
You can create only a single entry, with or without a single part of @@ -1900,9 +3274,10 @@ def __init__(self, :param str translation: The phonetic or sounds-like translation for the word. A phonetic translation is based on the SSML format for representing the phonetic string of a word either as an IPA or IBM SPR translation. The - Arabic, Chinese, Dutch, and Korean languages support only IPA. A - sounds-like translation consists of one or more words that, when combined, - sound like the word. The maximum length of a translation is 499 characters. + Arabic, Chinese, Dutch, Australian English, and Korean languages support + only IPA. A sounds-like translation consists of one or more words that, + when combined, sound like the word. The maximum length of a translation is + 499 characters. :param str part_of_speech: (optional) **Japanese only.** The part of speech for the word. The service uses the value to produce the correct intonation for the word. You can create only a single entry, with or without a single diff --git a/resources/tts_audio.wav b/resources/tts_audio.wav new file mode 100644 index 0000000000000000000000000000000000000000..ba4760649ea3a46afb64ad02ef3f10adce8d24d3 GIT binary patch literal 75726 zcmeFZXM7V!7d|?>>eaF=_g=tM(>o#b9w77{5_&?1P(z16fCNGdB@jvop_fn-AoSjC znz4<$CEMy<+TFR2c<+1v_kO*fU-UDURy#B2Jm)!QX4ms<{~let-pyplkPd^pOqe<+ zMMnr>@LJXmZx$265*_K?W5B>TeC*MuYu_FNx(?LheeW;lPH5ILt7)^WMhzR1abM2) zav6I0-@pGM@IM6phrs_3_#XoQL*Rc1{Qp6KQbPXUKcPNCpa1iZe*E{}_o3haIXATS zVE#Yv|8wL&tC`S>=Krit{?9j|_04~N{@>sJ=iL9a`R}>^9VzrXbd3G@3Vr&|drCPP zqym-TTb6LtPgx=%Dk3E!B2X!j6B!W`ALVhJqaxy`0r(_*^y|Mqg+^rmA0I-WLf?c& z2ptdo3w;W`WB%`!|F!PF*8k_XfL8ypp2DL4So_~L0>=N32Ndr@{j=yT)L-bI0>8x{ zk19yWQY(%yB%HWlt(j`z4TYDL=+lL7qv2H&iNGtCG$8r(D_WE6Bn?SRl0|RQO!5Os zVOY|JoTrEAee|-CyrqPmC#TTcMcRsHkqWB9IL`$e?N5v8b<&ycC#`{2Z~7f=!u(8r z1CEb`f5|*jM1LR&R8Mx1P8d%|t_U8|gWMnrvV~3}J?SXIlMJ$ySx1-9*hT3RjdWxQ;Ye`))g6tJ~k&V7LX~h=NlSF9rRz}8eKzf(owVoqrQdROXyYl8BM@z3z<%*kiGOWeL@?u>*x@o 
zA(eCrT`Sd*9j~Mo#tLOw$3@Z$XK{f4150M~^!>AfCghS+Q z_~G%RK-BG2jhO368BHhlkDL;~1UVSNg4JrACnf=SQvsyj&{|A5`IX!zpVBzy9hpeB zlDYIAB2x}zu96Homee8R(dT+vjoz-%0w7XJAJ7PlP$%&ez zl7qA}c}0C->1?`~jHBTsli5ypkR$XJ`0_95MwU_sISSU+Bz?(B;-E{&6hwSKJiJf7 zhNX|GPxzN$AL%97 z+KT)Pj=Uhp=~i-vyr2xz4DafbRz%4RrG1E;62?YfBT~f}ZwLLIOrV=!@g2I7_yr%W z59B%!voI8~8b@1`SyV=b1NlfqbT=~WC|J4x*1RQOBUpM8#1tD`+nH z89sMGCJw+TH%L3ul}rHZe}&gFRG8oBM8v8tIRQ^2f$d>wvlodm1E_|6! zqeyLJTn^QMYg15*F4D!Y_5(8X6Xfk(I+rX1PyO^Y9gP07h?abXtT+Ok3ds_Ttp)C> z7U!8!@zo>_u@;eOq%Ue` zCixDn^<*FY7xPCG+!*;2@k>Qzy99QAj(%r>^-D+?j{Ji9up9lf1tOo}`zv%OxKtBW zbU1B8BIsmbIT9AOLtQr0nK&9i#jA;#SA)n_p_ab~#x+qlvT1wR`73<~jAfWBo0D7Q zI^BUvb%aKfn`9V0NYYVv+L89iyGLl7i}#tZO-vHO)%KV%>mf7u!={^v-!;rCnV9GP z2DVPx1(|E7!{Gk|WPBC!@&ai>Q&1BcV;)!puf_t|Z;`D(A(qdPK@2fth8_wIv`4OH zqXIq#KUjD+3)ugF-*+(bM$EluFoSjm%f2EzQ9m{#t|ySe^Dz&r$mc+R7Aog1x*3*y zj?c}IIU|7jFwzlOkVQ_?NX!bI5z7tW+AF$_oI^gXqr;IMh4c@mG5Xtu`m_*HE~H0^ zRcK9Skh6$L3P!v}yJ2NA9A`8}#@)kgXaI+081ocHQW6W+XltnfC~IMR3H_3eCduH{ zJk)6!IfP2-qJA)DGI+NE=rjkGk1;EhlXIjgP`HYkH-UC#o+G~XNiE>`EA35{%u)Cf zz)aDF&Y%}Cvn8WG|48c7=X5D*Z7OWJhXLB>8tye83UKsN&`pm(Swc37fC&Xl3f zjY1V|&a6l6`;L4;-DEQ2{|Hg+29|C^Ob^n3V0Aibz)m`qv<7DyA-k?&RT4|$v1+YC zhI>EGx+&oF2h2P9;A9cvA_CXNV1f_h8>t7}6=OsnGA$ms6=A)3kSstieK7O)qnluL z0bawv+heHB+v)GfhOMYl2heLn(jGHXB4)Jam^VvM*Z-jlz}2Q?8hC3*?MR2;4^U^i zVOCFqm2c=n#QP6g2#=4UVh;nBt1$ajVcegA|0JL{2yG^!l6oO`l8L)b}x2S|?Q8&IuHH|`+o&(b3 zu-aiTUwwjE&y1Q~n+zhCVdo9h6o%A8#9dhHZU^ofoOKC&e}HIBMJ?@x^+*DpgE{CV z*!C~D$QKTPT9l`_a%Fr4<8*A{ZsJl;4Gc_a+2)-aG$hDiOS0}MPse=&~ z)0dd9+`#;p@Cz7pAB^~%%8~PJF%JzOy|DglLz6IfeTF(YP52!CZp2J|3~_&hjCllH zMX1Wh>1Cp2`eNoYqcRFu>77J{`xCV$2`k_W7_|XCjQQ#rX27*zV;6X^1S_NdsEF_3 zZ9~*f)En9y{47Q8crlBlVFmXidgd@@AzeqYcEa5CGmid6rlML5!?n&c^z$9?lVasD z5!TP5$IzDs`L_Vvxr|x$6ZF;)Ys)R*=xyZOBCL_U!1M=jv=7Gj;JP9UvsW535o?V< za0XWA80Q|vdIXG^2Sit(g7ifN-b1Cx&`b0Ys_z0EJA{h+HD>%vc-IhCoW(4Zg_>%` zIn!X#F0gGsSqCiUqZf`ABg!K&gKtBv*iO%&lJCMge=w@~YkD}xQNekiVKW}t@F zLyj&$wMe70z~U=J&(r_{&oJMg0@v^1Dkcgu|2$;43A~VCjE+RkuuMLx)?nI>)}qUR 
zY!j-WUZGS-q$Xhn#&N-mUZ@UT$Ro@{QlPVnbYM23Wk*KB>|`>SMa&P(=S&+moQYwU zGCpPvvx*F7-Y|EV*-QuK1}P`~nDdxN^1(7IxT&JKd=fv3Kg37#E4dN;Y@sHuW%di} zgz=~&$kbrBi7af91buepi-I zI@`F}QrEVms(0DWig)%^!CutFW%E1PPvwQGPnCj9A)7CKC0i&{N>t1uu1jE-C)YLB zHPC+Ca@|_PBjFT$l<=dVCg+&zY#Zql`547nd9jpH6f1iv5+v&|N12&NY%lfz<7c9n zu0nNSk+-|+H|HPjdcH#6OqZ*2ZOM1#yfzcG3Q% zJE4=v3j*8Bw&FzvZHfcN_olbS{qn4ZwQO1Z0J@JGz}FNRm8D_Rbpyke>RLqn5%Dl= zi{cWi5ZZE%z!dLY$6(9xN{eZ2HtG+3B zN@|M!VQMkWnG#$9OeWbvXRbwHpf|zU+0od2#1nLAs$He&#X~C=yG61OhR@?aOE{kx z$hg~JN{eUBy4GgmI_OKo+l22`^bannl6`pit}t(6iMF)#{h6Fbxr(ZQf1&UdpH0(b zN3n$|obEp1=^IosJ~odD zXFE#UC_gAJ$cHKiXgX_JD2K>r%5I9sFhzn!s6jUi-*B^n?*hYp!(Bf)4mgNofNiNc z&iJJ0hk}ZdY}Z4@@|f=u`Gkqd7iz6)w(S#d%ciyK#;Fb43_HUAmDco4Ha2^o{rcOy ztkPA*lizfGQSZIhaw<@RpTQquzff({w~js*)h6;`^!B(Vv2DVe$_I*mri%h&9eVS? z%9CY{itm+vs9x+ycN}*N@zp2I#k0kuC5;q2Rfm<^6uVSowaMDE%InfqqAtu6Iu}>z zc3~HPoLk5h1iE|N_SW`+j$ZbD)ve2x=l9CpQylJADO8aUViigAGZGrz_~cIK)t@}6 z(;^`!dRp9VgH}4hEvxu6zx4HwZwKYExjkQe^?3KIZRL$z1^yjEXUPNgPZ8^4?#G^p z{W4)j^2Ee1A~$K)%VI>UxZTdt=3x2g!jXBgg>Q}5swP#d?ArswSd+L=DpFn6>a=pz z4P^)I%`^N=N2FCb_cZKt%t)o5G&RMQkZYWO8o04ZQ zOY!!RZ8FFbPNpQ)O>4Qb)9NnLHWTYfQ&%UF_~YTd#O0p%CRIWC+grJ7^8&9oKUw-P zCA($uJB!AX9ULeUsm~Y=$4-fT6mqLQpmhNmH)a_ zU-8n^)mGwf#@Iz>=?Fy~l}kBXB@IiCm>fCCAn5YdXB7LSo!HlW^I)^UIA4LgxATVG zY#U;|R-JA#m$u1Icy<33Q`*h9Rra}Vb2LevReyP#o?WkY-Oy%m{rn6eH9o0JM3p$* z_pI7cygmOx-s{}N>;VrKJ^1d$pLxT|FPIhXxqOOjkiKC?<+9{>!J5syFHHrK# z!VqSWe9mof?W+E|M4Nvz&sH$?L!F`~#-;WhUT5$;QzdJnN>ha?e^D*b6-WA`7ezIU zc%f^hZKjwZ$`K^Nw?4Di?Y-gq#>cp~+Be&_Su(5Um)k$Ic~|h76iBLf1a~MB4Xfgr z3|+&vZ8JOVYTvi{KXpgdSeJ4jwj!*ySms?(xu(Qj{Hpj?-qsiGA54AtWA?OsQ7JKX zv##_%kZ#qrF&v0$ozOMyK&mc&O|&h>5?!tH%fAsP)9cPv6)%g97ws!LURYAR&e+H@ z&3V+{gFa&oqI6a(X|CQKJ~-k~#Get342KL~>)UC{rQ?~UTu1*o?^gGE`wdGY+b{N= z*4C9LiiP*BbAQcWUiRA7JJ?E+6s}AfTCZ_yro+$e`nLF~;pMu9n%5KchIr+B%6V!! 
zj@bH}ca?2;zaq!<@^*HcSL@y$&7WM-xvIZ=JY}S2#R}b{=sB?mqL@f+v?sbw)Zd0< z`Y3gCQBJU?@2-2Pb*u5ya4W}_lrN5|E8RyZlu2y9f+wP zz05FDU#-cJ_Yij>S=>n95YJ-Q9>*x#@#^~(=SwP!P8U8eT2sEj65(4y#>oBQili2` zYi9-PpR89}=U}Zp8OEgcv1j$4sNRUL;@(_*;Dlp+rLSauk*MhN!gqO{a+~DOE*Wlm zZq_*``?t|DQM`1#;9m0;fHsOwb(E^r@6FG)Mu64^v~U1b-=G3jE7 zQL`vOnX>WP(t{y$c-lP4~WZ|L*$Q^|L40UlQ~SE^nR-BP< zlJAvompv7mnL$_|Kf(R6ZNlHd%ijCWQC4H+7Nf%StWs*(=osj`Cft)O*IYN$j*W@q zV@+{$$DlOjwCS#ky?@?(Me~@hA8eSdnq-l_iCNyglek1l_ZV*o1Wkw1p4}F zdsz1o_YL>Ap1a-#fxX-uK3}MTm12+#VLD@W{6Q+0O_1894P@yulXy9qz*T!wooY*V zWqwr$%W&Ie$6-$w?l)0}x@W}ZsKwEXVnlI0;v2_%W1mJ{&>srx8P-G>9X3m87yU>h zd6WMi*Y6gYsb2YkvS($NE0$FFjqR#=>n4ZUIn7+OdFwou(fZkrzW- zDSl)^@3=A1E`ufFPQ+7vl&+WhjO-uL6|5wuU|wYOmmMfOT{^m$FZ`=`dPSVM zk^QCfvS(0mGQGld6X!}N%Rf_?loK?2!Y1gB>8f>K>l)~qXuBwnN_@n{7Y5|MpFJ&I z!yPLf70#ZXHC~n97udreC9Rq3%trPr(I=A860d|xnn<3A?lL}7gGAE|?x|;{(`Dad z>t>x{ec=eY%)vv8996tA7{`*3f_@PX$D3~^cUEmj!J#0(yb8$b3QmU1A#B5kcHCfe89ivSPJEsk({IV^Q7oxqSHh13F z!Sjjx8<)>{(Y?eo+#BOR7)TY&xWq^zTuEoskACg0IMe5C&l z?ju#=u7Ma=EPZh&Vt{*}BiGuY+Gm<>DysU?+T4EIG0#&^*e|`UexX}rXb}5H;+xcQ zsd-6%CXS0wja?eKJA6ghI`w4P6>^X(3mywL^If+8P?cv~Z5&=cq}ci)qIhlj*OlGP za{E3H4HgR-0!@_T9 z%9R~uP1r0!%6%Rz^ZnwSVfL4AEAV2)s8HQofAJexh%t8Ykcj@8lzMCCv=Eu9@$U7BCNLhy3|Q# z3Y_q>aNM`Y`m6Ep(nrOYKjgj}`KsWxGe5UfY09fEaqROa36IER(K1Q6q>gACJ6`fy zeqC9lOi|a;O*b@*bm^#yRkHHwlDQ-&xGwlGcq^F5o#J!Jcy|yDm$7oDp71nq-22YG-hJJ3(YwyO$D8Tx=BajmZfjunn5G&v zm7C0oj^6HKf0QUWY<=8MDLYfQr5&iTrPjdA=_!q3eG&C_Z?u1C&ZtrpMdDNBcm7&% zg8!N0gK=SDoxCse)cMQb^?BPnzocC6c;cxO=tiGQ2Wf9=H)xJ&rfGgtFH&_;-_+g< z)9McDz6qZkF(~4w_O)~ZEycRGud9P)c~uAV9IMaT&~d`^uYYKuV{j5b19v<2khV+? 
ztCcpEzmVn0*2@pe^|D&x7oXSNN&8|qteiE*nUi=);i`coGsY&3KW9}w0- zqfqXZ#fg)7&i&N-++5SVs`7IA;v&BIeAPeBmA)VS9%iFvtG+mDYh0rwdGfr39&zoW zI)`Ve{uT3r##iQ?_xICU>|64wZy(%6rzouNMX`+kQ z{iZo8-y}&8%@x}C?${nyBo!!gFBXlkhI{foEB&)2uVYEps#fXEduH9M6IQ=uw5?)8U)M{}d*~~3;PqTbeF~5+@{p{_oceOt(DywhYX^gks;I}BI>#j#MiTgUW zEc3^DtLt~E|FCvs4P)X{eL(&zP4ynDK2tKjw8Ge;s+nnl@%!pBo6Ggq-QLyHwZPk- z|Cx=F)l;!yPxQrx`w_=`nzQt4_G;`*gIg_IZ-2#?&mv4*^K+F8rLn?=dKLT%7BqZN({=5x-C(UW1q(ehMT&*sDe+C(;6WpPpaH~XJw=KNA}#;;H{jl7;Xv2KG7H3w}QrS2cyYHZEZ zsi_&&33;-{&c&5~nnznEmkoK_@LAFQE;l~CYQDbzX@1_)GLdDJuswEkz2$WmC+&&( zH6z#{rOpPumMxKe9qEbuja8dF6&}jJky|gj-IIjJo|k>zzo=N@Gs(|J3Num;7*jdi^tAkHRXeUItVu$@B!A+-jPRzjIv?!Tu8p$p!1Uwkvs2qf;%>3|VyUZW zSzgpD{`sFz=RS1Zx%%kv`;5wZwo_KUcedg|qB?VV(qnxeLt4#FO+L>YteqfwpgtLS zUOB+tJ-_?wGjI04`tezpr}wja75q|u+j=q3S$ZMtXWe+6$uK`TP?O0Rob+kpkc_`- zeUmEEhe<5-U_h{y8_!q7SJtm=S@qsFFt~wjO|}I42FB6jqM@Rm>|@p;v#Q>z{#AS? zJ16ZXD&S6gO`fJf$}J1L@~-j^r@xUK!BOsBjvt&|T&d1*yV3Tqs|TkQ-4!hewlKdb z8d0RN^e5ZGzK^Vmj82%C73^y2U*4sv);WEFmW|k@8_)h^jVS%Tw6yY=d57_ff=MqM zJX-N&^;=W{mk%ajnbeQ9y=tyjVjEa=)K9Flpo;t8b5zi^ToKF z!S9ORD+)}-AF6m?u{cO9L>Cyye&!+^+&?hz{xZK=#J@qX=^YOgeRFpqUS z_SF|gFx^E{#WTf4WMOc!udyfE+sQlLTj5Izwj||jJJ!aDocYG##VyO5dQ_^8u}r)% zc3N8FW^C8*I}dHZMQv7y6&~#g?I6)1*UqXpRX_OynN#Qm7HgAsm zo^OS;eq36rHfEbbAvvwVW9Pi>J}rJu_;wti~$xffzZS=YMWI@2?QKR~Z> zf4Iu3ADF4DL*Nc~fWIL07xz&Q)$W(ip||{N0`{QKFZMq39^#vdwbFAe8;jNqvIsM)w7F;YnSzXte?d}vDDjpM765-OUk&87q zxO%y%maW|ViF>*&yZW5%xNki3TpUmT@R>a%_)7e> zdWOO$-XQKJjZ_p!3WAs1jeSG7Ny7f%I8Tlz%WvXV3rg|_yO(Li?B+kkJX|IDLXyp& z@o)1?@T&cty^B5j9EsMUrt{_hSXlBxS1WQrc!EAMS=6|8J4?%nna85PiWncWJAS$T zdv3c)h81hEQC#%CPJUSux_(DJu?1^ zs~`21;p_0dvLxR{b5_+^>t1I+dzZ>J6}`>RJq46yThVmR6l^b2DL1LRDRdH_I6|^j zW|q}vPkY+it*+kwaIUF$m}`Y+k-rcAnsGuXr?-fm-^dMQbmBLX_L5mbG;a{5a}NJ> zcPGzi?=;(ivSX#S-0kG^^&Rz(!m17T(uOy^+iFIGij-3kRQ-v{pm8Z0aZ4>Z<%3H0 z7aL18#k*fmdTx7}lYh|S@-^oh((7z5?c%r*Nzt)uBNSm`U3hGE%%7UsxbtwGU*w-= zzfwBwUEW(!k*{KJ6&|n*38uy6%mD|d%7oC#I#Pj(zfn&m7>}k}1ouq-UpC{KpGWa04&L8O(J3n*S 
z9VL$MooBdCnz>OcqUXgVCyuFIx7q7POv=virs^Nm4oyIoL#v&Y#-3$;O8zJwG<$?Wtqhl>h$C zz5MMJ8tZu*arW{T(Dt$m%D$RfntxOmRf9A;;@+o_WL3-s&2p_zcR@E{14|V_qqS1 zP1r?jnk-$biJlT|*2YSHVK%5AN1cmER?QSqW;c69XySC2U(D;3mt5Mk@-pt)J$2Ug zToZ0ex5!>7GNnx=ta3zTMe^O`!3hrx2XqhhUc*?OO1#G1&a%>-Nt5VCkE`mOInVnh z7#@&%9QJWGvF|rx68q(L@f6Wq>8Em!db?tcIGX7$%x7-1N0@2CuL9v^-frF<-Y&Rj z+0Omg?eZQF9Q zzEU|+(N--F-=t5}nY8WY{UzDLOHWPfkL8Ce?pJ(OezUT!HP*Gq_jj<6KTi&ex=HWI z4$1FoeNl<=P2((4I}AI+uZKU=zfiOz3w*oWO@rTYPdJrpuXBRi+Hc## zT~~eU2xa=n`pR!9*J?It_o`ClW7#Y^nbr{RX1-+42#ayQqBziwZ|b+ZI@({`vvL2Z zKQ{|3V1zG4Hzh~eQ*;}=I%Mr=?e5QFV>FTaRr*HQ+3=i9WRy&M+{JrGgE369woc!y zU76PSDdE$#UDdqGFK!~73e0zGwskOnsJv*bEYRlt@TU4hbfw453p?l>#>*%ab+wHn zPHKB9(i9FA6LBf*PS|MmIfYTumhWa?QM#efQ1Z0sc1fi%%G$-(fm_Z?$Yl0Y**STo zI8z=THZ3wIE;Ihu_?q#Z<2A9PbvcrKRP68OzUGs-$2zi{6P?BGJl8naN!KX*A?rW3 z9^SXKTJ(@zEAF6tqLV~9b;sn#L@jv(_lSPUI!SNtSYSzDWAJrwmVdbSfIY%7$X({? z>UVnQ`^NGz_MPaZs4=-7xZpczukVz2j|Oi`C#pLcnnhK|cF(v|`-|Fd9 zv93ThOS42dN;OW@Dmd7F2lL6KilY@9EBaQpv6s1)1upR}=9XlF^q#~oJ*E-qo<#49 zPLKUPZfjh1()on^i2dS!`3hl_-|Ks4cUYQR4qKKv%A5G^MNDIG0K(h5ZNa?-g`y| zJ+zfLU(!ufE12Zj@2VTDEj}yXuU!;Y67bXG--|N6oD)&pds^J;E~*>L$E54Am{weWm{;{I9U@HN6yO z$r@2}y4wGPldC>iHmvB|;>>by#Z?or9rVv-K9e3)yi=Z3J=W$##2DoI#Sy$A5PLjv zZo=)TA-YYP)^dhP;oo>3TPjS)Oh1|1*pE8xHj%l&{M=g8InI08w>9`gC}k^@(YkL9 zHDaDbTvSUGyX3z~)0qm~Tif9}=>EYg4^#*2-X*RIM<=@t*F8(@j~okK9lWu=Q{L&G zD=r#bAlno6QvXH7tf)mv-D~e}tZ4Q{R)chVY_q6Ik()IgNv7kt(NG>+HnntInX`0D z!I4+~Y-@f>)j+%5y2RYc`AF0{A};2SsNPX&G0Cxaqs;pLdQpVs~B3 z*fPGjxNL!Wpp7#>tFG^O!igA${ZUd+xlxxKX^UcF)Ukt-YNuo*E{%_lj*Ljv&XSE_ z_68<<<~v`QuN$4l7z=A@T9t2{U_5CSJDYot;I2b5&6Vy|SA>l+jE>x=(`x4^y^3Gu z93$Z({k?*xxpqOdH`5X88tavKW;qP@wYK`USf|^4#Orboa4vD}_D6^V%GqI$bXNV5 zxY&&LSz(RN)LEZ0Eava1#OS4w*OaG%!|hH>oUOU}i1E|X^##(mO>;WDSzElO@=evW zsvG9+!I8@DVP^dfgDG-i42kX(xjAZR)Exau<#oc~uNg^RqvKRnBh%XIe=8qWZ8tSC zKD2an_4Ac;%Scml9cBtNJy=T|B$1?9_3RMH`Q0?F0G-9JcX}F{{$@YntvGcHN;wvu6 zd)70^&3adR-Z=`aQ_b@%QC6vSnptVP?i_%O;%w)9 zYj@2!o;);eqyCt(iFl2#j&+FfVQKw}YU7gP{;w@hx;!p_*VWk0R92p8oaoev`fBfm 
z4>b5=NWz%p`pF#=1F`E2^TM9Wr!u4XFZq_9KxJXY5d2-UY32IzO%=ZyW2&dx175cv zVr-&w>JyRAqbEi_iQbqvE~Qz@z{D-FQw@r+?drwy#mr#8$@!mKS3hV>3_tiPnIsZNk~l_W@F#eQPrwg<|*a(4rl z!SU95(R$gEZ`p4>YCci@#N6C6($T};n7yQIulqS-Mq*UmjHc=qe>S;Y|8UJVX};tm zG4r(wwvV@u?f1%U<-?0-71YX2eeucD<}cN``--M~(0=Gy-q%^ibX3jI*N&PU*DCp3 z^4H0pluc=E64Ue^*+RA-vsb9}+_&h>UCh_2FICR1m}op<>}6_Db<=X*^E>yH-73GR z`6hC2?6btS$xRbe!oN_Pzh>-HzOX4=$6!o58M-c2ea;9FD6_GuNo7@oVO) z)Hbo-g>O^0QY*z4|6uFj$`RG6rpx7xOWKuIls_%k82yzU9QwcoB9ZJ>jo0^z-XH&a z^1}47$rln@#oURogul|1tLjKH7_8GgkDX_%gDuUgMw`AheyCVz+-f>%zTzOh&Dq+OUVN}DvRW<-{_@qgQ+1WoT1OG1GLTWGw9;_}BA|{g>(_lc;3z z`=4Gpv$wwK_x9r3zHb`j{ZUbF>FjGvx+z`y^w@=oHxs^3d7YM*b|8M0Zku|ba-{qj zdndTtbJwxXR$kT0m|Jnq=rd+iUNV>2xBBYwV`+a;BUP7(gHh>V{qX4X@k?V{MkVVf zYgeh3iHDLId=$4ZaL?V^!Qgr@&UD_iwd!@{HOowUPw!Q}2GL0}WMRtlsw$;Yu|nES zc|*NGJyLO1!pN$n7V&1fJ1{%Y*|)&6#r3Owux+5ZLAArAsR*xJZ$0jDaDGuO_43HM zsiSL6t$(-v%i3>hH?CnxbVPlp9VertG2hwW+r8GdxN1zfw3z#lRp5HR;%&RvPhT&4 zzq7cO$>^BEPY`ca&y9Q%-zq&jZDPutq_eSIBPH6$^4_9Mb|teTSm-so#(C;`Ry*4} zN-dAAe^~Zg+Bxz(z4%FjlD#4AtNdDbIzkye%+ODNN}H|ds0J z_yU8yV?7a``}V$ew>82s$9c?o-@A-o${Hk3q$RQ`sx|6rd1t9cGECHh{g#2qP4#>$1^ARl%1n%5%+iMk@RO7hch>)n^Kx2G>CF(W{G># zI)0zGo#&8anDu?-XH^X=FB|_TcNUTlvI1%0gwn6d$5%eJ{^aws<;u^(w-~NPOX5C> zONc3p@M(`J?Xq{`nxcu!P3$F8V*lV0?D~4fb@X|i;f_4pY}=R4G>_8vbC6>yfdk_2K zf)YAhbXVF|_DEJ+c2PD})?f61nMbCRxx!ekd2pYvR&ZR<8>r$=a|4BEP%HG2M5y>M zO~g3EM1w9`8Cx1N#GuwrQEn0cMA~z|`)~OgxW+nUwh^}W_L5PJyc5QbI+Mh6w#Zf-gEl-tTR4@3mA z1C7aS$#dCY*+JQQH6J!Ca!c$tasHTy$j;%dG}TI<6Vk$e{J(^ z4;`#q;~D4I2UCK-1?vQU40wVmLJ{?lx01*5fyzgUU5dJjSCUQQDrUOCV71uVamTsc zd&L{=uN_cv40SWDq}L?t#e3OX%rTY>&ZsR_sjp3Pmym?v{bB;bP?|& zCVsfEMp#2Cpq>gl#G)M6UWddK?C1V z*u)+WO@F-~Fq7ll*qB9n(*QyJ(_Zwv(7jc8QIWq2ha@#^NKACGs+vST2%J zmL10&`<3{tXa_r+QL^*c5sa7a2ew9$c zALGt&i}`55O|zL;ajy6~7CW|uVca0JXyV)LpC4?^A7eU5w#pBxZ5pgAv=;R}<#@$p z`BX`U=pK{I%pgPf;^4txZg5|qov)9t%-=lt(BIeJl6xua6V7wV+>2mazL8KE?9TCA zI#1|8z6qbn&%tc?iN8VMlOPQ`IS+S=W?`u~%Z1 zKT~`wZ!C+HFOWZzE@J0kuke1nK48asDRya=3Tk>rxWxD4V!89&Q2s9er!We7p^+pg 
zJQv0a?}gL6pEGlVa3y;zXbOJCo#4I|jtDWtz@~~0Nt(zdiUo>3iq47xnO>p<=Xlyh zDB|OwB6MO0v4Qyk`-^XbJ$|lna9;3(-{QaLukYU!{FV#yi~0E&bpt;G_e08pn}XMa zMZs{60NGTbuiz4D<9gOgH`4EfFkJu75+=gdF;GnOWinWfT`9gMj*(c!YAKZ&WoKoR zWo{|*Ri=_wiH@-UFngF>RI-3@jkj~7xURwk?5~axY6*?8&;OzD18*1dg{R!npn-eI zSM#H|E?fcUA!zo$3Oo#62p;nf^gj*s4=xV23Kns0?jU!FJH}P; zP@NGG(}bzdVCica8uhp!!i$53DLxp!O?-v!ZI4^>ke`ViQT z7vAv$`7Qhoel+e+9AR#;<*ZEfDPmO0wiGpG?80LqjhtlWV^_>OSkz8bF6t*v6qmC5 zv4edlc9}OO1LzI97Q6C)#vb!p%wLRwEu-O3gB3t2@S4vM<_T5M8SJ5Z;OjJK5?4_J z_H|bwk4JE7{ur<3yYV&oyFwl29TU&q7fD1*MPXutxGTF7ng=&_j8~$sAxwML&75E| z88iBQ!DsOexO3dqU{0_<+KQO&jE%X))L=xU1-0`=?l{+)tKdZZV}1}?dq@#>U0j7) zU?Dvy7=@F9T4*Hn$DaBavV*B)yMirh(O;ri(KpN?(h7Dc*`DMt{B1{rUD-#lTY3)m zVgHV^7ek}h3_IiR^EHI2&;%rsFQM8jgW_iwc2sYm1;XD#C+zbOK}%v029f#9C(wVK zB$JtQz#NKCD5+BE6yc?COb`UKFkEO(jliWdG$!dxXXYq&gWqE=GS$pbtUEH8edHlY zW5$s!(CBujcZGU_j~~JB;S2a)LS5{E?TX##0#v!NP}q&6mxZ@b6nsHn2n=S~lUW()#hJQ0tlJ%vkIJ>%lyN zI%%ZvJ->`^3dCv&O~8Bu`HBt^gU77P+|$H6fl6pf5ZGbU@R^o5S7KeQY5p@y187tw1%d#D9&LhA4S(*Fb|B2KA5}HS-8P0qv>?dl!zQVw>q1=sx-~TbW+WWoStTKmj!x z8jD$i7I%&3@qKuiVB|LlRZw!MLOM%k3p5NW<`cFByBrF(TBMTHWgbF((4X1GtR@ek zW4_L8VTM5CF%n9(K~SuuqxwuiES}SW7(EQF4dYpy=QGhRAXo^q6TMd)UHeue9p-csI-WS2_ zPoXN2ieDv@T}H*I;TTMt%Wx%1_W{SJ8&_u<$eelWwDb!_z`2 ztd>D@bQZSf3WK0I83Wbwd}v*Ap@wlm0ooFoeFWN`XsAQvsGc^`k?D#mU5#i?#<$Jz zY5{~Yp)RQlO;0zx>Vhlx(a%1*0vtRHY%amFc<50-1u}l*#5-`~Ccb+PRz8L5{4&%f zR?Iz*aAXHmJO4m8bC;T+~Ub>MRB}O7T4peVZTSDRH(OeT6KnfM*u8%7fa?35%qdO%3QT`J+#Iv{d2DP#=-# zEfor!S>RF>^64C6_XFbj5I)+`{tZSCKs93n#!lp885CK$rj8qAwq#jD7&{F}FP~P4}t#!aE z2@sMYcC|2C8piq@nc5%xa)BR3Kqd+;c;s{fP;Y|hor4N`8LHs>kE+}nXd^?N4n03W z0;K%Vu7#rILhl}E6FVURPDCVvvh35KS}e%Y$b|pnMA1qen)h zf)iR`6WZ;36c*|6%#4QcTLD&ubktJV-WhXkEF!0Yy092FB!A4>6vWR8TwWtakFY9H zLX+x%#`ItK-3GRZVPO+6)DL8ffJ;6qO9uGZ02l~(Cc{xw^+))|i1S-R8`>7$m|(pd z=LQkSH1tu3{CNRB1W{3=Ne7@9gBtY^t*nS(GAii^jAexNH-Tdi*5TO7*Q*f#$lxvyrx+!drhAp9|vAA)z z7wu}GpS5_dL;+%x4=lrgf&$v;YQ%xV6AZe;yEb5e3K>^`{4T?BC$dw5|5^Mf&|fmF 
z&4>MOQQ3H?=u&Y^1wHXwFeLy#!Vrff;Sp{UT0An$D9`ljewSa*YBV0#&4O-T~D52O> zWDNx)eQ0gP`;h0^@IwTAT=3}*;%kPLS%_{M^sGS3NX)=G*dv1HRfteqc%T4B?a&%B z;IauUeu8=NFf28|;%ekcGgP$BK*EIllcH5QEDq^_-C(O8{fgm90zQRTx!_dXxuVz6GQ65uwmAFBmL>RSMLM6ma?zjQId-pnX8D4Yc@UFak84 zu!)1Up{LUr;DH-RD8b=Ah+-HR6UrzDdi@t%jD@nZJ?5Ass8WAJ21S4$_2BV!DE zTcDEkLImI8bsjV8Ta42NxseJUg*SJZ9v^U_po@ zWr(Q>*2IHv31Fogh;rcN{g3vy!f3HL^9eqQfNUm^t%1)zAbSzVLNj<27?uEaya_LY zxF}Eq;!tU&;EMvay9{xv#uF+U!Q0Tx9*4?g!b925Nx+^EvwdJ(XpNE!KQj^2bj+7wh=CH{c)`~s#5f#? z!~$oUs)IE_CcIDr zHwtTc^cjF1ZmbwAA2TAR@PnTnLX0K(Pkegw}8&4)DNI2~H@0W#|cI1QlBb zt4q;K32>2Pj0D(hLZ1#qzY@NyF=}XJE&2#O4~oHW78vPZZ|Erpp&3GkPZro+31$_* zCo?e7!l%TKJT~H-P)tj~u$u7C0DnSgi(st*$FnhV=xGuuuqgE8GBv!di8zbli5YfT zU|lGpL0qB4z`qa=L-_b%uM<9rKBDCW!Xk_rkGRId>pB=S0E>NKz60Jch>8>b*kG*| z=Y;qq#~B>Pw%{E0qgC>cyw3kPq6%Y~(LeJMQv!P_RuCLGs6sUlT_J_$FFkw@y{f_S z%8y7GFnSz3)5E{o$VV+AOt6Y7|2VI@(T4&k_z?wy-&RC9`(usMp;f4*6i*U*i~gU$ zEW!w*Y|mR9 zeuY_d;D?~H5UWG5MMBGXoQS`*GCmfA0wVjw<6AKTf;LB@B_DW9tPRl+60=&tjRbHk zA9C^*@8sjp2hbCPnI#^gJbEFkP7W=rm@ft+iNHH#z9aA%k%s_&NHlIkdwz^N7cfkw78gIuEYtc#>ci=0hI5m^BH1Q)r*qR~B>_ z@N^zpbfX8dav{El#5Qi&7#Uox2-=OHp3FXt{zC8!v7cnsEo1EE@jQ#M6DllaoC3o! 
zS5Ew-LC;<2k=P19=#XLid9;-U8f5S;3L|IBB*2XSg@JYrpAzp#aN+{^zpYGyBxrFA zz9*Kt5Z|ZZUlo!2*WL16^Gjloj#%cv0Nc>w03K?*K%r=d84fu({s2pfrKyPG*NPfqLHc94+=rh3& zH9>-iMtd;F#F`SDM*1VYkmx27Ef8Jz;8~*YWV|G69#W=X`Os5hF@k7;*gOqX5RXWt zlvqIGjdZk1ddo-0WcF;B9};(G zVRnhF)*%}tRv?iKu|LGVXwVx9zY}^bpywT)Ct6G-oQ#a{L;(Fp@Qb`v8S_d!1Bt2$ zVk@zcWDbm=$pLvJS|OM5g+gv9h_OcC36i-A;yWisLvn)#&`Nk_K&w`?MlutmM`9hw zypoJs5IwUP4T)K7Wly=GZ9#A>4}C`?o+TEH#BE}k)D!y_ihhWVCD!i~{%6K_#MT_qJuJ?E(T2`k|wPbgEPdUvdDZ8e@oVPJmxK=O#dRm8^Tdy z^@w$d#=i=l(ZQ)w%qNL^HOM{Dc483%ph*W0jo>heLy0XRkrIg}S&W0&L1LSUZV-th z_L96$-qXrj@PJ#y(vfI@pwNbZYa}zoqF*P*LOvyV6*AUPjNb!lNi0aL4v|e_Pf3=R z@Xv%kh+Y#tkA+|Pi1&%46HBg@#dd{dl0hRvAbT6cR}=Usqq<@ktCUZ~vAn^@B z>~i8c1*0YN7=T<69XFNPSujR}!TY zN&VjwvG^tjl-TgE4HQ_RS_)5=>!YKS)l8@SDUd#99(-{{ij#z!eg| zk_aFd?U5NHt5uQ(BN=%S?GOr7&_bj<0GUgG_Uw*CJc5sOdyBVLsUBz7R56Wu0pBcYhsT;it{yicM~6W%A0Ea4Ne4@nP zKzugondrKRXZ~k{$ZkLwWbS{yhiHofPmtJ$c!vL3vr^FDgU=>*io_FyPZU}uBP4c# z#0Es#3D3xgi8mryG~z*sjFKpw=n%<33K$Io>qX?3_~jsYLa=p-CemeNlo1J;@D^F6 zlem_wrz{vbi3Lj0rv|FXn$QB7j6q8zr)dVY#5R(3bUe67G>Oa&kykQ{QSe_%ndE)| z$B2ggZ&mp}OR0f7!~*1j+r)>E2#d%a(O=T02fvXq8!%UdHWwsIE7KQ3C-E;tD#<($ zogr4Av`9EaMyz07$!`HjX9?(0p^0W_Cy^d99}31p)V-%# zvA!f)CYeti|1ZJZuw|aIBD^@sd=jf4#OK7Dlc?|mqV5tr8G_%*tdq^Bt9f-K{Kc(kt12Zl9-#&=0&??^`jx1 z7lywHssfK^NOYNuStqN?JoHqAAL7AC#w!!O+aQJU&`Og1i$LpInWu{^(|sb5WJXHy z8Sw{XrbImF0_R9lkh%St5SHPBCK8n zxa2|$)$lZtgCL}X%rx->WW{2`Y!gpRRw!n8cn4OpkMVpGM*9l0nTSjd@qae-{=aM` zS+RaXE78b!HU=LF#i6iXF7SdvPQMa3C*XUsM@ZI39ISJ4nWcY*e9~)31o6JaLKI_^ zA>ajx2T5d0A|Dd77l5P0#*@`4+0`R*OJYc}ii<{n#8c+rM?!pAtL!}i?-XE6WdD|| zK8o>Eh~NKb&xp4otEM81FdVu|tP6?h$y$Qs`N%$CC?an=_(^skNX#6M^#qAKNK{2+ zTPf2j9{&;_LSieD1^S;{5UWqD8KFG@OJ9s}6L09ov+0m374y{!KFAKaBsTLSo+CS} zCOkuIFY$^>*_w!~8wrAJ7V;%t=tKhOHR10#_^$uJQ4+OR1bxJcNs##5GF}o*C7ekD zRm3L}KS|;V7e-8Q;z`zwWSQP!EhK_xWK?7?k%7#)z!PF=h;1diTO>Ll`IQhnL!{jW zZHU6>NuY!5=4F5@B+e#SB2}@*CBBWUl!+%}KrONE#2(Z}kHlt@IEcg%Bu?WYU)7OE zuMXaM%cu{<$jAzrNCN3S6aR+c{S@^52K|r>6N%?TAtip~F788uR9Kjn$as=pZkdG+g?>arnhUWuBM}Z+WyF-_ 
z$`$nWq>Kw>{aw8*n?`o3DuEZduo7gR3(>0$bK^h`ob1q&EI5%VD_S9$H?p%xvgB3q zJBbMt@Y0K3h9_coNJlv2o@iNZjDql(%;N)$h3vm4;j_B< zAygHYwMO>h?T{rBPY@n{#GI2{Rw^_uxs3N&knMbMf#^dNxJEKWWZhjEJSF*Xf}u*{ zR1%N4!5^|eSb#T3L>B>lXaybfLDKUussixBjM$#67zm$;3@T;sdGR-~Xa+o;inf!` zw+n5MSdY*}ctAW4v0()3)()K_>p@~U$j%kn8zXj~$a)DzmJNQAom8@C84IdY5M@5Y zC0C0cVs_|thT)1Y619c15b%pBI`YGS!@=DXX5ZYp`XkKnI#syZHZ6GZdq;_ zbu=slSwqB@$uP0m6~HMs#`nK9qY3;b_PrQ-N_2o^yNFE2W4s=4h4@agXRe}k8G73q zKNax=$-SgO^6x_%Nj~C3nYAD*GICC%3G&X-kaaTuB>E@kS0bT_AFylo7%h=8#z9*u zVu!Ff{tW{~UW|jp4P-}zD3yzdve#6Q}M?8v-K8eRF0)NR# z7h?HDaO^(pX&$&iq&ynBM^1oL!48cb<0qpf>vkD?WhtPHtno=y;X_XXJS@p+lNB>r zwN?Rz96ZuJ?0e(_Gq(wThzz(f4w8*0J8(n?lrrB`h?aBVQ;CnXfhw{dHGvc490=K= z&cPcbgF$RG$zK`DGLI(YuE|*rlB*{9O|m}x4;g4+%VH!XpB9UqwmN7-1vft6Cku3v zf601?#GW6Z=a13)XTS-*iBq@Ba7thZ{C7OgN&l!%0OIUwOQI$Ed_1nU}v`tP={*(%Xk@1x^306)l)dtXofV`0|oc6 z|3p08Q|xLUgB+fwY6E|@Cs1S0q8gz#)fi_0y*Oc5LY<(iFt>3FNvb|U`$a2{(}(lb zBt?|Vso~mtg%Y-xq~-pU{lZn3-9R2en!Uw$)Soihf_@Zj3emB@?Ww{Wnxi3)DWaL%E`j6;3ct-@}F{B$`nRP zE%jqKo7zh~>xs-;_333sCAY(JtHPps0~=hbS-H}bq)&E>ovgc%$D_G&) z;`^o4Q_woMQ+~^m9YQar$m)!0S?)x&I;pXhOc9D4nfv>jWv}x;QH7GLz?D$?Q*awI z+UNE%q38zx<<^ zElc_gRI@kI0?HX`hw-u1XIet-5;_Dfh_jFeILTM!j8q5tgs?>%sYGh$)wAkdsu${e z6jV6B6`OhomL4sg?4DTiXThrMS$X?|)5A7ZZ_#LAeYVMDzBa;?9Drn=3EP_qX$%_v|nE&1rTwkk^$b7x039J;6)MZwT2pF? 
zwpeMdKBOczn*CZ`?>}1d(7C9jorm>)EZJK4dBGV^jRBdal3;0EPg4UOvWxp9yC8>A9!-!^<-SjHS=-cJolfN%- zcJcc_7F#joUgWljefB)Vb5zz#R5$Ie*fEe1XrY{Dy{40dQ+1CVp>!A-2eG^D*_qwG1lnNWuC7A^+RL|6GIp+Z2VZX*(5lJ;hXQ zN$QNMY4Mkh3Eqd98E^Z(e)PJ}+nlt_?9*-_U|Xz?$Ci408lB3X2EyA*ARg_H(L=v6 zwWKDA-*|s>_AI!WU(+>6d}){-wl=C>#C*G8{F0+_Pmf*cEldhD7N)85S;3(2jp=>z zkHRRiwK|;^*ehHUev;v!@qu9oBS>q#-jY_HRPh3(bN_MK^eyeB@>0Gg{Ut2+7nXJ} zYLt65dv4(^;jwLBxtX;qHJsMqV4eLa>KhOrW&cxcT+%$d`bS^d@lRW`R~2k19_Rem z*`l<4;ITf>Iyt7We7EXXQU_M;o6y6&+TS8)+xr!7?z~O6ZAV+*yy}zC-QM3ho!(F833H!Oz8bd9zZg;*<9Ers#LnwMv>iPIgOqQdeb@p2Tf1{a`(9 z6-@0ogT6$X5X|@A_CE>S6I#fN)$;Ic&v2L25%nu+wa@MxmwPySZqY=!Y3QvAf7Uuv zw_cqtwZznI)kdcjJ8p`9=e&6{?&XHJSXX|=h~dS%ze44t57&YH#5cAzX_`pbvrr`VYH2-M+wb+Q~iu|QoD=WJ>pbG zJ|eA^TcM_+pEOnaq+a1R+A_m>hu3thFrJ|-vNgEJ*VQ}6Be)lN4E`fxf|cWQ{2P-q%N} zt@WZ0ahkEv@}u1r+9!NtgdDysl((mtM{umRJf*0~alsZ+OVz1y`V!RNOruKa-{^7rbES^*MvLW+nq%#K>~k!A_(VNMwEI#^ ze{*IQ*LNLq-wRw-zGHS93XMv}G1tP}ysXlKIl@Nk%A__ma_jO9&euO!*PJ>n z5( ze;{=5{pe}rndTYm)%<@*A{AwY;W=|CZ^^k(#Kh*x0#`>I|M5r6x(^ zg=^x!Dvvt2Z!BvqX~uVqQLQBSeUfLGyJ6|&(qiv)@wonsTWGvy@Y3zXjjqz1TG@As zMCrNh>%a*7c50 zp$#J{MfZ#AmQW+!7?o~o$Td?Z2^V~CORY|+aAfgu_js`}UD5cH<&E{C^}MB-`4=N& zsLYk1cCG=U$q~4zVUto$)B=@*(?m^qNd?&d_(6uVd=_h_2djP|)hCpsIZIsCy?w=_ z`b2KL!O!6wudu76sNjdfexCmNO8cs~nH9!VpHt^jgT4(SYj#P#V5=k7b{@$7J>yp9 z_c_z^CgqjS{WoWA{!Jr$2vt?e(sACF$V>O+6MRo8Rqdh}!)5iFK3h$paIP zMsE%qVUMybGYq8%4ylj!T2|2V8f{4#%X>DIwErX7gmjYHl90 zL8pO7qy&x7@BWl9CDE>8M|;tb+WWD0_6iTJPosW-=$ER2_ZsNj1cZ1I`hmeRwX z?ZRfN*mNY+6SFSmNY%=9PByqve{8BbrA26W`kwb%zLLHx?Zvyr??%6$^zlJ<4Od5D z3}xrn8WtP28TJ|XnWxzHhx82pDdLZCDWtru6#mbk{N?GMU-a?Ko35{;-*ryEm%qd_ zP^rQ9vFTwuqg%(1O5Bk6U;L#QCbD*DdD~Q@iTM#|dPd(j?qwwlfV?uibd&e@U|XdI zUD+_f5@$bcUu)ZBSz$V9*u)*6AFH9#v0&fe8=;51Uu}b2!%&>f?25Xem0E;yQmEzs z$1~IO-n+@)Be2|`>84bftq@rv3Ka%m}iMgl9wjBVh)6x9N${1;^{vqUfU@* z5}kqS{-!>MZ*jrB@a^%>!awe zf&Tgnjq^!LZ*f|nz<1Es$G<$VIJhzx5$Nu{UixpT<~yt8@M|0^qhb@LmTO*RaO(A1 zr&4cMa>w^UK?LPv$ZavZnW zERBr09Lt^NGWb<|fbJm=@P1L;BrhvV$_UG}V_{s)J6=)v(IJ1u{FJmkB_g?Q{IIAl 
zp~oz3xd`oom=#omH--9Q8*#ljRB9+sM#^Ec6fHkgy6Pc}i@RX_U|wXYXl`h@&TQ60 z)jo20X^yy2JS5#y0{TAIWH`bPViu|Ig}?p7d{?~NJ>5$~oNQ^x?hQUHT~rvK8=pNhdvETIg8j}to_=COW`ylc z)UCuy<-*D>NE#B?D6$z=sb5nQ#VOwJN+!EHmA?1>9kj~B)oOYnb(pElmFGM2zjGVe zZ`@d@T zwsrc@!nXc3T1UR8d5TrEJFx-(VL5LZYYsP7=9FutJ?4nPBrqmq6c5^G}-f7Dh_R`SU$loIygimq|wB9z~GtDzJ zr1uLEu9sOOX63EDSu%YeV6V`104>II>tt)+4N?;$_Ot}Pc+?N;sW4XQM9 z)oWV)Wx^(FTls-=arTpsZQs9pbL#c&Hy_jf&RS5c`QK@4`4Dp@^K#?ghA&M%>!Xmz zVb{Xug%&!t+yAq!G#z70l`6ivg?Z^O-rangmo_ldQE;$yq3A?HvvpkO@l{bnD7KR$!&m1d6(_LE#>mKIs6ZNK6jX{&b*^1 z;O3elYPA-w)R6?-mGM3FcE5C8xu$)OD!ch=U-@ycoj1)T6qPUTQz`_%q|Mf=k#&+6 zRoPlotTVW7)mnL#-^IVQ{jH8JeVEH< zf%(M$Y56wfO!(-?^^vBCGa+{@N4dFbChqM?$awYkv)9452^s#v-~2iHThrvw`_aZk ze@bTg8|5w~G>qWP7xd0z_khcPA`lzg71-&I^oImq3(u5?)FQSe*Pbn)_tP(#f4EwP zs)nW9d3uf}%6U=+dA|~;{i9Xajw%D>Y-xk+mOCp;<==&6zL=8zg?$Q^6gTkM^xl^C zkpmKctuVgYqncalG_G^BMzab#qAD5Z3cZSJW?%T!`(wh#jE@7;Q?tVIkGWb0!qg)A z1KW`Og5H42lTqvrQ1=&wry%^j|IBp55VN_xFFKnKI88-u3~q z&vehxDdO*_&N1a;{V~N+Tf#P4zv1Un{naOORouChEB-2&5RIJmk0^`67a=BXj{A=K zvZ;J|iUphH?N?u*jt6_ zVc!;awW7PmraC3{JoTE@DX6v~c|hn^s;|3oj{igb_k+@kK89qz&)$+3SLAb5^ex3& zE)=&;?PNBx^$iOx-jF3x8{_WB7sU(=TWTt&cMtsGI+Wim`)AL7+55ax#*+ zzT-m3mJr#o!11}G9FV|stVZBdjA8bm8ud0RNw;fvq?3VH-n!lwzEI(?d{IlM!k7ol zpKM1y4N(L^lUz-$RP#{B?NhdB5^gK2L>X`dq=JBjCg$Dvp_?l65 zE0jyUQRnBnt82fjv7+)<$+jrkGE3{{tLL0ha5#T~Cz_Y%gp-+1oifIBEm+gEh70KQY-;`h>AU=^%W zzEh+07W7(n2mg;@q2Vq!jB)FqX~pV&tvT=(La9dz9~@93=MBx8l~uLihW8%jx2MDj z<@Qx>SM_$4uPYrXcRgWp)G^1O#vSwoWs)$^Kh5)5Nm9}J-1eD$KOOscIell&y5c^* z`#>oU7$?}+&=29`qVl85#Vm-r5mv{R!k<^42cDHyD^4s}nRh6!TfwN}-KDGib>(~1 zDgK1{JUm5Vh&8lj$Vz)J>wD8jLv6kR$8%}y3U(56iTVL3rWNEF(j_TM9w~Q}w@8Y3 zLHtbGDX&y_>P_f<^cmdfxJ`3wH>s)Y27U$Kk@-ii;2mAKEc+3TiFWd?qDt(1+{$vM zN+lJ$l<$y~5&I&tXV`mtdCMF_9#c;5Ajbp;diFaf7nqSnshzbtyJCKQr_~!Oj??~S zh8x1o`Id&Z3HGy&8zJc-(_!l~4DIMo(s18l*WSX{`9llN7j-H*>-|%R*BY?j82ehR zwtKceZB|>Bx#QymOh;(sR?Jvs)F&C9Q&=sU7ICtj^}KPuVEk zD{~euaHThxsFI{Dv)NE2uFQ?dT;peic7U`nGReAyO=Ikn@O{SKLm~j*GdC5 
z+(W@_;x}?JOpHEFSt?Z&4+We0H@Qza=N3IGzUq0Q)H0_;WhPB6zca-hSAbiHu2|Mv zA2_y#{~bw3J`Wjh@@UU}3yLOY4@w{UX;jA3oEt@bJ#z)Cx)(Qu_SS1?7t|x#&-4yM zvVDF;L5wT*QPh1$Jpa9PzVx?(W4SN#>J>+No(1ET3EF9`gW63_kgiLY)Y*{OPWGK) zha-bgcO$&&eOihg8Y?d+_tGu=)k4sDa_0(AIl!>$C z8vC*JwRCY%&=ZIiV#T_`c>i_xTvtuka!;Z>)ld``l8{&~FXcwUgy`*I(U6@T;WeY~ zMAe9BX?>m?j@ne^*wEqy72r?WOfSwSq6TTE>ZvW)W$TMG>Ee zt+gLA#~8Y@v*|BUf0dzMqG~fU+4XEXJwQ7z#``b0kCguE=@smz7BkBYX5&OIlsYK) z5rzjFh}#uZpfM|`5%Lb-8rN+^U%La{7{$>zE+V-wu~y9FkSgX9Ll<+ykkip861ycY zh+iL8#8nM`Rx~=ReR_|KzB!)?>z5w(9`)Vxmb$+xtyU_#V**{247R(iW285(RpQU_ zXCn)&JYy0gy_4J#z75Dsg)pDf=anP=g|43pj^tk`zUI5B9W$|^6C%EiNDFmhJ@S*G zF`vw5^4AQ>#!^EEgNYXy)PT!3gJ1dTc?#U;yfuR(@`J%t z5~I>}^^an-x32SYVRz?D|1tWsqfy+_u2l!w67X}M~<@=+S1eAhS^l17UdKYzD zyDWDOe(AaIn&|r6b5ybzyrHSF@$s9Z=Z9=E#qfs>P3$|Oy~#xtj+JW^d(paGd*E&7 zyjRr8b-;5ga7ge3irv1#!Ys$9x*z|^7+Y}FyI#9u)a-%Kd!aoXr!3`-|8SkTL;M$} z2G;8K3ijES%iJVok~ge4GJjY8Y3Egc8|@Z%(Rkf(lTlSwnkrw{Zy0Jg4n?et>K<|1 z{gW%dO?(Bzdp1mOBlhr3b60YA_XYw_rA&1ecKUu*cgj_z%DCxeuzFBC zq5ZDh5Zd}u+}+(>1Hb4$T24pojcppcIiin!h$+Q*-4Y(|i7S_ql2S2lx8oYKTbSap zx>tE$1gFS7lpnV7MB%`82NyVue5pZ-gh+8xlAq-cwd#N3*%=myQ8zR1w-KovB^y@7#K3 zthymEvGix>wvq#Z7&_OwBSMbK58rRQZdk*;Ft!N|O(?9GRwb;$h3FloNlI^jb>HjY z_i8`-EcLIH;(n96_S1+D&C^fj9(1Momq>9yL#qPZ;~~OKpxk%m+nKjm*V~rZHdwn@ zrkH~!yUAh5VxshboF*~KTCE-JW9~BxsJ+VPg3&+OXAH(DO_;oo6ehg%QbVf`Fm4k(@o!EnXV&R>*Ul~kUxJ=wzHQeMe__)sOeC9CImvyp! 
zHWmm%o3xkmpJIkEM9h$WQ+RE!_M`Su4JwtDba{hPPpeE_rW-Ix^la>%>{07d6`4EqU9Fz%5)KN>r5kD{70EVb1Jnog zsJuchQpeCGYysDW`<}Utvn<*AQEDrFgZ@a_bf>aNj+Oh##aN9rQ5WLA)*EtrG0%U{ zli+^j=_)j)CK-RXEH|A;-XlY)!Ys0SBUi^yi60d;)6v~L#T0Ej5LOylK4On;6SrGy zt;|wV)x*j&ag1+)b6Vb~Y-cW4+`RPr(pt{x1?}?B6@Fi`%GW~lt5zn4GjK=QPW;#A zMfS=e;~b;ym+Xr|T7-QHo#nV~osVO}q1*tPrK0KG?5~J_b6_25>lil zN)L6A(n{RmJ6TfMnO?F@c+B=kHu?qgLVO$$lqKeWqbetw6Y54a3^`}p?Km8f7kf9} z8$H=ki{A)5m%o?=+%kHz)W~zP@OjRM?1y;`oTcusJ@cG5a|dLF=H?eu-b(%@$b?M` zzLekTG3*AcSRD3=w#{azv7;p_bXQcpn30h~LoQf;Hx4rFFibb`R^HYG_a?lP=L)P8 zR8y$>+G=s9{~u^pj%Zc>RB9-Dq^rV#;1r>Od=7VlzoNSX11y}W#(vJ$WUk{Z2yhOl zT1*0KXaAtT(^J$F@@d(i9@32ZP*p;fwY_J!Tk^eA=JWl`G5jF;f;ZVehH4nHC)OQv zE2N%rI9K2F(LqISjmn6q5PHKp)U?RRnb%obo7->~kS*yDi1)AaZgp2Htz42_d?NpO zc2#8Ej(Rfves_AIEq{k|lkk#0!F|CD!A-*|y^ybCUS@w7HY}oi=vMP;F2MD)#D+c% z%X3_|#F}OrzUKNeqnYyja8q4V9&=9i1l?k|-ijGdb&+cZ*9V3OZ;^ldLO2{8gITMf zBx{>-mL(Ipol(kiwHbAbS;#hLdH}B}lA2Bz(zmFai06{jLiM^<4){L;a0DZTUOtnz zWv~PFh3SlG4rK)j@I3joB{Ql))MwUDK(kt8xrRFl*G2CNJ!AdB{K#~`wAwt(ddeDa z@=znhJl{m0>hBmFhA2JByUsPWkSRRiQgJiV6nB?=Yjzxp*lMDcy3z+v70>2ih*nI$ zX1f@^Gy5%b%|Sz=@h9t{u;-CYB6^0nZG-Kz9rx{Xt+UOCO!tjDc|X&PDbGb2`f#Fp zCUC@iD9{nU@s%<(SlJuty%7q;cMl{L4D70E%sVqO0UdmQ&G1ZXS!fc=) zAiMko2z3T#5W~YFUsj{E7y2{$IJ1jR*KWw)Nd?Lb2I!fF#nd$MH(@1p#M(T}VPDMt zs@iD5GBM(O@l4M!!R)sp7-HeQ%80?^@{U;l2}? 
zE-eUt>uQ+$K6jQoQhUfWQEU4)m0JBPl?%A_zAmkCMP|v8X&BDbXLp(phinb$X0aP4 z8_(Emq1}K@o?@DAF0!09-$Wj3tFgq~$n*x;+?8U6@}7D_T~mG#76!+NF>-w=5qO{r zJimHkee(l7#Lid?S;f{tN;n`_r-HcoW+4@=C9AR8HJqeqYdB>1if_SQ0?Jxfwl<&3 zH{=>qKg->P_P}Ll*w(nGavRkYak)c&!rigFHw|EVYfzvS?Y+iFXv|}AA+Mr+zzb&t2q=QSkF_txL@Itl2gGjX_V)A?#0agg*AdXxM}o7 z>9LYNf%obby{;;#9&VmZv$Zh{pc2?g_KndqV|s;+v(~lN4S5yTICPzDuH{FIWWH?Z z#+;@`vPs6;#@@_T`CV|Jz{nGo^0E=P!*2D~_tgqS3B84Nf#trTz7ByugsI3Ib(I&% zh4LyTO&vhBWM6XKxiic|dLZ+N?ai+-)HXgf%;ul6e=$42pWzH}-?X3AWHB^qH?^s@j%&}KB{mD-@J~5ZW{3lt9?3W!qt=IX(%sB3y>6CSdnwAapKa*MR8YE0 zZ}l0bL|X^rui8k-pl#q=Sq7UE4F|bY{t?%Q*-nW}UDGX#!MI!hAUTxgR3|2vx}|ki zmq<1JX{Ci8PL8D8soUHI`2&is_{XSk<)Cl8ySlHHFkToEU9(GS@WQyxi)rZL%J;tqUm$y=Tuc_TnD$6)eBn4p@frb*bKJx{|B51ukQ5oW#WQ!=1-ofqO`u!GOCxgdiaR2x;EMrW_)d21C-bE)*bd?wkk#=JxIg3OYA6oPo*or;`sVn z&jnAa@VO=`QdwwU>ntIi(8ZI{{9Z|oVq zkauz)*ss_ztclylwdXc6lc_I(D>ICmLv7U`s{Q09Vq>wpvX1i6J+!XEbN_s41$)XI zB==j&VOkHvWqX5=Z>^ePEuUd1G%d3{v;JUnTE-f_rEln!nS8DSC+bh-JK}!fZeW!E zMW7W*fnWJQIrkQZmtGVXYgMF0?xD`sL(#pHyDkwp)K;yqCYrH_)*FP=N4<5)kVrwB}45+tz*513-ZMDu&Y97>f&iVLL$atHaQ z)LL=|=XvfGk1Vd>{Z5WmKlsNNUnt;9n~VS8jOJY7>tIJQQrV~-q-XG_j57b6u0c)b z=GbDwD}}YURyWuTsg~}JmLbjUKUl_@SYwRgEgx;T%->{BQ`NN!>JXLJ!nF^|3h8aI zMqp2%NN6o@!+FGexDlm|uv^}wc2#vbRh}-NM~-bEJs7vDb*JkCg>fXai|=n3!=*rb z(wIW_J@8XoGhOKfsypmQUEpf&*M}feXOz2$9-)mqO>fEcM7iGzpXmRn%`_deL~_59 zo4M$JEO$cwwXZVyI2*shc*649E{2?Qk!if;89x+v@{AQa3NwP6g0WIv zrAP=VU7lZ9u)+J4@~>R%xlnkj@R8dgZjv`kH${W=2XL%YrLI(OQ-QU__=uXPjpEYn zQ4xiq?X9e-vN_at%6`l~#@^koTd$d_@Di)>yG{K~ZMjFiNa?-4uk3cfD==qlq(%qYGXygvu&w!YHi%psBu*dH@S&S4WNLw zV!z~Cah(u>E(P*zC3+j`xTot5eJReT4N{jW?d0jwY`LLUpSq|ulbYfdjyv=u(*#p_ zCR0vPzGPonr-yiLeN4I7xBn4$)B3Hw?2ByeO;?#Z)B@&+VVUtW)}Tg<sEA zy$DHgBw04$*z^+13j3Cj<{^ITGGlpuh@m(3(wCY}ai{1$)EUH$YqUJzcz&l27Z3P< z^4Ai#Dn;^N!CJoZzNdko#GtfXUM)Yv92^vXmTzhttx|LJHrf#Fq&}JUFelg)b|RgP zx|Ap;9c#=bOeQKSrcjx{`{r>=>Pgy7>(o^3k?fOeQ;+y<##_uqi4P_!eUPzyYe{5U z%9WKV>^;jn+X!8%h@jtJxxnYM#C7K%1cuOy$)NQtw9M&CHa-uRJa+eCn@S) 
z?I$tW)2SrUXI5tGYvj|ubMAw_3*vaiq!dUVX`$Q$^VwN>ihI$9b0(&Su4+KkG*mOs zH&-I4i<-r>q?;?vf)xYL zv0rjbjuLAJ{J!)4fkB@TDGiV=i={#fu@X)|BmvL!j@AO$oXzMT={xi`<_bHSOXbF~ zxlBIOfIW)+u~l3aPBY>}KF-v?q_5&6!AI>^`HVPLdZ8B63)sxZap!N(OgAY^Th1jD>uvVHD0MPB0}JS~6~}ftIabM6_{Py(r!e zP8LeU`O<6ghHyFfGB7<55O?} zVt(bDGtHD^;z#8#W}~5zVJ5o@n6a0bZb0+?%Glf3onOn0pf@o4xSITF_7*T+=jm6~ zxyn+t3J_-JsX4)D|AycOWxqB+9uTl>UeU6zgue+27@7341RdvUbb5S(hKFVlLe zf2u&uqz^JZnJUaPrX}~3ziGH@_{`ui=!RcRbIoa{<;K;9Y(ui?hN%eh6U7~1TXAQ( zE9^m>YD)$d>K1*xc1tlRGZe3~QnAYq#gXDpv76+Rn&KSeJaM7$04IjVDhVp5BueeY zz0z9M4a~DlWcWL2)2VaJ@4(c~z)JrR_kb@3zdmv+`2#q?eTP5B?q+(jCvZB~&aS6k zYdf_?`Vj2-iP{xKmQv(AWe2c#E2;D3%{a%>7AIWi06TZ5z8cj6JZ%Ol_Y}sl}XoTmz_}XEU<@|`1w$}5k@PicG4-CBL^Dh` zrZ3Zr>A=iqim=j5W?wMdn7vFf<6v(x?U_>g0ezcZPbUDoxhZO5-lC4Bzg|@@MFmI) zpu$(wz6DLAv>*_;t=f2edQdy3@u)Rf2PA5rHWHZX9e}v|9}t3pMMf>A8M+bt!+BK8 zgrhwPxN*bu>UukUBG6D91I@XSJ_h)~(ZAC_(>`hzH4iA? z9q8epDII&y3#h5UW~vV!jW5IXJ%+OlSF~^S=D;9DC8zcsP{8M+60Q<;OYZ?>=+639 z)axXn7DxtC@z1~_Jdaw)p=iZHtwp6w7xdd7HE;)kyj&MltwC+hVN?rd0aH2>w*DBf zuWzCvX_P)02ybstk8=n=F3Lzh1#0+M>M6#k(|^+UQQH$s=~OzMPDj$Q)MnHQZ3cey z4p4d>SjEGDYrGHmgCq5!sQ1}})*I+m^nTEw%|Nbhq<2KM&}yv04uC_qLBU+`=Oi*W zhW;zCu0FOT6xamzA*!Jq$a#6O=eHVnOr%pIQK_^St@^1RsQMX<>Y&Hq(EYB zoW?U!=Mla2!)eROsCRpZda|i{BIdgkIOhjZ1$7eK z8|p-*S{8WL9R8y%>dy$SN)zy}C(tCjfIIEbY9pYZ{|Jd&2&~HSsK#mzeoqH-_6VFM zevV46ig>ai-uoRG@$-QUnFjvgW_V^ zFj%6gkQ0K**$35(^Dw(NfQ&v2EiM7q?2yS>;N^12Ri-|QnuKvKg;nwEHBrZR404x- z|1|?L=3GdLq|Zi0SvK&-&*A^WsI3^ijuB+*eZYg^;0M9)O2wE*fe*DIIcXSAOIYEl zsC?W7Y~4ygwQdVeUdLQm!PhYA05lmlYC>vrQH{0?ytxZa$cH`K1XS}sKnX$n^FqVY zprv(T58neFAOh|5!uefN(<))^Tfx>-=({;|eHgU;6P~exZ{5I)zo8!tKC6szW}_v7 z>_;l}UO+zxVnQZ(`xJGNr1F|z+K{SXKW07+x!wk-EDT3|9>Jh!21(6D#oq<|VeyTH zdI(LqiVDdj{GZsXbkq#CM=L(?^$qGB+oB$DK58BRLGDxrAM4;d3cWXlED}5;7QYjO z4}ug(qo>!P$VS~kD+KYfF7^>_fD^d)2k)A}&AZ6ElX^fJ^pM(ff)Dx@Rec2gj^Im@ zDnsm4K-OcZ8{leXaH$74<3cN$uxhm+*;eq6;AHtBHKgLY26Yv`k*fLjnAwW(Bu?17 zJD^R*tpaUPzig-O!kSn?DZ#t2LFePZ^F_8ZMg*97&kr(RoVC`~%@4gEb@NdjzE@b;XM)Vunysftb^@aKpirCMU 
zz^DHV+B8$&3z}9?dm*D;;49|nv>(BBocuDwyWQbf%Kf_#5E0y+{&f2ED1dqV3sQA2be z)kEKiC}2LGos9_lKJ^GIGYYc!B|g1{S*-~#vPjE7{fne61TMdw>IG!^2*fW5@bW2o z9NiLcuEQ7z;^sW+6u8_UT2u|PbR6dc+^|sXp*yv;&dg54Yb#I@x(J@}31npm@bRCa zF7qCh1bY63-T6ryMWZSclIjFDe+;B%HN4GJtvfxCs;TAE>-F2(4SEH<*iG~u%REy% zQ=^$Y*~j4YzLv_|)PV3!4b*1Qr!)(yI+p3BbW5E0`T;V%l?sP+^?;R_f+%DN-B3FR z3HU?%jqa$8r)tnP*p}(^kJ<_FmDAUN>%el+uT$#~g&aXvCx%W%l<_zHRBHy!>_Mf$ zp6;VUwT-wD;Gh!5#^__P7kZLx9Sksv^$XC~A^Ltg7Ix6id<|Xu z6899`pf+nw=p*`iEuNXA6ya8n;aYvVK>LTPqkpGQqhDz6sGv53zJ<5y(WkXF(EQ2h z^9$^}RmZ+t2|ZTp2~YBcUP$lItorY?MXwB9U87G052|4ACmp)^tM)B@U#oG#maBsNPkQh&2Qsv~jxwO3otUeuEG*O(0} zBEM1kD(ws{sG_!uu8K@)WdJwr(<{;wR7ttdzgEQ6L)m9(JHdI z4EF#XqONFv(Pt4gCQuu&i)W?2qJ!E@?0AOK|4_X(GwpzlSdU%5>*xpfHE0XzqW~Rk zMVG*SFV*iOUW(I=bRuj?8sap8%GR3GFM!g_P_6aN^cgjk{+3z*sq2HNqd*$~uO2|m zxm9aQTlE*PI=^7;K2EL3j?~K0ZmLAdM+WGQjukB|)NEJ-f{xyk>ZNtSy&NxKXRP`a zNZni5LYiKVwcR|(;|uDb_6qB^ZipPGX&>o9RId6GKKi1588>Ithb{XFbe_UpTvecb zz)HcGg4z~PB2r$Brx#IkwZ-~y=+zr)gx*b$rv2J{{QZ;O9(L}z_5%0DxS@?k=;c#= zCY^!(p>OqR(CnkMi2F?bqX~+1I5PM}kmp^%ImX@z>`fQKq)LLycb`M)>%c&7` zb1eXS{Exl|QKgM)hmqW%y1@Q5hfN?iz15`Mh@Sq0CAkTGEPyRu3u{e7o>$SaT9*DZ z}x*j!CYe4PO%h6SE zhs|X8a!;9`I1Z1x5LMr+=w6Vh57ahTu?u*=8J-nj-S_Ar(9!+y_%5JgSEXjb&dx`K z^EIq_E)W6I^v9t6U+gFB(9MvK(^&hQhL^d8ede7yN25*t@{CbbzAhG(>S#s9MmQa`X_$W)z~i zg@}I%8iHL9*U#Y|qYnB@jojOGPTi_*M~&n=tSv974m`0G>zq7z_|K?ixc90ycbt8Q zyQX@wuecf9H{2)I&ZtZ)W+JnO>5e>D9JO1kgp=H&+FlFCeUCp&oun-iEnAc?6t6r? 
z-YYjzPN+MuKHLkhvIOg<&*&Mr;b|Z9nE8fj%gkam?BvvEUQ#vbMfA^zey>9BI^oZD zZ4>S+dW1X3Y7+1TSs_J;#yPbA> z;d1F!L==_SJJ_(H#uhZTNbqfy%$tK+J)QeDZ75} zbJiF5iMQ;t_gb@N&Aju@tUcjyJ?*II*?4ktpz!&Q_MTpxo>y^x#XZ@3QoDLDq<)k6 zA$PG@@*eL`?nHN_Q?$G6+4e^J7uEEC7`_v{8-5-gENz~Vtc<=4chFl5{u>XdGtrIq zWNU#k<8{#`@!##kov-Y7tv%w(=<#5ua9G@C<((JoOOl~sx33y)^$C04UaU?|vlQ(= zD)bSnyVgO@0_RM7N$lw~+H2Nqr`?KzNyVw98?BvkN2vSvRsQmUcS|RD>6-8B_OAND zc_&(v?C2d*F{7d`bEK=co7&6lJ)BdLY0AMokpCn&-&$rL5smM=w*8^@`T5P(?9`c7 zHotez)V|*bci8*t3`SO+(E7dmQv0~~B>#-kR!8d6id`yqOGnA9@au5EnjpUX3HLr_ z_V-GDj;4f_;ZyWZMIoZeQ^7xBmTYwrGDyE97rEN8#%xZbJmul7Ifx$3Q! zb*{8G%NCDFs?}L;LA*R(8y^&X5bO{P^}q7p4b}yBM%D4N(W~Ks(M|HPMPa|O>fE-% zz0G^cYjK7-KiL0vmZ}1I3-?#+ae0hWtgQ2vaMluk(ZCgbYx5P!)tSF!Z?=~9&+A;! z-{789H>+_`)kV(f;UP(7`o7BP)h|{4CihwTi1e0XzK(G&i+=ErFNNVQ-ul#{df zHm==R>RaM%QL(RmP|t2H_qW|qs8^4(Z1kdkUbMHjG_|kQR$5qWOa75d*S%KrL+YN` z3g1kor)K9KR#(8?RB2al&59Svd!8qLt;M-Qp5~qKw4hnN@m5*qI}eIeSs$(rzl|=8 z4;0?r!|qii{paLm`TwhBVMj<$BKfB)l4bGXaYM3P{`F(4Ir)1uUJQ_@_~;1PsY}I< z{VTp#Y_716Q}1@$8-%$JQeUDoozvaP?tr~kNaDLgKk<*K)qgd=e4xE> zOnkQ2?Hy%>g}8U8!k6}6svoWWE;BT~CAi!vRGeOOd2X>-#7olSs!pz&lm5ZlJ9nbB`}9ZeQ4baq-qnU*(y0z3L|)>B@&Uc+-;!{XN|^#hfY*?~W>Z7j#T2 z%uSzBy;tt<@%P2AOS>hToTJ=1se3YGa$jUxoH_CNao)Kho2h&yJ6ze_yWI2beZ}`b zYd@EIFE!nrk~|sg6U~axQGdFRB(2HvPYucKQ86g`XQ)Q&J?9>`!@4{AcW`8QjJ(!A zoW<6%c!&6D?dIRBW7sh30x<`5ai#jm1@SOhqpf5YM_RvAwbi%QoAS2<$yoK^+C^;J zTsxy&$)57okE>ROH+tpd37eDSRA(@^biLU8ue_O=KIf;>gT1#5EVC-BSJ(Dt4hT;z zyd2(`ZmNE+;>gtf_7~3OnJcoh({r5TrMErKmZ>21wD-BaCYX>vwC|MSJN93*J9^vY zk8JJSys=Q@U70$m^kheUXS4r;_r7~x!S1@LZ?*kOdQCf6-%6N`tR)6uYXK>T+`I1RBnzxrSMOyx$>0Skrgj_lf2(#8gd_| z`t9wK!BOlF_+BzRy*9Hbb~>M2`*O=1|F@M>D+m1UmKAH?>1<8T%x>W<%!9^@v(xjEouVDAOnOPh_?n}uYBJkdv;BFR^RueJ+O-_zUK?NF zpF9xvFD_meU+Vtqo@uS{4=cP>+$s9TJxd*b-*-My54t|_{u}LM>^f(s)G_HBQ@1jjZ4+J{o*q3MeWhr#!+PGn%>I+}nR|e`Sw8Px z>n?GhbEkVFogrfM_7KKWcSLc)$A&kFss6BdO8?Bh7ybIw303nm>x=hw+~4_>m2P@@ zBFF3O}Cx7k0Va1uzCg%^{L-r))t4}X|RNS(3S@^i?sIs^wdwnv# z_;5*Y!OVD7Z|8oSp5`23wZ%I}yT?CSuX#tOZ*)h*$Cp+N?2_NB^q=T>_ZjE+>igVT 
zP_{9AMln~1{i*9XS!Ff$iyZ4c`$6{x_bKlH?;2-fTpyhf-sa@IleNE|PNoE}`swiT z_GossoMrnWe^`Ez%^veL6t z_trhycvj5>Z$^|3{t_LlxNS*cM%Op(z5RFC)fE$*QpZO>ZNG9(_q^)&N1RYUBj3F4 z^cFw4Y|w+vd*yx&S44}m+tgoI`C(-BZ!TP#KBDp8jlZPwea)@?9Yg$sQodf_JCL~k z3jOiw-UJTKewRj2gkDtt@(?hUT1RZWb3D3 zPOwzHLK}i3OV9ZelS93SQoFe$l1=_u!5`vX-PhCCrML2eWT$wPy6>$JJ5gne?~ZDM zz5M;dSbsMKL8!OC3&~#}E!VlbFP(|$&|B`f%5UD1`lENPeRjNM{E?!v)8i+?{lf3V zg8xK*T~Bjg7|hN(b#p5|4zKMV+;gGbJou8a`wTld_ha~OxH0puy47`GW?xNSjb>SI zxRJ9tc(r#z+pDd=?Rz76E<4mcyE|O<&QBXV56NYQoL=`Ee_iVzIuEoTtiQbBkLk;T zDbZOKKQ;O_YvT5<*L#k#y!w3xWwUn{n%ie}&dKj;U6{(GBKL*lvGAATFU6gs8O}D@ z%PTfjURJp_U1QA(o(j_TTY95VL*?7)-^CXX?A&{F@d9hTn@{!+p7Fm4H${Vk1B=s2 zLz45o!!ymPg|;2f3b&8b?w&eddUX1ew`z4 zP3nNS)cSW^uda+W_7Z2Yv&sF$3A9T;w&vLt)|2t#@CN_<;Mi!gIM`={g9?ZDP40iX zG~L-kou+rFI49m&nfO!EgN9_rd_QDv#mM-(Xj5vRs<`^D^m*0^!Q|kL=)CaDLS0w5 zc}d&L{RhQYdZ$HGdQMr}v~i!pBQ?W^d{#BQ6m$&D|B(4!CxLcP9UqQz237r3_fTf< z(wkkoHBYSu6Kk8xoOgJfD*LP&T$^VzCh93xyif@a8(lx4)yEYl;y{k72{4w*7 z)OS`Q{P|KWkd`|l`?wwqIrJHL;Q z3}1+Qt@X|{?>DKZ(x=%>d*ZqFUG9O_y3)T3N$KTiPn~a@9L^}@du9)) zcVpGHjT5U*w`vAv6wl1;-n@L&@q=e(M~geZHanquQstJ;T>pl`y5g_ow?o%1)G zx&G$vq0WTdROg+ZbJp#!@!^56a^c_^^+(xr^E;P5OU1SC)(lRsj-IzCR!pmJs_cy3 z?|QTIuHqxL2U5Cq&uO!NIZi{(5J^U#p8*MI-Sk z%IVHao$I}!j+FZMQVw6UTUek-gTYT$xG3W;knTx@#5%EKV96TIM?s;&n!6ol>=`?9Xgv=pWP~P zOW*k`QxgW=Gwd$Kkeh`-K1@{kr5>E8H7&0=!8yquliIAj_d9MbnH??v6f$UyDm*7o_Ui#ud8df&-)sWHJ}(YVxM)hDKw z7jExZ+wqvcpEoR1VO=_~tb32{{RS3>_u83^pPruzyceA_+_~=6=_f1a)f`_pz3Q;+ zKfOiPtI1n>|IieBy#0PMrPQ2%zi(6jlf}NmUZoZOn7B{%uV2S;@IjPy@62qWlMeUf z)_Q-nZnA&lEc3Q@@3vOO3&OSW_n{v&mtGyP3pW;T2)~HcF(f%JjQrn)!<{43Bhnk~ zsg~{CkXu|iG;^0dGJZF1a%xjsr|X?*(YeI~3Qq*R;VxlC>8HH!KN($^Jm~M;H_&@) z>9Xv5^;_1@&z&7z=~r4Ss}39b&tbPW+>rV-S>oKBnU_7sIXrl}AnzGW3R9()-f?ZI zmUG&&`4^q}nf-0I|J1hLo~@l{8&(W@JGUvG86KMMu3cXHOzK$weE(o~S=H|1b=|?TGrl_nke0a|V9ud#|uI7^*7t`@JjD$7k+Kx6A6io2tuHW+zpoD(|k`JvYi- z7Ou!2k^jiwBe}*tD>|m|b>CHb(~Mu(wsd%~Ptqwl|ByPwIYn$wTk1CzcUAP}w#q!2 zI^MlUoQPP$=tBK>X#9bHVDXB9s|Nn8F6!rp?ZFS>!SOBeV`AE#vfi{8xdW;D^sbGa 
z(&N)3y(jHf)y=H1kMiF1erG+Xy4!~fOZ+{;U34z(9ltSn(O(tTM~8;1N)Hs~gb#Sf z)@-Q%u3|TPwEWYU%2NkFGc?sOAu~x??M3di^i1!haO;65`xX|~sJHeJg$3=SH(arC zS>IEs18NrKIt#bAwzp4=|Jrcn(9XIutw&4a?Ge?N*N2%qgE@r_@mUr5sw(gI18ZAX zwC_~dPVe(tWw#bb^xfIFqSzMy*A3IhWj@W!&J4~>%buk-$P}E@ylieo)!6EnQagqn z14j(3DE$&u=*-Ur{_^6bI@f(z;ef)QgX`=!(s$>sO`q&uBc`gtotNIf;-boBxw})1 z*2lr;+F@&>P0^hAtMHm|yI_JUSB@U&A2>BU!o4=LI5j$%>n~O1@-gW+vz=Z(euUOw zqFt?59sf42wI_Ltylb6PgG&lc1FCh7E{-pZwh8{Bir+26J>zr2u(YP|Myc9;y>e3H zMKyP%&Incn2c}yZ_ZU8>c~wPka%K2{by4b$)cf%vg{gga=BEqGEGg=Ye9P*Vi9H`A zODp!R9A^EbH}Py5xVGY!rVWjwG8aXEP9|hps$a++oYn(X$ODc&Bc`rIM)VfLL9}H}_QTyv*O-t>Xv$ zdy5N7M+g6io{VowK8P<0z9`(%Z|5J@3GM?jJ7rece+qy0#nX5bGO5fnsb;Ux19ridBy~ zHGLgA)x0?UdBdL@@5{cY_~+rw?`!^D*^)d_Tdk-gRM*`=&EG z8d0px$Hg^4M{=06ck21f((Fyy7t*hI3sfI=y>~)-NNz^%*i3A%438^*Quw7bTYTEA z^;i!Gd_I^J%If^J3~y_ZF*BT;sc5MK+VIPA#;qh&pu!!j0}%hw z%H-hS=fYnKr5le`BK{}ozE8jX@8X7&ONO3WzVsF)01PWo~_@m z`U3Zhc(%Q7W_@mb>TOjjUSfajUGLr*U7ugm*EMjl-yI#1>=s`Yj4%DYxRZZiJl0*G z-cIkcc`>!zYf}~W8L4#ozRaiD$=QMzTDwPo3IczAuzy$^)u>DQ@aPx+@ZuRd=RPG` z=p2_`oF4CeVm%gL5cgY?yrG$YWgbXv>nw=Jg~tV-gkzF*)_<%^qNOiqihjn3D*EjIbP_-k~6{pj$CurWN#w@Nks*YSy||EqYqBAdS3>9IaY zJ(v4O^-)zv=SHXNyv<^hezDGu4k^7`R2_4)M{-;+Cx1fsgFS!Aue07v-<~-?=`L*5 ze|oS_YFM>bnQ^}kcMgBHukyBZw~SW#PX(jw&Gy;WMWxm9fZGN;BvbXqmp0`{Cd)SO zZcX+^rmxQA(nqIvOZ_FaK(#`3&Y7x2{J>l49Ay11{I7qvPDj2K&rr^KMf9KWreKlk zwwjb7SgV~hHg%)Ry}pd6+1EL)JKf$&5*QQbbPlC1ni1a@^{W!>Kj9h5ex9rtp-R=q zmnnyKfnG2@&zb4&0Z5gX=E`DqU29HO?aUq|NihWYnuD6w~H4z+vu#|Ys#9P9!(1ORi^(3`;BCNv?w^b zG&1N8o{rD4Znv6M(fSAL37v4rThG~>Q=8J)rn|lE-JE-)JHg$?yMx8wlmeeP<5ei>rKArSkEN& z@oK$;G7Qg%s^S$=XPq>{GV&QRU9Ak7p)&$*uFP8#;fqe@-5b-`2ZkT9kdb zUFSTSqg!MhPm8_?SA-B~o-@4kF<+Lf^QqsHPE=YFPdoQ-sYWvnQ zRahLYT-KO)Z8A6hPC2q!$={`g--cH7RXo-@*S<+Ldix{~*)LhwyRSH(yCt!?H#qa1 z>y$NqT6nut*S{4%9V=@eg*taw5kC~IRK@3k%0f?4<=!o-uiBuy+>qR=`m)z#9k-SI zit#(~`|*bO=J<|yqjJ`Zl2=vlcYHjM+-$w3in5XRZ0ic;2iC}vexusMTkT6!3H_tNV4bD*871Ii4zhYo0P!U&og6i(lzH&vfg_WKDdO^}6`eL-d}9 
zh0$`=iJfBqD>)`<&}yBp%=^XaFMg-8yt@j;ZmIm)GUe5Fwm0f*+>Umu>PB}H*V=3U z$3D?slpG$Pq%7Ns*4QMI?5UGOe^8ceYpu)aNrx)IPO_TyF06|b-#sjg)@gsGGo;hS zhuo7~rAX;8)ip1S;&4v%S~N#B4fV;m`1+_ZUJ>7%+^aWTKVee-_KZQo=cuQj+P9JpEU88@=`o%ibepCB%TjlTQ z8t}6!Z17HNuZ$Y^VT|1{;Tqi_lxh@(SA{NmR0(0 z2UU=qpls*)szv+0Tp6-DsnCjyQ=Qy4@)aX_TVuIn#~kY#WiGE$rP=?fu4$2~?=Dwv zvP(~yjDSN=MaVqm{i} zs;ZMclFjiIs_=Rh&Io4NHi?~h^?G~yONRlC{FMVD4i)|(08ts~#DwZx$ePFjT7q=$9-heh!$l%9h zy(&XH;;f`_hxPB|8LjFG>i9fg-z}8=nVF1Lj{jZN$<9)J*4?UptWw7JcGcT_qAbbh zsy%zeTA*z1*<$t^Rja#Dy#FVvZu_rR>|tw;r1OfZ5_XifovQg?ul(z1>C>6Yw2YM& zT&Iydp8TMCj{Eep(^V}rMYSVoeda7xP0f^U9i+;qv@+q_s@wH|s!@(qUCg1{^>h^a zST(orsk&|tRklr(EML|;;d@n$_!o`-Bx%Dds*d|n~=e53d6Z?Es^)3ZiT->l9A70RuT zR(^aJX~;0uC9G4Hd!(w~wv(0Z)D>@u-SG9~>AHqK7O$(GtWm$;UUF>JZ@Y9KTUVN1 z4nM0RuT?bw)QIk`9ek;3_BLvL+cl=&NXO1mrBovS`?9j}TWhovqz$XIujx(Mse6sq z_2V>p`rOi`hptM)G{P}@`ec1}ZSsij-6N^e2j+dP_-CrwqfJl5&`zGNyHlBHx>#D84PAl;RNb(#ETwjh zjSeP_(t>BDYu&1Bpvwn+12+9{*$AYf&^vG$Kj7+B= zTPs7wUrsZp3yZI@Ptec0BtgBvMfay~&>HFHCjF*D^}=uHxovu%XhC<vOD} z=_g1JXL^4j6S@#rNFH?jrkhiur_w8`R;##L=z-22)zX$~&7f2Bp|c2`vgpS}=g=R@ zpX$)})J6}{-B`D@BsxTrH~pNE0=;xc>8Gs5H_Ed9qUTfn{Hta`kBDl?dAM|dZXV5& z>{!iYleDNyxFUdihdjfYN;9U2Gy!s!S~cdAwTe3f<#UApOsO*f(KRZqc~V|{j# zp4+4yNarDXwAqr%SjmH2`6kW2N|HjmEq$79hjbXE6BK>3=oLuMY zp9!U+8S%F8zc?-EYh86t?u%Rp42GI zH$rRHD5=rot{{2RLuZ7(`&G|crrks5({5P=It8b-b6WH~x-@PhnU2)w=pR9dL(Vq z2htBC;`dSpduH`;-YNTfpvuEAu`&g@c znQmj-s?vChp3b#&728Jhrq0Nh7L3=mU+ekb=^k{Gqu*Yvd(f+gPJDEAp^pze`8s6_ zHfbg4bxcPXI=gf2=CX#Mqh0z-=NdW_)1xaVi4K>vs2QeLVOIN`4pMYE!+P^IJe5zac$3)zgI$DYk00sCsUb9O(5-hq5lsuu2+^ zrDfl^dR|8Jqst^66S3e$>Bl)zPh#fu9Hf&v zJUUh!C3`fAh7&eMHO?=w0?(&duwh@97Gkj)>dXR@AjE2XO>oo_6S z0vpgP9XC%k_3rdiL!;PFLp2Y2z4b~*IPpLSBzju*mFu|aA4z{w4Ok;$q;y6`7c%-8 zT~O(_ONT!C-{Obp#>gjX%d6BXdyVH~>}UmD8mWM%vtz$3K&56})M&UL9q%HI zo4$VZwsti?dZg3g93A3QbSR>GBD<5`-E>aoXRJM=XSdLQ*wN1l(&d%XIr{O?naiv= zo`&9ygC$pbO*ZN)bT_Ae=~G9CZu}cPY(rUpx|h;~2MOD%am(=MoLP~qJ_V+VHG6a-uNejMNga|eUIP2ruIJ#_ 
z=s$_>&~r1R=Ym`4e2vv&uaD5^(|Qv9De+2l{-RsvfJQ*)?oeaH{?eD6u9EaMrBflYaGl!M$)LR}iwmqYjw}l(%%!o|s))?z+ z=1X78;ra>^iZDH83!oUUN**c|XHx`p3qmrUrZ%39Dzl@7jinni!g#<>C8SHPiKs-g$Iaq$4`o&pt%o=z@(M1rgD!l&+$jiJ?E^gQ~U|Ew9|OR(fA?H_tC(qkMnL;p^^w=R`rStGg`gK5|!;4Q2Tx7~kw(8rjr<@A7L=h3l?Zqme5phmhcv%AnW(+M!t6R@L5+SZdDJps9I)>qt> zj*WCX<+qg@FIEbiz{s)O{G3kr%(F*Qr7Is^FOp>GxyK#`Wz!Lt4z3yf%No*?H>FR~ z5gJ{fBQ9r}=qqh$zLuWKSMnU4ZGIjLtI-);Q4}v2|c2 zPgkLnpx-*}Dr^j12&|39distIwT!J>D`0jDR@>-fP@WsTwy`sb^p@^=d~bRav!Ad; zbmOemJy<^wLZWNX%#_AYM@u|Yg`Ph|Kcgcc+QNJoH7iOlV5}v*Pmve+zC$B28bE*6 zD#?_SSf)oQ{k`c_8%qM6dKMNFYsh##JvFTvVuNzJ2J4JnW2M0&L=H%mKCr9?JxJ*u z?`mxHw`Cph1z1Zon!bSSAI@>n?G#(Z9st2JQ$~z+LO;Dh_?A~|*u5gsF> z-!i>))A}i0YfYEbzVh51?M-?(o0UPg7%5M|%2rCNxEm-8w9I~>_i}$3U4kV-NeVB< zUIy`UK8w?1APJ;H$IGmKTc?>Kc}|=43-4iDu{`uT$LcreNp<=c9pYq}xeK_HuE(9a z3w_SPBFw@s<1P9+CmKDmC02<2%YLEfb<6KT}^48KU@kVq!rUN<_6g$Ma-~otz@XYKqY&tuf_>#HM-<22TY^tPTv&9l55HyYu&(SY=1C`Fbi^u4&+GwX zld!t%Z#vo{S*#8y44uW-dYV^{<_==Ss_`jeFYFBcx$*0!Z$0ZxCs?oy=L+#W^eg7h z*jO+XBL;=g+mbtCwdkeF(^xgo7+Omlfuv3BOqL=LBj`CB6 z>UzcsmIVXA3DJ+ZsVp0iG=8H}&j)jY5o$F?Y%RUULF4o=E|&EG|BcnbM&tcisd~)< zw872=B~(bC(Y)@muJUZgNf$~y1^Q06Yp@{84(B-8-^axXBdl| zlFh~58>!)w@%Y@2KCak0?uG3mqQEzswQ1Ch(V0QzF|g};%g;1e65Gt_Xe8OFF-Q6u z{l)5oSwS%wU16*p^Ug{V%$N~@9{7ZMZj{%Ld*YjrB8*H?za!rCrCIn3b_;eE&yJ@` z=~LJ%dEE#+SPe$vAQcWz4mttM< zim@;~u^_x3NQ2RXgP0S$7yJ;*LLqO1l<+LbANGRvs@0VsKcZHwE^!0a2YJxVHKnIJ z`u`Ap1+D^dV{_ms*mc-AI^wb;(O0ZZuf_oa2Eh`ulr&z$4jEK#EGkh3(K7KBV?$4j zMXS`$@SX;pwd%fbLnans1z9QjTIcmlFgUiMzr43WomjyUnl0mPk|gksXf+H7z3o9n zSp85dz)!h5JBLW9x-2(-i$7-HV^!&uY=x`vOigr)%Ii z@q~_60ZHP^y7ffZEMjWL%39M)w@?4pYJAKE3&Yw~Nrnw&?Ws}JN(=>GRii6l@QA0d zci=Sa7LkR)<*Y6mX_x}y4SYqX=3#6jEE#wYuZ_QE*Ml74eULSt2no^`7re|)fomx0 zsg5*&@nPF?x(EH8Lw%;XERC%8?M7WoE%_DkBV zDw5cspXapqx-<`L9rI?qNRXHb-^Xcs;sK=mSf{8iG2o zp6qjYD#nM$MgL8#+)$P{HUngbUFBa+`+5wK4{6p^d~ld=f3iw&C%~MbN9BnS-;jGa%zctJ|Y1 zz!&5J%#Mep#V#P}N<9Y+HF7k(T&cW1=zgTpF!GYV#+KG=OtCUbu$y$8CqAs!6X~bS 
z-iGyIwnTNjy`itHyR%knv00J;1+&u@(OYdorB>p1Oc=5QPe7u4u7OYpgPreAR$b9LN4Lb;z0yBk8Bz`uS z083!-ud&@=641tAU5l2(4Umgjt?^C#u(Hva)u(0R)+W$HW!wg#V>9T*4sIdahZjPN z!DR;DCep|8Wgo|U&A)uq*4IQ~M)&$Po4WE9;6@@(>_|x(2g8C^fi1DXjJ!wtIMj1t zw6KRnz@PwwUS0XZu#Amtts=IL{0A%|2oY`o%_kzk@@>{iVxi!y)aqGxAEr-(Zx|DI zB+|~7Jql49xCc*!)%7JSd<9H=vpxq#hp|Av3erk=1lW801=s;}jE%;64QK`Vk2Qgz zV=m}0+6u3N$3xmge8ewUE^IbDGAspn2aRJ-p>r_3#3$@N>_mf}fwmf70?*j4XYi~x zjSl~eA5Ck9opbfdP>K-i$HfE7*ywsj-Ng#1HTXO(s`uD=Q2- zBp+#ZZmlE(gV?BNVPDCglE1}Q!^xp5mR6%A`C~uOEIc(CD{LguIFiAZ5gWJ3CNgq< zhp&R0h;>aUF0)ym1BYPG9LbHZoAkt5{hxL3(e=!QUC21_tR1=w%v_af5jJRcj0W74 zD(j}X29L;mvFlhn;w&;r^}3hA*NhJjzCm|{QOcI*$b1b;!5ZO*SZAynNYP+__6z(Y zY$VSmR#_u0p%MYjjU^3N|Blpi!B%8#&~)MvEF=6qtN~FAcBxmNY|vP-D|kG3L3{vc zGOyo(($RCGUSsKoNgv>Zv0je68QOvm^(8Nm3U}|+cie;jgNQ-k@D@#)vtbPCwZE~e z*aIws*@Xs&__8KdT3Pgsk#E+W(PiX7o)Hd(l{cIQs2;ljZpW^HnXu$=>lOOCOV8jM zxJHmKPlD0YJM?v5q9^1;&dk@>F!}Y$(1Tv^y)YJ;A}b4;c=&`LYy{ebsjZy79Ym_4up zRv*L&@7=GlHp*W#NK3ej;cT$QeR{5?j5)C<(La_DE`-dH(MPZyc#o)uC^fG;p`UOl z@OJ13IRf+mq>POKc^SS1OB|N%5?+gJT(_i$-0)gvZ)2H>;@Anq>BOTZ`wjL$n~1mZ z{gqnbK3xIJYa&8yu%-RLUH}bZ8(9b9JUlvHtD>Aa#@~S%(LaM%zy<8ZqOz}`ODZOK z9<>1MUhEu|8HTOFwo&WB9*5b2eZ}*kF<9SbeaewFhV>@Dz6q`dC}tpxrC zejh!=6L|XU5RI16;q_oED&ht{S!;Uung8U*b#6ce!smu zOAvlaR?1anLrQwbeTej7sfc`;3;PKV!rBKV9+o%q}mcY4Y@U1D+C-GW{8&}cIQrJB8Nj5Y_jqe8IQbIh#ZD(0Fq#N& z%KF1!AOVnrBQ0Z;NC*2!P8g(TPlvx-6ktbR0X;Q;#&PlvzAm!mD#*B~L*4jTuGGj^7Y4)-A^i1oq_z#OU`QrDq{u_VPVXFr2f z*%w4oNB~Vm!{O0cV|*If9&m^whGvjP42O*+hB*flfIzUTruxBTxX6yB^>qAbkF)_# z#HjhMzib7#1F;mgg{ShjsBw~qM>6cT2F=crW{`_**S;jHW8ygwA^sgkHm4_d=q|*5 zSQ{f%WDdUo2V(LWgEe261?*S5=7OnIisl{K5`MTBSceJBKBOJ z?n3;8m4neBMg?z?S28>~XcM*}rybs^yO6g8ub^wx?|?zlS`nh}qAYifMwOQ~f&sAS zuuDi34~Ru0W}(W0dNpho`v&ybs-J)!;Zv~IcoJeTqG>!TD_~GK$SWt*Z;$}Y#|VuP z|5MOCN|F^`k0%p15r;%&$>GV#NuoL6ax#itnhW=VOUJ5^R~e>%jd$l7yft=~{Q*+I z4)*KXp&A2d4?h7CW@Xu_)JcM5@c;NP&VDVr) zT}d2FgA_m~2CFkl)&PA$QpCa_NA``$zhPy_*up7d87hR}nzWuEM%Ip4AM}Q2$M-W5 z&@mnr%)yLf{U6?iJQOIMuLeslU>Nk4h=i}$(fCQKBn>l+M?msqNzklbJqu4w7Myqm 
zwt-dRYwRMnl^vd!?tw$V#Ap>he?WI2&o)@sg5u$5$VZzjDlt4*f{Yb2HP)QDfN6;J z(MS`uQB?z~CKADO!(Ewr3bJr8pR73^1iM8vhV3zVJdh_=0iRV@)&Q71{&Z?J84dmg zZjt(=-ZFaOCorpcS2%n$q*EGzhsM5vjLCANbwkVRY2pktfcTH>WvDxmYa!|<_QA7} zd!X(YONng+OOtN_%OGX4CT+SWJTdGZ*#hduisffxi(qZR<9rfbLc*XuVlb)$+skXi zZe^!%5B@a~>bmkOpc6!I#Mf9>;_<;+L9Ah&evb6Ozwm*u251SMnrcAS3lC(nYSaiA zz5tdG*^r$eKF2B&`xDJEChR?4k#$)w_Mh#JudFc(?`mSkSU9qcCK z&F}y)TjVV8nynf$zK3c+c507ePHLI3pU94!KYkP}jtAuV_#Ut&@jWa?q&05U_pE0u z%LjVqdgk0)UU|ld9KaOd?2P&^N-V*`Jn14Mjq*u)|rV%E=~QzXk3CR0Uj6yBMrf!%<`g|9`% zX3w)n!Fe#V$O2S}Hv=o-BVa;VP3FTYu;%O$@aI;P+>%fN6~I#12|#U8a*(+63&6WgVi?tHon5uE+dXI>@{{D-8R~c zwT9V8V&oD{d|))1c@aAh(=c1^Gep;suhS`<@+zT?)S$4x@YHxC!*>vE!K;AISVJRG zRvjh@DT7_v4IoIcG+NBAz{8s;1~!bCoHfIafz?1_tPj49$`tl>l*26N(4z(HQHcnQIjz4A%W%P4MXKCoszpRWWa= zuRm2qf1v98?aDtNsb_wz&vN#mUf01tVwv$CaCl_2sKy>DIdrSOmv>=f_c$Z*t*#rY z-&9LtYb1@48cBns%Sxi_@YI$1{TN+=59t#-=;*A8rKq`1tG`}%hPOm7$CPWV@$FxV zXBkyKCAD6Xg%L((X<^r4iXihkqtv2%kJHl(4g>LF;aj!##59}%>d>>+X$-aE6b9=_ zz0xd3xlT`P&>0U`Z_#d-Nz`iWTTAMnX_T8aPF52uZtBMHSlC2Mt8HsE_>ZCbJf}OTT_y%am#GOEq8aff z*3o(bJLdbce5v~WOnS?H8>vr#=!0?`$V_dGd8pQm%*_VLrA~6hHpAP2GVmAZEiqh& z)}>!hwR8roN7_6|Yflas%r`-I8L21zDET)_LRcCyQBC4R)!$U_3Fky2G3^-bs$Vs- zMvb;j>su+=^yqBXM%`t!ot72LH2rZS)&*so?)0Oh%IqxA82-2ttb&Y**z*i zj4$Kl4}Oezrcxs$i^Ma~=z`?K$(wOnFESNQDPgLTrSOWN@tNnyO^HdgnJ zB$aQp2Vj{8OH%BJpCtJ}bL>>_fnBrPd>viXP-Dfx5=__49q|~F|PSXhCDn8a+sx>=KRk0Fe z?3klpwtLu|Dy_k{dSXdWtk)A+$xWL7_xih1yW&vIl+$LLWj#PWLv?a(sTFWKv}uTRXq;pg==+Z~gKZ?eqV8kz z6cswpNwo&pv_qp9r4g)FB{#m1xD~dqS5MwTnm`2s9*@&koD$9*i1Vt0a>D{EVRs$M2XH)Q$L~*ON>uV*G!zDX>>?e`{Cq2fNqdm~g zubS_#vO2Yz3t2V1cDuBwO*%DB(m^xX`D=9dTFJ6Xy4;}?giVqRUU++r7u;_c4cKI| znA_-SYc+?TG;*TKF=cDtUe=gi*<#p0bel}xXkE)Gy9PaPgLIb+1Lr@$XCt+8i9YpX zd1j-esqlO-06*xz3jKDn=KiflN36m*J@z#=2>G(}iO%pZRHU`)PFTdWG-j~$x z?gmzQl%z@4#MV8~j-RzVCu+S*l1_!@+N#-7M>AaW;~aviLo#Qd@Co266PKL9U5bseg+nvEG;mV`X$Y|M(a9ES8>*a{u5*xM`|8L>EvKZ?HfG}zsFfP zn1`>m1Bn65sT(3o^6RY8R?<)Es#>&qoOs+@Pl7Wh?#DvFN)d$++nOkqoFq1-D19f> 
zpO(I_k(_ecGvFCeHoP9c+fut2-WHE(R)HvRljg&fhC?PFLY&ngt=w3CCM*Cr`A3Zz zIe-+=7P5szm(VLqHBeXiqnc8?_Xi)qJJ$fSgk!4?=}FXV2hRK)cu( z)}AT{d^2pCVPdJ40sRr{fHl#Ejvexq8AFW+ zd>XY3RQQ>AgskuyNt<&jCE0MSv85FRPwV5l3nvobqXzU0>Zc8|HW@OQMkLILsW+e+ zfb0wP0r0ONe(I>Ht;0`ll2ltXTF^FGW6llmJy|!-!(koC?2XsTfQdn}M452yL{n6~ zU<=@B+jJTn&eVkYpigO1&&as)XCLL z=J2W1526JyGH}=A2YrnkY2desWVsG4pzfPdVw>SHh)>{y%;`CNAbcxvy5T6`%Pc(+ zJH>7yt~Wai`vi_9c1El5F6cAWM_9j3J(ZY(GwEOZI~Z=2&0d;%4&b{ zPKDKy2JxOb)4pE6H7rcGu7UX>P6a&}mKvUgeSqD88Hdv%3ZV9kT28VD)K-I54L`=e z1`Q6qve0Ok|UmRPV;^M_l)x4~YL-4FGtVdYiCmY1~a*69gwu<+kROISfzBBD;>LvSwZ z3onL5v5i0IQ^RG`@keCEVD({!$l1bbqpifnyxjq0Y`ATh4ZINCrm644s*vjgHFoLl zX2xXR&6%KKk`#y^{+JpX6OB_N05=V1O5O{0)12inRrBx}tTwqwqyfk8%er8@$zY;4 zAa^LzoJOj}N z`6jXr)bqh3!l7abu>~-#tOjh;czvF_OppRN3*Mbi;6JfJz0F!6J@kyvz*FLxiRk$r4WU94?IfE<6lhp!BSW(-l74LfScn?5z-s1`x6zz z*^~VP@p8Tjjl-vzEFjjDJOy<~)UzNn-W9>~cm|9lEJcUzME;h&M>U4wO~DB~54}dC zksX-80=d% zP*_(gl-Nt`8N4bHAC`$(lJUnPqG{kl&;!ylr}a6Rg6AgJfG)M@T4ETqiCsZW8k+CP zDzTEBYA`jd%$7)i^@E#Z%%*a~)FPpiyeWYvv%(-jQ+-Cp5({n4PV!q;$!I;R1z*eh z@ijIC1Z?Vl86hVHsPZDaLDfRPRv%0R6M>~fQ}Jok0kikf9cIkxGX}JdN=;%{7-BpX z`xuK!wAG{g5l6!qqgT{F5pA+_`pY|j>OZPEU|J3KW=CS-V34qeuwLLcqDS&l%#Yjz z62&(Vu^<=LpF2|@X!85)PTo^v5Esve$z$zJ#WZ>YB0wYXyfD(Ba^g&wOWyfF#urU6 zK6#AhKx{>p&!8-xj8>9;FsDA?iQp=+8@#o^WJU0tFfQx>a1uBMy-W)iQXS44L>N2X z0=tD}GANRq0{F#bQ}A8j0y09>25^oMT!}nDV(d=C0x~PIU_`;4x{iBbgRm!5_f%_j z;2E*L_-pJeXQn`zL>b@~W`bvjlOa#R{sO@hU&C}`9kDd*Wp)Hr9;T`W@6RkuodOvN zvcpMuH^q6!7&}edd6FMytVSXd0`^s&JB@6@g_#50T?Ajgj4< zZ_Krfh|^x(+WAPCl_R#rc7qzosW4VnhIo{DS8G<_2lD17m%#gqz|y9^5O0QVVrlTl z#Alqp98?~IF4Ww}8KLV`W#D;AK_NX=o5{A zb%_Or=vkatMt*SIT%mvTNjwqKN2+)b=7crJx?}OMtE_}s19~q~k5XM;drpW@QNalg z&XK`2l40o3&*0GEL$Euhb`BjUKSLHB&Z1u9BHpMLD^jKVP*sdO07jg1V{yMxKAn~u-kYrGzsg^ z`7b;xnK3G~sn!}9maEXN={_-Kv}Racx5uVXgpCiQ8%*%f5H(kZ^jDZA?Cm; zFjFimn4Z(LCL3vDMw8#eJHmCEhz1XU1!Ske2%*1NWjr@5CYr*t$uy<34?zM%n^c{! 
zYFHQ06nqBrVm8Pa42@^NSF;yj>#@PuDNgyoVuAvQec74hoh(Ta)WV#|B!S$>!Q)%k z=~L`5o{p~sL$W(TJ|IDMDbbDL(7=6E1(H+Z8Q?WoIw}f4G31wEzp=vj79;>OVUhMO0OkM}YBx4WOD8#D^5|CBE)R9u@lnW(5(GqXc=v$D4P?uw&UV@UcYp zAVjb%IZ~JrYG$zZP5M1p2F}J)B<|=5! z!bt_94)8nriT?t1BA>zh2_O_gO{mSASxg(0yz+~lKsRcFgkcHyd*dh zX<{R=clb5-D0|K5;Qvlxb!xBkJ~Z|SnRNCMzLZ)_P!Pxhjta(uS}O9vXaun*kpb(< zzF_Y*YCP1Tg28#uLB7negFV1ISbF}YO4V2lRuSCDT-h7ExdYj;bI^S5&-jr&Ctf(8 zMl=LIsS{s|Wq~E2o{6&#{1p2F3x$MOX{3RbHfNW>?bu^102#d@Vm>*~Nd$}lWMYT`&7&y3jok|V!A}uY z!ok8eV^LUrBw=h2egaOAm>(7jh5*)ylLH3LW2Zp=^%^Z~Cb)@+11rY-!3yjW6PdA6 zShfGf44^}>9{V5E0+WLz;CawSb{BY`2nNqMLO+4g0QX^Guv6?8wHe?*G#pC%kxLuB?`M(;1O795We7_zX@FQzL@)13`dM$p~%GOa^N|lPjYx z3tLRx3=uk>1y6#$n(=b_6-#fdAG?>mfaJifpg|&NY>vq^pcCv{FbFlDXcs$!+#=`4 zHs}ddloA<`*CA4e%Qa|)7_~w&p<0;wEn*II02~81V4_PZh440DV0;qH0=zrA1!iU+ zf% 0 + prompt = self.text_to_speech.get_custom_prompt(customization_id, prompt_id).get_result() + assert prompt["prompt_id"] == prompt_id + self.text_to_speech.delete_custom_prompt(customization_id, prompt_id) + + def test_speaker_models(self): + speaker_name = "Angelo" + + with open("resources/tts_audio.wav", "rb") as audio_file: + speaker_id = self.text_to_speech.create_speaker_model( + speaker_name, audio_file + ).get_result()["speaker_id"] + speaker_models = self.text_to_speech.list_speaker_models().get_result() + assert len(speaker_models) > 0 + speaker_model = self.text_to_speech.get_speaker_model(speaker_id).get_result() + self.text_to_speech.delete_speaker_model(speaker_id) + def test_synthesize_using_websocket(self): file = 'tongue_twister.wav' diff --git a/test/unit/test_speech_to_text_v1.py b/test/unit/test_speech_to_text_v1.py index 2ef2a470d..986c01e6b 100755 --- a/test/unit/test_speech_to_text_v1.py +++ b/test/unit/test_speech_to_text_v1.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# (C) Copyright IBM Corp. 2015, 2020. +# (C) Copyright IBM Corp. 2015, 2021. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -30,12 +30,12 @@ from ibm_watson.speech_to_text_v1 import * -service = SpeechToTextV1( +_service = SpeechToTextV1( authenticator=NoAuthAuthenticator() ) -base_url = 'https://api.us-south.speech-to-text.watson.cloud.ibm.com' -service.set_service_url(base_url) +_base_url = 'https://api.us-south.speech-to-text.watson.cloud.ibm.com' +_service.set_service_url(_base_url) ############################################################################## # Start of Service: Models @@ -62,8 +62,8 @@ def test_list_models_all_params(self): list_models() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/models') - mock_response = '{"models": [{"name": "name", "language": "language", "rate": 4, "url": "url", "supported_features": {"custom_language_model": false, "speaker_labels": true}, "description": "description"}]}' + url = self.preprocess_url(_base_url + '/v1/models') + mock_response = '{"models": [{"name": "name", "language": "language", "rate": 4, "url": "url", "supported_features": {"custom_language_model": false, "speaker_labels": true, "low_latency": false}, "description": "description"}]}' responses.add(responses.GET, url, body=mock_response, @@ -71,7 +71,7 @@ def test_list_models_all_params(self): status=200) # Invoke method - response = service.list_models() + response = _service.list_models() # Check for correct operation @@ -99,8 +99,8 @@ def test_get_model_all_params(self): get_model() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/models/ar-AR_BroadbandModel') - mock_response = '{"name": "name", "language": "language", "rate": 4, "url": "url", "supported_features": {"custom_language_model": false, "speaker_labels": true}, "description": "description"}' + url = self.preprocess_url(_base_url + '/v1/models/ar-AR_BroadbandModel') + mock_response = '{"name": "name", "language": "language", "rate": 4, "url": 
"url", "supported_features": {"custom_language_model": false, "speaker_labels": true, "low_latency": false}, "description": "description"}' responses.add(responses.GET, url, body=mock_response, @@ -111,7 +111,7 @@ def test_get_model_all_params(self): model_id = 'ar-AR_BroadbandModel' # Invoke method - response = service.get_model( + response = _service.get_model( model_id, headers={} ) @@ -127,8 +127,8 @@ def test_get_model_value_error(self): test_get_model_value_error() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/models/ar-AR_BroadbandModel') - mock_response = '{"name": "name", "language": "language", "rate": 4, "url": "url", "supported_features": {"custom_language_model": false, "speaker_labels": true}, "description": "description"}' + url = self.preprocess_url(_base_url + '/v1/models/ar-AR_BroadbandModel') + mock_response = '{"name": "name", "language": "language", "rate": 4, "url": "url", "supported_features": {"custom_language_model": false, "speaker_labels": true, "low_latency": false}, "description": "description"}' responses.add(responses.GET, url, body=mock_response, @@ -145,7 +145,7 @@ def test_get_model_value_error(self): for param in req_param_dict.keys(): req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): - service.get_model(**req_copy) + _service.get_model(**req_copy) @@ -179,7 +179,7 @@ def test_recognize_all_params(self): recognize() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/recognize') + url = self.preprocess_url(_base_url + '/v1/recognize') mock_response = '{"results": [{"final": false, "alternatives": [{"transcript": "transcript", "confidence": 0, "timestamps": ["timestamps"], "word_confidence": ["word_confidence"]}], "keywords_result": {"mapKey": [{"normalized_text": "normalized_text", "start_time": 10, "end_time": 8, "confidence": 0}]}, "word_alternatives": [{"start_time": 10, "end_time": 8, "alternatives": [{"confidence": 0, "word": 
"word"}]}], "end_of_utterance": "end_of_data"}], "result_index": 12, "speaker_labels": [{"from": 5, "to": 2, "speaker": 7, "confidence": 10, "final": false}], "processing_metrics": {"processed_audio": {"received": 8, "seen_by_engine": 14, "transcription": 13, "speaker_labels": 14}, "wall_clock_since_first_byte_received": 36, "periodic": true}, "audio_metrics": {"sampling_interval": 17, "accumulated": {"final": false, "end_time": 8, "signal_to_noise_ratio": 21, "speech_ratio": 12, "high_frequency_loss": 19, "direct_current_offset": [{"begin": 5, "end": 3, "count": 5}], "clipping_rate": [{"begin": 5, "end": 3, "count": 5}], "speech_level": [{"begin": 5, "end": 3, "count": 5}], "non_speech_level": [{"begin": 5, "end": 3, "count": 5}]}}, "warnings": ["warnings"]}' responses.add(responses.POST, url, @@ -213,9 +213,10 @@ def test_recognize_all_params(self): split_transcript_at_phrase_end = True speech_detector_sensitivity = 72.5 background_audio_suppression = 72.5 + low_latency = True # Invoke method - response = service.recognize( + response = _service.recognize( audio, content_type=content_type, model=model, @@ -241,6 +242,7 @@ def test_recognize_all_params(self): split_transcript_at_phrase_end=split_transcript_at_phrase_end, speech_detector_sensitivity=speech_detector_sensitivity, background_audio_suppression=background_audio_suppression, + low_latency=low_latency, headers={} ) @@ -273,6 +275,7 @@ def test_recognize_all_params(self): assert 'split_transcript_at_phrase_end={}'.format('true' if split_transcript_at_phrase_end else 'false') in query_string assert 'speech_detector_sensitivity={}'.format(speech_detector_sensitivity) in query_string assert 'background_audio_suppression={}'.format(background_audio_suppression) in query_string + assert 'low_latency={}'.format('true' if low_latency else 'false') in query_string # Validate body params @@ -282,7 +285,7 @@ def test_recognize_required_params(self): test_recognize_required_params() """ # Set up mock - url = 
self.preprocess_url(base_url + '/v1/recognize') + url = self.preprocess_url(_base_url + '/v1/recognize') mock_response = '{"results": [{"final": false, "alternatives": [{"transcript": "transcript", "confidence": 0, "timestamps": ["timestamps"], "word_confidence": ["word_confidence"]}], "keywords_result": {"mapKey": [{"normalized_text": "normalized_text", "start_time": 10, "end_time": 8, "confidence": 0}]}, "word_alternatives": [{"start_time": 10, "end_time": 8, "alternatives": [{"confidence": 0, "word": "word"}]}], "end_of_utterance": "end_of_data"}], "result_index": 12, "speaker_labels": [{"from": 5, "to": 2, "speaker": 7, "confidence": 10, "final": false}], "processing_metrics": {"processed_audio": {"received": 8, "seen_by_engine": 14, "transcription": 13, "speaker_labels": 14}, "wall_clock_since_first_byte_received": 36, "periodic": true}, "audio_metrics": {"sampling_interval": 17, "accumulated": {"final": false, "end_time": 8, "signal_to_noise_ratio": 21, "speech_ratio": 12, "high_frequency_loss": 19, "direct_current_offset": [{"begin": 5, "end": 3, "count": 5}], "clipping_rate": [{"begin": 5, "end": 3, "count": 5}], "speech_level": [{"begin": 5, "end": 3, "count": 5}], "non_speech_level": [{"begin": 5, "end": 3, "count": 5}]}}, "warnings": ["warnings"]}' responses.add(responses.POST, url, @@ -294,7 +297,7 @@ def test_recognize_required_params(self): audio = io.BytesIO(b'This is a mock file.').getvalue() # Invoke method - response = service.recognize( + response = _service.recognize( audio, headers={} ) @@ -311,7 +314,7 @@ def test_recognize_value_error(self): test_recognize_value_error() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/recognize') + url = self.preprocess_url(_base_url + '/v1/recognize') mock_response = '{"results": [{"final": false, "alternatives": [{"transcript": "transcript", "confidence": 0, "timestamps": ["timestamps"], "word_confidence": ["word_confidence"]}], "keywords_result": {"mapKey": [{"normalized_text": 
"normalized_text", "start_time": 10, "end_time": 8, "confidence": 0}]}, "word_alternatives": [{"start_time": 10, "end_time": 8, "alternatives": [{"confidence": 0, "word": "word"}]}], "end_of_utterance": "end_of_data"}], "result_index": 12, "speaker_labels": [{"from": 5, "to": 2, "speaker": 7, "confidence": 10, "final": false}], "processing_metrics": {"processed_audio": {"received": 8, "seen_by_engine": 14, "transcription": 13, "speaker_labels": 14}, "wall_clock_since_first_byte_received": 36, "periodic": true}, "audio_metrics": {"sampling_interval": 17, "accumulated": {"final": false, "end_time": 8, "signal_to_noise_ratio": 21, "speech_ratio": 12, "high_frequency_loss": 19, "direct_current_offset": [{"begin": 5, "end": 3, "count": 5}], "clipping_rate": [{"begin": 5, "end": 3, "count": 5}], "speech_level": [{"begin": 5, "end": 3, "count": 5}], "non_speech_level": [{"begin": 5, "end": 3, "count": 5}]}}, "warnings": ["warnings"]}' responses.add(responses.POST, url, @@ -329,7 +332,7 @@ def test_recognize_value_error(self): for param in req_param_dict.keys(): req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): - service.recognize(**req_copy) + _service.recognize(**req_copy) @@ -363,7 +366,7 @@ def test_register_callback_all_params(self): register_callback() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/register_callback') + url = self.preprocess_url(_base_url + '/v1/register_callback') mock_response = '{"status": "created", "url": "url"}' responses.add(responses.POST, url, @@ -376,7 +379,7 @@ def test_register_callback_all_params(self): user_secret = 'testString' # Invoke method - response = service.register_callback( + response = _service.register_callback( callback_url, user_secret=user_secret, headers={} @@ -398,7 +401,7 @@ def test_register_callback_required_params(self): test_register_callback_required_params() """ # Set up mock - url = self.preprocess_url(base_url + 
'/v1/register_callback') + url = self.preprocess_url(_base_url + '/v1/register_callback') mock_response = '{"status": "created", "url": "url"}' responses.add(responses.POST, url, @@ -410,7 +413,7 @@ def test_register_callback_required_params(self): callback_url = 'testString' # Invoke method - response = service.register_callback( + response = _service.register_callback( callback_url, headers={} ) @@ -430,7 +433,7 @@ def test_register_callback_value_error(self): test_register_callback_value_error() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/register_callback') + url = self.preprocess_url(_base_url + '/v1/register_callback') mock_response = '{"status": "created", "url": "url"}' responses.add(responses.POST, url, @@ -448,7 +451,7 @@ def test_register_callback_value_error(self): for param in req_param_dict.keys(): req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): - service.register_callback(**req_copy) + _service.register_callback(**req_copy) @@ -472,7 +475,7 @@ def test_unregister_callback_all_params(self): unregister_callback() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/unregister_callback') + url = self.preprocess_url(_base_url + '/v1/unregister_callback') responses.add(responses.POST, url, status=200) @@ -481,7 +484,7 @@ def test_unregister_callback_all_params(self): callback_url = 'testString' # Invoke method - response = service.unregister_callback( + response = _service.unregister_callback( callback_url, headers={} ) @@ -501,7 +504,7 @@ def test_unregister_callback_value_error(self): test_unregister_callback_value_error() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/unregister_callback') + url = self.preprocess_url(_base_url + '/v1/unregister_callback') responses.add(responses.POST, url, status=200) @@ -516,7 +519,7 @@ def test_unregister_callback_value_error(self): for param in req_param_dict.keys(): req_copy = {key:val if key is not 
param else None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): - service.unregister_callback(**req_copy) + _service.unregister_callback(**req_copy) @@ -540,7 +543,7 @@ def test_create_job_all_params(self): create_job() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/recognitions') + url = self.preprocess_url(_base_url + '/v1/recognitions') mock_response = '{"id": "id", "status": "waiting", "created": "created", "updated": "updated", "url": "url", "user_token": "user_token", "results": [{"results": [{"final": false, "alternatives": [{"transcript": "transcript", "confidence": 0, "timestamps": ["timestamps"], "word_confidence": ["word_confidence"]}], "keywords_result": {"mapKey": [{"normalized_text": "normalized_text", "start_time": 10, "end_time": 8, "confidence": 0}]}, "word_alternatives": [{"start_time": 10, "end_time": 8, "alternatives": [{"confidence": 0, "word": "word"}]}], "end_of_utterance": "end_of_data"}], "result_index": 12, "speaker_labels": [{"from": 5, "to": 2, "speaker": 7, "confidence": 10, "final": false}], "processing_metrics": {"processed_audio": {"received": 8, "seen_by_engine": 14, "transcription": 13, "speaker_labels": 14}, "wall_clock_since_first_byte_received": 36, "periodic": true}, "audio_metrics": {"sampling_interval": 17, "accumulated": {"final": false, "end_time": 8, "signal_to_noise_ratio": 21, "speech_ratio": 12, "high_frequency_loss": 19, "direct_current_offset": [{"begin": 5, "end": 3, "count": 5}], "clipping_rate": [{"begin": 5, "end": 3, "count": 5}], "speech_level": [{"begin": 5, "end": 3, "count": 5}], "non_speech_level": [{"begin": 5, "end": 3, "count": 5}]}}, "warnings": ["warnings"]}], "warnings": ["warnings"]}' responses.add(responses.POST, url, @@ -580,9 +583,10 @@ def test_create_job_all_params(self): split_transcript_at_phrase_end = True speech_detector_sensitivity = 72.5 background_audio_suppression = 72.5 + low_latency = True # Invoke method - response = service.create_job( + 
response = _service.create_job( audio, content_type=content_type, model=model, @@ -614,6 +618,7 @@ def test_create_job_all_params(self): split_transcript_at_phrase_end=split_transcript_at_phrase_end, speech_detector_sensitivity=speech_detector_sensitivity, background_audio_suppression=background_audio_suppression, + low_latency=low_latency, headers={} ) @@ -652,6 +657,7 @@ def test_create_job_all_params(self): assert 'split_transcript_at_phrase_end={}'.format('true' if split_transcript_at_phrase_end else 'false') in query_string assert 'speech_detector_sensitivity={}'.format(speech_detector_sensitivity) in query_string assert 'background_audio_suppression={}'.format(background_audio_suppression) in query_string + assert 'low_latency={}'.format('true' if low_latency else 'false') in query_string # Validate body params @@ -661,7 +667,7 @@ def test_create_job_required_params(self): test_create_job_required_params() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/recognitions') + url = self.preprocess_url(_base_url + '/v1/recognitions') mock_response = '{"id": "id", "status": "waiting", "created": "created", "updated": "updated", "url": "url", "user_token": "user_token", "results": [{"results": [{"final": false, "alternatives": [{"transcript": "transcript", "confidence": 0, "timestamps": ["timestamps"], "word_confidence": ["word_confidence"]}], "keywords_result": {"mapKey": [{"normalized_text": "normalized_text", "start_time": 10, "end_time": 8, "confidence": 0}]}, "word_alternatives": [{"start_time": 10, "end_time": 8, "alternatives": [{"confidence": 0, "word": "word"}]}], "end_of_utterance": "end_of_data"}], "result_index": 12, "speaker_labels": [{"from": 5, "to": 2, "speaker": 7, "confidence": 10, "final": false}], "processing_metrics": {"processed_audio": {"received": 8, "seen_by_engine": 14, "transcription": 13, "speaker_labels": 14}, "wall_clock_since_first_byte_received": 36, "periodic": true}, "audio_metrics": {"sampling_interval": 17, 
"accumulated": {"final": false, "end_time": 8, "signal_to_noise_ratio": 21, "speech_ratio": 12, "high_frequency_loss": 19, "direct_current_offset": [{"begin": 5, "end": 3, "count": 5}], "clipping_rate": [{"begin": 5, "end": 3, "count": 5}], "speech_level": [{"begin": 5, "end": 3, "count": 5}], "non_speech_level": [{"begin": 5, "end": 3, "count": 5}]}}, "warnings": ["warnings"]}], "warnings": ["warnings"]}' responses.add(responses.POST, url, @@ -673,7 +679,7 @@ def test_create_job_required_params(self): audio = io.BytesIO(b'This is a mock file.').getvalue() # Invoke method - response = service.create_job( + response = _service.create_job( audio, headers={} ) @@ -690,7 +696,7 @@ def test_create_job_value_error(self): test_create_job_value_error() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/recognitions') + url = self.preprocess_url(_base_url + '/v1/recognitions') mock_response = '{"id": "id", "status": "waiting", "created": "created", "updated": "updated", "url": "url", "user_token": "user_token", "results": [{"results": [{"final": false, "alternatives": [{"transcript": "transcript", "confidence": 0, "timestamps": ["timestamps"], "word_confidence": ["word_confidence"]}], "keywords_result": {"mapKey": [{"normalized_text": "normalized_text", "start_time": 10, "end_time": 8, "confidence": 0}]}, "word_alternatives": [{"start_time": 10, "end_time": 8, "alternatives": [{"confidence": 0, "word": "word"}]}], "end_of_utterance": "end_of_data"}], "result_index": 12, "speaker_labels": [{"from": 5, "to": 2, "speaker": 7, "confidence": 10, "final": false}], "processing_metrics": {"processed_audio": {"received": 8, "seen_by_engine": 14, "transcription": 13, "speaker_labels": 14}, "wall_clock_since_first_byte_received": 36, "periodic": true}, "audio_metrics": {"sampling_interval": 17, "accumulated": {"final": false, "end_time": 8, "signal_to_noise_ratio": 21, "speech_ratio": 12, "high_frequency_loss": 19, "direct_current_offset": [{"begin": 5, "end": 3, "count": 
5}], "clipping_rate": [{"begin": 5, "end": 3, "count": 5}], "speech_level": [{"begin": 5, "end": 3, "count": 5}], "non_speech_level": [{"begin": 5, "end": 3, "count": 5}]}}, "warnings": ["warnings"]}], "warnings": ["warnings"]}' responses.add(responses.POST, url, @@ -708,7 +714,7 @@ def test_create_job_value_error(self): for param in req_param_dict.keys(): req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): - service.create_job(**req_copy) + _service.create_job(**req_copy) @@ -732,7 +738,7 @@ def test_check_jobs_all_params(self): check_jobs() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/recognitions') + url = self.preprocess_url(_base_url + '/v1/recognitions') mock_response = '{"recognitions": [{"id": "id", "status": "waiting", "created": "created", "updated": "updated", "url": "url", "user_token": "user_token", "results": [{"results": [{"final": false, "alternatives": [{"transcript": "transcript", "confidence": 0, "timestamps": ["timestamps"], "word_confidence": ["word_confidence"]}], "keywords_result": {"mapKey": [{"normalized_text": "normalized_text", "start_time": 10, "end_time": 8, "confidence": 0}]}, "word_alternatives": [{"start_time": 10, "end_time": 8, "alternatives": [{"confidence": 0, "word": "word"}]}], "end_of_utterance": "end_of_data"}], "result_index": 12, "speaker_labels": [{"from": 5, "to": 2, "speaker": 7, "confidence": 10, "final": false}], "processing_metrics": {"processed_audio": {"received": 8, "seen_by_engine": 14, "transcription": 13, "speaker_labels": 14}, "wall_clock_since_first_byte_received": 36, "periodic": true}, "audio_metrics": {"sampling_interval": 17, "accumulated": {"final": false, "end_time": 8, "signal_to_noise_ratio": 21, "speech_ratio": 12, "high_frequency_loss": 19, "direct_current_offset": [{"begin": 5, "end": 3, "count": 5}], "clipping_rate": [{"begin": 5, "end": 3, "count": 5}], "speech_level": [{"begin": 5, "end": 3, "count": 5}], 
"non_speech_level": [{"begin": 5, "end": 3, "count": 5}]}}, "warnings": ["warnings"]}], "warnings": ["warnings"]}]}' responses.add(responses.GET, url, @@ -741,7 +747,7 @@ def test_check_jobs_all_params(self): status=200) # Invoke method - response = service.check_jobs() + response = _service.check_jobs() # Check for correct operation @@ -769,7 +775,7 @@ def test_check_job_all_params(self): check_job() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/recognitions/testString') + url = self.preprocess_url(_base_url + '/v1/recognitions/testString') mock_response = '{"id": "id", "status": "waiting", "created": "created", "updated": "updated", "url": "url", "user_token": "user_token", "results": [{"results": [{"final": false, "alternatives": [{"transcript": "transcript", "confidence": 0, "timestamps": ["timestamps"], "word_confidence": ["word_confidence"]}], "keywords_result": {"mapKey": [{"normalized_text": "normalized_text", "start_time": 10, "end_time": 8, "confidence": 0}]}, "word_alternatives": [{"start_time": 10, "end_time": 8, "alternatives": [{"confidence": 0, "word": "word"}]}], "end_of_utterance": "end_of_data"}], "result_index": 12, "speaker_labels": [{"from": 5, "to": 2, "speaker": 7, "confidence": 10, "final": false}], "processing_metrics": {"processed_audio": {"received": 8, "seen_by_engine": 14, "transcription": 13, "speaker_labels": 14}, "wall_clock_since_first_byte_received": 36, "periodic": true}, "audio_metrics": {"sampling_interval": 17, "accumulated": {"final": false, "end_time": 8, "signal_to_noise_ratio": 21, "speech_ratio": 12, "high_frequency_loss": 19, "direct_current_offset": [{"begin": 5, "end": 3, "count": 5}], "clipping_rate": [{"begin": 5, "end": 3, "count": 5}], "speech_level": [{"begin": 5, "end": 3, "count": 5}], "non_speech_level": [{"begin": 5, "end": 3, "count": 5}]}}, "warnings": ["warnings"]}], "warnings": ["warnings"]}' responses.add(responses.GET, url, @@ -781,7 +787,7 @@ def test_check_job_all_params(self): id = 
'testString' # Invoke method - response = service.check_job( + response = _service.check_job( id, headers={} ) @@ -797,7 +803,7 @@ def test_check_job_value_error(self): test_check_job_value_error() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/recognitions/testString') + url = self.preprocess_url(_base_url + '/v1/recognitions/testString') mock_response = '{"id": "id", "status": "waiting", "created": "created", "updated": "updated", "url": "url", "user_token": "user_token", "results": [{"results": [{"final": false, "alternatives": [{"transcript": "transcript", "confidence": 0, "timestamps": ["timestamps"], "word_confidence": ["word_confidence"]}], "keywords_result": {"mapKey": [{"normalized_text": "normalized_text", "start_time": 10, "end_time": 8, "confidence": 0}]}, "word_alternatives": [{"start_time": 10, "end_time": 8, "alternatives": [{"confidence": 0, "word": "word"}]}], "end_of_utterance": "end_of_data"}], "result_index": 12, "speaker_labels": [{"from": 5, "to": 2, "speaker": 7, "confidence": 10, "final": false}], "processing_metrics": {"processed_audio": {"received": 8, "seen_by_engine": 14, "transcription": 13, "speaker_labels": 14}, "wall_clock_since_first_byte_received": 36, "periodic": true}, "audio_metrics": {"sampling_interval": 17, "accumulated": {"final": false, "end_time": 8, "signal_to_noise_ratio": 21, "speech_ratio": 12, "high_frequency_loss": 19, "direct_current_offset": [{"begin": 5, "end": 3, "count": 5}], "clipping_rate": [{"begin": 5, "end": 3, "count": 5}], "speech_level": [{"begin": 5, "end": 3, "count": 5}], "non_speech_level": [{"begin": 5, "end": 3, "count": 5}]}}, "warnings": ["warnings"]}], "warnings": ["warnings"]}' responses.add(responses.GET, url, @@ -815,7 +821,7 @@ def test_check_job_value_error(self): for param in req_param_dict.keys(): req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): - service.check_job(**req_copy) + 
_service.check_job(**req_copy) @@ -839,7 +845,7 @@ def test_delete_job_all_params(self): delete_job() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/recognitions/testString') + url = self.preprocess_url(_base_url + '/v1/recognitions/testString') responses.add(responses.DELETE, url, status=204) @@ -848,7 +854,7 @@ def test_delete_job_all_params(self): id = 'testString' # Invoke method - response = service.delete_job( + response = _service.delete_job( id, headers={} ) @@ -864,7 +870,7 @@ def test_delete_job_value_error(self): test_delete_job_value_error() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/recognitions/testString') + url = self.preprocess_url(_base_url + '/v1/recognitions/testString') responses.add(responses.DELETE, url, status=204) @@ -879,7 +885,7 @@ def test_delete_job_value_error(self): for param in req_param_dict.keys(): req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): - service.delete_job(**req_copy) + _service.delete_job(**req_copy) @@ -913,7 +919,7 @@ def test_create_language_model_all_params(self): create_language_model() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/customizations') + url = self.preprocess_url(_base_url + '/v1/customizations') mock_response = '{"customization_id": "customization_id", "created": "created", "updated": "updated", "language": "language", "dialect": "dialect", "versions": ["versions"], "owner": "owner", "name": "name", "description": "description", "base_model_name": "base_model_name", "status": "pending", "progress": 8, "error": "error", "warnings": "warnings"}' responses.add(responses.POST, url, @@ -928,7 +934,7 @@ def test_create_language_model_all_params(self): description = 'testString' # Invoke method - response = service.create_language_model( + response = _service.create_language_model( name, base_model_name, dialect=dialect, @@ -953,7 +959,7 @@ def 
test_create_language_model_value_error(self): test_create_language_model_value_error() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/customizations') + url = self.preprocess_url(_base_url + '/v1/customizations') mock_response = '{"customization_id": "customization_id", "created": "created", "updated": "updated", "language": "language", "dialect": "dialect", "versions": ["versions"], "owner": "owner", "name": "name", "description": "description", "base_model_name": "base_model_name", "status": "pending", "progress": 8, "error": "error", "warnings": "warnings"}' responses.add(responses.POST, url, @@ -975,7 +981,7 @@ def test_create_language_model_value_error(self): for param in req_param_dict.keys(): req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): - service.create_language_model(**req_copy) + _service.create_language_model(**req_copy) @@ -999,7 +1005,7 @@ def test_list_language_models_all_params(self): list_language_models() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/customizations') + url = self.preprocess_url(_base_url + '/v1/customizations') mock_response = '{"customizations": [{"customization_id": "customization_id", "created": "created", "updated": "updated", "language": "language", "dialect": "dialect", "versions": ["versions"], "owner": "owner", "name": "name", "description": "description", "base_model_name": "base_model_name", "status": "pending", "progress": 8, "error": "error", "warnings": "warnings"}]}' responses.add(responses.GET, url, @@ -1011,7 +1017,7 @@ def test_list_language_models_all_params(self): language = 'ar-AR' # Invoke method - response = service.list_language_models( + response = _service.list_language_models( language=language, headers={} ) @@ -1031,7 +1037,7 @@ def test_list_language_models_required_params(self): test_list_language_models_required_params() """ # Set up mock - url = self.preprocess_url(base_url + 
'/v1/customizations') + url = self.preprocess_url(_base_url + '/v1/customizations') mock_response = '{"customizations": [{"customization_id": "customization_id", "created": "created", "updated": "updated", "language": "language", "dialect": "dialect", "versions": ["versions"], "owner": "owner", "name": "name", "description": "description", "base_model_name": "base_model_name", "status": "pending", "progress": 8, "error": "error", "warnings": "warnings"}]}' responses.add(responses.GET, url, @@ -1040,7 +1046,7 @@ def test_list_language_models_required_params(self): status=200) # Invoke method - response = service.list_language_models() + response = _service.list_language_models() # Check for correct operation @@ -1068,7 +1074,7 @@ def test_get_language_model_all_params(self): get_language_model() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/customizations/testString') + url = self.preprocess_url(_base_url + '/v1/customizations/testString') mock_response = '{"customization_id": "customization_id", "created": "created", "updated": "updated", "language": "language", "dialect": "dialect", "versions": ["versions"], "owner": "owner", "name": "name", "description": "description", "base_model_name": "base_model_name", "status": "pending", "progress": 8, "error": "error", "warnings": "warnings"}' responses.add(responses.GET, url, @@ -1080,7 +1086,7 @@ def test_get_language_model_all_params(self): customization_id = 'testString' # Invoke method - response = service.get_language_model( + response = _service.get_language_model( customization_id, headers={} ) @@ -1096,7 +1102,7 @@ def test_get_language_model_value_error(self): test_get_language_model_value_error() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/customizations/testString') + url = self.preprocess_url(_base_url + '/v1/customizations/testString') mock_response = '{"customization_id": "customization_id", "created": "created", "updated": "updated", "language": "language", "dialect": 
"dialect", "versions": ["versions"], "owner": "owner", "name": "name", "description": "description", "base_model_name": "base_model_name", "status": "pending", "progress": 8, "error": "error", "warnings": "warnings"}' responses.add(responses.GET, url, @@ -1114,7 +1120,7 @@ def test_get_language_model_value_error(self): for param in req_param_dict.keys(): req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): - service.get_language_model(**req_copy) + _service.get_language_model(**req_copy) @@ -1138,7 +1144,7 @@ def test_delete_language_model_all_params(self): delete_language_model() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/customizations/testString') + url = self.preprocess_url(_base_url + '/v1/customizations/testString') responses.add(responses.DELETE, url, status=200) @@ -1147,7 +1153,7 @@ def test_delete_language_model_all_params(self): customization_id = 'testString' # Invoke method - response = service.delete_language_model( + response = _service.delete_language_model( customization_id, headers={} ) @@ -1163,7 +1169,7 @@ def test_delete_language_model_value_error(self): test_delete_language_model_value_error() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/customizations/testString') + url = self.preprocess_url(_base_url + '/v1/customizations/testString') responses.add(responses.DELETE, url, status=200) @@ -1178,7 +1184,7 @@ def test_delete_language_model_value_error(self): for param in req_param_dict.keys(): req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): - service.delete_language_model(**req_copy) + _service.delete_language_model(**req_copy) @@ -1202,7 +1208,7 @@ def test_train_language_model_all_params(self): train_language_model() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/customizations/testString/train') + url = self.preprocess_url(_base_url + 
'/v1/customizations/testString/train') mock_response = '{"warnings": [{"code": "invalid_audio_files", "message": "message"}]}' responses.add(responses.POST, url, @@ -1216,7 +1222,7 @@ def test_train_language_model_all_params(self): customization_weight = 72.5 # Invoke method - response = service.train_language_model( + response = _service.train_language_model( customization_id, word_type_to_add=word_type_to_add, customization_weight=customization_weight, @@ -1239,7 +1245,7 @@ def test_train_language_model_required_params(self): test_train_language_model_required_params() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/customizations/testString/train') + url = self.preprocess_url(_base_url + '/v1/customizations/testString/train') mock_response = '{"warnings": [{"code": "invalid_audio_files", "message": "message"}]}' responses.add(responses.POST, url, @@ -1251,7 +1257,7 @@ def test_train_language_model_required_params(self): customization_id = 'testString' # Invoke method - response = service.train_language_model( + response = _service.train_language_model( customization_id, headers={} ) @@ -1267,7 +1273,7 @@ def test_train_language_model_value_error(self): test_train_language_model_value_error() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/customizations/testString/train') + url = self.preprocess_url(_base_url + '/v1/customizations/testString/train') mock_response = '{"warnings": [{"code": "invalid_audio_files", "message": "message"}]}' responses.add(responses.POST, url, @@ -1285,7 +1291,7 @@ def test_train_language_model_value_error(self): for param in req_param_dict.keys(): req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): - service.train_language_model(**req_copy) + _service.train_language_model(**req_copy) @@ -1309,7 +1315,7 @@ def test_reset_language_model_all_params(self): reset_language_model() """ # Set up mock - url = self.preprocess_url(base_url + 
'/v1/customizations/testString/reset') + url = self.preprocess_url(_base_url + '/v1/customizations/testString/reset') responses.add(responses.POST, url, status=200) @@ -1318,7 +1324,7 @@ def test_reset_language_model_all_params(self): customization_id = 'testString' # Invoke method - response = service.reset_language_model( + response = _service.reset_language_model( customization_id, headers={} ) @@ -1334,7 +1340,7 @@ def test_reset_language_model_value_error(self): test_reset_language_model_value_error() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/customizations/testString/reset') + url = self.preprocess_url(_base_url + '/v1/customizations/testString/reset') responses.add(responses.POST, url, status=200) @@ -1349,7 +1355,7 @@ def test_reset_language_model_value_error(self): for param in req_param_dict.keys(): req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): - service.reset_language_model(**req_copy) + _service.reset_language_model(**req_copy) @@ -1373,7 +1379,7 @@ def test_upgrade_language_model_all_params(self): upgrade_language_model() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/customizations/testString/upgrade_model') + url = self.preprocess_url(_base_url + '/v1/customizations/testString/upgrade_model') responses.add(responses.POST, url, status=200) @@ -1382,7 +1388,7 @@ def test_upgrade_language_model_all_params(self): customization_id = 'testString' # Invoke method - response = service.upgrade_language_model( + response = _service.upgrade_language_model( customization_id, headers={} ) @@ -1398,7 +1404,7 @@ def test_upgrade_language_model_value_error(self): test_upgrade_language_model_value_error() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/customizations/testString/upgrade_model') + url = self.preprocess_url(_base_url + '/v1/customizations/testString/upgrade_model') responses.add(responses.POST, url, status=200) @@ -1413,7 +1419,7 
@@ def test_upgrade_language_model_value_error(self): for param in req_param_dict.keys(): req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): - service.upgrade_language_model(**req_copy) + _service.upgrade_language_model(**req_copy) @@ -1447,7 +1453,7 @@ def test_list_corpora_all_params(self): list_corpora() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/customizations/testString/corpora') + url = self.preprocess_url(_base_url + '/v1/customizations/testString/corpora') mock_response = '{"corpora": [{"name": "name", "total_words": 11, "out_of_vocabulary_words": 23, "status": "analyzed", "error": "error"}]}' responses.add(responses.GET, url, @@ -1459,7 +1465,7 @@ def test_list_corpora_all_params(self): customization_id = 'testString' # Invoke method - response = service.list_corpora( + response = _service.list_corpora( customization_id, headers={} ) @@ -1475,7 +1481,7 @@ def test_list_corpora_value_error(self): test_list_corpora_value_error() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/customizations/testString/corpora') + url = self.preprocess_url(_base_url + '/v1/customizations/testString/corpora') mock_response = '{"corpora": [{"name": "name", "total_words": 11, "out_of_vocabulary_words": 23, "status": "analyzed", "error": "error"}]}' responses.add(responses.GET, url, @@ -1493,7 +1499,7 @@ def test_list_corpora_value_error(self): for param in req_param_dict.keys(): req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): - service.list_corpora(**req_copy) + _service.list_corpora(**req_copy) @@ -1517,7 +1523,7 @@ def test_add_corpus_all_params(self): add_corpus() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/customizations/testString/corpora/testString') + url = self.preprocess_url(_base_url + '/v1/customizations/testString/corpora/testString') responses.add(responses.POST, url, 
status=201) @@ -1529,7 +1535,7 @@ def test_add_corpus_all_params(self): allow_overwrite = True # Invoke method - response = service.add_corpus( + response = _service.add_corpus( customization_id, corpus_name, corpus_file, @@ -1552,7 +1558,7 @@ def test_add_corpus_required_params(self): test_add_corpus_required_params() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/customizations/testString/corpora/testString') + url = self.preprocess_url(_base_url + '/v1/customizations/testString/corpora/testString') responses.add(responses.POST, url, status=201) @@ -1563,7 +1569,7 @@ def test_add_corpus_required_params(self): corpus_file = io.BytesIO(b'This is a mock file.').getvalue() # Invoke method - response = service.add_corpus( + response = _service.add_corpus( customization_id, corpus_name, corpus_file, @@ -1581,7 +1587,7 @@ def test_add_corpus_value_error(self): test_add_corpus_value_error() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/customizations/testString/corpora/testString') + url = self.preprocess_url(_base_url + '/v1/customizations/testString/corpora/testString') responses.add(responses.POST, url, status=201) @@ -1600,7 +1606,7 @@ def test_add_corpus_value_error(self): for param in req_param_dict.keys(): req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): - service.add_corpus(**req_copy) + _service.add_corpus(**req_copy) @@ -1624,7 +1630,7 @@ def test_get_corpus_all_params(self): get_corpus() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/customizations/testString/corpora/testString') + url = self.preprocess_url(_base_url + '/v1/customizations/testString/corpora/testString') mock_response = '{"name": "name", "total_words": 11, "out_of_vocabulary_words": 23, "status": "analyzed", "error": "error"}' responses.add(responses.GET, url, @@ -1637,7 +1643,7 @@ def test_get_corpus_all_params(self): corpus_name = 'testString' # Invoke method - response = 
service.get_corpus( + response = _service.get_corpus( customization_id, corpus_name, headers={} @@ -1654,7 +1660,7 @@ def test_get_corpus_value_error(self): test_get_corpus_value_error() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/customizations/testString/corpora/testString') + url = self.preprocess_url(_base_url + '/v1/customizations/testString/corpora/testString') mock_response = '{"name": "name", "total_words": 11, "out_of_vocabulary_words": 23, "status": "analyzed", "error": "error"}' responses.add(responses.GET, url, @@ -1674,7 +1680,7 @@ def test_get_corpus_value_error(self): for param in req_param_dict.keys(): req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): - service.get_corpus(**req_copy) + _service.get_corpus(**req_copy) @@ -1698,7 +1704,7 @@ def test_delete_corpus_all_params(self): delete_corpus() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/customizations/testString/corpora/testString') + url = self.preprocess_url(_base_url + '/v1/customizations/testString/corpora/testString') responses.add(responses.DELETE, url, status=200) @@ -1708,7 +1714,7 @@ def test_delete_corpus_all_params(self): corpus_name = 'testString' # Invoke method - response = service.delete_corpus( + response = _service.delete_corpus( customization_id, corpus_name, headers={} @@ -1725,7 +1731,7 @@ def test_delete_corpus_value_error(self): test_delete_corpus_value_error() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/customizations/testString/corpora/testString') + url = self.preprocess_url(_base_url + '/v1/customizations/testString/corpora/testString') responses.add(responses.DELETE, url, status=200) @@ -1742,7 +1748,7 @@ def test_delete_corpus_value_error(self): for param in req_param_dict.keys(): req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): - service.delete_corpus(**req_copy) + 
_service.delete_corpus(**req_copy) @@ -1776,7 +1782,7 @@ def test_list_words_all_params(self): list_words() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/customizations/testString/words') + url = self.preprocess_url(_base_url + '/v1/customizations/testString/words') mock_response = '{"words": [{"word": "word", "sounds_like": ["sounds_like"], "display_as": "display_as", "count": 5, "source": ["source"], "error": [{"element": "element"}]}]}' responses.add(responses.GET, url, @@ -1790,7 +1796,7 @@ def test_list_words_all_params(self): sort = 'alphabetical' # Invoke method - response = service.list_words( + response = _service.list_words( customization_id, word_type=word_type, sort=sort, @@ -1813,7 +1819,7 @@ def test_list_words_required_params(self): test_list_words_required_params() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/customizations/testString/words') + url = self.preprocess_url(_base_url + '/v1/customizations/testString/words') mock_response = '{"words": [{"word": "word", "sounds_like": ["sounds_like"], "display_as": "display_as", "count": 5, "source": ["source"], "error": [{"element": "element"}]}]}' responses.add(responses.GET, url, @@ -1825,7 +1831,7 @@ def test_list_words_required_params(self): customization_id = 'testString' # Invoke method - response = service.list_words( + response = _service.list_words( customization_id, headers={} ) @@ -1841,7 +1847,7 @@ def test_list_words_value_error(self): test_list_words_value_error() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/customizations/testString/words') + url = self.preprocess_url(_base_url + '/v1/customizations/testString/words') mock_response = '{"words": [{"word": "word", "sounds_like": ["sounds_like"], "display_as": "display_as", "count": 5, "source": ["source"], "error": [{"element": "element"}]}]}' responses.add(responses.GET, url, @@ -1859,7 +1865,7 @@ def test_list_words_value_error(self): for param in req_param_dict.keys(): req_copy = {key:val 
if key is not param else None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): - service.list_words(**req_copy) + _service.list_words(**req_copy) @@ -1883,7 +1889,7 @@ def test_add_words_all_params(self): add_words() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/customizations/testString/words') + url = self.preprocess_url(_base_url + '/v1/customizations/testString/words') responses.add(responses.POST, url, status=201) @@ -1899,7 +1905,7 @@ def test_add_words_all_params(self): words = [custom_word_model] # Invoke method - response = service.add_words( + response = _service.add_words( customization_id, words, headers={} @@ -1919,7 +1925,7 @@ def test_add_words_value_error(self): test_add_words_value_error() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/customizations/testString/words') + url = self.preprocess_url(_base_url + '/v1/customizations/testString/words') responses.add(responses.POST, url, status=201) @@ -1942,7 +1948,7 @@ def test_add_words_value_error(self): for param in req_param_dict.keys(): req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): - service.add_words(**req_copy) + _service.add_words(**req_copy) @@ -1966,7 +1972,7 @@ def test_add_word_all_params(self): add_word() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/customizations/testString/words/testString') + url = self.preprocess_url(_base_url + '/v1/customizations/testString/words/testString') responses.add(responses.PUT, url, status=201) @@ -1979,7 +1985,7 @@ def test_add_word_all_params(self): display_as = 'testString' # Invoke method - response = service.add_word( + response = _service.add_word( customization_id, word_name, word=word, @@ -2004,7 +2010,7 @@ def test_add_word_value_error(self): test_add_word_value_error() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/customizations/testString/words/testString') + url = 
self.preprocess_url(_base_url + '/v1/customizations/testString/words/testString') responses.add(responses.PUT, url, status=201) @@ -2024,7 +2030,7 @@ def test_add_word_value_error(self): for param in req_param_dict.keys(): req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): - service.add_word(**req_copy) + _service.add_word(**req_copy) @@ -2048,7 +2054,7 @@ def test_get_word_all_params(self): get_word() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/customizations/testString/words/testString') + url = self.preprocess_url(_base_url + '/v1/customizations/testString/words/testString') mock_response = '{"word": "word", "sounds_like": ["sounds_like"], "display_as": "display_as", "count": 5, "source": ["source"], "error": [{"element": "element"}]}' responses.add(responses.GET, url, @@ -2061,7 +2067,7 @@ def test_get_word_all_params(self): word_name = 'testString' # Invoke method - response = service.get_word( + response = _service.get_word( customization_id, word_name, headers={} @@ -2078,7 +2084,7 @@ def test_get_word_value_error(self): test_get_word_value_error() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/customizations/testString/words/testString') + url = self.preprocess_url(_base_url + '/v1/customizations/testString/words/testString') mock_response = '{"word": "word", "sounds_like": ["sounds_like"], "display_as": "display_as", "count": 5, "source": ["source"], "error": [{"element": "element"}]}' responses.add(responses.GET, url, @@ -2098,7 +2104,7 @@ def test_get_word_value_error(self): for param in req_param_dict.keys(): req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): - service.get_word(**req_copy) + _service.get_word(**req_copy) @@ -2122,7 +2128,7 @@ def test_delete_word_all_params(self): delete_word() """ # Set up mock - url = self.preprocess_url(base_url + 
'/v1/customizations/testString/words/testString') + url = self.preprocess_url(_base_url + '/v1/customizations/testString/words/testString') responses.add(responses.DELETE, url, status=200) @@ -2132,7 +2138,7 @@ def test_delete_word_all_params(self): word_name = 'testString' # Invoke method - response = service.delete_word( + response = _service.delete_word( customization_id, word_name, headers={} @@ -2149,7 +2155,7 @@ def test_delete_word_value_error(self): test_delete_word_value_error() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/customizations/testString/words/testString') + url = self.preprocess_url(_base_url + '/v1/customizations/testString/words/testString') responses.add(responses.DELETE, url, status=200) @@ -2166,7 +2172,7 @@ def test_delete_word_value_error(self): for param in req_param_dict.keys(): req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): - service.delete_word(**req_copy) + _service.delete_word(**req_copy) @@ -2200,7 +2206,7 @@ def test_list_grammars_all_params(self): list_grammars() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/customizations/testString/grammars') + url = self.preprocess_url(_base_url + '/v1/customizations/testString/grammars') mock_response = '{"grammars": [{"name": "name", "out_of_vocabulary_words": 23, "status": "analyzed", "error": "error"}]}' responses.add(responses.GET, url, @@ -2212,7 +2218,7 @@ def test_list_grammars_all_params(self): customization_id = 'testString' # Invoke method - response = service.list_grammars( + response = _service.list_grammars( customization_id, headers={} ) @@ -2228,7 +2234,7 @@ def test_list_grammars_value_error(self): test_list_grammars_value_error() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/customizations/testString/grammars') + url = self.preprocess_url(_base_url + '/v1/customizations/testString/grammars') mock_response = '{"grammars": [{"name": "name", 
"out_of_vocabulary_words": 23, "status": "analyzed", "error": "error"}]}' responses.add(responses.GET, url, @@ -2246,7 +2252,7 @@ def test_list_grammars_value_error(self): for param in req_param_dict.keys(): req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): - service.list_grammars(**req_copy) + _service.list_grammars(**req_copy) @@ -2270,7 +2276,7 @@ def test_add_grammar_all_params(self): add_grammar() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/customizations/testString/grammars/testString') + url = self.preprocess_url(_base_url + '/v1/customizations/testString/grammars/testString') responses.add(responses.POST, url, status=201) @@ -2283,7 +2289,7 @@ def test_add_grammar_all_params(self): allow_overwrite = True # Invoke method - response = service.add_grammar( + response = _service.add_grammar( customization_id, grammar_name, grammar_file, @@ -2308,7 +2314,7 @@ def test_add_grammar_required_params(self): test_add_grammar_required_params() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/customizations/testString/grammars/testString') + url = self.preprocess_url(_base_url + '/v1/customizations/testString/grammars/testString') responses.add(responses.POST, url, status=201) @@ -2320,7 +2326,7 @@ def test_add_grammar_required_params(self): content_type = 'application/srgs' # Invoke method - response = service.add_grammar( + response = _service.add_grammar( customization_id, grammar_name, grammar_file, @@ -2340,7 +2346,7 @@ def test_add_grammar_value_error(self): test_add_grammar_value_error() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/customizations/testString/grammars/testString') + url = self.preprocess_url(_base_url + '/v1/customizations/testString/grammars/testString') responses.add(responses.POST, url, status=201) @@ -2361,7 +2367,7 @@ def test_add_grammar_value_error(self): for param in req_param_dict.keys(): req_copy = {key:val if key is 
not param else None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): - service.add_grammar(**req_copy) + _service.add_grammar(**req_copy) @@ -2385,7 +2391,7 @@ def test_get_grammar_all_params(self): get_grammar() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/customizations/testString/grammars/testString') + url = self.preprocess_url(_base_url + '/v1/customizations/testString/grammars/testString') mock_response = '{"name": "name", "out_of_vocabulary_words": 23, "status": "analyzed", "error": "error"}' responses.add(responses.GET, url, @@ -2398,7 +2404,7 @@ def test_get_grammar_all_params(self): grammar_name = 'testString' # Invoke method - response = service.get_grammar( + response = _service.get_grammar( customization_id, grammar_name, headers={} @@ -2415,7 +2421,7 @@ def test_get_grammar_value_error(self): test_get_grammar_value_error() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/customizations/testString/grammars/testString') + url = self.preprocess_url(_base_url + '/v1/customizations/testString/grammars/testString') mock_response = '{"name": "name", "out_of_vocabulary_words": 23, "status": "analyzed", "error": "error"}' responses.add(responses.GET, url, @@ -2435,7 +2441,7 @@ def test_get_grammar_value_error(self): for param in req_param_dict.keys(): req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): - service.get_grammar(**req_copy) + _service.get_grammar(**req_copy) @@ -2459,7 +2465,7 @@ def test_delete_grammar_all_params(self): delete_grammar() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/customizations/testString/grammars/testString') + url = self.preprocess_url(_base_url + '/v1/customizations/testString/grammars/testString') responses.add(responses.DELETE, url, status=200) @@ -2469,7 +2475,7 @@ def test_delete_grammar_all_params(self): grammar_name = 'testString' # Invoke method - response = 
service.delete_grammar( + response = _service.delete_grammar( customization_id, grammar_name, headers={} @@ -2486,7 +2492,7 @@ def test_delete_grammar_value_error(self): test_delete_grammar_value_error() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/customizations/testString/grammars/testString') + url = self.preprocess_url(_base_url + '/v1/customizations/testString/grammars/testString') responses.add(responses.DELETE, url, status=200) @@ -2503,7 +2509,7 @@ def test_delete_grammar_value_error(self): for param in req_param_dict.keys(): req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): - service.delete_grammar(**req_copy) + _service.delete_grammar(**req_copy) @@ -2537,7 +2543,7 @@ def test_create_acoustic_model_all_params(self): create_acoustic_model() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/acoustic_customizations') + url = self.preprocess_url(_base_url + '/v1/acoustic_customizations') mock_response = '{"customization_id": "customization_id", "created": "created", "updated": "updated", "language": "language", "versions": ["versions"], "owner": "owner", "name": "name", "description": "description", "base_model_name": "base_model_name", "status": "pending", "progress": 8, "warnings": "warnings"}' responses.add(responses.POST, url, @@ -2551,7 +2557,7 @@ def test_create_acoustic_model_all_params(self): description = 'testString' # Invoke method - response = service.create_acoustic_model( + response = _service.create_acoustic_model( name, base_model_name, description=description, @@ -2574,7 +2580,7 @@ def test_create_acoustic_model_value_error(self): test_create_acoustic_model_value_error() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/acoustic_customizations') + url = self.preprocess_url(_base_url + '/v1/acoustic_customizations') mock_response = '{"customization_id": "customization_id", "created": "created", "updated": "updated", "language": 
"language", "versions": ["versions"], "owner": "owner", "name": "name", "description": "description", "base_model_name": "base_model_name", "status": "pending", "progress": 8, "warnings": "warnings"}' responses.add(responses.POST, url, @@ -2595,7 +2601,7 @@ def test_create_acoustic_model_value_error(self): for param in req_param_dict.keys(): req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): - service.create_acoustic_model(**req_copy) + _service.create_acoustic_model(**req_copy) @@ -2619,7 +2625,7 @@ def test_list_acoustic_models_all_params(self): list_acoustic_models() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/acoustic_customizations') + url = self.preprocess_url(_base_url + '/v1/acoustic_customizations') mock_response = '{"customizations": [{"customization_id": "customization_id", "created": "created", "updated": "updated", "language": "language", "versions": ["versions"], "owner": "owner", "name": "name", "description": "description", "base_model_name": "base_model_name", "status": "pending", "progress": 8, "warnings": "warnings"}]}' responses.add(responses.GET, url, @@ -2631,7 +2637,7 @@ def test_list_acoustic_models_all_params(self): language = 'ar-AR' # Invoke method - response = service.list_acoustic_models( + response = _service.list_acoustic_models( language=language, headers={} ) @@ -2651,7 +2657,7 @@ def test_list_acoustic_models_required_params(self): test_list_acoustic_models_required_params() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/acoustic_customizations') + url = self.preprocess_url(_base_url + '/v1/acoustic_customizations') mock_response = '{"customizations": [{"customization_id": "customization_id", "created": "created", "updated": "updated", "language": "language", "versions": ["versions"], "owner": "owner", "name": "name", "description": "description", "base_model_name": "base_model_name", "status": "pending", "progress": 8, 
"warnings": "warnings"}]}' responses.add(responses.GET, url, @@ -2660,7 +2666,7 @@ def test_list_acoustic_models_required_params(self): status=200) # Invoke method - response = service.list_acoustic_models() + response = _service.list_acoustic_models() # Check for correct operation @@ -2688,7 +2694,7 @@ def test_get_acoustic_model_all_params(self): get_acoustic_model() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/acoustic_customizations/testString') + url = self.preprocess_url(_base_url + '/v1/acoustic_customizations/testString') mock_response = '{"customization_id": "customization_id", "created": "created", "updated": "updated", "language": "language", "versions": ["versions"], "owner": "owner", "name": "name", "description": "description", "base_model_name": "base_model_name", "status": "pending", "progress": 8, "warnings": "warnings"}' responses.add(responses.GET, url, @@ -2700,7 +2706,7 @@ def test_get_acoustic_model_all_params(self): customization_id = 'testString' # Invoke method - response = service.get_acoustic_model( + response = _service.get_acoustic_model( customization_id, headers={} ) @@ -2716,7 +2722,7 @@ def test_get_acoustic_model_value_error(self): test_get_acoustic_model_value_error() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/acoustic_customizations/testString') + url = self.preprocess_url(_base_url + '/v1/acoustic_customizations/testString') mock_response = '{"customization_id": "customization_id", "created": "created", "updated": "updated", "language": "language", "versions": ["versions"], "owner": "owner", "name": "name", "description": "description", "base_model_name": "base_model_name", "status": "pending", "progress": 8, "warnings": "warnings"}' responses.add(responses.GET, url, @@ -2734,7 +2740,7 @@ def test_get_acoustic_model_value_error(self): for param in req_param_dict.keys(): req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): - 
service.get_acoustic_model(**req_copy) + _service.get_acoustic_model(**req_copy) @@ -2758,7 +2764,7 @@ def test_delete_acoustic_model_all_params(self): delete_acoustic_model() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/acoustic_customizations/testString') + url = self.preprocess_url(_base_url + '/v1/acoustic_customizations/testString') responses.add(responses.DELETE, url, status=200) @@ -2767,7 +2773,7 @@ def test_delete_acoustic_model_all_params(self): customization_id = 'testString' # Invoke method - response = service.delete_acoustic_model( + response = _service.delete_acoustic_model( customization_id, headers={} ) @@ -2783,7 +2789,7 @@ def test_delete_acoustic_model_value_error(self): test_delete_acoustic_model_value_error() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/acoustic_customizations/testString') + url = self.preprocess_url(_base_url + '/v1/acoustic_customizations/testString') responses.add(responses.DELETE, url, status=200) @@ -2798,7 +2804,7 @@ def test_delete_acoustic_model_value_error(self): for param in req_param_dict.keys(): req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): - service.delete_acoustic_model(**req_copy) + _service.delete_acoustic_model(**req_copy) @@ -2822,7 +2828,7 @@ def test_train_acoustic_model_all_params(self): train_acoustic_model() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/acoustic_customizations/testString/train') + url = self.preprocess_url(_base_url + '/v1/acoustic_customizations/testString/train') mock_response = '{"warnings": [{"code": "invalid_audio_files", "message": "message"}]}' responses.add(responses.POST, url, @@ -2835,7 +2841,7 @@ def test_train_acoustic_model_all_params(self): custom_language_model_id = 'testString' # Invoke method - response = service.train_acoustic_model( + response = _service.train_acoustic_model( customization_id, custom_language_model_id=custom_language_model_id, 
headers={} @@ -2856,7 +2862,7 @@ def test_train_acoustic_model_required_params(self): test_train_acoustic_model_required_params() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/acoustic_customizations/testString/train') + url = self.preprocess_url(_base_url + '/v1/acoustic_customizations/testString/train') mock_response = '{"warnings": [{"code": "invalid_audio_files", "message": "message"}]}' responses.add(responses.POST, url, @@ -2868,7 +2874,7 @@ def test_train_acoustic_model_required_params(self): customization_id = 'testString' # Invoke method - response = service.train_acoustic_model( + response = _service.train_acoustic_model( customization_id, headers={} ) @@ -2884,7 +2890,7 @@ def test_train_acoustic_model_value_error(self): test_train_acoustic_model_value_error() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/acoustic_customizations/testString/train') + url = self.preprocess_url(_base_url + '/v1/acoustic_customizations/testString/train') mock_response = '{"warnings": [{"code": "invalid_audio_files", "message": "message"}]}' responses.add(responses.POST, url, @@ -2902,7 +2908,7 @@ def test_train_acoustic_model_value_error(self): for param in req_param_dict.keys(): req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): - service.train_acoustic_model(**req_copy) + _service.train_acoustic_model(**req_copy) @@ -2926,7 +2932,7 @@ def test_reset_acoustic_model_all_params(self): reset_acoustic_model() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/acoustic_customizations/testString/reset') + url = self.preprocess_url(_base_url + '/v1/acoustic_customizations/testString/reset') responses.add(responses.POST, url, status=200) @@ -2935,7 +2941,7 @@ def test_reset_acoustic_model_all_params(self): customization_id = 'testString' # Invoke method - response = service.reset_acoustic_model( + response = _service.reset_acoustic_model( customization_id, headers={} ) 
@@ -2951,7 +2957,7 @@ def test_reset_acoustic_model_value_error(self): test_reset_acoustic_model_value_error() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/acoustic_customizations/testString/reset') + url = self.preprocess_url(_base_url + '/v1/acoustic_customizations/testString/reset') responses.add(responses.POST, url, status=200) @@ -2966,7 +2972,7 @@ def test_reset_acoustic_model_value_error(self): for param in req_param_dict.keys(): req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): - service.reset_acoustic_model(**req_copy) + _service.reset_acoustic_model(**req_copy) @@ -2990,7 +2996,7 @@ def test_upgrade_acoustic_model_all_params(self): upgrade_acoustic_model() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/acoustic_customizations/testString/upgrade_model') + url = self.preprocess_url(_base_url + '/v1/acoustic_customizations/testString/upgrade_model') responses.add(responses.POST, url, status=200) @@ -3001,7 +3007,7 @@ def test_upgrade_acoustic_model_all_params(self): force = True # Invoke method - response = service.upgrade_acoustic_model( + response = _service.upgrade_acoustic_model( customization_id, custom_language_model_id=custom_language_model_id, force=force, @@ -3024,7 +3030,7 @@ def test_upgrade_acoustic_model_required_params(self): test_upgrade_acoustic_model_required_params() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/acoustic_customizations/testString/upgrade_model') + url = self.preprocess_url(_base_url + '/v1/acoustic_customizations/testString/upgrade_model') responses.add(responses.POST, url, status=200) @@ -3033,7 +3039,7 @@ def test_upgrade_acoustic_model_required_params(self): customization_id = 'testString' # Invoke method - response = service.upgrade_acoustic_model( + response = _service.upgrade_acoustic_model( customization_id, headers={} ) @@ -3049,7 +3055,7 @@ def test_upgrade_acoustic_model_value_error(self): 
test_upgrade_acoustic_model_value_error() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/acoustic_customizations/testString/upgrade_model') + url = self.preprocess_url(_base_url + '/v1/acoustic_customizations/testString/upgrade_model') responses.add(responses.POST, url, status=200) @@ -3064,7 +3070,7 @@ def test_upgrade_acoustic_model_value_error(self): for param in req_param_dict.keys(): req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): - service.upgrade_acoustic_model(**req_copy) + _service.upgrade_acoustic_model(**req_copy) @@ -3098,7 +3104,7 @@ def test_list_audio_all_params(self): list_audio() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/acoustic_customizations/testString/audio') + url = self.preprocess_url(_base_url + '/v1/acoustic_customizations/testString/audio') mock_response = '{"total_minutes_of_audio": 22, "audio": [{"duration": 8, "name": "name", "details": {"type": "audio", "codec": "codec", "frequency": 9, "compression": "zip"}, "status": "ok"}]}' responses.add(responses.GET, url, @@ -3110,7 +3116,7 @@ def test_list_audio_all_params(self): customization_id = 'testString' # Invoke method - response = service.list_audio( + response = _service.list_audio( customization_id, headers={} ) @@ -3126,7 +3132,7 @@ def test_list_audio_value_error(self): test_list_audio_value_error() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/acoustic_customizations/testString/audio') + url = self.preprocess_url(_base_url + '/v1/acoustic_customizations/testString/audio') mock_response = '{"total_minutes_of_audio": 22, "audio": [{"duration": 8, "name": "name", "details": {"type": "audio", "codec": "codec", "frequency": 9, "compression": "zip"}, "status": "ok"}]}' responses.add(responses.GET, url, @@ -3144,7 +3150,7 @@ def test_list_audio_value_error(self): for param in req_param_dict.keys(): req_copy = {key:val if key is not param else None for (key,val) in 
req_param_dict.items()} with pytest.raises(ValueError): - service.list_audio(**req_copy) + _service.list_audio(**req_copy) @@ -3168,7 +3174,7 @@ def test_add_audio_all_params(self): add_audio() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/acoustic_customizations/testString/audio/testString') + url = self.preprocess_url(_base_url + '/v1/acoustic_customizations/testString/audio/testString') responses.add(responses.POST, url, status=201) @@ -3182,7 +3188,7 @@ def test_add_audio_all_params(self): allow_overwrite = True # Invoke method - response = service.add_audio( + response = _service.add_audio( customization_id, audio_name, audio_resource, @@ -3208,7 +3214,7 @@ def test_add_audio_required_params(self): test_add_audio_required_params() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/acoustic_customizations/testString/audio/testString') + url = self.preprocess_url(_base_url + '/v1/acoustic_customizations/testString/audio/testString') responses.add(responses.POST, url, status=201) @@ -3219,7 +3225,7 @@ def test_add_audio_required_params(self): audio_resource = io.BytesIO(b'This is a mock file.').getvalue() # Invoke method - response = service.add_audio( + response = _service.add_audio( customization_id, audio_name, audio_resource, @@ -3238,7 +3244,7 @@ def test_add_audio_value_error(self): test_add_audio_value_error() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/acoustic_customizations/testString/audio/testString') + url = self.preprocess_url(_base_url + '/v1/acoustic_customizations/testString/audio/testString') responses.add(responses.POST, url, status=201) @@ -3257,7 +3263,7 @@ def test_add_audio_value_error(self): for param in req_param_dict.keys(): req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): - service.add_audio(**req_copy) + _service.add_audio(**req_copy) @@ -3281,7 +3287,7 @@ def test_get_audio_all_params(self): get_audio() """ # Set up 
mock - url = self.preprocess_url(base_url + '/v1/acoustic_customizations/testString/audio/testString') + url = self.preprocess_url(_base_url + '/v1/acoustic_customizations/testString/audio/testString') mock_response = '{"duration": 8, "name": "name", "details": {"type": "audio", "codec": "codec", "frequency": 9, "compression": "zip"}, "status": "ok", "container": {"duration": 8, "name": "name", "details": {"type": "audio", "codec": "codec", "frequency": 9, "compression": "zip"}, "status": "ok"}, "audio": [{"duration": 8, "name": "name", "details": {"type": "audio", "codec": "codec", "frequency": 9, "compression": "zip"}, "status": "ok"}]}' responses.add(responses.GET, url, @@ -3294,7 +3300,7 @@ def test_get_audio_all_params(self): audio_name = 'testString' # Invoke method - response = service.get_audio( + response = _service.get_audio( customization_id, audio_name, headers={} @@ -3311,7 +3317,7 @@ def test_get_audio_value_error(self): test_get_audio_value_error() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/acoustic_customizations/testString/audio/testString') + url = self.preprocess_url(_base_url + '/v1/acoustic_customizations/testString/audio/testString') mock_response = '{"duration": 8, "name": "name", "details": {"type": "audio", "codec": "codec", "frequency": 9, "compression": "zip"}, "status": "ok", "container": {"duration": 8, "name": "name", "details": {"type": "audio", "codec": "codec", "frequency": 9, "compression": "zip"}, "status": "ok"}, "audio": [{"duration": 8, "name": "name", "details": {"type": "audio", "codec": "codec", "frequency": 9, "compression": "zip"}, "status": "ok"}]}' responses.add(responses.GET, url, @@ -3331,7 +3337,7 @@ def test_get_audio_value_error(self): for param in req_param_dict.keys(): req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): - service.get_audio(**req_copy) + _service.get_audio(**req_copy) @@ -3355,7 +3361,7 @@ def 
test_delete_audio_all_params(self): delete_audio() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/acoustic_customizations/testString/audio/testString') + url = self.preprocess_url(_base_url + '/v1/acoustic_customizations/testString/audio/testString') responses.add(responses.DELETE, url, status=200) @@ -3365,7 +3371,7 @@ def test_delete_audio_all_params(self): audio_name = 'testString' # Invoke method - response = service.delete_audio( + response = _service.delete_audio( customization_id, audio_name, headers={} @@ -3382,7 +3388,7 @@ def test_delete_audio_value_error(self): test_delete_audio_value_error() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/acoustic_customizations/testString/audio/testString') + url = self.preprocess_url(_base_url + '/v1/acoustic_customizations/testString/audio/testString') responses.add(responses.DELETE, url, status=200) @@ -3399,7 +3405,7 @@ def test_delete_audio_value_error(self): for param in req_param_dict.keys(): req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): - service.delete_audio(**req_copy) + _service.delete_audio(**req_copy) @@ -3433,7 +3439,7 @@ def test_delete_user_data_all_params(self): delete_user_data() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/user_data') + url = self.preprocess_url(_base_url + '/v1/user_data') responses.add(responses.DELETE, url, status=200) @@ -3442,7 +3448,7 @@ def test_delete_user_data_all_params(self): customer_id = 'testString' # Invoke method - response = service.delete_user_data( + response = _service.delete_user_data( customer_id, headers={} ) @@ -3462,7 +3468,7 @@ def test_delete_user_data_value_error(self): test_delete_user_data_value_error() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/user_data') + url = self.preprocess_url(_base_url + '/v1/user_data') responses.add(responses.DELETE, url, status=200) @@ -3477,7 +3483,7 @@ def 
test_delete_user_data_value_error(self): for param in req_param_dict.keys(): req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): - service.delete_user_data(**req_copy) + _service.delete_user_data(**req_copy) @@ -4531,6 +4537,7 @@ def test_speech_model_serialization(self): supported_features_model = {} # SupportedFeatures supported_features_model['custom_language_model'] = True supported_features_model['speaker_labels'] = True + supported_features_model['low_latency'] = True # Construct a json representation of a SpeechModel model speech_model_model_json = {} @@ -4571,6 +4578,7 @@ def test_speech_models_serialization(self): supported_features_model = {} # SupportedFeatures supported_features_model['custom_language_model'] = True supported_features_model['speaker_labels'] = True + supported_features_model['low_latency'] = True speech_model_model = {} # SpeechModel speech_model_model['name'] = 'testString' @@ -4803,6 +4811,7 @@ def test_supported_features_serialization(self): supported_features_model_json = {} supported_features_model_json['custom_language_model'] = True supported_features_model_json['speaker_labels'] = True + supported_features_model_json['low_latency'] = True # Construct a model instance of SupportedFeatures by calling from_dict on the json representation supported_features_model = SupportedFeatures.from_dict(supported_features_model_json) diff --git a/test/unit/test_text_to_speech_v1.py b/test/unit/test_text_to_speech_v1.py index ddb5a74d0..4af54f88f 100644 --- a/test/unit/test_text_to_speech_v1.py +++ b/test/unit/test_text_to_speech_v1.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# (C) Copyright IBM Corp. 2015, 2020. +# (C) Copyright IBM Corp. 2021. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -19,21 +19,23 @@ from ibm_cloud_sdk_core.authenticators.no_auth_authenticator import NoAuthAuthenticator import inspect +import io import json import pytest import re import requests import responses +import tempfile import urllib from ibm_watson.text_to_speech_v1 import * -service = TextToSpeechV1( +_service = TextToSpeechV1( authenticator=NoAuthAuthenticator() ) -base_url = 'https://api.us-south.text-to-speech.watson.cloud.ibm.com' -service.set_service_url(base_url) +_base_url = 'https://api.us-south.text-to-speech.watson.cloud.ibm.com' +_service.set_service_url(_base_url) ############################################################################## # Start of Service: Voices @@ -60,8 +62,8 @@ def test_list_voices_all_params(self): list_voices() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/voices') - mock_response = '{"voices": [{"url": "url", "gender": "gender", "name": "name", "language": "language", "description": "description", "customizable": true, "supported_features": {"custom_pronunciation": true, "voice_transformation": true}, "customization": {"customization_id": "customization_id", "name": "name", "language": "language", "owner": "owner", "created": "created", "last_modified": "last_modified", "description": "description", "words": [{"word": "word", "translation": "translation", "part_of_speech": "Dosi"}]}}]}' + url = self.preprocess_url(_base_url + '/v1/voices') + mock_response = '{"voices": [{"url": "url", "gender": "gender", "name": "name", "language": "language", "description": "description", "customizable": true, "supported_features": {"custom_pronunciation": true, "voice_transformation": true}, "customization": {"customization_id": "customization_id", "name": "name", "language": "language", "owner": "owner", "created": "created", "last_modified": "last_modified", "description": "description", "words": [{"word": "word", "translation": "translation", "part_of_speech": "Dosi"}], "prompts": [{"prompt": "prompt", "prompt_id": 
"prompt_id", "status": "status", "error": "error", "speaker_id": "speaker_id"}]}}]}' responses.add(responses.GET, url, body=mock_response, @@ -69,7 +71,7 @@ def test_list_voices_all_params(self): status=200) # Invoke method - response = service.list_voices() + response = _service.list_voices() # Check for correct operation @@ -97,8 +99,8 @@ def test_get_voice_all_params(self): get_voice() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/voices/ar-AR_OmarVoice') - mock_response = '{"url": "url", "gender": "gender", "name": "name", "language": "language", "description": "description", "customizable": true, "supported_features": {"custom_pronunciation": true, "voice_transformation": true}, "customization": {"customization_id": "customization_id", "name": "name", "language": "language", "owner": "owner", "created": "created", "last_modified": "last_modified", "description": "description", "words": [{"word": "word", "translation": "translation", "part_of_speech": "Dosi"}]}}' + url = self.preprocess_url(_base_url + '/v1/voices/ar-AR_OmarVoice') + mock_response = '{"url": "url", "gender": "gender", "name": "name", "language": "language", "description": "description", "customizable": true, "supported_features": {"custom_pronunciation": true, "voice_transformation": true}, "customization": {"customization_id": "customization_id", "name": "name", "language": "language", "owner": "owner", "created": "created", "last_modified": "last_modified", "description": "description", "words": [{"word": "word", "translation": "translation", "part_of_speech": "Dosi"}], "prompts": [{"prompt": "prompt", "prompt_id": "prompt_id", "status": "status", "error": "error", "speaker_id": "speaker_id"}]}}' responses.add(responses.GET, url, body=mock_response, @@ -110,7 +112,7 @@ def test_get_voice_all_params(self): customization_id = 'testString' # Invoke method - response = service.get_voice( + response = _service.get_voice( voice, customization_id=customization_id, headers={} @@ 
-131,8 +133,8 @@ def test_get_voice_required_params(self): test_get_voice_required_params() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/voices/ar-AR_OmarVoice') - mock_response = '{"url": "url", "gender": "gender", "name": "name", "language": "language", "description": "description", "customizable": true, "supported_features": {"custom_pronunciation": true, "voice_transformation": true}, "customization": {"customization_id": "customization_id", "name": "name", "language": "language", "owner": "owner", "created": "created", "last_modified": "last_modified", "description": "description", "words": [{"word": "word", "translation": "translation", "part_of_speech": "Dosi"}]}}' + url = self.preprocess_url(_base_url + '/v1/voices/ar-AR_OmarVoice') + mock_response = '{"url": "url", "gender": "gender", "name": "name", "language": "language", "description": "description", "customizable": true, "supported_features": {"custom_pronunciation": true, "voice_transformation": true}, "customization": {"customization_id": "customization_id", "name": "name", "language": "language", "owner": "owner", "created": "created", "last_modified": "last_modified", "description": "description", "words": [{"word": "word", "translation": "translation", "part_of_speech": "Dosi"}], "prompts": [{"prompt": "prompt", "prompt_id": "prompt_id", "status": "status", "error": "error", "speaker_id": "speaker_id"}]}}' responses.add(responses.GET, url, body=mock_response, @@ -143,7 +145,7 @@ def test_get_voice_required_params(self): voice = 'ar-AR_OmarVoice' # Invoke method - response = service.get_voice( + response = _service.get_voice( voice, headers={} ) @@ -159,8 +161,8 @@ def test_get_voice_value_error(self): test_get_voice_value_error() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/voices/ar-AR_OmarVoice') - mock_response = '{"url": "url", "gender": "gender", "name": "name", "language": "language", "description": "description", "customizable": true, "supported_features": 
{"custom_pronunciation": true, "voice_transformation": true}, "customization": {"customization_id": "customization_id", "name": "name", "language": "language", "owner": "owner", "created": "created", "last_modified": "last_modified", "description": "description", "words": [{"word": "word", "translation": "translation", "part_of_speech": "Dosi"}]}}' + url = self.preprocess_url(_base_url + '/v1/voices/ar-AR_OmarVoice') + mock_response = '{"url": "url", "gender": "gender", "name": "name", "language": "language", "description": "description", "customizable": true, "supported_features": {"custom_pronunciation": true, "voice_transformation": true}, "customization": {"customization_id": "customization_id", "name": "name", "language": "language", "owner": "owner", "created": "created", "last_modified": "last_modified", "description": "description", "words": [{"word": "word", "translation": "translation", "part_of_speech": "Dosi"}], "prompts": [{"prompt": "prompt", "prompt_id": "prompt_id", "status": "status", "error": "error", "speaker_id": "speaker_id"}]}}' responses.add(responses.GET, url, body=mock_response, @@ -177,7 +179,7 @@ def test_get_voice_value_error(self): for param in req_param_dict.keys(): req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): - service.get_voice(**req_copy) + _service.get_voice(**req_copy) @@ -211,7 +213,7 @@ def test_synthesize_all_params(self): synthesize() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/synthesize') + url = self.preprocess_url(_base_url + '/v1/synthesize') mock_response = 'This is a mock binary response.' 
responses.add(responses.POST, url, @@ -226,7 +228,7 @@ def test_synthesize_all_params(self): customization_id = 'testString' # Invoke method - response = service.synthesize( + response = _service.synthesize( text, accept=accept, voice=voice, @@ -253,7 +255,7 @@ def test_synthesize_required_params(self): test_synthesize_required_params() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/synthesize') + url = self.preprocess_url(_base_url + '/v1/synthesize') mock_response = 'This is a mock binary response.' responses.add(responses.POST, url, @@ -265,7 +267,7 @@ def test_synthesize_required_params(self): text = 'testString' # Invoke method - response = service.synthesize( + response = _service.synthesize( text, headers={} ) @@ -284,7 +286,7 @@ def test_synthesize_value_error(self): test_synthesize_value_error() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/synthesize') + url = self.preprocess_url(_base_url + '/v1/synthesize') mock_response = 'This is a mock binary response.' 
responses.add(responses.POST, url, @@ -302,7 +304,7 @@ def test_synthesize_value_error(self): for param in req_param_dict.keys(): req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): - service.synthesize(**req_copy) + _service.synthesize(**req_copy) @@ -336,7 +338,7 @@ def test_get_pronunciation_all_params(self): get_pronunciation() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/pronunciation') + url = self.preprocess_url(_base_url + '/v1/pronunciation') mock_response = '{"pronunciation": "pronunciation"}' responses.add(responses.GET, url, @@ -351,7 +353,7 @@ def test_get_pronunciation_all_params(self): customization_id = 'testString' # Invoke method - response = service.get_pronunciation( + response = _service.get_pronunciation( text, voice=voice, format=format, @@ -377,7 +379,7 @@ def test_get_pronunciation_required_params(self): test_get_pronunciation_required_params() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/pronunciation') + url = self.preprocess_url(_base_url + '/v1/pronunciation') mock_response = '{"pronunciation": "pronunciation"}' responses.add(responses.GET, url, @@ -389,7 +391,7 @@ def test_get_pronunciation_required_params(self): text = 'testString' # Invoke method - response = service.get_pronunciation( + response = _service.get_pronunciation( text, headers={} ) @@ -409,7 +411,7 @@ def test_get_pronunciation_value_error(self): test_get_pronunciation_value_error() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/pronunciation') + url = self.preprocess_url(_base_url + '/v1/pronunciation') mock_response = '{"pronunciation": "pronunciation"}' responses.add(responses.GET, url, @@ -427,7 +429,7 @@ def test_get_pronunciation_value_error(self): for param in req_param_dict.keys(): req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): - service.get_pronunciation(**req_copy) + 
_service.get_pronunciation(**req_copy) @@ -461,8 +463,8 @@ def test_create_custom_model_all_params(self): create_custom_model() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/customizations') - mock_response = '{"customization_id": "customization_id", "name": "name", "language": "language", "owner": "owner", "created": "created", "last_modified": "last_modified", "description": "description", "words": [{"word": "word", "translation": "translation", "part_of_speech": "Dosi"}]}' + url = self.preprocess_url(_base_url + '/v1/customizations') + mock_response = '{"customization_id": "customization_id", "name": "name", "language": "language", "owner": "owner", "created": "created", "last_modified": "last_modified", "description": "description", "words": [{"word": "word", "translation": "translation", "part_of_speech": "Dosi"}], "prompts": [{"prompt": "prompt", "prompt_id": "prompt_id", "status": "status", "error": "error", "speaker_id": "speaker_id"}]}' responses.add(responses.POST, url, body=mock_response, @@ -471,11 +473,11 @@ def test_create_custom_model_all_params(self): # Set up parameter values name = 'testString' - language = 'de-DE' + language = 'ar-MS' description = 'testString' # Invoke method - response = service.create_custom_model( + response = _service.create_custom_model( name, language=language, description=description, @@ -488,7 +490,7 @@ def test_create_custom_model_all_params(self): # Validate body params req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) assert req_body['name'] == 'testString' - assert req_body['language'] == 'de-DE' + assert req_body['language'] == 'ar-MS' assert req_body['description'] == 'testString' @@ -498,8 +500,8 @@ def test_create_custom_model_value_error(self): test_create_custom_model_value_error() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/customizations') - mock_response = '{"customization_id": "customization_id", "name": "name", "language": "language", "owner": "owner", 
"created": "created", "last_modified": "last_modified", "description": "description", "words": [{"word": "word", "translation": "translation", "part_of_speech": "Dosi"}]}' + url = self.preprocess_url(_base_url + '/v1/customizations') + mock_response = '{"customization_id": "customization_id", "name": "name", "language": "language", "owner": "owner", "created": "created", "last_modified": "last_modified", "description": "description", "words": [{"word": "word", "translation": "translation", "part_of_speech": "Dosi"}], "prompts": [{"prompt": "prompt", "prompt_id": "prompt_id", "status": "status", "error": "error", "speaker_id": "speaker_id"}]}' responses.add(responses.POST, url, body=mock_response, @@ -508,7 +510,7 @@ def test_create_custom_model_value_error(self): # Set up parameter values name = 'testString' - language = 'de-DE' + language = 'ar-MS' description = 'testString' # Pass in all but one required param and check for a ValueError @@ -518,7 +520,7 @@ def test_create_custom_model_value_error(self): for param in req_param_dict.keys(): req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): - service.create_custom_model(**req_copy) + _service.create_custom_model(**req_copy) @@ -542,8 +544,8 @@ def test_list_custom_models_all_params(self): list_custom_models() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/customizations') - mock_response = '{"customizations": [{"customization_id": "customization_id", "name": "name", "language": "language", "owner": "owner", "created": "created", "last_modified": "last_modified", "description": "description", "words": [{"word": "word", "translation": "translation", "part_of_speech": "Dosi"}]}]}' + url = self.preprocess_url(_base_url + '/v1/customizations') + mock_response = '{"customizations": [{"customization_id": "customization_id", "name": "name", "language": "language", "owner": "owner", "created": "created", "last_modified": "last_modified", 
"description": "description", "words": [{"word": "word", "translation": "translation", "part_of_speech": "Dosi"}], "prompts": [{"prompt": "prompt", "prompt_id": "prompt_id", "status": "status", "error": "error", "speaker_id": "speaker_id"}]}]}' responses.add(responses.GET, url, body=mock_response, @@ -551,10 +553,10 @@ def test_list_custom_models_all_params(self): status=200) # Set up parameter values - language = 'de-DE' + language = 'ar-MS' # Invoke method - response = service.list_custom_models( + response = _service.list_custom_models( language=language, headers={} ) @@ -574,8 +576,8 @@ def test_list_custom_models_required_params(self): test_list_custom_models_required_params() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/customizations') - mock_response = '{"customizations": [{"customization_id": "customization_id", "name": "name", "language": "language", "owner": "owner", "created": "created", "last_modified": "last_modified", "description": "description", "words": [{"word": "word", "translation": "translation", "part_of_speech": "Dosi"}]}]}' + url = self.preprocess_url(_base_url + '/v1/customizations') + mock_response = '{"customizations": [{"customization_id": "customization_id", "name": "name", "language": "language", "owner": "owner", "created": "created", "last_modified": "last_modified", "description": "description", "words": [{"word": "word", "translation": "translation", "part_of_speech": "Dosi"}], "prompts": [{"prompt": "prompt", "prompt_id": "prompt_id", "status": "status", "error": "error", "speaker_id": "speaker_id"}]}]}' responses.add(responses.GET, url, body=mock_response, @@ -583,7 +585,7 @@ def test_list_custom_models_required_params(self): status=200) # Invoke method - response = service.list_custom_models() + response = _service.list_custom_models() # Check for correct operation @@ -611,7 +613,7 @@ def test_update_custom_model_all_params(self): update_custom_model() """ # Set up mock - url = self.preprocess_url(base_url + 
'/v1/customizations/testString') + url = self.preprocess_url(_base_url + '/v1/customizations/testString') responses.add(responses.POST, url, status=200) @@ -629,7 +631,7 @@ def test_update_custom_model_all_params(self): words = [word_model] # Invoke method - response = service.update_custom_model( + response = _service.update_custom_model( customization_id, name=name, description=description, @@ -653,7 +655,7 @@ def test_update_custom_model_value_error(self): test_update_custom_model_value_error() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/customizations/testString') + url = self.preprocess_url(_base_url + '/v1/customizations/testString') responses.add(responses.POST, url, status=200) @@ -677,7 +679,7 @@ def test_update_custom_model_value_error(self): for param in req_param_dict.keys(): req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): - service.update_custom_model(**req_copy) + _service.update_custom_model(**req_copy) @@ -701,8 +703,8 @@ def test_get_custom_model_all_params(self): get_custom_model() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/customizations/testString') - mock_response = '{"customization_id": "customization_id", "name": "name", "language": "language", "owner": "owner", "created": "created", "last_modified": "last_modified", "description": "description", "words": [{"word": "word", "translation": "translation", "part_of_speech": "Dosi"}]}' + url = self.preprocess_url(_base_url + '/v1/customizations/testString') + mock_response = '{"customization_id": "customization_id", "name": "name", "language": "language", "owner": "owner", "created": "created", "last_modified": "last_modified", "description": "description", "words": [{"word": "word", "translation": "translation", "part_of_speech": "Dosi"}], "prompts": [{"prompt": "prompt", "prompt_id": "prompt_id", "status": "status", "error": "error", "speaker_id": "speaker_id"}]}' 
responses.add(responses.GET, url, body=mock_response, @@ -713,7 +715,7 @@ def test_get_custom_model_all_params(self): customization_id = 'testString' # Invoke method - response = service.get_custom_model( + response = _service.get_custom_model( customization_id, headers={} ) @@ -729,8 +731,8 @@ def test_get_custom_model_value_error(self): test_get_custom_model_value_error() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/customizations/testString') - mock_response = '{"customization_id": "customization_id", "name": "name", "language": "language", "owner": "owner", "created": "created", "last_modified": "last_modified", "description": "description", "words": [{"word": "word", "translation": "translation", "part_of_speech": "Dosi"}]}' + url = self.preprocess_url(_base_url + '/v1/customizations/testString') + mock_response = '{"customization_id": "customization_id", "name": "name", "language": "language", "owner": "owner", "created": "created", "last_modified": "last_modified", "description": "description", "words": [{"word": "word", "translation": "translation", "part_of_speech": "Dosi"}], "prompts": [{"prompt": "prompt", "prompt_id": "prompt_id", "status": "status", "error": "error", "speaker_id": "speaker_id"}]}' responses.add(responses.GET, url, body=mock_response, @@ -747,7 +749,7 @@ def test_get_custom_model_value_error(self): for param in req_param_dict.keys(): req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): - service.get_custom_model(**req_copy) + _service.get_custom_model(**req_copy) @@ -771,7 +773,7 @@ def test_delete_custom_model_all_params(self): delete_custom_model() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/customizations/testString') + url = self.preprocess_url(_base_url + '/v1/customizations/testString') responses.add(responses.DELETE, url, status=204) @@ -780,7 +782,7 @@ def test_delete_custom_model_all_params(self): customization_id = 
'testString' # Invoke method - response = service.delete_custom_model( + response = _service.delete_custom_model( customization_id, headers={} ) @@ -796,7 +798,7 @@ def test_delete_custom_model_value_error(self): test_delete_custom_model_value_error() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/customizations/testString') + url = self.preprocess_url(_base_url + '/v1/customizations/testString') responses.add(responses.DELETE, url, status=204) @@ -811,7 +813,7 @@ def test_delete_custom_model_value_error(self): for param in req_param_dict.keys(): req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): - service.delete_custom_model(**req_copy) + _service.delete_custom_model(**req_copy) @@ -845,7 +847,7 @@ def test_add_words_all_params(self): add_words() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/customizations/testString/words') + url = self.preprocess_url(_base_url + '/v1/customizations/testString/words') responses.add(responses.POST, url, status=200) @@ -861,7 +863,7 @@ def test_add_words_all_params(self): words = [word_model] # Invoke method - response = service.add_words( + response = _service.add_words( customization_id, words, headers={} @@ -881,7 +883,7 @@ def test_add_words_value_error(self): test_add_words_value_error() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/customizations/testString/words') + url = self.preprocess_url(_base_url + '/v1/customizations/testString/words') responses.add(responses.POST, url, status=200) @@ -904,7 +906,7 @@ def test_add_words_value_error(self): for param in req_param_dict.keys(): req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): - service.add_words(**req_copy) + _service.add_words(**req_copy) @@ -928,7 +930,7 @@ def test_list_words_all_params(self): list_words() """ # Set up mock - url = self.preprocess_url(base_url + 
'/v1/customizations/testString/words') + url = self.preprocess_url(_base_url + '/v1/customizations/testString/words') mock_response = '{"words": [{"word": "word", "translation": "translation", "part_of_speech": "Dosi"}]}' responses.add(responses.GET, url, @@ -940,7 +942,7 @@ def test_list_words_all_params(self): customization_id = 'testString' # Invoke method - response = service.list_words( + response = _service.list_words( customization_id, headers={} ) @@ -956,7 +958,7 @@ def test_list_words_value_error(self): test_list_words_value_error() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/customizations/testString/words') + url = self.preprocess_url(_base_url + '/v1/customizations/testString/words') mock_response = '{"words": [{"word": "word", "translation": "translation", "part_of_speech": "Dosi"}]}' responses.add(responses.GET, url, @@ -974,7 +976,7 @@ def test_list_words_value_error(self): for param in req_param_dict.keys(): req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): - service.list_words(**req_copy) + _service.list_words(**req_copy) @@ -998,7 +1000,7 @@ def test_add_word_all_params(self): add_word() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/customizations/testString/words/testString') + url = self.preprocess_url(_base_url + '/v1/customizations/testString/words/testString') responses.add(responses.PUT, url, status=200) @@ -1010,7 +1012,7 @@ def test_add_word_all_params(self): part_of_speech = 'Dosi' # Invoke method - response = service.add_word( + response = _service.add_word( customization_id, word, translation, @@ -1033,7 +1035,7 @@ def test_add_word_value_error(self): test_add_word_value_error() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/customizations/testString/words/testString') + url = self.preprocess_url(_base_url + '/v1/customizations/testString/words/testString') responses.add(responses.PUT, url, status=200) @@ -1053,7 
+1055,7 @@ def test_add_word_value_error(self): for param in req_param_dict.keys(): req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): - service.add_word(**req_copy) + _service.add_word(**req_copy) @@ -1077,7 +1079,7 @@ def test_get_word_all_params(self): get_word() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/customizations/testString/words/testString') + url = self.preprocess_url(_base_url + '/v1/customizations/testString/words/testString') mock_response = '{"translation": "translation", "part_of_speech": "Dosi"}' responses.add(responses.GET, url, @@ -1090,7 +1092,7 @@ def test_get_word_all_params(self): word = 'testString' # Invoke method - response = service.get_word( + response = _service.get_word( customization_id, word, headers={} @@ -1107,7 +1109,7 @@ def test_get_word_value_error(self): test_get_word_value_error() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/customizations/testString/words/testString') + url = self.preprocess_url(_base_url + '/v1/customizations/testString/words/testString') mock_response = '{"translation": "translation", "part_of_speech": "Dosi"}' responses.add(responses.GET, url, @@ -1127,7 +1129,7 @@ def test_get_word_value_error(self): for param in req_param_dict.keys(): req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): - service.get_word(**req_copy) + _service.get_word(**req_copy) @@ -1151,7 +1153,7 @@ def test_delete_word_all_params(self): delete_word() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/customizations/testString/words/testString') + url = self.preprocess_url(_base_url + '/v1/customizations/testString/words/testString') responses.add(responses.DELETE, url, status=204) @@ -1161,7 +1163,7 @@ def test_delete_word_all_params(self): word = 'testString' # Invoke method - response = service.delete_word( + response = _service.delete_word( 
customization_id, word, headers={} @@ -1178,7 +1180,7 @@ def test_delete_word_value_error(self): test_delete_word_value_error() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/customizations/testString/words/testString') + url = self.preprocess_url(_base_url + '/v1/customizations/testString/words/testString') responses.add(responses.DELETE, url, status=204) @@ -1195,7 +1197,7 @@ def test_delete_word_value_error(self): for param in req_param_dict.keys(): req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): - service.delete_word(**req_copy) + _service.delete_word(**req_copy) @@ -1204,6 +1206,625 @@ def test_delete_word_value_error(self): # End of Service: CustomWords ############################################################################## +############################################################################## +# Start of Service: CustomPrompts +############################################################################## +# region + +class TestListCustomPrompts(): + """ + Test Class for list_custom_prompts + """ + + def preprocess_url(self, request_url: str): + """ + Preprocess the request URL to ensure the mock response will be found. 
+ """ + if re.fullmatch('.*/+', request_url) is None: + return request_url + else: + return re.compile(request_url.rstrip('/') + '/+') + + @responses.activate + def test_list_custom_prompts_all_params(self): + """ + list_custom_prompts() + """ + # Set up mock + url = self.preprocess_url(_base_url + '/v1/customizations/testString/prompts') + mock_response = '{"prompts": [{"prompt": "prompt", "prompt_id": "prompt_id", "status": "status", "error": "error", "speaker_id": "speaker_id"}]}' + responses.add(responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200) + + # Set up parameter values + customization_id = 'testString' + + # Invoke method + response = _service.list_custom_prompts( + customization_id, + headers={} + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + + @responses.activate + def test_list_custom_prompts_value_error(self): + """ + test_list_custom_prompts_value_error() + """ + # Set up mock + url = self.preprocess_url(_base_url + '/v1/customizations/testString/prompts') + mock_response = '{"prompts": [{"prompt": "prompt", "prompt_id": "prompt_id", "status": "status", "error": "error", "speaker_id": "speaker_id"}]}' + responses.add(responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200) + + # Set up parameter values + customization_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "customization_id": customization_id, + } + for param in req_param_dict.keys(): + req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.list_custom_prompts(**req_copy) + + + +class TestAddCustomPrompt(): + """ + Test Class for add_custom_prompt + """ + + def preprocess_url(self, request_url: str): + """ + Preprocess the request URL to ensure the mock response will be found. 
+ """ + if re.fullmatch('.*/+', request_url) is None: + return request_url + else: + return re.compile(request_url.rstrip('/') + '/+') + + @responses.activate + def test_add_custom_prompt_all_params(self): + """ + add_custom_prompt() + """ + # Set up mock + url = self.preprocess_url(_base_url + '/v1/customizations/testString/prompts/testString') + mock_response = '{"prompt": "prompt", "prompt_id": "prompt_id", "status": "status", "error": "error", "speaker_id": "speaker_id"}' + responses.add(responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201) + + # Construct a dict representation of a PromptMetadata model + prompt_metadata_model = {} + prompt_metadata_model['prompt_text'] = 'testString' + prompt_metadata_model['speaker_id'] = 'testString' + + # Set up parameter values + customization_id = 'testString' + prompt_id = 'testString' + metadata = prompt_metadata_model + file = io.BytesIO(b'This is a mock file.').getvalue() + filename = 'testString' + + # Invoke method + response = _service.add_custom_prompt( + customization_id, + prompt_id, + metadata, + file, + filename=filename, + headers={} + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 201 + + + @responses.activate + def test_add_custom_prompt_required_params(self): + """ + test_add_custom_prompt_required_params() + """ + # Set up mock + url = self.preprocess_url(_base_url + '/v1/customizations/testString/prompts/testString') + mock_response = '{"prompt": "prompt", "prompt_id": "prompt_id", "status": "status", "error": "error", "speaker_id": "speaker_id"}' + responses.add(responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201) + + # Construct a dict representation of a PromptMetadata model + prompt_metadata_model = {} + prompt_metadata_model['prompt_text'] = 'testString' + prompt_metadata_model['speaker_id'] = 'testString' + + # Set up parameter values + customization_id = 
'testString' + prompt_id = 'testString' + metadata = prompt_metadata_model + file = io.BytesIO(b'This is a mock file.').getvalue() + filename = 'testString' + + # Invoke method + response = _service.add_custom_prompt( + customization_id, + prompt_id, + metadata, + file, + filename=filename, + headers={} + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 201 + + + @responses.activate + def test_add_custom_prompt_value_error(self): + """ + test_add_custom_prompt_value_error() + """ + # Set up mock + url = self.preprocess_url(_base_url + '/v1/customizations/testString/prompts/testString') + mock_response = '{"prompt": "prompt", "prompt_id": "prompt_id", "status": "status", "error": "error", "speaker_id": "speaker_id"}' + responses.add(responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201) + + # Construct a dict representation of a PromptMetadata model + prompt_metadata_model = {} + prompt_metadata_model['prompt_text'] = 'testString' + prompt_metadata_model['speaker_id'] = 'testString' + + # Set up parameter values + customization_id = 'testString' + prompt_id = 'testString' + metadata = prompt_metadata_model + file = io.BytesIO(b'This is a mock file.').getvalue() + filename = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "customization_id": customization_id, + "prompt_id": prompt_id, + "metadata": metadata, + "file": file, + } + for param in req_param_dict.keys(): + req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.add_custom_prompt(**req_copy) + + + +class TestGetCustomPrompt(): + """ + Test Class for get_custom_prompt + """ + + def preprocess_url(self, request_url: str): + """ + Preprocess the request URL to ensure the mock response will be found. 
+ """ + if re.fullmatch('.*/+', request_url) is None: + return request_url + else: + return re.compile(request_url.rstrip('/') + '/+') + + @responses.activate + def test_get_custom_prompt_all_params(self): + """ + get_custom_prompt() + """ + # Set up mock + url = self.preprocess_url(_base_url + '/v1/customizations/testString/prompts/testString') + mock_response = '{"prompt": "prompt", "prompt_id": "prompt_id", "status": "status", "error": "error", "speaker_id": "speaker_id"}' + responses.add(responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200) + + # Set up parameter values + customization_id = 'testString' + prompt_id = 'testString' + + # Invoke method + response = _service.get_custom_prompt( + customization_id, + prompt_id, + headers={} + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + + @responses.activate + def test_get_custom_prompt_value_error(self): + """ + test_get_custom_prompt_value_error() + """ + # Set up mock + url = self.preprocess_url(_base_url + '/v1/customizations/testString/prompts/testString') + mock_response = '{"prompt": "prompt", "prompt_id": "prompt_id", "status": "status", "error": "error", "speaker_id": "speaker_id"}' + responses.add(responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200) + + # Set up parameter values + customization_id = 'testString' + prompt_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "customization_id": customization_id, + "prompt_id": prompt_id, + } + for param in req_param_dict.keys(): + req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.get_custom_prompt(**req_copy) + + + +class TestDeleteCustomPrompt(): + """ + Test Class for delete_custom_prompt + """ + + def preprocess_url(self, request_url: str): + """ + Preprocess the request 
URL to ensure the mock response will be found. + """ + if re.fullmatch('.*/+', request_url) is None: + return request_url + else: + return re.compile(request_url.rstrip('/') + '/+') + + @responses.activate + def test_delete_custom_prompt_all_params(self): + """ + delete_custom_prompt() + """ + # Set up mock + url = self.preprocess_url(_base_url + '/v1/customizations/testString/prompts/testString') + responses.add(responses.DELETE, + url, + status=204) + + # Set up parameter values + customization_id = 'testString' + prompt_id = 'testString' + + # Invoke method + response = _service.delete_custom_prompt( + customization_id, + prompt_id, + headers={} + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 204 + + + @responses.activate + def test_delete_custom_prompt_value_error(self): + """ + test_delete_custom_prompt_value_error() + """ + # Set up mock + url = self.preprocess_url(_base_url + '/v1/customizations/testString/prompts/testString') + responses.add(responses.DELETE, + url, + status=204) + + # Set up parameter values + customization_id = 'testString' + prompt_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "customization_id": customization_id, + "prompt_id": prompt_id, + } + for param in req_param_dict.keys(): + req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.delete_custom_prompt(**req_copy) + + + +# endregion +############################################################################## +# End of Service: CustomPrompts +############################################################################## + +############################################################################## +# Start of Service: SpeakerModels +############################################################################## +# region + +class TestListSpeakerModels(): + """ + Test Class for 
list_speaker_models + """ + + def preprocess_url(self, request_url: str): + """ + Preprocess the request URL to ensure the mock response will be found. + """ + if re.fullmatch('.*/+', request_url) is None: + return request_url + else: + return re.compile(request_url.rstrip('/') + '/+') + + @responses.activate + def test_list_speaker_models_all_params(self): + """ + list_speaker_models() + """ + # Set up mock + url = self.preprocess_url(_base_url + '/v1/speakers') + mock_response = '{"speakers": [{"speaker_id": "speaker_id", "name": "name"}]}' + responses.add(responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200) + + # Invoke method + response = _service.list_speaker_models() + + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + +class TestCreateSpeakerModel(): + """ + Test Class for create_speaker_model + """ + + def preprocess_url(self, request_url: str): + """ + Preprocess the request URL to ensure the mock response will be found. 
+ """ + if re.fullmatch('.*/+', request_url) is None: + return request_url + else: + return re.compile(request_url.rstrip('/') + '/+') + + @responses.activate + def test_create_speaker_model_all_params(self): + """ + create_speaker_model() + """ + # Set up mock + url = self.preprocess_url(_base_url + '/v1/speakers') + mock_response = '{"speaker_id": "speaker_id"}' + responses.add(responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201) + + # Set up parameter values + speaker_name = 'testString' + audio = io.BytesIO(b'This is a mock file.').getvalue() + + # Invoke method + response = _service.create_speaker_model( + speaker_name, + audio, + headers={} + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 201 + # Validate query params + query_string = responses.calls[0].request.url.split('?',1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'speaker_name={}'.format(speaker_name) in query_string + # Validate body params + assert responses.calls[0].request.body == audio + + + @responses.activate + def test_create_speaker_model_value_error(self): + """ + test_create_speaker_model_value_error() + """ + # Set up mock + url = self.preprocess_url(_base_url + '/v1/speakers') + mock_response = '{"speaker_id": "speaker_id"}' + responses.add(responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201) + + # Set up parameter values + speaker_name = 'testString' + audio = io.BytesIO(b'This is a mock file.').getvalue() + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "speaker_name": speaker_name, + "audio": audio, + } + for param in req_param_dict.keys(): + req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.create_speaker_model(**req_copy) + + + +class TestGetSpeakerModel(): + """ + Test Class for get_speaker_model 
+ """ + + def preprocess_url(self, request_url: str): + """ + Preprocess the request URL to ensure the mock response will be found. + """ + if re.fullmatch('.*/+', request_url) is None: + return request_url + else: + return re.compile(request_url.rstrip('/') + '/+') + + @responses.activate + def test_get_speaker_model_all_params(self): + """ + get_speaker_model() + """ + # Set up mock + url = self.preprocess_url(_base_url + '/v1/speakers/testString') + mock_response = '{"customizations": [{"customization_id": "customization_id", "prompts": [{"prompt": "prompt", "prompt_id": "prompt_id", "status": "status", "error": "error"}]}]}' + responses.add(responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200) + + # Set up parameter values + speaker_id = 'testString' + + # Invoke method + response = _service.get_speaker_model( + speaker_id, + headers={} + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + + @responses.activate + def test_get_speaker_model_value_error(self): + """ + test_get_speaker_model_value_error() + """ + # Set up mock + url = self.preprocess_url(_base_url + '/v1/speakers/testString') + mock_response = '{"customizations": [{"customization_id": "customization_id", "prompts": [{"prompt": "prompt", "prompt_id": "prompt_id", "status": "status", "error": "error"}]}]}' + responses.add(responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200) + + # Set up parameter values + speaker_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "speaker_id": speaker_id, + } + for param in req_param_dict.keys(): + req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.get_speaker_model(**req_copy) + + + +class TestDeleteSpeakerModel(): + """ + Test Class for delete_speaker_model + """ + + def 
preprocess_url(self, request_url: str): + """ + Preprocess the request URL to ensure the mock response will be found. + """ + if re.fullmatch('.*/+', request_url) is None: + return request_url + else: + return re.compile(request_url.rstrip('/') + '/+') + + @responses.activate + def test_delete_speaker_model_all_params(self): + """ + delete_speaker_model() + """ + # Set up mock + url = self.preprocess_url(_base_url + '/v1/speakers/testString') + responses.add(responses.DELETE, + url, + status=204) + + # Set up parameter values + speaker_id = 'testString' + + # Invoke method + response = _service.delete_speaker_model( + speaker_id, + headers={} + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 204 + + + @responses.activate + def test_delete_speaker_model_value_error(self): + """ + test_delete_speaker_model_value_error() + """ + # Set up mock + url = self.preprocess_url(_base_url + '/v1/speakers/testString') + responses.add(responses.DELETE, + url, + status=204) + + # Set up parameter values + speaker_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "speaker_id": speaker_id, + } + for param in req_param_dict.keys(): + req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.delete_speaker_model(**req_copy) + + + +# endregion +############################################################################## +# End of Service: SpeakerModels +############################################################################## + ############################################################################## # Start of Service: UserData ############################################################################## @@ -1229,7 +1850,7 @@ def test_delete_user_data_all_params(self): delete_user_data() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/user_data') + url = 
self.preprocess_url(_base_url + '/v1/user_data') responses.add(responses.DELETE, url, status=200) @@ -1238,7 +1859,7 @@ def test_delete_user_data_all_params(self): customer_id = 'testString' # Invoke method - response = service.delete_user_data( + response = _service.delete_user_data( customer_id, headers={} ) @@ -1258,7 +1879,7 @@ def test_delete_user_data_value_error(self): test_delete_user_data_value_error() """ # Set up mock - url = self.preprocess_url(base_url + '/v1/user_data') + url = self.preprocess_url(_base_url + '/v1/user_data') responses.add(responses.DELETE, url, status=200) @@ -1273,7 +1894,7 @@ def test_delete_user_data_value_error(self): for param in req_param_dict.keys(): req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): - service.delete_user_data(**req_copy) + _service.delete_user_data(**req_copy) @@ -1304,6 +1925,13 @@ def test_custom_model_serialization(self): word_model['translation'] = 'testString' word_model['part_of_speech'] = 'Dosi' + prompt_model = {} # Prompt + prompt_model['prompt'] = 'testString' + prompt_model['prompt_id'] = 'testString' + prompt_model['status'] = 'testString' + prompt_model['error'] = 'testString' + prompt_model['speaker_id'] = 'testString' + # Construct a json representation of a CustomModel model custom_model_model_json = {} custom_model_model_json['customization_id'] = 'testString' @@ -1314,6 +1942,7 @@ def test_custom_model_serialization(self): custom_model_model_json['last_modified'] = 'testString' custom_model_model_json['description'] = 'testString' custom_model_model_json['words'] = [word_model] + custom_model_model_json['prompts'] = [prompt_model] # Construct a model instance of CustomModel by calling from_dict on the json representation custom_model_model = CustomModel.from_dict(custom_model_model_json) @@ -1347,6 +1976,13 @@ def test_custom_models_serialization(self): word_model['translation'] = 'testString' 
word_model['part_of_speech'] = 'Dosi' + prompt_model = {} # Prompt + prompt_model['prompt'] = 'testString' + prompt_model['prompt_id'] = 'testString' + prompt_model['status'] = 'testString' + prompt_model['error'] = 'testString' + prompt_model['speaker_id'] = 'testString' + custom_model_model = {} # CustomModel custom_model_model['customization_id'] = 'testString' custom_model_model['name'] = 'testString' @@ -1356,6 +1992,7 @@ def test_custom_models_serialization(self): custom_model_model['last_modified'] = 'testString' custom_model_model['description'] = 'testString' custom_model_model['words'] = [word_model] + custom_model_model['prompts'] = [prompt_model] # Construct a json representation of a CustomModels model custom_models_model_json = {} @@ -1376,6 +2013,107 @@ def test_custom_models_serialization(self): custom_models_model_json2 = custom_models_model.to_dict() assert custom_models_model_json2 == custom_models_model_json +class TestPrompt(): + """ + Test Class for Prompt + """ + + def test_prompt_serialization(self): + """ + Test serialization/deserialization for Prompt + """ + + # Construct a json representation of a Prompt model + prompt_model_json = {} + prompt_model_json['prompt'] = 'testString' + prompt_model_json['prompt_id'] = 'testString' + prompt_model_json['status'] = 'testString' + prompt_model_json['error'] = 'testString' + prompt_model_json['speaker_id'] = 'testString' + + # Construct a model instance of Prompt by calling from_dict on the json representation + prompt_model = Prompt.from_dict(prompt_model_json) + assert prompt_model != False + + # Construct a model instance of Prompt by calling from_dict on the json representation + prompt_model_dict = Prompt.from_dict(prompt_model_json).__dict__ + prompt_model2 = Prompt(**prompt_model_dict) + + # Verify the model instances are equivalent + assert prompt_model == prompt_model2 + + # Convert model instance back to dict and verify no loss of data + prompt_model_json2 = prompt_model.to_dict() + 
assert prompt_model_json2 == prompt_model_json + +class TestPromptMetadata(): + """ + Test Class for PromptMetadata + """ + + def test_prompt_metadata_serialization(self): + """ + Test serialization/deserialization for PromptMetadata + """ + + # Construct a json representation of a PromptMetadata model + prompt_metadata_model_json = {} + prompt_metadata_model_json['prompt_text'] = 'testString' + prompt_metadata_model_json['speaker_id'] = 'testString' + + # Construct a model instance of PromptMetadata by calling from_dict on the json representation + prompt_metadata_model = PromptMetadata.from_dict(prompt_metadata_model_json) + assert prompt_metadata_model != False + + # Construct a model instance of PromptMetadata by calling from_dict on the json representation + prompt_metadata_model_dict = PromptMetadata.from_dict(prompt_metadata_model_json).__dict__ + prompt_metadata_model2 = PromptMetadata(**prompt_metadata_model_dict) + + # Verify the model instances are equivalent + assert prompt_metadata_model == prompt_metadata_model2 + + # Convert model instance back to dict and verify no loss of data + prompt_metadata_model_json2 = prompt_metadata_model.to_dict() + assert prompt_metadata_model_json2 == prompt_metadata_model_json + +class TestPrompts(): + """ + Test Class for Prompts + """ + + def test_prompts_serialization(self): + """ + Test serialization/deserialization for Prompts + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + prompt_model = {} # Prompt + prompt_model['prompt'] = 'testString' + prompt_model['prompt_id'] = 'testString' + prompt_model['status'] = 'testString' + prompt_model['error'] = 'testString' + prompt_model['speaker_id'] = 'testString' + + # Construct a json representation of a Prompts model + prompts_model_json = {} + prompts_model_json['prompts'] = [prompt_model] + + # Construct a model instance of Prompts by calling from_dict on the json representation + prompts_model = Prompts.from_dict(prompts_model_json) + assert prompts_model != False + + # Construct a model instance of Prompts by calling from_dict on the json representation + prompts_model_dict = Prompts.from_dict(prompts_model_json).__dict__ + prompts_model2 = Prompts(**prompts_model_dict) + + # Verify the model instances are equivalent + assert prompts_model == prompts_model2 + + # Convert model instance back to dict and verify no loss of data + prompts_model_json2 = prompts_model.to_dict() + assert prompts_model_json2 == prompts_model_json + class TestPronunciation(): """ Test Class for Pronunciation @@ -1405,6 +2143,211 @@ def test_pronunciation_serialization(self): pronunciation_model_json2 = pronunciation_model.to_dict() assert pronunciation_model_json2 == pronunciation_model_json +class TestSpeaker(): + """ + Test Class for Speaker + """ + + def test_speaker_serialization(self): + """ + Test serialization/deserialization for Speaker + """ + + # Construct a json representation of a Speaker model + speaker_model_json = {} + speaker_model_json['speaker_id'] = 'testString' + speaker_model_json['name'] = 'testString' + + # Construct a model instance of Speaker by calling from_dict on the json representation + speaker_model = Speaker.from_dict(speaker_model_json) + assert speaker_model != False + + # Construct a model instance of Speaker by calling from_dict on the json representation + speaker_model_dict = Speaker.from_dict(speaker_model_json).__dict__ + speaker_model2 = Speaker(**speaker_model_dict) + + 
# Verify the model instances are equivalent + assert speaker_model == speaker_model2 + + # Convert model instance back to dict and verify no loss of data + speaker_model_json2 = speaker_model.to_dict() + assert speaker_model_json2 == speaker_model_json + +class TestSpeakerCustomModel(): + """ + Test Class for SpeakerCustomModel + """ + + def test_speaker_custom_model_serialization(self): + """ + Test serialization/deserialization for SpeakerCustomModel + """ + + # Construct dict forms of any model objects needed in order to build this model. + + speaker_prompt_model = {} # SpeakerPrompt + speaker_prompt_model['prompt'] = 'testString' + speaker_prompt_model['prompt_id'] = 'testString' + speaker_prompt_model['status'] = 'testString' + speaker_prompt_model['error'] = 'testString' + + # Construct a json representation of a SpeakerCustomModel model + speaker_custom_model_model_json = {} + speaker_custom_model_model_json['customization_id'] = 'testString' + speaker_custom_model_model_json['prompts'] = [speaker_prompt_model] + + # Construct a model instance of SpeakerCustomModel by calling from_dict on the json representation + speaker_custom_model_model = SpeakerCustomModel.from_dict(speaker_custom_model_model_json) + assert speaker_custom_model_model != False + + # Construct a model instance of SpeakerCustomModel by calling from_dict on the json representation + speaker_custom_model_model_dict = SpeakerCustomModel.from_dict(speaker_custom_model_model_json).__dict__ + speaker_custom_model_model2 = SpeakerCustomModel(**speaker_custom_model_model_dict) + + # Verify the model instances are equivalent + assert speaker_custom_model_model == speaker_custom_model_model2 + + # Convert model instance back to dict and verify no loss of data + speaker_custom_model_model_json2 = speaker_custom_model_model.to_dict() + assert speaker_custom_model_model_json2 == speaker_custom_model_model_json + +class TestSpeakerCustomModels(): + """ + Test Class for SpeakerCustomModels + """ + + def 
test_speaker_custom_models_serialization(self): + """ + Test serialization/deserialization for SpeakerCustomModels + """ + + # Construct dict forms of any model objects needed in order to build this model. + + speaker_prompt_model = {} # SpeakerPrompt + speaker_prompt_model['prompt'] = 'testString' + speaker_prompt_model['prompt_id'] = 'testString' + speaker_prompt_model['status'] = 'testString' + speaker_prompt_model['error'] = 'testString' + + speaker_custom_model_model = {} # SpeakerCustomModel + speaker_custom_model_model['customization_id'] = 'testString' + speaker_custom_model_model['prompts'] = [speaker_prompt_model] + + # Construct a json representation of a SpeakerCustomModels model + speaker_custom_models_model_json = {} + speaker_custom_models_model_json['customizations'] = [speaker_custom_model_model] + + # Construct a model instance of SpeakerCustomModels by calling from_dict on the json representation + speaker_custom_models_model = SpeakerCustomModels.from_dict(speaker_custom_models_model_json) + assert speaker_custom_models_model != False + + # Construct a model instance of SpeakerCustomModels by calling from_dict on the json representation + speaker_custom_models_model_dict = SpeakerCustomModels.from_dict(speaker_custom_models_model_json).__dict__ + speaker_custom_models_model2 = SpeakerCustomModels(**speaker_custom_models_model_dict) + + # Verify the model instances are equivalent + assert speaker_custom_models_model == speaker_custom_models_model2 + + # Convert model instance back to dict and verify no loss of data + speaker_custom_models_model_json2 = speaker_custom_models_model.to_dict() + assert speaker_custom_models_model_json2 == speaker_custom_models_model_json + +class TestSpeakerModel(): + """ + Test Class for SpeakerModel + """ + + def test_speaker_model_serialization(self): + """ + Test serialization/deserialization for SpeakerModel + """ + + # Construct a json representation of a SpeakerModel model + speaker_model_model_json = {} + 
speaker_model_model_json['speaker_id'] = 'testString' + + # Construct a model instance of SpeakerModel by calling from_dict on the json representation + speaker_model_model = SpeakerModel.from_dict(speaker_model_model_json) + assert speaker_model_model != False + + # Construct a model instance of SpeakerModel by calling from_dict on the json representation + speaker_model_model_dict = SpeakerModel.from_dict(speaker_model_model_json).__dict__ + speaker_model_model2 = SpeakerModel(**speaker_model_model_dict) + + # Verify the model instances are equivalent + assert speaker_model_model == speaker_model_model2 + + # Convert model instance back to dict and verify no loss of data + speaker_model_model_json2 = speaker_model_model.to_dict() + assert speaker_model_model_json2 == speaker_model_model_json + +class TestSpeakerPrompt(): + """ + Test Class for SpeakerPrompt + """ + + def test_speaker_prompt_serialization(self): + """ + Test serialization/deserialization for SpeakerPrompt + """ + + # Construct a json representation of a SpeakerPrompt model + speaker_prompt_model_json = {} + speaker_prompt_model_json['prompt'] = 'testString' + speaker_prompt_model_json['prompt_id'] = 'testString' + speaker_prompt_model_json['status'] = 'testString' + speaker_prompt_model_json['error'] = 'testString' + + # Construct a model instance of SpeakerPrompt by calling from_dict on the json representation + speaker_prompt_model = SpeakerPrompt.from_dict(speaker_prompt_model_json) + assert speaker_prompt_model != False + + # Construct a model instance of SpeakerPrompt by calling from_dict on the json representation + speaker_prompt_model_dict = SpeakerPrompt.from_dict(speaker_prompt_model_json).__dict__ + speaker_prompt_model2 = SpeakerPrompt(**speaker_prompt_model_dict) + + # Verify the model instances are equivalent + assert speaker_prompt_model == speaker_prompt_model2 + + # Convert model instance back to dict and verify no loss of data + speaker_prompt_model_json2 = 
speaker_prompt_model.to_dict() + assert speaker_prompt_model_json2 == speaker_prompt_model_json + +class TestSpeakers(): + """ + Test Class for Speakers + """ + + def test_speakers_serialization(self): + """ + Test serialization/deserialization for Speakers + """ + + # Construct dict forms of any model objects needed in order to build this model. + + speaker_model = {} # Speaker + speaker_model['speaker_id'] = 'testString' + speaker_model['name'] = 'testString' + + # Construct a json representation of a Speakers model + speakers_model_json = {} + speakers_model_json['speakers'] = [speaker_model] + + # Construct a model instance of Speakers by calling from_dict on the json representation + speakers_model = Speakers.from_dict(speakers_model_json) + assert speakers_model != False + + # Construct a model instance of Speakers by calling from_dict on the json representation + speakers_model_dict = Speakers.from_dict(speakers_model_json).__dict__ + speakers_model2 = Speakers(**speakers_model_dict) + + # Verify the model instances are equivalent + assert speakers_model == speakers_model2 + + # Convert model instance back to dict and verify no loss of data + speakers_model_json2 = speakers_model.to_dict() + assert speakers_model_json2 == speakers_model_json + class TestSupportedFeatures(): """ Test Class for SupportedFeatures @@ -1486,6 +2429,13 @@ def test_voice_serialization(self): word_model['translation'] = 'testString' word_model['part_of_speech'] = 'Dosi' + prompt_model = {} # Prompt + prompt_model['prompt'] = 'testString' + prompt_model['prompt_id'] = 'testString' + prompt_model['status'] = 'testString' + prompt_model['error'] = 'testString' + prompt_model['speaker_id'] = 'testString' + custom_model_model = {} # CustomModel custom_model_model['customization_id'] = 'testString' custom_model_model['name'] = 'testString' @@ -1495,6 +2445,7 @@ def test_voice_serialization(self): custom_model_model['last_modified'] = 'testString' custom_model_model['description'] = 
'testString' custom_model_model['words'] = [word_model] + custom_model_model['prompts'] = [prompt_model] # Construct a json representation of a Voice model voice_model_json = {} @@ -1543,6 +2494,13 @@ def test_voices_serialization(self): word_model['translation'] = 'testString' word_model['part_of_speech'] = 'Dosi' + prompt_model = {} # Prompt + prompt_model['prompt'] = 'testString' + prompt_model['prompt_id'] = 'testString' + prompt_model['status'] = 'testString' + prompt_model['error'] = 'testString' + prompt_model['speaker_id'] = 'testString' + custom_model_model = {} # CustomModel custom_model_model['customization_id'] = 'testString' custom_model_model['name'] = 'testString' @@ -1552,6 +2510,7 @@ def test_voices_serialization(self): custom_model_model['last_modified'] = 'testString' custom_model_model['description'] = 'testString' custom_model_model['words'] = [word_model] + custom_model_model['prompts'] = [prompt_model] voice_model = {} # Voice voice_model['url'] = 'testString'