From 934bf8891da53dc455e4eadffc7df7c5f0f46b92 Mon Sep 17 00:00:00 2001 From: Caroline Chen Date: Wed, 3 Nov 2021 11:13:57 -0700 Subject: [PATCH] Import torchaudio #1942 ab50909 Summary: title Reviewed By: nateanl, mthrok Differential Revision: D31997978 fbshipit-source-id: cfbfa192780f7d786a658eb84cc0685881a4f398 --- .circleci/config.yml | 92 +++---- .circleci/config.yml.in | 68 ++---- .circleci/regenerate.py | 5 +- .circleci/smoke_test/docker/Dockerfile | 2 + .circleci/smoke_test/docker/build_and_push.sh | 8 + .github/process_commit.py | 73 ++++++ .github/workflows/pr-labels.yml | 34 +++ CONTRIBUTING.md | 42 ++-- README.md | 39 +-- docs/source/conf.py | 4 +- docs/source/functional.rst | 19 -- docs/source/pipelines.rst | 18 +- docs/source/refs.bib | 22 ++ docs/source/transforms.rst | 10 - packaging/pkg_helpers.bash | 8 +- packaging/torchaudio/meta.yaml | 16 +- setup.py | 147 ++++++++---- test/integration_tests/conftest.py | 32 ++- .../wav2vec2_pipeline_test.py | 33 +-- ...SRI-VOiCES-src-sp0307-ch127535-sg0042.flac | Bin 67549 -> 0 bytes .../functional/functional_impl.py | 10 - .../torchscript_consistency_impl.py | 8 - .../torchscript_consistency_impl.py | 4 - tools/convert_voxpopuli_models.py | 111 +++++++++ torchaudio/backend/soundfile_backend.py | 14 +- torchaudio/backend/sox_io_backend.py | 14 +- torchaudio/datasets/cmuarctic.py | 2 +- torchaudio/datasets/cmudict.py | 2 +- torchaudio/datasets/commonvoice.py | 4 +- torchaudio/datasets/dr_vctk.py | 5 +- torchaudio/datasets/gtzan.py | 2 +- torchaudio/datasets/librimix.py | 2 +- torchaudio/datasets/librispeech.py | 3 +- torchaudio/datasets/libritts.py | 4 +- torchaudio/datasets/ljspeech.py | 3 +- torchaudio/datasets/speechcommands.py | 3 +- torchaudio/datasets/tedlium.py | 3 +- torchaudio/datasets/utils.py | 2 +- torchaudio/datasets/vctk.py | 3 +- torchaudio/datasets/yesno.py | 2 +- torchaudio/functional/__init__.py | 6 - torchaudio/functional/filtering.py | 6 +- torchaudio/functional/functional.py | 79 +----- torchaudio/models/conv_tasnet.py | 6 +- torchaudio/models/tacotron2.py | 2 +- torchaudio/models/wav2vec2/model.py | 57 +++-- .../models/wav2vec2/utils/import_fairseq.py | 5 +- .../wav2vec2/utils/import_huggingface.py | 4 +- torchaudio/models/wavernn.py | 6 +- torchaudio/pipelines/__init__.py | 6 +- torchaudio/pipelines/_tts/impl.py | 54 ++++- torchaudio/pipelines/_tts/interface.py | 48 ++-- torchaudio/pipelines/_tts/utils.py | 4 +- torchaudio/pipelines/_wav2vec2/__init__.py | 0 .../{_wav2vec2.py => _wav2vec2/impl.py} | 226 ++++++++++++------ torchaudio/pipelines/_wav2vec2/utils.py | 117 +++++++++ torchaudio/prototype/emformer.py | 120 +++++----- torchaudio/sox_effects/sox_effects.py | 14 +- torchaudio/transforms.py | 43 +--- 59 files changed, 1050 insertions(+), 626 deletions(-) create mode 100755 .circleci/smoke_test/docker/build_and_push.sh create mode 100644 .github/process_commit.py create mode 100644 .github/workflows/pr-labels.yml delete mode 100644 test/torchaudio_unittest/assets/Lab41-SRI-VOiCES-src-sp0307-ch127535-sg0042.flac create mode 100755 tools/convert_voxpopuli_models.py create mode 100644 torchaudio/pipelines/_wav2vec2/__init__.py rename torchaudio/pipelines/{_wav2vec2.py => _wav2vec2/impl.py} (84%) create mode 100644 torchaudio/pipelines/_wav2vec2/utils.py diff --git a/.circleci/config.yml b/.circleci/config.yml index 39d5ca5ddd..cb9e32302a 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -97,10 +97,8 @@ binary_common: &binary_common smoke_test_common: &smoke_test_common <<: *binary_common docker: - - 
image: 308535385114.dkr.ecr.us-east-1.amazonaws.com/torchaudio/smoke_test:56c846a5-acaa-41a7-92f5-46ec66186c61 - aws_auth: - aws_access_key_id: ${ECR_AWS_ACCESS_KEY} - aws_secret_access_key: ${ECR_AWS_SECRET_ACCESS_KEY} + - image: pytorch/torchaudio_unittest_base:smoke_test-20211019 + resource_class: large jobs: circleci_consistency: @@ -337,6 +335,26 @@ jobs: source /usr/local/etc/profile.d/conda.sh && conda activate python${PYTHON_VERSION} python -c "import torchaudio" + smoke_test_linux_conda_gpu: + <<: *smoke_test_common + steps: + - attach_workspace: + at: ~/workspace + - designate_upload_channel + - load_conda_channel_flags + - run: + name: install binaries + command: | + set -x + source /usr/local/etc/profile.d/conda.sh && conda activate python${PYTHON_VERSION} + conda install -v -y -c pytorch-${UPLOAD_CHANNEL} pytorch cudatoolkit=${CU_VERSION:2:2}.${CU_VERSION:4} -c conda-forge + conda install -v -y -c file://$HOME/workspace/conda-bld torchaudio + - run: + name: smoke test + command: | + source /usr/local/etc/profile.d/conda.sh && conda activate python${PYTHON_VERSION} + python -c "import torchaudio" + smoke_test_linux_pip: <<: *smoke_test_common steps: @@ -349,7 +367,7 @@ jobs: command: | set -x source /usr/local/etc/profile.d/conda.sh && conda activate python${PYTHON_VERSION} - pip install $(ls ~/workspace/torchaudio*.whl) -f "https://download.pytorch.org/whl/${UPLOAD_CHANNEL}/torch_${UPLOAD_CHANNEL}.html" + pip install $(ls ~/workspace/torchaudio*.whl) -f "https://download.pytorch.org/whl/${UPLOAD_CHANNEL}/${CU_VERSION}/torch_${UPLOAD_CHANNEL}.html" - run: name: smoke test command: | @@ -407,33 +425,6 @@ jobs: conda activate python${PYTHON_VERSION} python -c "import torchaudio" - smoke_test_docker_image_build: - machine: - image: ubuntu-1604:201903-01 - resource_class: large - environment: - image_name: torchaudio/smoke_test - steps: - - checkout - - run: - name: build_docker image - no_output_timeout: "1h" - command: | - cd .circleci/smoke_test/docker && docker build . 
-t ${image_name}:${CIRCLE_WORKFLOW_ID} - - run: - name: upload docker image - no_output_timeout: "1h" - command: | - set +x - export AWS_ACCESS_KEY_ID=${ECR_AWS_ACCESS_KEY} - export AWS_SECRET_ACCESS_KEY=${ECR_AWS_SECRET_ACCESS_KEY} - eval $(aws ecr get-login --region us-east-1 --no-include-email) - set -x - docker tag ${image_name}:${CIRCLE_WORKFLOW_ID} 308535385114.dkr.ecr.us-east-1.amazonaws.com/${image_name}:${CIRCLE_WORKFLOW_ID} - docker tag ${image_name}:${CIRCLE_WORKFLOW_ID} 308535385114.dkr.ecr.us-east-1.amazonaws.com/${image_name}:latest - docker push 308535385114.dkr.ecr.us-east-1.amazonaws.com/${image_name}:${CIRCLE_WORKFLOW_ID} - docker push 308535385114.dkr.ecr.us-east-1.amazonaws.com/${image_name}:latest - unittest_linux_cpu: <<: *binary_common docker: @@ -633,7 +624,8 @@ jobs: # Don't use "checkout" step since it uses ssh, which cannot git push # https://circleci.com/docs/2.0/configuration-reference/#checkout set -ex - tag=${CIRCLE_TAG:1:5} + # turn v1.12.0rc3 into 1.12.0 + tag=$(echo $CIRCLE_TAG | sed -e 's/v*\([0-9.]*\).*/\1/') target=${tag:-main} ~/workspace/.circleci/build_docs/commit_docs.sh ~/workspace $target @@ -2359,7 +2351,7 @@ workflows: name: nightly_binary_linux_conda_py3.6_cu102_upload requires: - nightly_binary_linux_conda_py3.6_cu102 - - smoke_test_linux_conda: + - smoke_test_linux_conda_gpu: cuda_version: cu102 filters: branches: @@ -2395,7 +2387,7 @@ workflows: name: nightly_binary_linux_conda_py3.6_cu111_upload requires: - nightly_binary_linux_conda_py3.6_cu111 - - smoke_test_linux_conda: + - smoke_test_linux_conda_gpu: cuda_version: cu111 filters: branches: @@ -2431,7 +2423,7 @@ workflows: name: nightly_binary_linux_conda_py3.6_cu113_upload requires: - nightly_binary_linux_conda_py3.6_cu113 - - smoke_test_linux_conda: + - smoke_test_linux_conda_gpu: cuda_version: cu113 filters: branches: @@ -2503,7 +2495,7 @@ workflows: name: nightly_binary_linux_conda_py3.7_cu102_upload requires: - nightly_binary_linux_conda_py3.7_cu102 - - smoke_test_linux_conda: + - smoke_test_linux_conda_gpu: cuda_version: cu102 filters: branches: @@ -2539,7 +2531,7 @@ workflows: name: nightly_binary_linux_conda_py3.7_cu111_upload requires: - nightly_binary_linux_conda_py3.7_cu111 - - smoke_test_linux_conda: + - smoke_test_linux_conda_gpu: cuda_version: cu111 filters: branches: @@ -2575,7 +2567,7 @@ workflows: name: nightly_binary_linux_conda_py3.7_cu113_upload requires: - nightly_binary_linux_conda_py3.7_cu113 - - smoke_test_linux_conda: + - smoke_test_linux_conda_gpu: cuda_version: cu113 filters: branches: @@ -2647,7 +2639,7 @@ workflows: name: nightly_binary_linux_conda_py3.8_cu102_upload requires: - nightly_binary_linux_conda_py3.8_cu102 - - smoke_test_linux_conda: + - smoke_test_linux_conda_gpu: cuda_version: cu102 filters: branches: @@ -2683,7 +2675,7 @@ workflows: name: nightly_binary_linux_conda_py3.8_cu111_upload requires: - nightly_binary_linux_conda_py3.8_cu111 - - smoke_test_linux_conda: + - smoke_test_linux_conda_gpu: cuda_version: cu111 filters: branches: @@ -2719,7 +2711,7 @@ workflows: name: nightly_binary_linux_conda_py3.8_cu113_upload requires: - nightly_binary_linux_conda_py3.8_cu113 - - smoke_test_linux_conda: + - smoke_test_linux_conda_gpu: cuda_version: cu113 filters: branches: @@ -2791,7 +2783,7 @@ workflows: name: nightly_binary_linux_conda_py3.9_cu102_upload requires: - nightly_binary_linux_conda_py3.9_cu102 - - smoke_test_linux_conda: + - smoke_test_linux_conda_gpu: cuda_version: cu102 filters: branches: @@ -2827,7 +2819,7 @@ workflows: name: 
nightly_binary_linux_conda_py3.9_cu111_upload requires: - nightly_binary_linux_conda_py3.9_cu111 - - smoke_test_linux_conda: + - smoke_test_linux_conda_gpu: cuda_version: cu111 filters: branches: @@ -2863,7 +2855,7 @@ workflows: name: nightly_binary_linux_conda_py3.9_cu113_upload requires: - nightly_binary_linux_conda_py3.9_cu113 - - smoke_test_linux_conda: + - smoke_test_linux_conda_gpu: cuda_version: cu113 filters: branches: @@ -3243,13 +3235,3 @@ workflows: python_version: '3.9' requires: - nightly_binary_windows_conda_py3.9_cu113_upload - docker_build: - triggers: - - schedule: - cron: "0 10 * * 0" - filters: - branches: - only: - - main - jobs: - - smoke_test_docker_image_build diff --git a/.circleci/config.yml.in b/.circleci/config.yml.in index 87a0b3be9e..5c3d709a47 100644 --- a/.circleci/config.yml.in +++ b/.circleci/config.yml.in @@ -97,10 +97,8 @@ binary_common: &binary_common smoke_test_common: &smoke_test_common <<: *binary_common docker: - - image: 308535385114.dkr.ecr.us-east-1.amazonaws.com/torchaudio/smoke_test:56c846a5-acaa-41a7-92f5-46ec66186c61 - aws_auth: - aws_access_key_id: ${ECR_AWS_ACCESS_KEY} - aws_secret_access_key: ${ECR_AWS_SECRET_ACCESS_KEY} + - image: pytorch/torchaudio_unittest_base:smoke_test-20211019 + resource_class: large jobs: circleci_consistency: @@ -337,6 +335,26 @@ jobs: source /usr/local/etc/profile.d/conda.sh && conda activate python${PYTHON_VERSION} python -c "import torchaudio" + smoke_test_linux_conda_gpu: + <<: *smoke_test_common + steps: + - attach_workspace: + at: ~/workspace + - designate_upload_channel + - load_conda_channel_flags + - run: + name: install binaries + command: | + set -x + source /usr/local/etc/profile.d/conda.sh && conda activate python${PYTHON_VERSION} + conda install -v -y -c pytorch-${UPLOAD_CHANNEL} pytorch cudatoolkit=${CU_VERSION:2:2}.${CU_VERSION:4} -c conda-forge + conda install -v -y -c file://$HOME/workspace/conda-bld torchaudio + - run: + name: smoke test + command: | + source /usr/local/etc/profile.d/conda.sh && conda activate python${PYTHON_VERSION} + python -c "import torchaudio" + smoke_test_linux_pip: <<: *smoke_test_common steps: @@ -349,7 +367,7 @@ jobs: command: | set -x source /usr/local/etc/profile.d/conda.sh && conda activate python${PYTHON_VERSION} - pip install $(ls ~/workspace/torchaudio*.whl) -f "https://download.pytorch.org/whl/${UPLOAD_CHANNEL}/torch_${UPLOAD_CHANNEL}.html" + pip install $(ls ~/workspace/torchaudio*.whl) -f "https://download.pytorch.org/whl/${UPLOAD_CHANNEL}/${CU_VERSION}/torch_${UPLOAD_CHANNEL}.html" - run: name: smoke test command: | @@ -407,33 +425,6 @@ jobs: conda activate python${PYTHON_VERSION} python -c "import torchaudio" - smoke_test_docker_image_build: - machine: - image: ubuntu-1604:201903-01 - resource_class: large - environment: - image_name: torchaudio/smoke_test - steps: - - checkout - - run: - name: build_docker image - no_output_timeout: "1h" - command: | - cd .circleci/smoke_test/docker && docker build . 
-t ${image_name}:${CIRCLE_WORKFLOW_ID} - - run: - name: upload docker image - no_output_timeout: "1h" - command: | - set +x - export AWS_ACCESS_KEY_ID=${ECR_AWS_ACCESS_KEY} - export AWS_SECRET_ACCESS_KEY=${ECR_AWS_SECRET_ACCESS_KEY} - eval $(aws ecr get-login --region us-east-1 --no-include-email) - set -x - docker tag ${image_name}:${CIRCLE_WORKFLOW_ID} 308535385114.dkr.ecr.us-east-1.amazonaws.com/${image_name}:${CIRCLE_WORKFLOW_ID} - docker tag ${image_name}:${CIRCLE_WORKFLOW_ID} 308535385114.dkr.ecr.us-east-1.amazonaws.com/${image_name}:latest - docker push 308535385114.dkr.ecr.us-east-1.amazonaws.com/${image_name}:${CIRCLE_WORKFLOW_ID} - docker push 308535385114.dkr.ecr.us-east-1.amazonaws.com/${image_name}:latest - unittest_linux_cpu: <<: *binary_common docker: @@ -633,7 +624,8 @@ jobs: # Don't use "checkout" step since it uses ssh, which cannot git push # https://circleci.com/docs/2.0/configuration-reference/#checkout set -ex - tag=${CIRCLE_TAG:1:5} + # turn v1.12.0rc3 into 1.12.0 + tag=$(echo $CIRCLE_TAG | sed -e 's/v*\([0-9.]*\).*/\1/') target=${tag:-main} ~/workspace/.circleci/build_docs/commit_docs.sh ~/workspace $target @@ -664,13 +656,3 @@ workflows: branches: only: nightly {{ build_workflows(prefix="nightly_", filter_branch="nightly", upload=True) }} - docker_build: - triggers: - - schedule: - cron: "0 10 * * 0" - filters: - branches: - only: - - main - jobs: - - smoke_test_docker_image_build diff --git a/.circleci/regenerate.py b/.circleci/regenerate.py index e7350bfd05..6113467652 100755 --- a/.circleci/regenerate.py +++ b/.circleci/regenerate.py @@ -190,7 +190,10 @@ def generate_smoketest_workflow(pydistro, base_workflow_name, filter_branch, pyt if filter_branch: d["filters"] = gen_filter_branch_tree(filter_branch) - return {f"smoke_test_{os_type}_{pydistro}": d} + smoke_name = f"smoke_test_{os_type}_{pydistro}" + if pydistro == "conda" and os_type == "linux" and cu_version != "cpu": + smoke_name += "_gpu" + return {smoke_name: d} def indent(indentation, data_list): diff --git a/.circleci/smoke_test/docker/Dockerfile b/.circleci/smoke_test/docker/Dockerfile index acfac51fcd..be6a8c3c61 100644 --- a/.circleci/smoke_test/docker/Dockerfile +++ b/.circleci/smoke_test/docker/Dockerfile @@ -28,9 +28,11 @@ ENV PATH /opt/conda/bin:$PATH RUN conda create -y --name python3.6 python=3.6 RUN conda create -y --name python3.7 python=3.7 RUN conda create -y --name python3.8 python=3.8 +RUN conda create -y --name python3.9 python=3.9 SHELL [ "/bin/bash", "-c" ] RUN echo "source /usr/local/etc/profile.d/conda.sh" >> ~/.bashrc RUN source /usr/local/etc/profile.d/conda.sh && conda activate python3.6 && conda install -y -c conda-forge sox && conda install -y numpy RUN source /usr/local/etc/profile.d/conda.sh && conda activate python3.7 && conda install -y -c conda-forge sox && conda install -y numpy RUN source /usr/local/etc/profile.d/conda.sh && conda activate python3.8 && conda install -y -c conda-forge sox && conda install -y numpy +RUN source /usr/local/etc/profile.d/conda.sh && conda activate python3.9 && conda install -y -c conda-forge sox && conda install -y numpy CMD [ "/bin/bash"] diff --git a/.circleci/smoke_test/docker/build_and_push.sh b/.circleci/smoke_test/docker/build_and_push.sh new file mode 100755 index 0000000000..092d21de09 --- /dev/null +++ b/.circleci/smoke_test/docker/build_and_push.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +set -euo pipefail + +datestr="$(date "+%Y%m%d")" +image="pytorch/torchaudio_unittest_base:smoke_test-${datestr}" +docker build -t "${image}" . 
+docker push "${image}"
diff --git a/.github/process_commit.py b/.github/process_commit.py
new file mode 100644
index 0000000000..b26fe371ce
--- /dev/null
+++ b/.github/process_commit.py
@@ -0,0 +1,73 @@
+"""
+This script finds the merger responsible for labeling a PR by a commit SHA. It is used by the workflow in
+'.github/workflows/pr-labels.yml'. If no PR is associated with the commit, or the PR is already properly labeled,
+this script is a no-op.
+Note: we ping the merger only, not the reviewers, as the reviewers can sometimes be external to torchaudio
+with no labeling responsibility, so we don't want to bother them.
+"""
+
+import sys
+from typing import Any, Optional, Set, Tuple
+
+import requests
+
+# For a PR to be properly labeled it should have one primary label and one secondary label
+# For a PR with primary label "other", it does not require an additional secondary label
+PRIMARY_LABELS = {
+    "BC-breaking",
+    "deprecation",
+    "bug",
+    "new feature",
+    "improvement",
+    "example",
+    "prototype",
+    "other",
+}
+
+SECONDARY_LABELS = {
+    "module: I/O",
+    "module: ops",
+    "module: models",
+    "module: pipelines",
+    "module: datasets",
+    "module: docs",
+    "module: tests",
+    "build",
+    "style",
+    "perf",
+    "other",
+}
+
+
+def query_torchaudio(cmd: str, *, accept) -> Any:
+    response = requests.get(f"https://api.github.com/repos/pytorch/audio/{cmd}", headers=dict(Accept=accept))
+    return response.json()
+
+
+def get_pr_number(commit_hash: str) -> Optional[int]:
+    # See https://docs.github.com/en/rest/reference/repos#list-pull-requests-associated-with-a-commit
+    data = query_torchaudio(f"commits/{commit_hash}/pulls", accept="application/vnd.github.groot-preview+json")
+    if not data:
+        return None
+    return data[0]["number"]
+
+
+def get_pr_merger_and_labels(pr_number: int) -> Tuple[str, Set[str]]:
+    # See https://docs.github.com/en/rest/reference/pulls#get-a-pull-request
+    data = query_torchaudio(f"pulls/{pr_number}", accept="application/vnd.github.v3+json")
+    merger = data["merged_by"]["login"]
+    labels = {label["name"] for label in data["labels"]}
+    return merger, labels
+
+
+if __name__ == "__main__":
+    commit_hash = sys.argv[1]
+    pr_number = get_pr_number(commit_hash)
+    if not pr_number:
+        sys.exit(0)
+
+    merger, labels = get_pr_merger_and_labels(pr_number)
+    is_properly_labeled = bool(PRIMARY_LABELS.intersection(labels) and SECONDARY_LABELS.intersection(labels))
+
+    if not is_properly_labeled:
+        print(f"@{merger}")
diff --git a/.github/workflows/pr-labels.yml b/.github/workflows/pr-labels.yml
new file mode 100644
index 0000000000..9bde0e4a76
--- /dev/null
+++ b/.github/workflows/pr-labels.yml
@@ -0,0 +1,34 @@
+name: pr-labels
+
+on:
+  push:
+    branches:
+      - main
+
+jobs:
+  is-properly-labeled:
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Set up python
+        uses: actions/setup-python@v2
+
+      - name: Install requests
+        run: pip install requests
+
+      - name: Checkout repository
+        uses: actions/checkout@v2
+
+      - name: Process commit and find merger responsible for labeling
+        id: commit
+        run: echo "::set-output name=merger::$(python .github/process_commit.py ${{ github.sha }})"
+
+      - name: Ping merger responsible for labeling if necessary
+        if: ${{ steps.commit.outputs.merger != '' }}
+        uses: mshick/add-pr-comment@v1
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        with:
+          message: |
+            Hey ${{ steps.commit.outputs.merger }}!
+            You merged this PR, but no labels were added.
+            The list of valid labels is available at https://github.com/pytorch/audio/blob/main/.github/process_commit.py
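One subtlety in the labeling check above is worth spelling out: `"other"` is a member of both `PRIMARY_LABELS` and `SECONDARY_LABELS`, so a PR labeled with nothing but `"other"` passes both intersection tests — exactly the exemption the comment describes. A minimal standalone sketch of the predicate (sets copied from `process_commit.py` above; the helper name is illustrative):

```python
PRIMARY_LABELS = {"BC-breaking", "deprecation", "bug", "new feature",
                  "improvement", "example", "prototype", "other"}
SECONDARY_LABELS = {"module: I/O", "module: ops", "module: models",
                    "module: pipelines", "module: datasets", "module: docs",
                    "module: tests", "build", "style", "perf", "other"}


def is_properly_labeled(labels: set) -> bool:
    # One primary and one secondary label are required; "other" counts as both.
    return bool(PRIMARY_LABELS & labels and SECONDARY_LABELS & labels)


assert is_properly_labeled({"bug", "module: ops"})   # primary + secondary
assert is_properly_labeled({"other"})                # "other" alone is enough
assert not is_properly_labeled({"bug"})              # secondary label missing
```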
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 897e9907d2..4156fff4a7 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -63,7 +63,7 @@ cd audio
 git submodule update --init --recursive
 python setup.py develop
 # or, for OSX
-# MACOSX_DEPLOYMENT_TARGET=10.9 CC=clang CXX=clang++ python setup.py develop
+# CC=clang CXX=clang++ python setup.py develop
 ```
 
 Some environment variables that change the build behavior
@@ -138,27 +138,37 @@ The built docs should now be available in `docs/build/html`
 ## Conventions
 
 As a good software development practice, we try to stick to existing variable
-names and shape (for tensors).
+names and shape (for tensors), and maintain consistent docstring standards.
The following are some of the conventions that we follow.
 
-- We use an ellipsis "..." as a placeholder for the rest of the dimensions of a
-  tensor, e.g. optional batching and channel dimensions. If batching, the
-  "batch" dimension should come in the first diemension.
-- Tensors are assumed to have "channel" dimension coming before the "time"
-  dimension. The bins in frequency domain (freq and mel) are assumed to come
-  before the "time" dimension but after the "channel" dimension. These
-  ordering makes the tensors consistent with PyTorch's dimensions.
-- For size names, the prefix `n_` is used (e.g. "a tensor of size (`n_freq`,
-  `n_mels`)") whereas dimension names do not have this prefix (e.g. "a tensor of
-  dimension (channel, time)")
+- Tensor
+  - We use an ellipsis "..." as a placeholder for the rest of the dimensions of a
+    tensor, e.g. optional batching and channel dimensions. If batching, the
+    "batch" dimension should come first.
+  - Tensors are assumed to have the "channel" dimension coming before the "time"
+    dimension. The bins in the frequency domain (freq and mel) are assumed to come
+    before the "time" dimension but after the "channel" dimension. This
+    ordering makes the tensors consistent with PyTorch's dimensions.
+  - For size names, the prefix `n_` is used (e.g. "a tensor of size (`n_freq`,
+    `n_mels`)") whereas dimension names do not have this prefix (e.g. "a tensor of
+    dimension (channel, time)")
+- Docstring
+  - Tensor dimensions are enclosed with single backticks.
+    ``waveform (Tensor): Tensor of audio of dimension `(..., time)` ``
+  - Parameter type for variable of type `T` with a default value: `(T, optional)`
+  - Parameter type for variable of type `Optional[T]`: `(T or None)`
+  - Return type for a tuple or list of known elements:
+    `(element1, element2)` or `[element1, element2]`
+  - Return type for a tuple or list with an arbitrary number of elements
+    of type T: `Tuple[T]` or `List[T]`
 
 Here are some of the examples of commonly used variables with their names,
 meanings, and shapes (or units):
 
-* `waveform`: a tensor of audio samples with dimensions (..., channel, time)
-* `sample_rate`: the rate of audio dimensions (samples per second)
-* `specgram`: a tensor of spectrogram with dimensions (..., channel, freq, time)
-* `mel_specgram`: a mel spectrogram with dimensions (..., channel, mel, time)
+* `waveform`: a tensor of audio samples with dimensions `(..., channel, time)`
+* `sample_rate`: the sampling rate of the audio `(samples per second)`
+* `specgram`: a tensor of spectrogram with dimensions `(..., channel, freq, time)`
+* `mel_specgram`: a mel spectrogram with dimensions `(..., channel, mel, time)`
 * `hop_length`: the number of samples between the starts of consecutive frames
 * `n_fft`: the number of Fourier bins
 * `n_mels`, `n_mfcc`: the number of mel and MFCC bins
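Taken together, the conventions above yield docstrings like the following sketch. The function, its parameters, and its behavior are hypothetical, made up purely to illustrate the formatting rules:

```python
from typing import Optional, Tuple

import torch
from torch import Tensor


def shift_waveform(waveform: Tensor, shift: int = 1, mask: Optional[Tensor] = None) -> Tuple[Tensor, Tensor]:
    """Shift a waveform along the time axis. (Hypothetical example.)

    Args:
        waveform (Tensor): Tensor of audio of dimension `(..., time)`
        shift (int, optional): Number of samples to shift by. (Default: ``1``)
        mask (Tensor or None): Boolean validity mask of dimension `(..., time)`, or ``None``.

    Returns:
        (Tensor, Tensor): The shifted waveform and the shifted mask, each of
        dimension `(..., time)`.
    """
    if mask is None:
        mask = torch.ones_like(waveform, dtype=torch.bool)
    return waveform.roll(shift, dims=-1), mask.roll(shift, dims=-1)
```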
diff --git a/README.md b/README.md
index 228a5c2aef..520844eb58 100644
--- a/README.md
+++ b/README.md
@@ -3,6 +3,8 @@ torchaudio: an audio library for PyTorch
 
 [![Build Status](https://circleci.com/gh/pytorch/audio.svg?style=svg)](https://app.circleci.com/pipelines/github/pytorch/audio)
 [![Documentation](https://img.shields.io/badge/dynamic/json.svg?label=docs&url=https%3A%2F%2Fpypi.org%2Fpypi%2Ftorchaudio%2Fjson&query=%24.info.version&colorB=brightgreen&prefix=v)](https://pytorch.org/audio/)
+[![Anaconda Badge](https://anaconda.org/pytorch/torchaudio/badges/downloads.svg)](https://anaconda.org/pytorch/torchaudio)
+[![Anaconda-Server Badge](https://anaconda.org/pytorch/torchaudio/badges/platforms.svg)](https://anaconda.org/pytorch/torchaudio)
 
 The aim of torchaudio is to apply [PyTorch](https://github.com/pytorch/pytorch) to
 the audio domain. By supporting PyTorch, torchaudio follows the same philosophy
@@ -32,6 +34,7 @@ The following are the corresponding ``torchaudio`` versions and supported Python
 | ``torch`` | ``torchaudio`` | ``python`` |
 | ------------------------ | ------------------------ | ------------------------------- |
 | ``master`` / ``nightly`` | ``main`` / ``nightly`` | ``>=3.6``, ``<=3.9`` |
+| ``1.10.0`` | ``0.10.0`` | ``>=3.6``, ``<=3.9`` |
 | ``1.9.1`` | ``0.9.1`` | ``>=3.6``, ``<=3.9`` |
 | ``1.9.0`` | ``0.9.0`` | ``>=3.6``, ``<=3.9`` |
 | ``1.8.0`` | ``0.8.0`` | ``>=3.6``, ``<=3.9`` |
@@ -45,39 +48,13 @@ The following are the corresponding ``torchaudio`` versions and supported Python
 Installation
 ------------
 
-### Binary Distributions
+### Binary Distributions (stable and nightly)
 
-To install the latest version using anaconda, run:
+`torchaudio` has binary distributions for PyPI (`pip`) and Anaconda (`conda`).
-
-```
-conda install -c pytorch torchaudio
-```
-
-To install the latest pip wheels, run:
-
-```
-pip install torchaudio -f https://download.pytorch.org/whl/torch_stable.html
-```
-
-(If you do not have torch already installed, this will default to installing
-torch from PyPI. If you need a different torch configuration, preinstall torch
-before running this command.)
- -### Nightly build +Starting `0.10`, torchaudio has CPU-only and CUDA-enabled binary distributions, each of which requires a matching PyTorch version. -Note that nightly build is built on PyTorch's nightly build. Therefore, you need to install the latest PyTorch when you use nightly build of torchaudio. - -**pip** - -``` -pip install --pre torchaudio -f https://download.pytorch.org/whl/nightly/torch_nightly.html -``` - -**conda** - -``` -conda install -y -c pytorch-nightly torchaudio -``` +Please refer to https://pytorch.org/get-started/locally/ for the details. ### From Source @@ -89,7 +66,7 @@ The build process also builds the RNN transducer loss. This functionality can be python setup.py install # OSX -MACOSX_DEPLOYMENT_TARGET=10.9 CC=clang CXX=clang++ python setup.py install +CC=clang CXX=clang++ python setup.py install # Windows # We need to use the MSVC x64 toolset for compilation, with Visual Studio's vcvarsall.bat or directly with vcvars64.bat. diff --git a/docs/source/conf.py b/docs/source/conf.py index 61b1612b7a..8e979a1a0f 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -199,8 +199,8 @@ # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = { - 'python': ('https://docs.python.org/', None), - 'numpy': ('https://docs.scipy.org/doc/numpy/', None), + 'python': ('https://docs.python.org/3/', None), + 'numpy': ('https://numpy.org/doc/stable/', None), 'torch': ('https://pytorch.org/docs/stable/', None), } diff --git a/docs/source/functional.rst b/docs/source/functional.rst index 5ff8ec695e..0d22d1f6c1 100644 --- a/docs/source/functional.rst +++ b/docs/source/functional.rst @@ -71,25 +71,6 @@ resample .. autofunction:: resample -:hidden:`Complex Utility` -~~~~~~~~~~~~~~~~~~~~~~~~~ - -Utilities for pseudo complex tensor. This is not for the native complex dtype, such as `cfloat64`, but for tensors with real-value type and have extra dimension at the end for real and imaginary parts. - -angle ------ - -.. autofunction:: angle - -complex_norm ------------- - -.. autofunction:: complex_norm - -magphase --------- - -.. autofunction:: magphase :hidden:`Filtering` ~~~~~~~~~~~~~~~~~~~ diff --git a/docs/source/pipelines.rst b/docs/source/pipelines.rst index d907d2d613..d7958418b2 100644 --- a/docs/source/pipelines.rst +++ b/docs/source/pipelines.rst @@ -153,6 +153,22 @@ WAV2VEC2_ASR_LARGE_LV60K_960H .. autodata:: WAV2VEC2_ASR_LARGE_LV60K_960H :no-value: +VOXPOPULI_ASR_BASE_10K_ES +~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. container:: py attribute + + .. autodata:: VOXPOPULI_ASR_BASE_10K_ES + :no-value: + +VOXPOPULI_ASR_BASE_10K_FR +~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. container:: py attribute + + .. autodata:: VOXPOPULI_ASR_BASE_10K_FR + :no-value: + HUBERT_ASR_LARGE ~~~~~~~~~~~~~~~~ @@ -167,7 +183,7 @@ HUBERT_ASR_XLARGE .. container:: py attribute .. 
autodata:: HUBERT_ASR_XLARGE - + :no-value: Tacotron2 Text-To-Speech ------------------------ diff --git a/docs/source/refs.bib b/docs/source/refs.bib index d1b0ef17c3..80df0e6161 100644 --- a/docs/source/refs.bib +++ b/docs/source/refs.bib @@ -1,3 +1,25 @@ +@article{voxpopuli, + author = {Changhan Wang and + Morgane Rivi{\`{e}}re and + Ann Lee and + Anne Wu and + Chaitanya Talnikar and + Daniel Haziza and + Mary Williamson and + Juan Miguel Pino and + Emmanuel Dupoux}, + title = {VoxPopuli: {A} Large-Scale Multilingual Speech Corpus for Representation + Learning, Semi-Supervised Learning and Interpretation}, + journal = {CoRR}, + volume = {abs/2101.00390}, + year = {2021}, + url = {https://arxiv.org/abs/2101.00390}, + eprinttype = {arXiv}, + eprint = {2101.00390}, + timestamp = {Thu, 12 Aug 2021 15:37:06 +0200}, + biburl = {https://dblp.org/rec/journals/corr/abs-2101-00390.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} @article{specaugment, title={SpecAugment: A Simple Data Augmentation Method for Automatic Speech Recognition}, url={http://dx.doi.org/10.21437/Interspeech.2019-2680}, diff --git a/docs/source/transforms.rst b/docs/source/transforms.rst index f05c7eba9e..7e4a193d1d 100644 --- a/docs/source/transforms.rst +++ b/docs/source/transforms.rst @@ -88,16 +88,6 @@ Transforms are common audio transforms. They can be chained together using :clas .. automethod:: forward -:hidden:`Complex Utility` -~~~~~~~~~~~~~~~~~~~~~~~~~ - -:hidden:`ComplexNorm` ---------------------- - -.. autoclass:: ComplexNorm - - .. automethod:: forward - :hidden:`Feature Extractions` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/packaging/pkg_helpers.bash b/packaging/pkg_helpers.bash index 28f1c9acf4..ffb7df4968 100644 --- a/packaging/pkg_helpers.bash +++ b/packaging/pkg_helpers.bash @@ -141,7 +141,7 @@ setup_build_version() { # Set some useful variables for OS X, if applicable setup_macos() { if [[ "$(uname)" == Darwin ]]; then - export MACOSX_DEPLOYMENT_TARGET=10.9 CC=clang CXX=clang++ + export CC=clang CXX=clang++ fi } @@ -246,9 +246,9 @@ setup_conda_pytorch_constraint() { # Translate CUDA_VERSION into CUDA_CUDATOOLKIT_CONSTRAINT setup_conda_cudatoolkit_constraint() { - export CONDA_CPUONLY_FEATURE="" + export CONDA_BUILD_VARIANT="cuda" if [[ "$(uname)" == Darwin ]]; then - export CONDA_CUDATOOLKIT_CONSTRAINT="" + export CONDA_BUILD_VARIANT="cpu" else case "$CU_VERSION" in cu113) @@ -277,7 +277,7 @@ setup_conda_cudatoolkit_constraint() { ;; cpu) export CONDA_CUDATOOLKIT_CONSTRAINT="" - export CONDA_CPUONLY_FEATURE="- cpuonly" + export CONDA_BUILD_VARIANT="cpu" ;; *) echo "Unrecognized CU_VERSION=$CU_VERSION" diff --git a/packaging/torchaudio/meta.yaml b/packaging/torchaudio/meta.yaml index 720c396d93..ef6d2b0269 100644 --- a/packaging/torchaudio/meta.yaml +++ b/packaging/torchaudio/meta.yaml @@ -1,3 +1,4 @@ +{% set build_variant = environ.get('CONDA_BUILD_VARIANT', 'cpu') %} package: name: torchaudio version: "{{ environ.get('BUILD_VERSION', '0.0.0') }}" @@ -16,25 +17,32 @@ requirements: - cmake - ninja - defaults::numpy >=1.11 + - pytorch-mutex 1.0 {{ build_variant }} # [not osx ] {{ environ.get('CONDA_PYTORCH_BUILD_CONSTRAINT', 'pytorch') }} {{ environ.get('CONDA_EXTRA_BUILD_CONSTRAINT', '') }} - {{ environ.get('CONDA_CPUONLY_FEATURE', '') }} {{ environ.get('CONDA_CUDATOOLKIT_CONSTRAINT', '') }} run: - python - defaults::numpy >=1.11 + - pytorch-mutex 1.0 {{ build_variant }} # [not osx ] {{ environ.get('CONDA_PYTORCH_CONSTRAINT', 'pytorch') }} {{ 
environ.get('CONDA_CUDATOOLKIT_CONSTRAINT', '') }} + {% if build_variant == 'cpu' %} + run_constrained: + - cpuonly + {% elif not osx %} + run_constrained: + - cpuonly <0 + {% endif %} + build: string: py{{py}}_{{ environ.get('CU_VERSION', 'cpu') }} script_env: - BUILD_VERSION - USE_CUDA - TORCH_CUDA_ARCH_LIST - features: - {{ environ.get('CONDA_CPUONLY_FEATURE', '') }} test: imports: @@ -52,7 +60,7 @@ test: # Ideally we would test this, but conda doesn't provide librosa # - librosa >=0.4.3 - scipy - {{ environ.get('CONDA_CPUONLY_FEATURE', '') }} + - pytorch-mutex 1.0 {{ build_variant }} # [not osx ] about: home: https://github.com/pytorch/audio diff --git a/setup.py b/setup.py index eb8692e6e2..03c3b3fa82 100644 --- a/setup.py +++ b/setup.py @@ -1,5 +1,6 @@ #!/usr/bin/env python import os +import re import shutil import subprocess from pathlib import Path @@ -11,31 +12,34 @@ ROOT_DIR = Path(__file__).parent.resolve() -# Creating the version file -version = '0.11.0a0' -sha = 'Unknown' +def _run_cmd(cmd): + try: + return subprocess.check_output(cmd, cwd=ROOT_DIR).decode('ascii').strip() + except Exception: + return None -try: - sha = subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=ROOT_DIR).decode('ascii').strip() -except Exception: - pass -if os.getenv('BUILD_VERSION'): - version = os.getenv('BUILD_VERSION') -elif sha != 'Unknown': - version += '+' + sha[:7] -print('-- Building version ' + version) +def _get_version(sha): + version = '0.11.0a0' + if os.getenv('BUILD_VERSION'): + version = os.getenv('BUILD_VERSION') + elif sha is not None: + version += '+' + sha[:7] + return version -version_path = ROOT_DIR / 'torchaudio' / 'version.py' -with open(version_path, 'w') as f: - f.write("__version__ = '{}'\n".format(version)) - f.write("git_version = {}\n".format(repr(sha))) -pytorch_package_version = os.getenv('PYTORCH_VERSION') +def _make_version_file(version, sha): + sha = 'Unknown' if sha is None else sha + version_path = ROOT_DIR / 'torchaudio' / 'version.py' + with open(version_path, 'w') as f: + f.write(f"__version__ = '{version}'\n") + f.write(f"git_version = '{sha}'\n") -pytorch_package_dep = 'torch' -if pytorch_package_version is not None: - pytorch_package_dep += "==" + pytorch_package_version + +def _get_pytorch_version(): + if 'PYTORCH_VERSION' in os.environ: + return f"torch=={os.environ['PYTORCH_VERSION']}" + return 'torch' class clean(distutils.command.clean.clean): @@ -57,36 +61,73 @@ def run(self): shutil.rmtree(str(path), ignore_errors=True) -setup( - name="torchaudio", - version=version, - description="An audio package for PyTorch", - url="https://github.com/pytorch/audio", - author="Soumith Chintala, David Pollack, Sean Naren, Peter Goldsborough", - author_email="soumith@pytorch.org", - classifiers=[ - "Environment :: Plugins", - "Intended Audience :: Developers", - "Intended Audience :: Science/Research", - "License :: OSI Approved :: BSD License", - "Operating System :: MacOS :: MacOS X", - "Operating System :: Microsoft :: Windows", - "Operating System :: POSIX", - "Programming Language :: C++", - "Programming Language :: Python :: 3.6", - "Programming Language :: Python :: 3.7", - "Programming Language :: Python :: 3.8", - "Programming Language :: Python :: 3.9", - "Programming Language :: Python :: Implementation :: CPython", - "Topic :: Multimedia :: Sound/Audio", - "Topic :: Scientific/Engineering :: Artificial Intelligence" - ], - packages=find_packages(exclude=["build*", "test*", "torchaudio.csrc*", "third_party*", "tools*"]), - 
ext_modules=setup_helpers.get_ext_modules(), - cmdclass={ - 'build_ext': setup_helpers.CMakeBuild, - 'clean': clean, - }, - install_requires=[pytorch_package_dep], - zip_safe=False, -) +def _get_packages(branch_name, tag): + exclude = [ + "build*", + "test*", + "torchaudio.csrc*", + "third_party*", + "tools*", + ] + exclude_prototype = False + if branch_name is not None and branch_name.startswith('release/'): + exclude_prototype = True + if tag is not None and re.match(r'v[\d.]+(-rc\d+)?', tag): + exclude_prototype = True + if exclude_prototype: + print('Excluding torchaudio.prototype from the package.') + exclude.append("torchaudio.prototype") + return find_packages(exclude=exclude) + + +def _main(): + sha = _run_cmd(['git', 'rev-parse', 'HEAD']) + branch = _run_cmd(['git', 'rev-parse', '--abbrev-ref', 'HEAD']) + tag = _run_cmd(['git', 'describe', '--tags', '--exact-match', '@']) + print('-- Git branch:', branch) + print('-- Git SHA:', sha) + print('-- Git tag:', tag) + pytorch_package_dep = _get_pytorch_version() + print('-- PyTorch dependency:', pytorch_package_dep) + version = _get_version(sha) + print('-- Building version', version) + + _make_version_file(version, sha) + + setup( + name="torchaudio", + version=version, + description="An audio package for PyTorch", + url="https://github.com/pytorch/audio", + author="Soumith Chintala, David Pollack, Sean Naren, Peter Goldsborough", + author_email="soumith@pytorch.org", + classifiers=[ + "Environment :: Plugins", + "Intended Audience :: Developers", + "Intended Audience :: Science/Research", + "License :: OSI Approved :: BSD License", + "Operating System :: MacOS :: MacOS X", + "Operating System :: Microsoft :: Windows", + "Operating System :: POSIX", + "Programming Language :: C++", + "Programming Language :: Python :: 3.6", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: Implementation :: CPython", + "Topic :: Multimedia :: Sound/Audio", + "Topic :: Scientific/Engineering :: Artificial Intelligence" + ], + packages=_get_packages(branch, tag), + ext_modules=setup_helpers.get_ext_modules(), + cmdclass={ + 'build_ext': setup_helpers.CMakeBuild, + 'clean': clean, + }, + install_requires=[pytorch_package_dep], + zip_safe=False, + ) + + +if __name__ == '__main__': + _main() diff --git a/test/integration_tests/conftest.py b/test/integration_tests/conftest.py index 66adda5a5d..7f95d9aa0b 100644 --- a/test/integration_tests/conftest.py +++ b/test/integration_tests/conftest.py @@ -1,11 +1,12 @@ import torch -from torchaudio_unittest.common_utils import get_asset_path +import requests import pytest class GreedyCTCDecoder(torch.nn.Module): - def __init__(self, labels): + def __init__(self, labels, blank: int = 0): super().__init__() + self.blank = blank self.labels = labels def forward(self, logits: torch.Tensor) -> str: @@ -21,9 +22,8 @@ def forward(self, logits: torch.Tensor) -> str: best_path = torch.unique_consecutive(best_path, dim=-1) hypothesis = [] for i in best_path: - char = self.labels[i] - if char not in ['', '']: - hypothesis.append(char) + if i != self.blank: + hypothesis.append(self.labels[i]) return ''.join(hypothesis) @@ -32,6 +32,24 @@ def ctc_decoder(): return GreedyCTCDecoder +_FILES = { + 'en': 'Lab41-SRI-VOiCES-src-sp0307-ch127535-sg0042.flac', + 'es': '20130207-0900-PLENARY-7-es_20130207-13_02_05_5.flac', + 'fr': '20121212-0900-PLENARY-5-fr_20121212-11_37_04_10.flac', +} + + @pytest.fixture -def 
sample_speech_16000_en(): - return get_asset_path('Lab41-SRI-VOiCES-src-sp0307-ch127535-sg0042.flac') +def sample_speech(tmp_path, lang): + if lang not in _FILES: + raise NotImplementedError(f'Unexpected lang: {lang}') + filename = _FILES[lang] + path = tmp_path.parent / filename + if not path.exists(): + url = f'https://download.pytorch.org/torchaudio/test-assets/{filename}' + print(f'downloading from {url}') + with open(path, 'wb') as file: + with requests.get(url) as resp: + resp.raise_for_status() + file.write(resp.content) + return path diff --git a/test/integration_tests/wav2vec2_pipeline_test.py b/test/integration_tests/wav2vec2_pipeline_test.py index 012f960ac4..2354da30c9 100644 --- a/test/integration_tests/wav2vec2_pipeline_test.py +++ b/test/integration_tests/wav2vec2_pipeline_test.py @@ -18,6 +18,8 @@ HUBERT_XLARGE, HUBERT_ASR_LARGE, HUBERT_ASR_XLARGE, + VOXPOPULI_ASR_BASE_10K_ES, + VOXPOPULI_ASR_BASE_10K_FR, ) import pytest @@ -40,30 +42,33 @@ def test_pretraining_models(bundle): @pytest.mark.parametrize( - "bundle,expected", + "bundle,lang,expected", [ - (WAV2VEC2_ASR_BASE_10M, 'I|HAD|THAT|CURIYOSSITY|BESID|ME|AT|THIS|MOMENT|'), - (WAV2VEC2_ASR_BASE_100H, 'I|HAD|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|'), - (WAV2VEC2_ASR_BASE_960H, 'I|HAD|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|'), - (WAV2VEC2_ASR_LARGE_10M, 'I|HAD|THAT|CURIOUSITY|BESIDE|ME|AT|THIS|MOMENT|'), - (WAV2VEC2_ASR_LARGE_100H, 'I|HAD|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|'), - (WAV2VEC2_ASR_LARGE_960H, 'I|HAD|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|'), - (WAV2VEC2_ASR_LARGE_LV60K_10M, 'I|HAD|THAT|CURIOUSSITY|BESID|ME|AT|THISS|MOMENT|'), - (WAV2VEC2_ASR_LARGE_LV60K_100H, 'I|HAVE|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|'), - (WAV2VEC2_ASR_LARGE_LV60K_960H, 'I|HAVE|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|'), - (HUBERT_ASR_LARGE, 'I|HAVE|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|'), - (HUBERT_ASR_XLARGE, 'I|HAVE|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|') + (WAV2VEC2_ASR_BASE_10M, 'en', 'I|HAD|THAT|CURIYOSSITY|BESID|ME|AT|THIS|MOMENT|'), + (WAV2VEC2_ASR_BASE_100H, 'en', 'I|HAD|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|'), + (WAV2VEC2_ASR_BASE_960H, 'en', 'I|HAD|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|'), + (WAV2VEC2_ASR_LARGE_10M, 'en', 'I|HAD|THAT|CURIOUSITY|BESIDE|ME|AT|THIS|MOMENT|'), + (WAV2VEC2_ASR_LARGE_100H, 'en', 'I|HAD|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|'), + (WAV2VEC2_ASR_LARGE_960H, 'en', 'I|HAD|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|'), + (WAV2VEC2_ASR_LARGE_LV60K_10M, 'en', 'I|HAD|THAT|CURIOUSSITY|BESID|ME|AT|THISS|MOMENT|'), + (WAV2VEC2_ASR_LARGE_LV60K_100H, 'en', 'I|HAVE|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|'), + (WAV2VEC2_ASR_LARGE_LV60K_960H, 'en', 'I|HAVE|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|'), + (HUBERT_ASR_LARGE, 'en', 'I|HAVE|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|'), + (HUBERT_ASR_XLARGE, 'en', 'I|HAVE|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|'), + (VOXPOPULI_ASR_BASE_10K_ES, 'es', "la|primera|que|es|imprescindible|pensar|a|pequeña|a|escala|para|implicar|y|complementar|así|la|actuación|global"), # noqa: E501 + (VOXPOPULI_ASR_BASE_10K_FR, 'fr', 'la|commission|va|faire|des|propositions|sur|ce|sujet|comment|mettre|en|place|cette|capacité|fiscale|et|le|conseil|européen|y|reviendra|sour|les|sujets|au|moins|de|mars'), # noqa: E501 ] ) def test_finetune_asr_model( bundle, + lang, expected, - sample_speech_16000_en, + sample_speech, ctc_decoder, ): """Smoke test of downloading weights for fine-tuning models and simple transcription""" model = bundle.get_model().eval() - waveform, sample_rate = 
torchaudio.load(sample_speech_16000_en)
+    waveform, sample_rate = torchaudio.load(sample_speech)
     emission, _ = model(waveform)
     decoder = ctc_decoder(bundle.get_labels())
     result = decoder(emission[0])
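The greedy decoder these tests rely on (see `test/integration_tests/conftest.py` above) first merges repeated frame predictions with `unique_consecutive` and only then drops the blank token, so repeats separated by a blank survive. A toy worked example with made-up labels and frame indices:

```python
import torch

labels = ('-', 'a', 'b')                         # index 0 is the CTC blank
best_path = torch.tensor([1, 1, 0, 2, 2, 0, 2])  # frame-wise argmax indices
collapsed = torch.unique_consecutive(best_path)  # tensor([1, 0, 2, 0, 2])
transcript = ''.join(labels[i] for i in collapsed if i != 0)
print(transcript)  # 'abb': repeated frames merge, but blank-separated repeats do not
```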
z1%#BMni9DpNLHkiaN?MsafQ(QS^N-*KUQUHe?|RciV|7NRJL>cL`ftXYZ+(NR<>Q~ z=}!B}B%F%CyGb$Ql+)8nH*|qbU&2ly-%7|FYEIYI_ZRG1sv5Uwg|+3kMkS(*+CS_& zX$Vz}@l;K5VzOIB;h%c8Lz`<>hL}6E7e)T92bRnrT{iA-IJurB^yvvrITSdaf6jK{ ziO;%;I4p*2ROGQH5Tt#^v{LbREmQ23loIvVFpxJbV`b*oYHa9U83UmCnR(9 zIknbL2%P&h3va|2|2D!Z=-Po~6v+^rOe4M{gN8UZQl+kM9 z6`B@BN?r>neGEK`NpaXl|HP=Y{VQb?1*IyrD-cAC_g^&_%3AO6j(CP}kN%AeuB>MT zW}06Z304hb_N|kwn2I`jk(;&2^1gH!&E-a5;@7l?yop>!F`522{XPn_@CabP_**rovQs zvWCCgEGZA68oIqckyEeL+nZaCr~V&g zj2p>(UX;3v9ilC#b1L?26?VByiBhPttGoPEe#!Q#u7Y~tzShO}ZxYJLFkU_=msBMh z^()^o*IU{eL8`X2NQ91JRb|xzg>Zo>cXDvJgIRVkl>|SN*KCxV^E+&n>!L z0vfy~k|)=f|3%=UNwq{9q6hmD3VlQ?f>4P{Qap2omsQQ!rS{4tmQGstj~Tj#qrqe< zrsm1kBVq-Gl-%aWuIj=nmS}~HapcG-(&bg(F0VGL#0)PurNZoX4NaaXL-IOjbS#G} z=)5#e@1}uCszj!>L?FUK1sR;X3DeNQAqvUCNBbK?t8ob}VMwA6Ly#&?a8sYu)>t{d z7Alz+E&VFe%O!ywuSi6GGqva}y~J0Qt+X$gIz!O%*L6OwOwu!LEKA`D5i^b;P6@Td z*@inQJ&r2AEpGowVBz^`TT_<(efK-eBSc?+j-9A5;jU2n2b?}OU#3oW=pi+=(Qrs7|I-D#qLO)^@-Jg2@rAk z!$~11c!yFHF$PgNsxV0fV>+<;s-u&_j7cS*XL-lS6#cP>9jczq?5>l}v1;}Vrny|! zu5tI%BQJkxi*+;mEN9Sze_WIWQ}y4_freuLS&i@3b}Ob7@hwdh;YaSBGibCi{%Xcm z*o0H#OA*$RlLe1W6OFFbv1#gXt5xL%k*Rz&7VX+G0Y)^orpqgAnm9kz?wjm>mAcSt$C8tt7wISD=Efip?L>OOAxqsFwvF`~h z<&-c-${oyNT1Y9B(95vY?n)7jmQ5ZATu~J%2PcFIZVVt;DxRQUHgHvn>?Xfpl}ohr zwo0jn=O*`A_vAx_l80K&-?Vn^3zq)mzO5uwhmvPU$fX>`k?lo;oTB$il8o&nHgP>lqSaYce4#CpYml{yIoOcY zdrXM+C_sc^WmC=6OYydL3^$fgd$IbDMGf*q`B4?J3m@3MEmd9*b00@3bv;oO_R0 zp#@Y)4pgrND;6@7iLG*E8jBP(YwQeri^ZEDVsmJh9_8tstwxo%w+OI}|H9L;GL30x zu-`d)P?vI)Dw5=&N!_~Ld62%6Q+~cN4zjIXlUINAGd(c|F=)n!x4p~g+*^_QE-s(2 z-H;&J8&SMw=s%U-gwU=@D||y;bg4e7&SXcm^KQh2>VBy1dE*4@yNI|%+|=>DAev}9 zK^PK3?$3yc;VZU8xvwN`v{Qk*t-30wO?y(Bf^Rc7rK&A!3QI~Pw2U$8rRZAEa`jzI zE{xRAD#H@7_0u(~qJkOf4@j#Uk1Zw|#LFzIt6i*=XKT2YeJ4PCBLcsYq0VR|OV=$dY}@m?Uo z*XL@Z=6r(}Goy=ZWEDXTpelrUkyZLxEmQb<$w~Jh=8X_7>@CHx@>MXlc%lk&$}IZa6jfazOcHuac&FH#en_(k z>Uby+;ly(?0zMS?s?%9!=_-0x%85Bk<0|?}VAP<`INiMxiJ;?zSR2mu?AjR60&_dqNtkQzsjZ8^N)`(Lgo;ZQ03PT7OaW&@vOsNXCwYmuC#n-PeL$HD%)vFUd(`*e zoHhAs-xSTQbxU-M(N50K)BKLOv%e{|%M^0YB^O`b1QVp{&V0cy8a!Ng9r*brrPm#_w$ZB6d%;IUN%|>`Vzf3$h8!7Zv%x=C@1s2{ zuV!~^y6;kuHKOfpg8Zry^+J5mYt;53%{^7;`+Ou}eqQ>?9y8CZODtstu_8)X*1qxq z7y#e~qsL}~v+e#l87Vh5XpKIY#|H@XCVsT?6?U>l?C{W2frCMAe@j_{w&W4k{&hC^9b0omh=qLA)73ASHg=KXLN`VvDvKF{vzlOc(w7CxK??@dF|b80 z6^2!%NCcN}%cp9jQ|0|~`!^HXB;hJZH;YipViA`Eln&@yBizrD*N?Q#OE#^rOvm(` zhs#CiiO!}KT9nSYDU|9jb}1E)87jiwqVNfXMsL2d!9_p5x^&etPd-9J;6l#gdn9-f!oM0S#ji(>; z((6!Oya5^mcoB7kLLN=jK;}aJxLcyKqUCJ2sL%fkSN1Ba;bef&w@g{YH{BA_)~e%ce6Z)e zGV!#z<*f-Ry`)Mc=UPoy)bw|zW+0@OiLftJQ={1?w<9aor+5+NOmEc&%`LW?SgEo- z+}3x|l^(6!Npv{eo1R>JL{$l`WP-eO4Jo>#EW}1#v%tbi{-3t|;r%uO6N*YgO$?>miUs`bi=?xoiQMd} zM?jV^mAJ3$&Zd}Owa`~5Dzu?td)3&#nepc`^LdbRGWJWIX}y_j`v_Ew--3RZITIIY zUa3@m-RgSZ?p?6hk*k^L>D7wTz3Q%@W7BU{4^XDE0wney0Qsy7;>^c$Sl$DV90LeG zz)pZ0Kfdx!?{ z90~ZBl0v$Om$4+AQY<@EQgNEHX2V{bh}C$jTU&l#R{vEy?e-_S42-e4NkdR3l9gdW ztyQF?u1^pWOlF2MSwTha;wyfldl`yf$H^NC&%L6k{h-e_JF4+1){2lr2{#irIjrtP_S~zOc2JEdVpVV#DOdgb_(A^JhPOu2H69}^V z!rn(}SIAz9K?hoRA`wKH;fS!1%_`=)d#GG;(0O73_u>s6O!C}D->fGJTADzBs&TR8 zLc+~cW!~MYS5k-%END!!afeCIxVmK8EcGTI85y89$uUJojh%)JWi)dUq7_*C(N+@{ zGZ!;3!fD0QrwFKp@@cBks-jAif-658Y4q})e@EQ3M;r+fxN)`6s82zAsL9tpDrH|^ zs;s0W!AE3{s?FhFnnI1Mb0OY-X=3I_l@f^+LZ)R?gbNdM)|r>v!krm^h;fjWZ1f`$ z32#W15yf}BTlge*KiCOum};j2r?=$V$!hdQsb&q&lzS@cQ(x&^OV8_kHuMt zAg+LTfSPjx961|+SCoR;zR%o2WeD~Vj#=@U4~Je(auTg>QYnuUz{*I9QmfKI)cFw< z>ynENmZWuC!0jeD+;#jM9(rhni&~JP=ljza2o*bIB8t$1Lk$PWf3bkr7a&2JKmd3W z(C>m)n8JiQeqh;UX$>hM40(jC{MLxZq|H1|%ko5&$HpD3`H5|~VH=7kCq&!*kmYt?AfuA9XVxNc+6*iXA@KgJy`D~MzIq@| 
z{b73`Wc_%5`XcBNm|HMv*idU9jwtXDhP#a;z@%iIkh_USFmE8lbKc4Z7+JvuyXt8^ zPt>N}4+6ICfzQXYE=W$G(a?xqb(0mT zTqZRr!*ZriNA1r;Sz%Q)`XAm6W#H7teqF2wE%Sq4F;=o;6&u}+z1dlw)i^aw6eXMZ zs)K6OSE}fo?Nz;1I#$?BCKn%JvVWpzJxMS6>n@Wc)rQt~G9 z@AUY`mk_RH&aQ~Xu}LD}gU5R2WldIYNSltt!A zk?5Uj%(n$mT^0*ZS@sQ#C5W1;M+Kxrs{s}O7+7TB5u^fg`@mZyG6OQQXmF;g7Q{s} z`agK3qp+M1st5XHPbcn`3GceP!uB0IYila4Qz(rgzqPTMozl`WVa>Bq+yjn3iBOZm z2x=Jf=^nyf5g0R`Z3zrY28$B0i3p0(-m`8Mb)sO63u|K{_?8+gvJIg@kqN>4@j8Hs z7~C$mPgxw3Vdl84zmO>XAuc+~-LFG!q5_pOI{a#VCGc>SvDhfSk?YpgfikKB--HuL z!|t+5Zz)zCGRklo6s0p1FysAa!&&Js-}tNkPvs~|+tB?1cZwC~KS$yqBML2~bvc6r6h~D0*3+AU`r@mUP^#=$Pa@zuZO=ydpzF&W>Ln%qLN*+$a@GxzGcrc7IOvI}f!6*Y(Q9&96e&ceJ<8h_DKddAvdA$T*Q=u`M5XH5C-JQ?&@c$$@*_!d z2VNp7!=rIPNxS#qu!2JM^TzltI~>yDYXa-*!CV9pz9rb!L%Nnh9E-EXGE)`5g;S^< z3Ph_)ls1{lhJnew_B=q2Uwi~$t>D3^xw!Db- zadjcA18BHJc@ZrE%y7e@{!k3l%xZ49Ki|x6tT+}u6Q{UZ=SN)|eIy7?9L?iC(!FQU z?zX@oB%u1`=^Cz${Ui5!F6ee)|ba}&Hi#CsSX+_QJ3dr;g!x3XlRgA8E|=&W$MI5 zJNQ=QC~rSGP{sFFm65$buG=grD&2EH=JPmbr`K!o#3n%|-Jb|T>fC-#nLJZJ#MH~p zwkQ$|NP?D2ej(FOg#*0SNm*MO?Gj<#LC~}5LGxdF_u>d=k%Q!M$w-*=!=Z`Xv3PN^HHXCHZNT)CSLpk%o$)MG1iV5tFkzP-{p~y?^A)Xi&QRa$?ct?vPdVBY1sH~NXMBGK<~mQ210*> zjsxLQ`pIknk;N>)!#iifokF1-I@*Grc?Qca#yt&5(1o4*L&c(j>oROH&Agd{1+9DmzeR2$Ll@8aO^-@S zmdAXP)70EQa*}>ucz`8(u$6_KdIJj=AY%hd6;l>}D)haFcpJ-pB?5;PQF$oCcjm}r zZ!e@5%|)6KS5d8ms`RUhcIP64`O0ig5w5HH*Njr-eoS!kxg2(|1)i(#gp@j|H2wC} zh+o#WIsdHC&#wP_^GSZUMMJF;&q{Nt1cihjJ}l^iB>GoYt&KlkYQN-f=L;hf@aOi< zwK=+-b4x@-IHxf#(UeSIMk0?mMQ74YTZrFZlq168D7}_sq-z@~*0^y&wcY7u4j7Ge zKjd%>1!FOHjgJfox&2H9Q2J*n3cch}=hCJR@V9rx%-~P8U}}`PV=|I&x{?+{Qf3H3 zVWwU6Zt=MP;X8o~E~x7#gy@>Yft8ukvF<2_86uLC(X&pQ+C3?vrl zJo4&T)JPC>!kEwdIh2GbQMn^K`snUu1kl+-uA=Z$gz6>F0zyta#myNJucYaXb1e&! zO0LvB(Rh{arcnHIUuAqwRvUozycq)ODxphVt;SU<8RSAnwp6zsut&cO*NvaRuwjOU zA=Oxfd*q`n|FfGWxCuHn2fwAm1oWgZt2=#TJ9RU}m)$@;d`k0}#e%l_M@~hG;)VM9 zshWw1-5YPi%Un7YLFnaC)tycyHchS3u`GQq@>T&8t7gQ=(Kl@0zCe?@Yt!Tf<(c1G z8RAy&TzT7S5GA3}R`#ZutHUffn6rrl*GWCz?9?p8>`;!|2@M(fVRTtWhKO$X3fIXAQdJvqvk8Ki0@mHz zSCpw=1X&AxwUv@YS33glnCUp0S>Byed3uS6d$vu)PqGzj%(KA^BoC^|GAfmOq!zI?4j{IH? z;YHb3nKu4HCLZp|qu@>_6&C=O^JpiDVL(x%#u)s_EFq_lk|Yeh zlRWbqJYsIJ+I-P8dL`%7kqWsAVQqDgyAcPFL5(BrTe`Oa+<@E%#xa@sy`3;iPLkYL zF{UWhAM}J1tcql_hnTrm8_Gje0fTW%$Ywn!a@38l$-CCi!%qoX_IYI?Ku+Ea1fpu; zxr{1Pcb~n2CLOc+))#%&9K3m%lxMdbtaf6~kRaM2)+)*~b{tU+MP4%1sp4IBGW+Bp zn4YO2>8|RkC(aO=btD4tQ~zmdQmZ%?>fYNeqJT6#37^@Gx)Q=si}_<=AB(KjjX!J3 zNt%3TF^<>faulu)KP@WAj2hIWgswYzSHzs{!^J0wVaQj33;Nc3Ak{Eud+@DHt=P%rBjX|nDCV;l>cj! 
zA=5BJ;!?$9T1^jY+if|%gI%gm01v9aY=0U1^p>lq1LuPuDpdeVj86(SciP0 zKcCZ(zzmXv$pU$ekzf!72anoRB`8|19JPq~4b={A`_yBXD<{K8pBCIX*#%Wyxj{XE z`;^7L0^@?XfINkNVq2MRiEkv%R4eh$ zN|(c;yy11~hEhR;Ano{K0i;SFPT+w@DxGGww+zOc;P_z`4pdbUyBHw7Y($7;(J~bj z2)gEry|gP-saT0BwnGUm9g&!#DbD0;0{np}OcocEmJZk%I2{rYPza)_z{*O$*Oz=; zpag~rVlWEsi~=lq((DL3CsW+0Ej;!N8#g%rMf@&ftu$7nDJgUI?m~z;febS?Cs+=J z=Se`%5gFohs&sy&?vN{Mjned`hpK)3YZ)8Fpm8|)cqRH&tKrSw|s_VknM+FIVjQYr{ypwaE>Usqake z#KA;c2PrRR)73hu;kk>Wnb^nd1iVNArd1PG&1^{Zo~e|PGJ!>xoAcGr{Hl)zTqt>> zB4My^EASfNDd@$yg}O3l(%S5p;U`xv=b=oDhS+$~$d=@5XX-RUj1$3y`w3zxkz0M( zM-(l=0!tblf|t2%71QFQlstq^n7jMw)F{QbIIZg&K;@5$zKjdPeEQ1WRE(ydlZHd+ zg4Ms;I;f!9ONx*VNF1}s=4PR26|LUn$nMNbBV{>gaVm9EEAdzXuvLplY&0+ytj6T1-2g7S zB`J}&zPnzkv~z8e!Xa-7JaM;JsG(ONpsxxip&^#pT9J!DOkEzr!8 z83+?4_vZZK+xMj~F+ii*Av71A+A^BgEAk~NAzi%JeKT%1Jz6EN_#)_Lx6gBP9SyT* z1q4k93o9^HO~3-x6UvO(MT*Nq{q$}ZMon7!VhG1BkZGR`^hM*tbzH*Vp5SVKpD=|l zeXoj}u_6Md?HGwf>f4&OO%J8mJBWW$rs=%0+kmMG$dK?NCY4Q*LI<(C8Y5Y#^G6*l#NiG)Z^%Do@ zcjCbF+x%UVUz@K#HWdyaUi#wKF)T}AS(-t`wV>8j&THoNEVPKTR!Wn^be^}f$^dTF z;w|HKT}qGGKcvr6cKaEH!gabm)X{<60B#_J?2HpFs+PGl^^aJtl;r`v1k)lvkMixg zhY($Grp2uI_|b^hg-&lwCXlu%4pMH^&bqVmB5cZ;`Kxz?SoZx1g)M7~A67{Tt~X8` zDdAf~T*eA8LeNl}g<18(4TC0_NdCruODz3kYtvt{@RO@vj;@)^B#|QaR{Vm{P-;PV z@<6}-lxpsL^s_zd^8#Rm-)EC*gM8fHZ;CI7=15w1+~;OPdrK0&EyD;<_%pJaPk5j% z9mFi?OiCD37SRq{OBuvD_iTG{VngvAW}|!=`4Pf5x+$LLn(+qpYgWZjt42xmoXb>9 zdQtDcTbNXL6}m091F+4TyE@T6Tf2d__T_Jf`+Q4c=I&_9VUNb0q!crH(7eI|P*nyT zx3~hDXh`ZFQwSNWHw398qilORwxYcb-PSI5r{Oo(%qWY z)YzZjU()RI-R|pOHZe&5$c1aF{imP9SBjlN`W=6VPus#mw3EgK)I$j(Kw+N(Jd9kF z^n#o6yko@SzCqM6#CZ(6Vuc7vgz;A^SOi&!!L4lC$JP2+$3*qkN$jxFhBWG#5)w&j z;8|{CkLS9I>0vRfc%{b!|B)XyeZ^CEy}!D5OiAuIRF^8+%Yh_aU{ z;~YQ@G4go^D+nH=xaCWvaM5;9l%_0IDC1aII*E1V{)dZXb1%Zhsc+fWC@cB5gf*4c zzT_QWIPPoi=!hGN@tbvYDW*L7u%^_6m-5TqV|=Sw6(DsQzM507OHesoM^{O)`lUoM z+2eiZ)TQzX4rM$=DJVv%BpBZ|*n_0!7zVNoAd@DEO8yYi9h=6i(w{e0 zVOyNk^Mj#a$}Vh}kz=l;GB}DnRZc`-3Inu8r%v=viDFECnkQlUai1b6!3cp^ zBmyXDG{k{|;4;TTNNkbLCg6u_>PUn&K&Zp#e0D8?xscZsy`MDnxt;U2mX?-WrRh#+ zj-M5@j+pX#UEQ@b&%>5W`_Gi-u;~bl4GYYb>L7c}i zmS2Psd?5-m0BVc@#C;2apdbc-ryZ>$_2zUk3DngntRhLEB>)p}nH(4`MwCwnMG0C9 zER3?4`wQ~dy(<|(F1ryQ`=0&Uep%B}9#G%aU9~HFGVk}}lUs^=0Ze95iM}Cey>6g2 z}1Dv5%OfoU&Ae8o!98@w(kwm4DLdD2*8?OVP;&TXLSWUt;kqd#V24X5m zsse=25-$PrmySflNVJ@);c%8e4k?tkC=u8@;8*}uK#>kvUN5-BXscrR=NM zF)Z+g{Tnm9w&jiQlKcB}5?f>+8eK03t6~s4)gOqBbNa};+52swsUkSxhXX=KuOe-t zUl$3tJFLN6HFG6*(w2Xr)SnoTKcpW80#Y7(03;gVU);W!{GotSzA zjdhk+kaL$Z6Z8|NO_@CJQe{kE1wk9*v@`IxQrDQuTrsx1FBDa&N~Zmt6duT7SM(ye2__s_mi)(rmQD(b^ zMKfmJ1sQMz74DK@Jun_LP+2?r9?I`n*WxAJQlm1(T5BL%X;hI>?xXK)-z3h>;@wHA zGf5OB?2NB3xhMC6F-ep%sjyO&NgdYRlEMq6(>mlTvasKUBT=$Rz*cW)fr-r`fx_IN z$q8_^pU#DW1`r38>gh-S8k&=&24gWrdl3NIA;{`#l7xg$xqd4d?+h->F(Pu{24xdD zvgkr@r+-#KtvLhVTNX~~y8-FD5|Y}BwSvsqB&S511J+~CdrXZJ=cCNyfL2kh4kdd< zn2M3G=oVJH!UV8}it$v5UTp`R#p-EaV`sLt*(paf+(*?^s`e&ll3CQsuy{|#!k7C; zB@JFv8trhvcjyv6NSbh{`7~8SDg#r+Sd<7|kugokMa=ff+IO5hYwyE=2K*;TR=WP$DSs!5YVN9V_asqkeq4&!KSGDkm& zSHVCp2&n=P1CTkSYM}xg5PHh~S2NEN_GZM(m(5asaF{H|U`c}Xgd397(XQK&A@G{R zs9nbj&{3I5DY4&_n4DvsWNoxR8?w&Q)^0`4D{oxaw9zN9P8{qDQ7ovOb62E%_k!Bg zT4@z8G$&d+Z)u9jcuNg>`(TZS^i-`C)u+&7{ z4wZ7h^DY&AuZ;uMjc;VJ7l?|%$B%h^Tgg_nB?5%t!DbL!S3w?qqh3tVcrErKg5qByAZhSS*4`XYpGC-=c!9PC&s1Luc8@*PR_F*PHJ`x`| z4Cms34sNG})LPz`SXZ$dfJ00!(2=1hYrk0AQGU@VPl5^OU>14@8-Qw9jSzf~cu zk9N#VD6Nn(%@TzfRT-};T~y|@4aN8ZbjU{QwRLK>x5zOTZ_`SQa!DqKrgleN5bw8L z-_=aR79+QAx+oZ5mcupVK*R&aC-P#a-Xged(p5l}6_kZ7ZXHSzV9DBA_BCl;MQR-W zK`3-Tat}jSjj^{zSVw>my9F69RkDKuT5paB$>?~OL)uDJd)OB)GBJfp8)fbhxwU?CSQ=%jmrE4e z=g9cCR8kE7QWv^-p@QULCK|z3(c?R!AoCE2{t&d}eLN{p`W%y6LN5f8&3CWfnHNUa 
zl2BX7;ruqy(@4mFcWMFLscvK%?cyBhJCi-zjo#& zHBrqQ3CG*lzagRmD0YucA7*8p>ctFC1)AdlWz*y6`U6;~soPBctJKEk;X?DKnh2Dn zHG~&A9lT}oTDFl#NaTjdHUAY~BW@^;!}cX2Q0FZvHrOVbX`}iDD96Ew?AX;BVY;@Z zjq|u!uowYgp?V3zZ}AC`4Vsis#94-B!m=)-MS7;~mC~&(%E@rnCv!!=+&NMSVr*mf zla?*hU*-4JAqmpyC?85mv%$w%LbRd*g!ZG5<9m)LpHmV_WuX)oFQtwolzm{WnqV%9)7zOM4|E9H4l@yZ~KM805Gz_0H}G zJG6VBMOCBTh4lqtpJo;~;w$Fu9oi5sPZ0^)H<6solOrJJg+Xtwk8fsmBm5!biR^Tq#|Ci4z zIawjKT7?7LunIlO(f&v#5g8udBJ6blr2V-LnODxBJ(vx|4KxP_13d0Fu_zMy8k8drH%(^>S%?&ps1sx3p&>4@a#WSx30;XQ!z_}Gy-Dyy zO-bQy++6CiX)X{DfblIRlVc}q&g&(YxSX&>OmZjSeo9^+Ny|8gVjlbCxx6}6bm3DlX3 z+_D#AtnmEM(Ovc4(WEp&8{VBQM(^h(_bd;a3Ur2zkO*+$K?A^pt2r zc}NeS0fGtxqnfpmg@jSv;N!>uAKb~I@;C#Xs=&+-78iuWf`ea^JezY>7VhPZ*{cOV z%%ljB+trg2Ih;`?BDwct@mkX@v9GpHY&YET^d?lB%(c@s5a?6}-*Ke^FTz*m-QkX{ zY^d@WWNa6vD(ZKY;VwVLxSH{!bl<3`+#_gj!2SPK0VuOv5Sv@Kl83A! zdZ&$0QhL0$FI48yX+8RMD-w(q5}g&SHjo>z97!0*a9%tK7lcGyTq%@Gwoq0n7$}Bv z6zp4fYv~t_PBic8v&70Xriea8PzIQXap;roJ zAvu94VK$Q}q*b)3Xv)|_B)nqLM|3w|Uh!UFIOhIuNLS=nB53X|m0yAGuKF6zaF?4G-b-$%D9Z zAPRYk1V#}3vYKW#N1}z&RwA}&l!_sep$it~Dm>1^MUu1un%?KeSgAyeg{mEOW;0=x zaS58;My;;>rxQBfcC*x%(K#j*x9B+Th{`Uk+!QM&dLzubwTzU^QzsFrv(rL|Ok8RR z)@)+I88sY?LbA84l}L@6g!+>LWrTXzbw4U|X=ybodydZ*c+j|?2CtNL#5g+dFuEqTN0oHTqR)eKJT%u`$jEF8n25g*#Qy zZZ*^#DP9HuMm>1^dM=}q4-Uwr8r@mO^UyPPBl%(Y*%d2WmDcnoMr+ujt+UIZN`-ym zWOjKhc~cfWs3pI#g<(EBGb#B1DC9}83Ph^tvCw(X<9Zqt%dYa|yj#Z8X;CHTGEqNV zLTVD1)zMBfKXQ`@-izEYIX(I5N*)(E1NaqE-(%Jl|CnEI?c}O0)P> zp|*B#Dw9TQdSK9oqWnaibYrWrjmdE{6#t;tSUBr?sTxxmBEtI@r_c0u(EjCdUi{SqGK^NIZ&I3WPhQSiw^unU7;TfGPWh0b zO^Wq!poMuIhHMp;w-5j_ zn!5ZVzUNX32Tsy>*{#*@Io-}p@j!J<-`Z**aR#x1gsJJMI_|21311O@gcSLm>;u>Z z1&8vA9yFLouj)(h6Yfr`-a*xA3X3MZ5S2rxmz3ex*njc=oA}Y=OY<%)$Wcq5oW~KIb%Mdb8Ct$2(q6pk*3`LkwO8p!)5ue;tKWy$pL zfjRgL2tpF>t$rZU?aXvt->IF7_ek!68YDD#MW3M=sC&8m)H1m6pA|VfgxU7Ji6HRZ zK7ds*va*|7R$VbZ<{f4z^sscn)b3>rJPu!JR>~v^JLEouf)|d{QNGaVFHT(vpk|Px zeqBD{EBN*3sk6x%CH<81doU=X3Z9@Omk4FuS{B68DoC`nA3*5~OhU-)h#z^dz%a7(7x*8_Vk%DlzJD0>soQT)WEa+*r}9W?s;bIBy! 
z4c8v*Lpw8Wi72P~l z#`$~?Eus4aCE>QjL|!vn&dZu78++K{Y_#4C6cl0aJXO`cs6;aLUuT&`(Kwd#=E?F!AN-9iW)Sa#YE|UfTbY|60{{P|600MQ(@d6w1{L{Q6%n7u|Aw{!7{BjjbV;WuwCC2nbtU0AaY$ zBb?X<$;-B%_@k5dklU%(LI8l*n(iY3?aE!x_F|x0MlkUMD%pB=BNdO%QlYkecN)kTiBFy_K5(J3&;nbEB%250$VYi?E~%O&k2XT{51mnp0@Z+{v|5t3k>9OpL2jUjk^YWbc&DK7^!; z=o1GEVs4cFMY#%=2Dvr1WvrlLeCGpyA#C5B_g8hKI!pYX-l2_Gu zOh#|{O6)#GMZL;eyHcG1(?65&CyEv9p7@%fXoH&_3`i`j)H)1+E@>a`p;iR1z{Nsv zRD>6Nq0(0QrpaNCN|T=TjFhl8nv@stn7$PGlB^iK@`~`PY>}5rZ|Y7BgVaz(8l}ld z1-{h9us?hLn)&0){B}I_&{>`E>O3PQl<9hOr!N&OTu@AHMlBq|7Q2l+>g3I2W zBstotP0K#}wJ~{po4&K5=b5(bgdxgu7Uz57KJD+6uaq+SXN~5Xxm>EEPBIjXD?UR0vsUG;$e~M71cD_qQrmWbr8D&Awbnw;St~%MiwvE)st3tI_=Dcs zq$(YjCRVpstD#90jF0pc9&|4wZ7G#d+#E54v>bcmbPm1)mVsKY@&`(Kq53fyj1fAF zS^H^#8%d2(rT5+1aO|ZE=>~r}C;wGL$ zs1Qv$%i6aXXn=pBuOp5UMe ztHY%LC=n37?>G)<*24&#p)$)4WLcUB3CfZ1=J}LD`W5aDRV(O1Se5`;e&V5v8B32= z#-(LftDp0$GnvE4J`91&Yt_pFR>CUD$pINKA$V~M=|;H(3XZ(zL~`GHTzmbG3}4S9 z^4WK}!mQ}B?jp9#>gz*Vb(kPTvznAU55Nrh7!l4Vr93?U=_7>c3RAOwgG>~o0y|+# zz!C(G@y9k70Xo4zrcp_Ry=PU$Ljl++9_+3!kt(vDVPNzKlCeVn=qf3?AYl{wzI)G5 zL$6CpP*IO(cD6AKNns*YB!ux70asn(yh5%5*H1<{WDEL*o#fhnitfG7`Fe1cgsEo` zH67q6wXn@K#?+e~^t9eswA(=SaT4DDv)NrJ8Jdkez}4Xe7lamOz2 ze0TO#V5A0XDxMM0udz}v;^FT-G^fZ=Ptf{G z+Poq(ukx&=a4RS>tK$z*7HgfxL$}^ud{y~D^7LZtO6?4rnIo2od7{?3fK5Hdoj?hx ztymz5UtBYE{Q;u_K9DAgEpv^9&y%tyz-;uGP|$~#;qpsw9~^0Go6p$F9)&EnH$(>a za7zWY7h(4{1dP2D!qS^GBylEaSE5FiNJ_Co#Ty%E^d;l&HmuyInx1hot0EAMC*HET zNZ)d3kJ}D_qdE6JfVrimo=4TXp>Xk50JZtv^+mB08zNK|CX&S|7GhcEDnLQ20tN{w z-K8_hb@O6L4&dB@*j5^$hoK(^31;?k|J>s=Zr3h%RVjtAihOtLg+q`*tqhqnC`wa1 zn39Wd%arFdmX-&ISBc@XWa$*OSGv2Iv4@08Od_H|>zyXZ=qja?+XGZupxcnU7zKwjU`*)a)T;~BHenK)u zd$|jt;DE=O3I!ld<r78<^O8JS*wzW|XLAO)|{&8jN>B&V^?l;D0PzmDg zx8iVDJJ5uRWN{4e^ES(;grSYtVpBd?@gjoc!d0vuN{4SlN=B+gAYg?!c?O{0XEd>D zrRS|kD9=-r(s7nOIa$Dn$>n(1K)2)A zA5$efq|$}~i##HkSkVy-Ft-jz>j$MHl6TSFMvTIl$|Y;*9^1$IG&P|UcNCb~81>ip_iH%FBEY3?CzA>i7{N~6Mp#~QQcZ~2M}FER$!bhcmXiox=dIn`t$9@~=^ z4wIplffD2~>n3G%F`&qVuM@5!)(QGU&BCs0KBlegB-Ex^4jST+Up><;l-|0P-qI|E z0y~SYa~6%Fyqsw&mkwGi%pM|?@Hxu@voq1;M^Kq!R)_)01BM@@g|`5RC2m9Ls` zwvtbEfP^m@&6e1XJ_Ui^?<~$%;iOCrP?0>?l?;{TZuP14QJ6<15FQWmV+was{8f(6 zy5=zd0*3FAMVH$@8C%OJ^M^w`09FAC4|x&_fk)oVnUJHt4LZOcb0%Y4*G$f!;|s!|g-ItJz8dRfbRMMfqx|bqiX`ba zY8-~!%}QVhuz@^Wj#5xV8bv|6BZRY{qP?zvhSf$;K14|*z)1PKnyU6sS2L4hfz~fB zD8k7#i&Tlr9MW2drdaBLy@0X;?l&VM_0vjg2(HtNiKuFd>B~Y1H3xU3Cb7)Dpp|(f zulqG+5#}Xey7!XBRm~?!5wW!t(@d18j+4Yv5KR4lPf%P_4J9t3b$+G?$tkwW}? 
zj@fEr7Mzc;^&O``k-NrZ_nMa(?npGBM&Q?M6M8(R)$w$cbnot`bQ6leGxRkN;}YCi zJfL?&;fOLdQ z@seN~O2JNkykLTe3H&VtRW?#IocYF1X~$le<|#>NStNN@OEE07k#NYH0GQ@}Hx;X4 z$aV_3;MZE1-uu^cFU_j75ed8Y@{5H$2#8COq5p_=AG(s-5x`=B08!q^mdagb`&b3} z60dDBkapBv!}2BTX~Y`AU_&ggIM0x|K!*od7NA3#OT{veUZo+;(ur!))M*^Y z%DRW#V-u|saPcBo75{UVH3Aeg4YUUfuZZOyqCn;lYNmqP*@gf3#Rvs-QVdiJmLlG`VWnhR=Eq0)QvqHU*4qIZy&^Mo}AD5@eGiOIjdW( zS$@Q@QX=JLE>;Kb1l$>saZk>Kwc7f^D+A&y1g-cWGQUx%KODj~9K}NCHQlPkS8C_< z;Pc=zN85aoKP81T6TvVIaEm|6whyTYGKRn>4TiD?L8#Eqt)B*xZxPS+@eS*kR5zS{ zo}{V3PW&|VsdynoaGE7k)yrU`dXmWNEv-DrOvXMe97QsknUHTZIzp%JR%CA3Vl{wu zT^k1h3Bhcv@DfmTj!D!CW%s;x1bTqR3My^SY_yiaer~Z(SXWs|q~5-pNGxTy)A6(g zlGrHKcKGnFB+&#E#-nm3C>ju3fh-T8-Zvl(EL}| z7eyP{E{yT2LaFuMq@~0?M0pCtYAlN)L0#Kg`l^43&*WTjl`G?JD6&awRFlyB{$CLA z&cjFb)%mt@!f5UYz$3ENw8@Z60H~WoYryZ_u3k(Gh=M3I8c(|o(;1~yBE@EDH8Zm` zet_HDK^$mEGm!64np=g-8Ob9C?3N}Orig|3tOkh1HH-i9ZbO>X;Z|G}-tUI0kJv7- zsdkZJTWTyqP43-96e=!wnkmb&grj~^E;o?6iNJ!f7+?DOC}VmjVM9;MIU5mhK-j_9 z4u}P80kM=hhd9kqflpA#Ogw#B6i~DR8hUIk3$RLEwUsq1r7d*91g!B+5M^EDbnt0h z+uCC)D!FOe($VQBBO%hg>OGzXSxWUuVxln)XOt*iXEf~0M18&)^Z|zn+dd!jJ`!;u zH*I(?(eMFf04nqJ(0rZldvLSJtVJSB1?(yzs4Me%R%yvte$9d3c-mXBAf2)jzK}&q zu469@Of6n5rX(oMW2iPqyr|UG4r*1WDn#^GrQ$8l@jz+)rUS8ZOv#gTT3rH!wkpPz zSd%eRNSB3y?}=_xify%t+vavo2%o5X0)ONsOp1EnGmh$OpKZPw1WA>>>q{$C^U@H! zYpt0Xnf)rJ7eCex9?(N1W&plL2$%*$I?{A6nQZY&wA_T?MAEt?fNtuMF$pT=L`i`5 z4Gj)O;k1M)^)eEIF1z) zL?aA0--kXfs``su_Bph-*`LysIbY@S|Ht!Vxo?(3(`9tD@97TMkg#;@jF9(3cA~Y+ zimtk+DZZ+9D)Y%e^H4BW{RwIE1ad`UF$chNqk8}~G5R)#9qpku(osOn#L_Us)@H`x ze1|^@6qJfd7_z0z)}fIdr;-pWg%++d6+YkUldVv^!v=l+*uU1_bMU2iXX~`m0+1fl7ln`NXe%q&2!VxYRJ=F_0(#FVG{(yeR>O zs{!$cSOrc_CLAn-;Mc3i?lURN8VyRt}zPT*(szy~>n6A}g#49YzPK_lC0bWT`doJvdPPTz|&9Y@q z_J%QW=k>n3^Ga-jF?@p4MpOOeL^f$!XyyIuE8ysH{ar&V`4mGg7)QRyjc>Giu11n) zYPc5Q94I`ojjJHYqGNB-l9w`kWF`}VuMasLK5|7Y#hL9~D=0!s`mGOUD}`?9!&~ve z_jkDBM?=kfe8Mh^-#o_6zK*FMyz5n4B6oF{m~7P4G#nKA*@8CSrwl=fRKBThxe z{t0qYp?ZCX9*2r9j8a_pF_@znEy7M{2@WVLx?5uLq?6-7k$;Q|Io~67>$2WaRo1C` zF&?G2wpD!VgLY)b-|Wyv!TXE+9GF+oSX72KcH5co1&^;PL3usF&TBo40#S{pyVMM` zXx;`IaBPn^W^u){?&@hQy z$#pzsK^Dlz9CD9$eV28aJ~)X=LW!@0pkN@Octk|H2j08{O5_{nJp|E1=6n4%)^v>b zqG_Z5+c~t->@9)hCl+cwT8%L@a;?w^Vnr-HP#!wCX3XG2oU%S7OlW=aH{?KG;8fJf zFqvZ+5z2wtVj)LWCDM}KdDX6dkb%$<7a}|^=W8$|<(WiAg;;i>zFa6nV*uEyS#`SB1(knB}56T?xZ3j)0jPce`CC0{1GSjOX}bXo>&biD=Ww z2*R1E{(qMic1FrYzH9^?61wYCs-t$~|LRq=71?7UdbaZ|Ty2V-*c~&i){f%33^jlF z=({Iw0Q7)*H&G#IsEGY|q3sL{l)X=~HaNvcE5G?ImXeHF<$ExiP4)v=*lMA3XYmQ7 zCb{_0f%)+k&akhXfeK4l|B_s{ya433wn!pADb&zrfpf9-%~>kpRaTM!Pg*0p_=QA_ zcCPAVLrak#otZsQ-y35rsV(?WmqdOLHnNzZv;NEt$`xwWIc{xpRJB1U@1BO2=^W2Q zl|&9nOq96b>yCdp!h~4FhFQkF=i#mB-2POFK0Jt{GN{r40F1h6Ivy8Z83_2A>Uju& zL31{ooV1W3EVjm{)O9PR(2cyh%N&wG3!uHF!(w=W4X;N~MQ-s29mO9vt(SHd0FXFmPGkdA_V2Hklpt>T_i4_h?eh0w{z<)E~JPM>WIQEw%1lIKA z!Vtr&`fv}XuD(iEVc7M)8QW2T;t z_&HnTOoCp=L^z9Ma&6+Bw@|`F3?_zYJuR{Tj=II}LyRCH>;>v2=aDETdPTRp&h29^ zGrC(YXyD$Fo~fgAKvYVKZhQz!B)AdH!{Rw{B?__Q-)&|E7lK zg*g@qDZntiGWb(#XM^|N6u$M%B`@DSZS>iZF*EgNQ||4X0;)budqP%mDiuK8mkdBhOR+5gwuc4KEkB8RAD()Rg^&Y7Dvb0QEFw!N0Pxkl| zt3Os{PQ3>Qb+V&0rO-c;0au6(XfQt9W78G4g>t61i8>LQF{m>vCYaANNCP8$i3BeO zeXa{&ZhtiLSP@wb{Ycjv@8V!f~88&AnXnLszWY+O$Iw`DgD@ zXv{waLmn|@?|FZfA1UWbqn7n#E^;b%#{Z%xY0ECOc_1qplq^TCn;n5pU+O9(H*o(g zMZz%8PdH_7mTk7u6n1rZPs;^Ud6pL*W|H&EWEBAH8{%WyD5m-7*+SQ43DnxNLNBqI zC7syS3D1rEl{rK8bF$lU!SA6PR^M{OGBdb+QywH|U;%X}-AC|vR0so!2GOEmy=#2& zd-+WL^>=E3j<)%>IsA+{c(lfgEI~GwN>8EZ&42r&j*lLB8Xr5h`9Z$){K&BVP7}Wt z;O~eEmX}Q={zgXNYK<%;!g^!!<2`x@qN z8kA$6YkF|vSl6KYyQoI-9ad%eS_r0)BXC6>Qgmcn`{|#(vU3%m`55;uW%=QxCu)br z8f%3Q6aaL903VX@09mk-9iSY*3&%l2X8DxtOTBxgG^HvKRjd&r4yO9fxH1O5p(VMc 
z#l?~--rzMWI2nI;e}+I;sBPqXrYj7TO2_QuSDm|f!Vbu(5%(J9S#P#PB+eVn_{!&d zryQ)BnawBFYQ%2eY|mX~L^#*oJE`XJ7L0ziSN2pJs7@??^s z)y22U_r*~WWQ0e{3e;Ahm1K6UD{`X+3LrArlQB_dg`1)44jJd$CG9hc2o%A{x|oY` znVasw#7A?_(kjE(G%lXa15y41XySVN9;%$+c`T;P0iso0LWUn!$-FNM>;{lZws$v>PU<+8ok+c?VZt0+mQuXPF~rHbnQ zIc_OT%`{jC0%4X$jp3l@YgAd_j<{vEb^fslkWU%~B`u~McYYXjaE z#GTqBS17M;cQymBBAoWgLoGPi1Zq;jXcA$jtQNE%n!G_jeBBTbu)hAr1XE=8neQ9% z8D3##SVinmfucQ!yLRm7x{Jneq7hO6l8G>s?NY5fm9v zdpMGdQ|*I-I)!$Vt6y|%FQ8T3wqeLp`~pc`3@#om`Y0`-Jn{&U<|7PT1T^st=+dN8g3m+am!HZ_+)-CqCW2yjSM|JXpB3cU36ZFm zRBgDk{Mk;Nx>qvpb(cSe5t7#;@k=+%b_*@P|BI_^Vy%DFs^Ou;rUUr{t{J9?o-8~v z9TrrY?~@@Df%#q9E?$u_W|N>HUag=yFC;V}Z-hMV?7gMxR9;nb1eG>+;#}JA3(Sc7 zchDq-oUkxL(#8oB2AOlKtk~sBSMF-!2ctJW+_;3K5}?e2RKMZm&0`Lvoa9)% zKc)g4$vJX3beh3>E2W^Uop&xwvg(;6RtvWMT@e?0*N#x+3AEDe2SQMz|50x;hDZOe z@^g5m9>6^@3mx3xpz8r(Myw3-gtjXY$Y|iTSCDae3$7p%TgIGc&D7b(gdB>_efa$~ zWS(i3YJRH#+3bLwyC&nA$duT2uB2iW<-{ngT5EA66AtAV{YO`psw^ad)VOvlHFHiK zWRNEKQ%-bEyCK!7eR`_dE}N@Fj?25D%cO6LV#PAv_!2LDo3pbsj{~L9w{TrQ6-QeY zEwYcX*J?#a*hG|bVX&~5?{*(#yjG$bB6=Vx|KGRNVzy%Kdx}f%7XV?|-;)p%$ z9$H1G__On83!@8DS3zb4n%4pjJlndGe0)H3v=4_zI zObCvoTc$N6)T=13T6mDfj4aV&gxq5a^-vAT#2JZBNX8b0XTv%51vKyKw7`iNxX(gp z``y}N#+!z$)VG8t$1?42WG2bh;$Bw*(zaSQ2+zf(-)F0VU|A^TBup-tX5@Wps$;r< zc`VrmZ?(I=UpA7G(%1+0+e!xFo}$$5xR&2f*kc0^p9^E01#*i83<9?S)f6Zky<=7H zfS6QuYD;S?Ycoid)110L4VZQ=1YJgeej}>^Y)OlRa}3Lu>S-_yjs|lVDtjl4=r5Gq zD?#_U&U37kQOMZ}E?RuTJwfcVe*a(A)XFh$>|EYWsaW2od_(3f2hk{7Tt*EV!^zfC zV4b!Y1^+K}IA>O`X*(77aq-YAGugVT&&)2a`Qq*k(fh(^Q&*+%lSUvFjHiqj0)mkb zN5m-=l6fI!5or5Gun1(&R=`o9hymi%)3Je}ga&@Et6&tIO#L>o6_Sx&ms26xdcvoL zkp&A1Mbrz3)i~U$xe&Ox=5b55i_m@M;xu1QRB4{*cj-esHlbqROje6RwG&u~ScQ3) z=cyGDB{U`1#|T8QI-)=#y0^wO=!2Q9)gb?2I`m>DO z;Q#o=2nKyn^852P_z?Vi`;Pbw_Z#-;^l!B?hGc@Ell`Zo1h__c*t0_y8@re^QsjP2 zh14&jL$wxk-o^P(VAOSpMzR;%KXH%!2GQ^Fno84@Ehg4Znq4s2qNpV#6ir24kX1$y z2$e-baj_*Pe%X}CY1}?m@$@<=4#ME@f#N-C@nv1(MyRAk0u1>3Z|SP=Zs&D-!t3ld z&#?VbmIF?RbU-YVs$mXj+`L5d+_zRW6^R+!-*H3flS@22>a{9Db}IBAFkOnJy%Ldg zhWd~~O_42Rv6r!K1n}uJAF#9ET{dlEG#TIjX)=D(rpcdRA)JA6FhnVUS?ZvbrHzxd z5pUvgtCDjgLu_kYZB9jY)AEl>!7EZr6*@op zPF)pS#(E%_d!&-MC_Yn*FoJ#OWs@cu7fg8S(8B5gUs6VHHxuKgnvEB|aitQXfyMK5 z0$2*9&lvKO&li=_66|W?PZ^{>5Puo7d#^PB&3$jrr4H7;5Tldtbp*4X9V zTx%p0L>FY-PEG{^iYC<>m+x`+W_Mv+Ss@xNP`6yx%i}q*LXYdj zf@8pdE9VU*9S8igxdPMWRv;{3q`+1h`R>^<2>qlRiVB>PfJZ4kSC&Wl8W(Hj_U`ip zCr92P6cvjKPD->QzBuuRK!TW8`{XCBM+==jzZ(vp9gDQYQj%V-i3rx`}h*;oiBZNtO80zr488l>Zc!UWgpdt8TK zny@v=WbUj=T82cDL6&A&!iwJ(lhWpR!aB{X&c)4c06}Ax|5b~4>LzeawmWzKDeIgQ~mD%`A5D6D{ zPb3W3g+g;~ zj=>pxE-tup)j}no8!tc{ZHPerYv=cZ1L9W8!tdEX@?ys1)LL7p?`gWFL-Y)u%Rfyj zu{6}bsj!BHgVb+FeiT5M!%zK~Ysh!DY0eYu4@LP?`Rsz5>N6dDj+_e`g2!PGc;Oa+W6>Mkqq~&M$!3U*=5>J|Ke;5YNo*r8DTyCV8MDg zr9lFB8+86HU`Z7dTkn+Pa_#V%;7YSoupzuA^cvOLqsdRwtLi(9N{ov*Ksl)MXQAz) zeLG*7P<%<|w#eYnBx=1&IuhHIGD74K;5Q5ZC=xNMAYk+OG$6J3dw1v1Y2Gm3t9p~;@b#V^<_(G zSG>MNZ%uEW|Sgt)goFpnDb2NU#?F8$3G4$%QZw31DnRvHTi>--&x+Tys(iL5D<4<>g1UxRep6rC8ODpm z#)wA^iWS-dhOaReDVArc6#fbWg=doQEcQJhW4|-*P;`IAEBR^&?Hcq4LxCAF0>p)f zp+sn=oFoP(tb~fxZvRFHO6z5cmmK@aRgpLdLxk8NRrGwRLBAEBL(ssq3Z#QrfoByJ zmrl|t!LV(B5N+)NUZ)S+p9w%PN;n1#kQlRLL`jg03ON#^}$kJR@ z>Ct-!`d@($F9t8}c@ItposS#PmoAtcof@rv_GYItiY$M}_VC*VS+ounpG*mh7I%ty2W0(KX%s9c+l}!XmTBfQuUm9u7em34R6*@*Mz1FCZcE6Dajynv8Ks=qTXhmwCZq3Z(vrtop0$4<+w$r15Le^=virsreb>SwtD1(wIv(12 zQHiLqrzvMTlV{0JHMR|Q<(_kRk|Se&o{`*q@|7w)RQ3|2n(MHdHr3lm1y|g( zpp-WTXDK3UzIX2Omax#?WGoL&4VEuDfrSY?14f;w5OBgFg}}Z@_FEOyDac7C4WAHR za|JX2q~MRnycCp}VS2qV<+mXE(nw<#w9y(d!GWAX7@s_X!IVG|5uSv6KHtf{+1FTcrnrbZ9&HC}$1CfQ~()-r# 
zFY!!Jn4X#((4u>5;pjyoY=Br0V`oK}#AiQ2&d5SN^8uN>GQiw?tmlP0Fu3CaduXfb1#aXK{o5rztrC{(H}Dp7if{U6A9zQe7Xibww!u5qkp>l_)f=;u35Sun8K z_LA9GJRVfK?6S(@&rw>jA|^4m=Ud@hOf$BznH^aZ zuhrtvWp1*ndwo`4y9V5B`QDTJp;wzK()snmyEDaI7iilOn|3k`S8LYLJtQ%#_>$lv ze)FGVrlUe+NW_{;#-oGKN>S80A4PC+Jr$sX_sSIUSp5vqm0;-AVHJoJs zdtY#akr$AkCs|x6X_pFfZqU%$$BL7^)$3C(Kku%$s$#hfUR|Xet zYpK7VWq(Mc-aj&$QH3`B7FyVDc3qNj+af`z@BWQu zP4cp_*wk7qxumOmv@|0jg}P->q^#{j^j!^gdw_oj3Z9c;fmklaeJB5@I(x}C7D!B) z#Yvs5l(mOPgb~&hCl^2Fd2W>^a<@u6GhryA4rPvKCe-qv%FZ>RT5TgZO_kOzU+!b` zaJ?!PpA7uB6!2vI1}XuS0FY{vVkVP!z=VH=A zuPo4w{rA~i4qCV>rq!kYLANVvS+dh7A8j0L0ZD=j-NEz3@~&($h>6Y?Szau-fC4zO zupg9cL_+5cTb~5nRUhU}_XkflqiG51nS_~9bpE;~Bklh^{jMvGP1WY&qkAvL+HOn* z+78+H764PGpbFL$U|3VKSD=J8_m$%poScWrN<$f@4q+>jEuD`IjnzW5a9HZZl(oLP z6k<}&kb{l!uO_u8B01k1TeMp7U8W^?AadI!a-VH2h1QGopUDJHVW*9tGcTw=)p{gB zQIWf=C}KQynK03)vkMjMYED<`i4wr%uhl<*yp)D0`js^h5>vb^WHLC@w~mxPT7gR9 z=I_|br)h-G&DyU@PK6NlZ)W3n95~XF>Hh^ARt@nYF9$dxqrs{mL$`Yw3_`-X+R_kA zXlQ6P$f__2K(b6DP`!zmRJTaxJtmXOr}O#cv3OWs1)d_W3+!`xQ%@fJr{vCAX(8d8 z+>ao56i^|3-s_YKnK`>0K}%Iq16oW^&w?ay?y+8{B;dQAQ47SEcmXG;?NDWXtG07t z4x2>t`j%shq1RG>mg|U08@=7-0q=pzmoOMz0kLsjxXB zhE;5?N`e4TK(D_7{4y>^(9pekQMjbWw;XwaY=|ye!CQAVYX!2`J@!DSP>CK=+?h_b z!i*|w4f3QU_CqvZsK$s#zQsF!1ZMZU(qdFjk^qy2HQGu43pi`pgz}FV##@rG0%Pyp~vDp zM@#Ho^Ua>83t3_o|H+kKFJ8PY_7I$~$fPF{6e?uf`u71c^- zC1uqBgf6(>jO_)p-WL~xL-3jj3!(r=9M;a@(CHTjapCAJozbI#XpVCZ!l1YeM^m{> z-Zi`v#sc#I((>y^ma!#ETtU5ReN6H+bA<@aI8BmI<&U1}Uz(v?t9q$TLfNjBZn6(V z^_hQe?z%im{#P4s79zunvzDi3T!%u$S}3G@uy^{N=K~W6)z@T|e79FscF+>jV*9En-?jlajjwF^d+*mdY!b0?w$LTMgwbF^8scnG3xHLJS={Oo z14K=oW{|HDP4&N(4W{O2qCnmh&jxxL1(Aj&&PhLIRPLam5TxY_CxkgR)5vPj@%di^ zcoe${vJVuxXYB-EC1!?QffyDj(bm1}5cJY>^XQnmz8)abi~D_SCJ=6&sqW>4tnL|F z-$j;E2Vu7gTJ{Ka7wyz(VfVB2m06?TXAz3}|BmD1E_?pYvpl#Xr0cv05{oK;QHV0P zsCK37CvUktAv$Yb<0gu0cmB;2(oHLMb`tXNi?IopCk%t?(6`M?NmU6$9ocNu!xzSKMxmU^aLRf|8HFJ_<=NC#nJkOLQ=qjX}Hr@2m4sF9Ms=BoaUJE6d!rS8z#>EmTz$(ozo}P}oS-g6v&`p6df|s61RZ|t?sX_k z(?23iZ$z9?eCc;tCYs(DrOSI`2`s;Ip+*sxc75q~a!n7)4Bl~WpE>B4<3!3dRVN%) z(cvgzaFX$w_r^W`!#unZ8^o;^3YumsQI_hmT*IEjUkA)bZ%183Vh?!bO!iSnuIc?30oSz zj)o_O-4ft&pVK<3grR-F^)K`ECp>~awJ@2T%ha7zRYdLmOesjB8j>L=Vb>&ZkZA)f z3Xoi1`QGoXuv|nm)9CJ~rE&yF{g~CLW9Sd#eh0xtJ+{W%@^9 z%T>_}b5sks){$1z;rP;*6vw-2O8ZH)N=#FTQyn9et2J~;!#z&+YN5Gr?}Fc+T3Yjj zp-H~hGSz>8h^-bOg1u+KtnH#XH%Q>th(v3te{fvPc_+*^YMX<#!m^?mP(*NrK3xl1 z^;EWgdo1S4fvw#NtS50U+9~uz_t|8mhBC40i5A3 z!{E9w&^d7eW}8MCR!=DMsAA2;qeuwfRzJ4c<6AOsFz?lNJzTox+ot{5;Mvn$QVuqz zPz)-lVu1eqL1SJ7X-*1AMtCxCjWcD`Ul_g&b;TAyoP9!bIIx7Un+YCaDM)V`YNIrm zi@nY{nG>Qag#TWfC~m`Z88LDIBuL!>n9;@(XW{7+T=&4UME-COL`nQP1aSMG%4KW0!m4F6+*U4 z`MbW!Riajxx@TJp??P1CACMuSrwK}`3LrQ=pg}~L#se4z>|y5S7#Zj|FsT7nm`WX6 zKqkew1QjL32I$WP9H;m!muiagi(G;RuCH1v(jIa6RyD8jir;8XU$puo6@+inMgw@I zh(WD3!jxL0q9l#9=#ox|aZ;h73nNCFPKcJ;EH(eCHlrw5ODtmOqS?**g-K+_#@Sw4 zJR=W@oMJcc`Mnw}5qUOnciwRiOr0V1=dU z7BWmfhXyI%OJKH=ic2BIEf1wOYO~w#auD52)tAyV4=%l2gYvxo5sWv*2!kW!WEBJ0 zDFxXgDJF4G1iHP#cPr#B5e@p;m<9ZbT<_XcAhJ1v3lBQ%Qx9WzQI1ZX3)kgm zs+!kGeA^Ab<0oi>NIBiq($0{2F4t=UDaznTrb*`E*32u4)p5q%kn5a7Qn0EmoQ}{} zlr44s7rsm~2vYr^&1O6Js9Xg!$lo*E2qR+K>^V(%F#m+_ILE8g)2Y@k*(l*JJv-7k z&mhK6hCw5dN;Q>L#x%12)H3L_&r~g|_hMW#_9B`P$1Nnss-hz}My?AU1tGJzB02_z zWg>*YH7yPX3WGEe@xXNEndvDwO!krJsDvU?V%Wf5u0q}{FC|VuGZYmX|2k-{SHl7W z_=4Ei)E(_7z_+I1KLXpaJgP@=j72w!H(k-i7-c;NR8;!$O8e*Uxn!7PKvYU@77?7O zjSSy3PYB7+pI8}-4O>|_-gMh9TpHFJVcOiehIFJ3NLRf-MUu=+4Mqf~V<097f>Usq zUcg2;AB&L|Ew)f1B`-zEm#j`H?r$PVO)bCWe{(6>&fDuh#O3 zx#23jVKkRF+<#zn6>?(8ZpwUy|v*L~`m076ZpMg;cQ^FByn)U<+s5_}W za^=Famm(qo7`5rbmJb!Me$5e*hRD9Dp=Xb#FYbQ!<>rdQqNl8;YjHIRr~0oyg`g$!9vN8FeMqM4C}WE|OnK 
zCVcKLDDu~AQ%!w~#%#5^IK6p)MGjmgdmjLeH&uS_CJVue?7)yTBd`*WiA@^o-qI`O`IsnqIE-9Og3 z_xa7asp(Sl-}?2j)vDaoil2{<#W${twe8Ve{G|4L*J<=lKL36Eywt7zRRnZ3J&h$) zggx{}#`LD9_Gc7T*L0P%PGuLA0Ew{UujH}=P008;` z01EI1Xe5MCErNIeafx8~PNof~NcJb7uL8gUBQZDMi{V&%MbV^zATCQHlK)MeduIHz zc@x8T`o=Xa)IaU_P1lo(bPuU|6Z7p>H;|46|J`RRJzd60BoQloh~dR_sR>W=;8leA zC*>ynQE%6l`(!NgQ8>}7R~7<@)#0vS%0I_aYTQXa&C!!FdQHUW_0bS_XKgNnY6Gnq59V2cVmbEn5|OKz)4uV+9-_MiXSG; zsW?Pt7T4A(H1=_12xeKjElw?7cbl=vEgT^R(STe%{;v;h$iU7O<2KFb+it0el(OVb zn#Ls}Q&JKRgkIJMom^}??XZxp@0U@_Y8}|56osJukk^#&@UDhk+Eeb`T|xdtoboJHF+x@oN3 zk9l0&JK1l;Se9e2Dx@F;oC0Po**_IjsN{CalrLWaX5FL^)DGGUVExZ zh@0r~xrP~qXUJQee?n{_2YB{uRVu!?i3@~_ZB@ zNfK$BR#IMxBWk?9XtF7x;i>5pYa4kv*qL34czP z2}PQ39R6D;l-C$Rl}>ryn_(Lv4?Tues&*ZvIqWxVAv_lpd4D_AOW<0a^#?T0bed;& zHarcBga|G4h z2vsn)q00-*M_o-e+}>C8-`J^G9xPDWlxsh?*6gLYD6?ZK3_Tdg<)6?w|Ldd8**fHw ztyZog{~0d#iQSw%1VZ?L$=g+WiOF0(L_S5*$k%E^L)aEt4#$hwr0n{u&0+cF2-_5M zsmNel(Ql^9&7?K#ff%6MIblvt64hFbA_h|^Gh^}u+%y(|Dos!k$ULEax`HxdER3@c z@N4t2p*_01lA5SGAo)y$4ic0t-Nz7Z89|CIhbx6%^IKd23t!l|gCp)yk))SF89X-h zy&Apnl>n}~LINer#D4b)h|1vR1}kd&kiyX-uN&{v0U?**6pVT9jJ7lPo}0O6gN z7h!zClV++p=3XC}()@qxNq#uzSfJt$KDCxhKVhb-5#p5baD(p4lD(tY#adz@;M2D_ z-<^_w=KM*Va zlO@Et$2$fkt7PxYeSzs*YuQwYH2=^Um_i=~*G|b|jMw?Uij^<6q;li>6Ej(d)m3QW znzE}GHXYYOxOSjZx4P_~MKYQuF5viU{#aL7VnMSZR&E|*Uct2l5)$h^AbXBwX+Eey5kkh2=bFabf< zYKW3eC)PUWP}E>+VkXjQKpvO_z$3hk7Yw!R0nx_MOrV0Y)Eo{agkJ_0Q!pT)^X|np z?p9nJnkxRLnmgnC**6PLit|jq>=JAnM;p{-I3gad5)5103R;M$$_L5K6P*?2(N^0L zSi0K}@ls8f9C9D0jhcNWC~z!?scE7$i?MClm&Zy7WWkTeaL`$LRqTcENyBuP7s@{s zQTa1^Wn#;Wk~F`gASCgzD^HU4Au596=QzcC@Q9EqFI|LZ65AurCX^Pp*K?WauKX9% z8XSqr3Sm^iS2JUMhZ59D{;H)v#S!_$Pe4O1)}seSJ?$z8W^xe}9z8Y3PK7BK6^A8< z(9bq9Zp#2xkoU0(ESLx@4D)mNFW7S5aSXOekvt##DR68&YJRdMnzqazxF*_1`l&%Z z9{DGkMirUpxZ5$ zBeG$~DqZeaCvLST_`9?so^qIatEC3rlC%Dd_NEfYX`bHm+RUla-yI9F3<6fOTHnA& z6!1OFCZsB`v`4bxs__4PG}4d!lvV0uB;RLp>5g;gj)%(G%oeFBPng#E=_PxSJMJA6 zvht{V%fbuwaS)&PrBeC;Y!gee`|ZxPhkLh$Tr(qsNGA_BnrQ<)a<(9=-!Uray7$BK`ioJ z^oG>K;#QSvyK>U-;t+4kdt;QRV>$e2h=v|usehPLAA*;JF;&-^s{KRGpaT97pn&L2 z&Os?Vc}~lR7~?T5p3EpgS#S|5n@k_`1@JZG4Kjf`Sa77~w5n7_QKaC%?D7L7-zDtU zj}=|(%x5=xY+rrSW zVWsbYTfJz*;M?D2UArftX;Jy&f2M2oi;??EHhvT!M;4qZTVkF-9Z|#M zwaprb&CM#Np9X=Zn$W$jgHassQj7w|OH6)`wDx$n8ktsd--LW5&-!K~v7h}qRj zCcQ09LKEXsH*}bC^G+vp3FOEeuKZDYO%ODBH!Mmd73+r9MOv)%L;k>B>SrCp!+m=f z5-M}&K^AAbdGf-;Hs6{QmpQj+Eaa6#Mcs*Y&~t@@fU@*Oe4W65PHdgc)4ExeJBVU( zdYB^{a7>ryd4{=1BXTZ@1e)@zktBX=I=I5alFBpr?c)afCu~zm;^R50_hZD8{pbST zTC?)h*mB|&F-=B2cMwuSzqL&H)BVwoMZ9UNtD_Oe!mP8NG_d-xelqJ4XUt3LCHy$a ztsF@d5R<(;FlGy++1@bJ>lG+FTm`OaC_KSbAY-ap;h#TdFF}fbS;}=wMPf3;T^`DU z)ZjS}HOdrG0#fq;r`~|ML=k0K_7M4n-o;Hej$zdiuT81ql0v9@M=v~H!kV`raDFC2 zLkJ6zL4#7~ayha{tonuCBeKO%=pTIe|Cqe>xCX>T%qmZ~fFyS#Y@prMyTc6Ua9|-3 z5#Bg?w?G=QpMa? 
z!m9@ffYd>>5>Rt5^Sc5ce`rt42p#X1z2$sU#oym84ov5{Mki1F#9M^x_25AolhL#) zNF;TCj8L7}z1^gn#6o^1QOuvcU{^yha9qt8GWxO;Z1hl(t3t)?JZb--zO4g=&w>_K zr%(g$XH=Oe5pC|EiCqxg3sA{Qi1!YK^iIQBxZ`%!N?HD$$UsNTZ6Dju!u>gf{S$5vJB?&wa{Nnw z@x@a=fIfY5@vK~RFx2)H#b2|f{0+G6T~w{3oWJ0NI#Eh!;AI+3aJ(VZ0GLu6* z{J?wGB#_}x@zlywobborl^Jzz=lF`PE;Ti%s7{sz9qR=i(G195tRM(0=|!}`nEl<` z(uN^}=(5S#{9Y_9mub_4n#jiF@B@8Q!IAhd!$q2}3b`sc*IzvN zxOXa3O~Ua;+nS_dDw-NKpdi8kyu5p~ID0QXLmv+ff0Z30|Hi_~EG_8%fUaXsrP&=e zkxziIRWanf_eIgUOGJx0diODokd3^3F!%)%8IIE@+3!+!b0-r7edvdXFUaf?)9{#- z7mLIM0wTQ}3L3cQc&%`?{K)~>o(>y0Bj?gncUKL?TqceMo%yUewk#{*o0VmCYpK2K z&>1L;Wd(_Jw=xVj>0Z&{5}Vo4(z~rSk?dHQ*NvYK^i@URVhEBMNEQ)lK=r(qBOx9;3pjcLemvpCrK> z#K!scuTev!78N+?r?j#?4XYn&@LdUthlI_ko1YjD(L~f6p?`VwnO{ZaPvq2z_r9NM zoPD-a)_eC|%MyY;lD|R}wE-Hl;VC*zL$@SCNw^9qP7LQrgeKB&)OG6SX~en>O3auP zxG0%~jGR&UY3tNd_(RB7C7Y5zm(t;9Qo%b5R`*n9;xxwbZZ2qRGN5X9s#6HA24EGo zNp9|YWYnL|IadY*2zg{{g*h*8X{V5j#J#q}a*2rftiy>%kA;}yK6h(rOiso=4sDw* zddS4|y*-J2kNx*83{wLhwE0Ij!NVDyK^uVoulk_?AXq9@bf4H+R$m2?vA}71X8(Fy zp_rPrCruv(lfp**^#C{>|MbjpU-|3}5VWjRf~WsV_NdHy;JQV2 za*<@9W)cdYmPUyGMu{1Ylf;=rAaB)$0=CA6t}5_`QUG=0I+iwM29u^q1n2{Bv<^Zp zVAKSc(4?3+qds!wX@e{4Fk%cEdB9LPP8>-2>{p;J2h&J*Kn#9Ote5==DqWLu%i(wS zHf2`1XYJx{jt^MOog^9`HG`(0hc=C3cA|<@vQ^~H&72w+4q9o`Qvub#pKZ*eKQl`j zQ52rRibOyoqbio2l{!D4lsX|>L7|k)&Z+*Nc)UPT^Pd!P$O>5cBf+E!Ltd~Gv&wMd z*y=2gsvq*S@r*G|Eu^I}gxWryPC}7`0j3Z+^b^gD7{tI@yvbH@i}I*Q^$aa@?E}CciH;xOmk{?LT}T1-v-59xKtT%`8}E zsbIBujZBML7|n$FnFD1sGJN^WIazfQ(lhg=lvpqS$}7tC1>(k@P=?K%7Q0xe&*!aA z-RCXY!qtsaCX3qgBTlE-6UE73@Irg=t(k{-J5J+OwcNle)UvTYL{)v;8-} zm7^4+Jt+~Zxg&W?_Azz; zq~ewXd5jg!^wi;P&}|`39X#>c{oDj(_~&8~|6Luh+~f2MlI#t-R0$0rl)f3=?o=!Yng{n# zQA!$@7N|ECdXXn95S^h<3EtfrY|{d!j0k}O2AVp4G>#Nl!icDaNRc}mJFX{}&Ypwh z-!eWiF0UCdZL(!pURux%7gRN#q=M#WB6NZZ8Y%@F2nMeh;3`ROiQ*)wLITh90Iw?D zn^$;lPh-8va93kn4a&8{pNRNSQ6nfTYOWfzGJraG<$+twraF*e(y1RbX2A%>RC0tB zTIA#|_$4q{ml)4KJ9yS*;5k~#di^uBbWZ(?QMZ6fkhl&eVuU37%EZ88aveuILXY_B zD=Bt*J+xDUV3C^CJSj^ZIR)i%Noocg=!%z#WBS;yv#j23Nt3gUM~>QMZfB`m$`VT2 z+#sLBjS18bZ9J{taTh2v;2^1_0L}jJ>kDS^GKGfF+N#X+L8M(w{BgInf61QFhx(;S ziG_Q#he(_0`RIde$i3-wzd8me$C=KO{3q@}?TLFCn+I6p$s-PiTrGlI@b( zg`YN4Eflline^2O0)Ld=Z*!NnvMClXBr0Ln$-tgzL5P1yv;&0@5R7Gu0XBg^yC@cyM34TutYcYgByJ0P=-0vc4e zYd?my?ExSv&zSS6oU!=Y{stkEF=eEbiZ;+NlLYYV!bp3Xmi|9NvfOr8=*{sj2Xk`b z$ddUJ=1%yY9&@~ROrD=J8D9UN7e$%LlMLWfWd57o%EW4=o~09kP)E>bsnuPqdgD}APq1Ugjb>LKo$|I1yj%98f;F4&vW3NIGoC*AVo0afhBiiB9MSe zO<wv9%0agO&vy;DFgMKe;~RL@&6IN)>1Rb6Q5ZKWjUZeW~wR0R(G6cxUeobr7kL2 zT59v!CKRZ0^|r&hqQ_?+RD!~*%0l;>Wgb}Wtsn#Ve ze1G~$vWszXgP$Eedh(1svVB+thD9=CUqUFRTc!5$59Jv!+URh|6kp9;ONHQTxzzuJ zzwA&~anDBk&kFx=u&r_F*Uok{#wDcE&QQ`;^F%B4 zE-s^dEPLguB)sU~hH5b6H&ZCTJz|`R%_JA$sJAo`Xo}uU#Y>Q#NGOa*FEDUpgo6oVl1N5oiGf-qD>CNC|aoT97VDh z*kfa-c&ihahVQs{A1;ulBF`FkQNANJQs$buQ;gHsCsUK75{GW_GRupT|5;vL>p4n+QS zBn#G!1r7LZ>qbP`ci#ZBU~2$TA}{-bqO6c2kZk!}De31%2K}Yf5^Vz6RIBEaiW-$3 znY1cXLnwg1$McU#Qmy;x==e_4E}56CN&+dm+Sr8^QfGXnYkG)J@re+lcMC3={e%j8 z8bl5r*-vi-H|nM%=cs6bHhsN_p0AZR>%tY8R%Qn(gH_@h)6V(dUwAusOC#X{SVTy zWpQBFob0bXr6dT#7?_z2tX&%_ohuhermQU3t9FN5W-3k=uef|w)VZcUCKEc5YNFpC zl-n~Jiqc#-KrJUyaXsX>^p0I>m8p3~^HOvI;1=KP;lP~zf&SYS<8Kp1w;nxti zsLp6ndDt96Yr;xvc`pq$1DPQX93FrwRRTi>jO*kt_U!Y@_wSL82&SJzMvEGC@`)CK zTFQ0w&CP1odjq#ZsT+pTN3G{2ydtI2jeQ+5mzstCle5OAB!yB^`_$^M?zDD!i*?_; zS(~1-USC$U6{WKE!AX9bSg4rpwA_&OCWlp!1oRAOAgf}LtepgC?1Tk&i<9jz3%jU! 
zq(rrHcj~Fzxhs@zV*06E;u(EP6$9ilSKz>pH_uYnndYctl~adr_34a=wyG9RcaY%- zaaoWkGjNwph5fkgrfW?=LbEdz!}=Z6{S!>RFR%tDr`qE|K7 z4b)N99$J1ARb?N{xd-MaJ}-6w`w7K;*T&hc1ugYiqt}queb8nXx>fI8a8%l3&WoZu z^*>Lbh04lMh2cs~+h}2xyM|P?B^cJcU`(|Gl3csyg&RJKQX$$>c(#Ae`%{~@45^i7R_d*2Vzct z{`GlgFUceNCu=J#g`tm0F%a18?x8}0a*1G>E;s4o6r4?00Lycqk)tc-J(C=b!Js8a zybGJI#Y}JxAtgCPHvSi%5mZ8jrn*>?2W@UQ^ON zFA^y!Twnb^{YoP;^M4p}vSW7X6?f5~Ui^o*2j~KOz)s3TQNL~WOFY4kgzhOI@cJUqz=dcDV@&RFu)FZU!at1 zxqjjn3D@v(1)gHYSQ*ZG`tmYO2w9WwB|$C6o(1}&K)OwHA$PbSYz zXwZ259nuk*SLMErc9PgQN>d760rs3&2#9t`Fw20n!eU}_Sl9&`tVXH8#ATog=G`D- zKjR$WhVzF1y~FauZcFAt~ipwLmo8_q-J$lS^2bmsFLG_vMu#HF^3wF)Vn*!0#OLKZ2*-) zp4W@81bHcsM2;gp1ktbDa|$Xjau)b)@3PmUYb+HKTR~O)mD=H_6T5a|!lTM!Sf?cT zZ3n=bQ4-=&|6)$K5M-EnLNF=Til3Ko)T)X`Z;g6%6v@g*W)3^U?IUz0rF7yp>73D7Y4m&4~iFCvU3 z%jr3IBc5QV+eaa>urh0xM0QgMc-tz|o*le5imTv4BO+ag3YT3meHNEOuJ9PVlE}89 z>CHMTeCkgQ>Ivc?vvl=W$wdvTN=ANUMTuTfQV#bBdU7s*Zq65a(~&1czQ~c3P>$tF z2N>wfXzO!?-20&Fy(@bhkaMc=5d=a_)Gia>=#yw5y4w>1f-1SMGgz!D!g6x_%j&rRh=4;7Nugfxde^RFaP){z3tl*biEWhkMsA)Iy08e2+(r z9=1zVeaI@>Yf_DI0AK9Sv*+3^l&0=xDH^7aMs6}sq)C8~A}NGbW1xnbSv|IqP%e2A zo(-kZl>?eySqEaw(*{FQ-?^Bs&0ZIUCR$+Xz-dh|$@ zl}M24lOGt-UB{pPB^o`}mrLsSoO;NC8;+%jgo~xlpt11MP9b7UC|Q+7uf?NTPCDMf`RZOHV1s&~tKC8dA zzp7ZJ*s8KWlv}1NKn+j=m<2l-Mo0XVX~1E$3i+sfHXEN(?K@>M&mpbTJIJy^qqF_Z zMIcK;`!JOp4+;;3GI)fzj`my^i-l)_xLPe9n$@M3v7nwL6vVWWApDW`LwOejvU;uA zQeq-iNjt1*X-5Bw7OOY)reMXCe{0k_RSN4A?l_UVA$n*Ixu1RZ-F?iiPKr0Lt}+u{ zH6#6LT`eSx%^)5`r!oUsR6z$JP4ElMQZ`IA2LY-TsTc!TlI~LzHfBi^0({}HSTF?HQXocP! z@*bv6W4`+-81dB_$*Y9a0_cE}X*(%e#)JzIrO0A@N*5B3WTPmexFMzA8p{W!(IvP+ z5+2`#vg**(jO-?^*Ncf))L=xd$w4j_Nl%GtAPCK4 zdBCzJwI57^>DpqfuSk_HDM zVT84*0b?4AYz^ z(g?SKeBknEAoX_0GYIj96Jh_?(8?{0Xnz*C(HWt>2r#n>(6Jb- z(U!x?M^fW0NppmGhlsmFu<$PU5eT>(d@)DHK$fqRtGCU{n@_%LdG|c^Dt{MW4lW6e z+TG)Tngrnx5)MJdOnn#!(U5WqWO4bB+O0rO)hHC3$=e|Yoh(!B6g!01H^uwRBhK){ z6e03Poipl!^B;s@h%ioqTs2PIyLGg>d40E%sV7INnpTK4TIZXe;;Uqz*+stj&_4>t zK|IhVEc~n7OMt*VfqWMYE_WZl4{pFO~Ks6~FhDu4<+kxG8Qq0xrAJ*`^L- zt(#5aIg~xecVCjiQAqjMA*2lxMq~-XDNfY2YKZ|LX2)}1fl4IXiG&Z^i&NFV|MiLJ z_EH**SIeu)&-sC&-d zwCQ^br}3ln?=NrLvzqI3Gz%vhOtDTRT1iJ~UPzzk5SA{5cK1n$Kh>&0lWi?3BVdsvr*jhb?XZ%b(-)~0JQE=ic^BeT0PU&GXYh3IRK;hZf_69}ku zyo6Ye5HEEuvl7nDvmK^+8nG|wiD4?Db|htll#~$thD09H9+%)GB7%?t7I}EMtzdch zoQe|0H$n|^=e|)jI@oTR_0TfX;U0PCw_a)1HjF8xy_Yk`&q)cN6a!k#lFzqwu45z| zp_V@g86em6`3n-WoW!9Hm8sTqTC#kNSqe{W7fHNuMdZplOHYGMDm8GTIseP(NW^yK zZbo>t(mCK^#pwtJGRSm5lLVB=h|iG~OYdv>sHqSEZyi~{g7;+o>K=w!C!S`d0_)pj z!Z>bQnNPiK!IwV8$V^DE&Zqh!5ibYW!ab;i@%Z^_5d>!6SIIu~pYPTBmqbL8zS!Xm z-Y{VZG^+@yk;1NIa*$~V-ECZJB$H*h?h0;w3>La0mZq@5X}+nP0nD1Pg)ZvLS#@O+v-3DR)~aJ;bgknvmGf&rWZQBM(H6cb>lR4o_6$z zm5PAq5?Wpsz_XUQJuRiAg`1&GXnJK6bvHGL>kMRx->? 
zIhma3)Subtph;m!gg&tM8bnfK94OK;F=1J@3xtnBl(x}kxmh#R2t5AJP%K!~H0$0T2 z(3Jt2T1qD;$Zc1-UaMC$-^!#YA_WynYLAjgSg6n5V3LSwbbu#}s=qDdB#_ zTG0cTij8C`=3!7AV(hXv{P`DWzEvYK%dWbWy-TYcRcMi|d?iZw{ zOs&xd(Mv^1jVJ`f14C%x6cWG=C>aYrXLspBG}Qf*ESQxikI*nX_I2Ef)HOWuQwiUR zZGxXDmSwkYkmCE$djVoKKeRFC$hqb)MV8V^Zph0#OJwZ}XlNr>LA*E3xc7~lW8g`0 z;`sSDQHQb|PH^;Ph1hAoStPt1*6&|VOaL$d3AeF_!U6+aLM9(?P4!=OwpodGcP0_u z7HcP{cg9{8U-HTpXo$>6`N*aSL<*C2K&nSRGims;!w6;=$^!}EuvkesQ%ukbI^w4| z^7MIWLSBICw&il86s*j!c795nf7a($mr7*tIT_SxwN$aypIX?!1t0;P_)e54fGW#G zO+BSYIs8=By$-fDl64PFW7X&pO@qFEp+J;oWKjH$k(55Z$aAGe_QY>=WJoE0$)In4U;~wrt#7YQ0EfupsJ3rpQqntU?u1^1g-{ZiDA6g zB{5mn1S=nAJ2RRCUnaT7lP8}K#OET40hOrm)rI6SrY=@MWdvUXRosg8)| zQxLHIX~=2$^E!#Cf6JZxsS{FVTx4pMmR57r46BZ4PS4bERpnapyg6iz0~a~u97i*f z?!w7L=&*a+WwVywM@{lS`h`Vi18m0rd1axHA3nD;y!31y8nKh=h{~N*?2LU?hgmij&HN`8=LpJ zlAH&B*y-mF^HzIBW3b^O&8@Rr(e@AIYpA+)Zq|TmOE=8%TsRIbVqpvVa4Dh#~J52624YLbJ)V;sn zuB+jA@(?A&qjk&fJMZr&Um))E&azSQc|YvFtH;jSxD6N?PysW(uZT=Q8u7^C|Fg-t zAi|=bWJ15TMQjUK@Y*{8PlM6&9s^%>X;H(o6mCKTG)E-wlcfWgOFD4177_`xgb*bS zqI_Qr85*E0t6Y)c^vaNIlmz^FDwzF~0iJPTVNA#7EC8%1? z({;w^p7k4-JKcJj(dBkB+G!YAM9<|r!M7Gn~BmP0c%a! zfDux}^S{RjKi=JJ978|?xC`_Pijy?lB{1)Z=8jdR3V+{}Oim1{Ij(kf!6#;mFD`*8 zM^}g_ScSLOepVLWZ}-|D1PMx>Znxgshv?A?k$POA+W9^75tq%W={-H!(3S#cM;l{6 zES`DFTUs=2*vDurtO=KrZi}bw*q3)M%GR1FtiCP6_&#BZA(;cX0j1VH9*tZRi}AW- z_vz&2(9Jd06So7wb>Is6nx`y$hYt>KGpX`Y|F_zY#5;KOuzm~Jgt-;>xN{G&*yK); z4fz`eVGPMTOw5tpF5c!G?I$hlakkL~3Tckmik5{`CB9!$zM)RHJYSI3_Kx>B`z|lf zaUni$rLRjAAOKgU;RT&V6<`eMnjOK3DOxj$O1)NzMw4dD65v`8N}t3LNvnf0w9Io( zlFa>L^JccAP5pKEtew+wLn34GKjI+g-$?_ra~~%hXH#NvY$%D$b)3uPceY15#`QoQ zds5>E+H&nX^YESaM%l-jxFn_TXbL4E|FF0L?4u|++EWwMGR4eaAc)m- zQ-$%3QPSE?RzNvXoxoBWUMMjDjYrDl8xe#<{~|C-{mSUMVvlK8kg;-wF=jujiZy4+0F=1Rpjs*3$;p%T< zqRR+kQe-u#U29DX<)Mk;=VE9ptj)I}p(DU-j&J!YU<{eb`gq!0n@V!XF1vYFM{mf) z(dmOwmZfN-YHQJO)}N2aINxsHyIb{+TbSB>Ab?1(#Gbv3366)mKJ08uMhrvU6fe~z z82F^X>ukZx{YYZ&ab8VMrl;%f-#cvyvu0BQ&w>Yf*)umfq6DImovZ`gf`ih)$`+o{ zVnIbtZIw2U@2n(g_2071;dowe-aj{=>w%*eP$!Wr3XjCt+STn3z9rW#O2@RO0Vr;W zC{>W3c`|!DDtE(P`YWeZeT$dBqc+Z;9I%yk?0rq;P~4#14earhmo!^uVxI8qmxOA+ z?J+rORT@jgh5qcI*i zfZ*KWaPC(dWj2p41PY$pxCVOr=RPRk)UH zL)}H}p--mo;g*ose5uDJ!iAw)dh1n|!11JmN+(opAS6rnbVmAiY-#p+9)=x)K zSS%3`8mRl?D<@8CASsKz03H~?Bctv+A)Q%tt5{%^+ zq=CEzE?lZtaW!pE+L%Q5aOb=BW-=z5(&djzKKX%qNQ^lT-}Pva#U9P<;Po0$+JWzF zd#u>5yxQ>Ttg-L)(3s7|nr6Htv{8O$GH)}I%Ukd@bBJ2dRSRal#&f>d^Yo?fgy!$0 z)k01a){Jq+Y==dDsb4LK-K?b8zUIwMea9a)+D|qhdX#ZGkZvygh+jbUSf;8RGU}nE zjsp4vu0$|md1e}5JHjGKKvnVpXHja)@CWEWxpzEG!&)Y;Q2ZLC6tEnepb!^Q#kx1d zBoxDs5`0SNP>%@w5%5EWhO$yhc%}oc0~BGzEBpj}Qh>^vYytpa8rozbB`Kb%>sz zJf{_^|A}T)q8T`~!I6hp&Bs}m<(1|Z|1ypmrTa`nCw&EnIM+rNs>>W-q(cItOw zY8RDy)bXv6=Zc|plpfI^{ zbpA*ptqn8hB7t_A$a*Z{-#9FJlF_5ETEvGBz@e)6pzhn>Abk$ourv+1<0iTYI7_>AFwur??geRAv96~dq@wO{nM00?9Bd@Q)05~= za9Kr*Zf+*ZK z9pcLu$>_5tf%# zDx`ktDu6l*sI7Djn1*2%%6vH!P|iee!`_(>&qT_9$go{lKsJgD2(8jl@KP8|OxS1> z`S_UVwRm!PPgi8c725LLn;_9ncaTuU>GuzywFiLr${n9cxr;JVOE=DtG z%1w}BCAN=lJ;*HPwd3rugzA>7t&}2^#TypW&_?jGyL3jN@_$ck<%NYCequewl*BoD zT%@3DZh}C)Yc3&Bh%J1GOyr1qKN**l+=k)H11`owQS0j@$*vD>TUTJbPO1yZDU;W{hv8zqZ)PRPl5J=8JL;AbyTvX@YsEW9CCp9N{Cg&;&9+^|O#<5Xz+yE<3Jok^Ei|s61*DTd1>HYjalLK$^=s_c=-x zBOzNh=(HA|-J)Wt262lY;m`U8gzuyDaEB1#SHJ;@J{RQFR#1|?_6Z@OBMM|-e;oJLKD2hLqX;~)in8C zBpR#yK?8ld)Y2%q;3)oTbi3!QE!+Y@B1_yNA}A8`z+7MyaihV^=dDw+A5tabVMm{9Y(C!iYzwkax+q}JgHDd4y;iz&Nj75 zBw(j9IW`(8(6PYAK*~^n0e25k!2lV^5E{Tf03e9l)vjDObSbUm9SEo3;Sd+5gn9G} zn`x%Pgh(GPz1785N9SUk(A;C;LT#f;gmY_)AG%h7u@xgeM#T_i4AWxjlMq?L^fgC0 z0n~*=@f3BshE+suyh1D_ms66w&k!UBR*^BiWPyQ`L$z;EDte_%S`!gGc{&_=0=4^556tXM$>B@{XZ%#TlM7UDxsa;H|$hT8+NjA7q 
z;$Rk%eL+7yi(P}1L#i{cXeTX2`+rKLJv2ZBR@Ga&YfXgg$k=mjc&+slc24%M z={88?Pin9%J~Sw;p(;dX0V;$5w7?EV%@H^VB&X4(Q0Ce8ix@OPS3#;t0%Bv}VB#Lp zbyGsCTV~GcQnF0EGYeBy1`k1RH5 zX5>tTo+>$Ai6@J*ax><>4{o65i@f#!HDXO6#I&U}m)(;{Wa>G>r6x?CQfhAa^r2ny zYmuZ#v!0h!>0%nGXa*cD0cL$^w?5)v%8ZWOOR~VJ+sE#y^bz+WN{N>S-66H(S7n+l+D@cw*I(U7$fL=t}qhyKR zFYA|>)Wp4L1WYaSIsxo%VH-yPK~3#xkPz+*piQD#iB9+Z185@6Zc&P(H6QJ~M7^fJ z7m;TETj-Fnj+wQF)URW4Lsw@-626o`he$&Tm%Lz$5gI^(4`8p{Bdv3*1)VhmMknGG zOi29*zPc@^92=1sD?3_5YMGn*UP|bN2mBY0fEj;tb`5$|tdi#}3p<_adT5-~a z@SrQBvo&yWUcEY50+wvtm9d55_e_*VQ;Gf9@n#BImg+&T82PK-q#@spM}7o`J(mF~ zbMs48;ZeNbZk1y*C6a71(}W-ea>z6XW)&z-#$rmYH1Lpqp3WWsyvudNCOmPw5`XKo zI0>99LOicTqfst6;|LoSD+lj1#`9WN3t6Vh0_F`N2(rsfVsC@H)UeA>kDW z7MQcP(exF%%3s9TGv^$IfqRszC1`{mA|EM9GpX#0!xOvDylp-ei4dVVL3>7;7! zZQQCn<3d7pK(lB&i^6u0PS=ORNC3*JM2h7!&Ht6U- za=<6GtFujztt)1PEY^lKmsaO=f)Ql=6uM=JF#E?OpPK$XT zZM*&vqBWO$0y1B#?dz0A14LAkzcmN00ycM44%et=STAW0*@R>-n~}EiF2rQ{N+$%> zT|d97}(oY+Ap4sL^CLX3|9 zuLR(AAj)Vqc%@5%GxlMWU6T)VM=*ur4CSs5Cor>2@BBId=pc3}^STYK#pMxxp$qsnBuv5N^q_E1Mr)ZN7FQ$sI4#a2V>QUaU`wR16a`Cs%gK&X z>CsapwmV@*thaIWD&&oP{*&~`5S28g3+H!|VmMv}-a1nv0%Sd4Ca+IlEvijv*1p$b zSNbIIi9d4eSKgH)?RJ>yEFg-S1-8UcQXtGStOAowgmF;yS9K;i^R(ccr=sT^ z&!0nu=>}?GNSiW~WQ-MtlWm6P58<$_V8K;}IB3llnNoXAnd8H0(z?CCMX`OElPZ=_~GNI*_fPPa5J4 zq$nQ1e$k9A2u}Go!plwEMSXwV%chN2-N%M=Ei3VTgjW(?JRBfvW)E!lfU4VYj9uZFH`67+aOparv4A@;RXC9Ag{h> z!b_}NzA7!3v;n4f3XGJq`6+0Cmd_C`B3I0;oc}Ra7^o2bH=e#FcQoq`c)3}Z_1(K_ zP)IC?^7U^+0TC&tg8Qt-AGc4ww$Ju_bo!;gIs}bzeL(Dd>oWPwInaW6& z)DcZ&Dp41N3e(rgr6hXM<5%IVGe*XyX#wy_JH;hgBiH3=61BNf6-6P&HU+BJ?!>go z3;&O@8g*w_)fh@RVogjfmalQY*l<^-lThnBFbK@zpi@C2RPbNiifla6Few2@1mmVou?3pvrscmNl8`VrP~e zg_Z6!&!`T6n#S2Jgy!<7mv(WpYIbas?O3^JyPZTUSj!2%fP3V0!vfXhox4306&G9P zGy45hOnomLOFwHJg-jl{CR%}#boHGJ7w8qs<2J4@WZmmx%f8C1e@O2fUogQ}H(Sao zWWL&Ve}z+{7(XI{XJJ{j8=HgO#vbX=mjXlhqfujp1$gyvPY8fLU0fVsPURXnWbIr_ z-6sQk^7LKtuPt@dI(^k$s#qG3eDqB?L~X`Sf4c;BY}|5ZwxVFf zil@x<^jdtcb(-mnM3lcGPJ@5xt_k1;X3)SvVJ7^fWZ_?x7Pf3q??=QQwa&quSPHxm zHA+~AJ|4EwxdV0V6WTqh&EfzXOI=j1vbpKdA*X%@x6b+)#nrT^Sdd-LnNp813?_c! zW~(-=|*~fZ5OJmZ54Vk7+*ixx#lBE=~zt`o(=rFX*Sq@c7^M+i8#lx7r* z=aXxr`iL6z22UAq!d*lw9*=TGt6nDaxioPWCKaI=ej-0EBqh2-mh`hCWARv-5)dCF z3wIpjt6~t?&&Uc!3aOB=D<-8k0MmjaW>PuYGBb_G!l?x*;}Si33ZrxB*XoGa3$AlT zs2&moI2ZlRTc=QvoU^C?n`6x>|4xSo3u&eaGsbrCyR-f~{%CCI@fcoyT17$!63GcU zAJR?A`I{reUS>*P^7Ee5UbWr241 zM8(%HTkOBtz>`hUD!2Q8BR57p)ab;#J2FpFh>i5w(37XlblH^=$<5*AHiZD!IwcC$ z)I2MGz%Mly$lAJB=?fvKPgb6VCJXwaCA+M^-?xMbbfnD!Nx7}QLYum=$B?{nykf?5 zvubPDGGhb+?yXq&zm2Y;;j3(2tWa&`P$Y1(NJwUEKcc<{l5^wuo9YEbliQZ;ld#l2dFA18LaZ0#&rD%k_8p)+vaG~kb5fS zTmX&~L|dct;5S7R1$n$cTyGm9dxIVyfBChBt}d?L0KDSk9qBS3K{qBEcS2IG8h4g|n#tZA zEbWTMzWEhBy4f@*>b)W;_ApdJ@KT`8N6NU4(5icyWL}}Ct%)*Kte77xw_!?>Qq8E-2C+ zc;nWuWuZ>AW*)D#9{5~=NjC{g! 
z`DJu3R5$3luR^Unwyv56?IltIc{x&2K)9-)T`oBs&X{_KY$frLEuC|_KnwrTiz(Z^ zD?;bP5>iBMoa@A@%G?Myp(WerBG{lUz@AX&)3fYQvjpFZl8-TLeCb1OWOT-X^M3}l z$T=pUN>F5NKr!TaJYztZkAS(lap`ZtaOkupEr?3K|D%$Wbw3D;-fth7G|;a2nFy#$ z&W1U~-5Q`k(ClHR6jZ5xix&PGwdE3Fdv1iqknITIRQgvF@IK)N6;>L%Y$9?^F)V7= zWF4!As^m#jnMc?;rybymEigB+p}6=RI0|@)Wsc0C_aihvO1%WX4pkv>cy@(-ku?OR zWb+VqWy+$?oR)Zv|CGB2poZzZ`E}y$VEu`_i7fLXtjncYB6MaVGT^c1IbBlMg1ao) zM^?F-yyMVg)&c=mestsDOX8);r^(#fS;Y`iR>{^;XZJ}XymsUNXe4sJ zo|&UFi_Up1BinpsmR9QjKCR9@-luVFepcgCvO}2P%X(-EYm=kt+0tazsb~W5Jp4?c zL8TVMFlV1D#AxymEDGPSQjdefqeEAp!?gq6rsu`4BlirPTxKFeZxx!hto#{y!k*szE8XK8KL=(kBTAD*_jo$f!FNemY8ItKWRknZ=JPnX z%kt6YRj9kqjE&VGrX{$2-kOv5))3S>Nz)^EA{EV^=23hgNKAJSEQ<%L|ljVgV zoEc?R7G|VKBk_XlQppGQ^s*4i-AO)+Te*ofb3W2#m-^)vBd3V*cDvR!FYJ||B=lq@$5&%hEvrK0T!EZN3ecK>~@QJS|vhlOLuN0crg;qc?HIuk5|Y#=BLsDg$?p=;syL=R?T9deCpfM%FUm@~ zv02Wls`s=~r&1f)J@Zc&Y8hAaenY{K8x^c^*1r49{DD-;lW%Yu0+)Lo9$VJ6RnE)P z{|Be~FCILFX=<9*iN5H9)}H)rVw8jT%(?0ov49klz9PfeCnuoD9^;lw%qW!%P)I~b z@SRbGI;0Ssrv36ulI@y91g$8Vp6$fbTkRG3bDYSqPt>5(ykUB2B`QrcT!2WTD;S!I z^htD;^;fvN9T>$3|Mr#V9_T_V0)5?rB@Vp)ocmMaOXTzHedrAvJ6OOkA);z&ZT+k?GF z!?=rn0L*XiNc_%;m*#s_b)aZyP)O5Ml^GG9nqooZ$VFzDH=!@kulq^)@l&aPc_-Xi zl>%?QMNs0=VY_(hEk`pjr!Dej3O#XN6bj@XA>{E!sF8V{9rWK;QztQ^bro1*v0I`E zarRTX2!t%dD;9RKFG)RJh=CEnO6vFIuHP+e>uJl=Git(fqboU^;sPTKi7y9_=Wp`8 zX6T!w?2e(s2~=*@XjJQJT)8gtXuKdNgWu4rBP>*fF1b|~;ucw^76K%}#8F6&Jx`^bmB<&)EE|8dHR`f3j>n< zEtKD8Bgw^Iuh)tdt+LKr4>irsQ6kpAK;de21sYvf&*l{nSF zL}oWsbzv&<@=|iz^er9H$_xWgRm98+%z{G$yJJ42#o@ zt1}U(9a`>}L!P86LbyvkW-2*dd=y%vBtPQ`d#R5ED9V#!f=vW-i;)?^%1NM5)sa`o z%9Re<`G6&IYa%*Dl=zt!MSFZ1&E08*eX073z4Tnos6@+pCW)A#)*1phP)@E?A{Mse z6$Rdx5(_%gnu78mV?x+sgebT+az3T0Y)xNGdcO!-(WN;s#z(Y-5T`FyFBP&H+`-6M zeS|=#k{I8Y9!{}?j3_}2MwTi0T-5c$0lFKY3Q!=ZptphKj#`3}AyDoKC<4a1q=6Po0{$tmpouk` z1!6}5+NAU^sMIB=4j3wd5?TdgMr(@W2Pq&VZYCFz4Po81RV(jp6N5Df66Dzi3nP)C z0su2BfUcFHQ%ZgNS@+&p3Pyo#MxCB!tFu*TnwGgl?{KPvtn1%*mu0bX8J^I(KO~ke z;n*9e)WCL^I1I=?+Dj)=rN=J9b+h(} z(Z$R`g}Nz;ACYjsL?usk4u+1P0y*yV0TQcgp-F<%u^*Vz)cxo{x`d-(rGH1n&&}88 zH?;K9mT5V=5fWjiP5a7G&*-}zO~z!n*VA zbj#si7s_r!cXh|=G!JXN;jQE_u^?_08F}`qyToi6mMwR=|38K}lA~{?zC!FJ?wZ)e z00J@#6b4)ZxdGTrwgen6Ge4?IN=*ZY09GNw2~1^!om_e-Lq`BZ<9@S6G%vQvOC-HD z!@LBWz7h09{EsQyY^%=dzmu7mo9WfTu9lfU(NPpKYOpyY*b#>v=F2c2Dw__?WlQyR zpx(8ebgB9UyQpP`aRjD_xlVc1`Nc66F^!g1LsU%Y;Zko~s5~`O9yYoA0w_pL7ADb3#-_xqwqfRrV?Hqr+}aaX1LaQ&1{Nu`E4psXl2V%&-lXjw zwT>06c@Vhr17Kz?6sa;=+GH8*(g9Qqss(5{$1yVtLnsE>~&Tzo$%h+rS{lvecC65R>XwoBdyaYo-xJm_0T_CEiU+_E_FmN z+HD!2#4669qaR~vq1X}6atLB;s;Y_ma`3Y~cDu*+NIsYbakwrQXQ>cEFH&pv-hWF; z_c@(zm3LamGTq9owH$6K~) zF_44H&hBUnIcdKI#KXBMgK)n4NxiJCy|+V~GjOrOy$toRl)AXywbN*pxUgY>+XknQkk^Xo=&3^s*G_ zJDqY%TE^x%J{^!+&gVQY%6TbCETW%c5)K-~S9&r@WEo!ub%@QQSwTh3#7Yx+Zg5h3s6Ojc6AoD zH-rHWi+9(}1sh^{U|t#`I!5Lf`dJ~f*iqD_mp&$>|C7;CZ)BHI9AXzzi5$X7a!4eF z2%;gVMA@&Xa7Zr^$(InyOO|Cs9VvH^W5R*uyL5!8HuZI8x=j$L5gbQ>%1LU8Lh1B~ zuq2~9!}qoJmSyDc0r7qAM*>$U797NLlG!3lV+>D{i*@R(^KZ*y4`9l}F`DT-cRE?J ztFjF4ILQLRl0pblMIW|et*~3&FY7~H$sJ!0lW6(0pB*))gG_Wz$r!k~U@Vw-05UySuAfMyDZvKtw{;ywnS1Lcf>@xd{ZbB&h4&+1#V<3WJvElfx zQb&@{Znbo|oUJ`6$fDcftX!9w-?JXpknlA#MHH?pWjV%xb(1O;HY+UQ39i{Z<>d=Dt>V&Mi?F@OA?8Axj^rUXX>b@9s_1W>|ZkFbmHh-O=Dn3G7uNbo3p|pJ`KVgh3TlprM|(Gbi?_*I>IX^ z;Ij&dLQQKK5Ib6IU3Wz!s0uk|$ z=OvkYoi!C}>?AtN1XDiVDqe`>GNPugRmc0{qf zIxQQi$qq3AXjYJ#m8^|VJARbbc5?m}Q(GyW4Q0HFnG#FE{a6}$HKZH9`GJ>4;rRkgAq1;Kz?ZEES-B+b-rO@|t81=uge|_qf zs)yA3fZuJr)Y(yiK&32^;yAQpmM%Ssl~{2jmuZ8SuCwlFO+tG8RwijJx!r!&}=sdfVmI zq9~U2sED4ej0+bMsHB(LC^;*Vm`udSr}npUQvpQcm_l7y<$GLQVfS0*C#T@i&}MKi zf&MHTV9yMvQ7R1pL%?SacNWmFv zTS+vbWTi87o6G{_xvbArr10NQ21Q~yFh^=vN==fXuiD7}KhbbP0o33T&XM?>V>jH9 
zPA2-1Bv&9Ciov;+J+}QCdG1ttwY_1;wnl5`;373e+m+yg=pb%?d!J+w!?6D58}Y(eA3Md@ESr;y6P3DOR0DyK$M{eaRKe{ZQ@ zGKxl>3lQ6eh2EVPxH?S+Y*}Fs``#aYo3?QRCJtg)2!QRYfI@Dr_&EgFI+TSGJmdzL z*=dB`x>0Oqj^g2{CO*R)LEuoKimCDtGBub+SdSj+-TFr6b;7Ad5Kpts*LAlt>rpYJ zoKvn&Kf*QWx`8F4Z6cTaQlDKe+<{{k4McXUX`t%qRvSk$R6rer5J3K=ad+{@v{m;5O#a}+i&YLjS z!&!r#1}<)R z&yr)z322Z5ViazVu#Nk729t)Q&~Q-JS_RqD3={X6$-Vq$;@+68!w2_@~-BJ z@;?M9!HByq^o{$&O~t6P4S3KkkEaV2ll)`GfTlGwesFa-ly14C4(N0_0`05l`}s8r zGL;bwU=l>B3R5C(L+aRMmroBIM`EOrOG{$$WTRFWiaZBPHGo5T)G&ybnm@T_>l;8C z#(0q^%cuLuE(X8`D040@VxYrRm_RZeNuaHWAwAThz-!?SZd8&W--Ux>NvFTsY^JxK zw2J=WVMRkXeYUv8=iX>UaWW~Tg+PkfD;C9)CsSC}sooI*Zd-7wu5@#ld}SL(RjR**{_@-4yyr@)O~|Mf!Ei(NUn@?=@oy>eUOvfTsfmElFd-`jKDa!I8M()s7 z{WVzH!XxKHM+Xa6r|FxNs@IX0q^U^naDe#$ zNE=AFnCnyf(J+@1$08Z+)N{ditO+>lz@@^3#l5~=Ke!4kZ%(BtS~v;)w8W$%rKl$s z_F1I)8U|i1Bx!}k+%c;pu1)RmxsQ_XGmhhuqJyfGSTxH-QM+!%tOenyp83+XNY%CD zfcW7b)om8Dm6H3qzShL_<@qXBg5lxuV}l}sNGKXI6#|2)>CfdRsBgd3kn8Mut~9Wy zMQ0IX0&aHm=s9N#HE}QEryE3En%EVws&5_lMXTl=xq%(hTk5AkK%&Q~nHw(88zPxv z%OW^wgGf64RIEw+Np=fy!rY>hwe%Dg8!lDXR~EZcQx$kad?GYInYY(zhq$IO!ddp3xIb!E-2FQG6RLV&L2P_is)^u} zX%v;MGmBW0+S*YK{Pzz`zDUTqRc;q$JFYo;-UG6?DzR#@S1I~zNFwo9!2r=V$>zH4 zr~WKel$42eV{%3LD2S^!gvrSSU(e0eT&kIJP@9`&P)}Ub*<7a185?|YvQy(~i-YCL zfS62N56oxh;$jL64i+3FIC?kiq@vYd%r!=ZlWuqpeF8zGiZfhb;OxY%X_WFnr#P!R3;YB zcN=+PEdW^wNCd=k%Tan>@2Uql-0yl^mJf6?jw6P#(o_Y#g8`l!;E9*9j{E_K79eP& z;YTMURDQ))=6|9hQiv7pIx3C<8mOGLB%go4xxvKqu`1AIh26m=h01L#5Ut%t-?g-B z-><}OoSU8B{))zflMj-rLTA_Jo-0aYkku+K&B|hkm4-THlUyyEcp^E$VnbUy$%3Fb zCx1j|PH$*EY0(D4NeW%6Zz$Q@;C=EqlLu`Q(PEPWKWG#iP8UmiY_Ac}q zd}3khmuntO)XzZgcYI#rtf?!Itv}OBeO1#&qJnBKNjak^+>8vDAU&NR=2ql zaTwbTSFadt7IF*n(B9X1lki#IDkIE^3aONAqW2(xRMQ$oD=U^^8_p#ZA~ zPswR^5N5DhVm?x<@;wpB8(6?}6si{?|8lVn!g9W4DE}5RZZVc8-GE&Z(IIwI<9NwQ zDGz6vt0IOV$d@3b`gXoaibHKiSPJbE4Gtp;peNt7M^n=`qOq{vqBe9Wh+3ev8OncL7Z4LZ-T}gBjoQ)@bb0Vt=Pm`K$-+h;f z=)_ozo8AK&PbFgZ+A&C`pEoQwD_`lgwA20I%&m*kOWLbymnEksB!(0c+Fr#+E>l*Q zUG(bhHG)3BZtON=-9U~=_034wtxuyt(V|5v!_iHr0)YYxo}Rl3boeS0MASwRt-3hD zF+mDJthB)KAPg(1a9&1GPVhhnR{!9PcJG!K6JAB2ueS0CikmDDOx`?cL}}4=12m^? z7z~+2MBqYKVw4F9ohF>Mov|!b@FWx*roNd=?@(tWd@$y!Mb`Qt1$4c3vx_XI#8y;7$9Rqp=Ao9*WD=@cj;p$DrhtUYGovko%z^94$%2C1E?74JNqpz~OfoZ40&}y+JW`>vT}!k`Z7`eeDYKNFpqo&wb{? zCzMHQp3D9))Q8pfcCfolvAdim*oiK-+nhi@!w;4PqUcb-SBm2@y7(6l1V==&aP2(5 z&_c;FQ5Us?tOl7MJkXedE)s!@#pHK$!@-vWC^#yq#1YzK=0-LZFie4Rju}DR6g7S% zO+9f!iA@ycWAxoK+>qn-7O0shk#X|~)x*V9EJGx+JX39Xahjv>kvkN*W#MQvtpS2O zE*m73$F9doa3bY}9|8qn4Jy0F^@U}FI~JgH*3z6zvk%rSt`<>St>imJqROt#G|6`h z)nxi0isgZ3S?pM4NC?z`fp;Jg5D|SM*~tS$Oq%4r%T1=lm?CO(_jg@qf&!0IF)k5e zBE+26i;~0mWoN5GsP<{lrwkRX->Pyl#*$-%_lBXky7I80RZ$TH6P=Y2N-4l1!VKv| z-bz@Ri^KLD@+hODy|d*?EJ(SMPxgdL=JLw=9IoV{=sMO-WW{^1S@T5dw96G5;e8^T zDB19`sL3qMqB3CkOT|_ZBEt$EuXSOd67lb zK-z6nuoOZh28-3Dizk$ExWSv%W|1u=lBUpvurMZrsVE`apyf18?s*lggz3wmkty0NDM%POYb7qni`x94hpbL~6yjBf@q!7S(grBTD!RPuO48X#h3WV~ zOZr88hiv$h2)VsAK>?f|jrgGMazv#ot(Y{OF6Pj%rr=E*feW7za!?|;q{Qyy)jIwZ zTrN|{*R5?ZyJAv&sSl>k%M=3AvEhyz_@3$P$r5_1&E4eF z_LE!g$s^Sj&c^(_D~UlR*3EpRhVQ=jFCZw)U5sl`Dq>cK? 
zjkj_rYdBIIY>%-t4}08Mn8FCrOc2Kk&_q>q>8nKaGgv8KEZRt@IUl^5x@O<Bh{6OO}!2-`ITBANT#A>Rh&vH5ul1aSI!)dHRzOm zV8faEQxc&%>nOtM&DUzKHkC1crg>*BvleHkYf^GNYPJ(Alu%mlaHnC%Noi9SM`A7C z*H76m${w+fYI`R(*;?X4lNUOxWJOvKA?Wnjj&!nt%Uv=TUq3*xq}eV+pJp<8o%M>s z$l<7OlUL@Mz6($X}% zPN%gGw8K$s?}xBSsM!22n4~pg9yVs55%x_EeN>>SmYi1~G}*YNCR(yvtt&E`C^4JR zWUt`0ND=w7a1-r}J-9^tuOsCi5+Oq?`G_>40#y%}n`KQDNRn-{keL;;%4y;7y;Q^o zq`9+q6IG_HqbZwXojFM|UC}r+u+c0XH#4`T50vRO%VN3sqU|;P(xyA=bxmZNvGeON zL31}b-wc;|W!I2vypSk12mlB`GAAiqH^9j-I|Bt{VBEPgflAJfCwnGmRTOQXO8et< z47gUK)cR+PZ?2OJou`I10&Z4a?XeiMMZc#_I50-uNiSrZ_EbqGu-gD;kpK8K2n`@a z{$T!L{&%%7eprdX3kytm?wEJ;{ZvJ&PAKkGsNiF(MbW7Ho40$1lwC$`b?;<}GJBQF zWuERV)4TQGIWBEU1a+%U=D$ldWZ!fU&b?Ir_)`*?tMlKJ$igV%r;JIgLV{e=!7iz@ zBxgNyV3lPG(?JQFl~iX@OQuEmKHGIYe>^yNo=KznGRl)zh8x7XaEA-uM;sPHoLqo*Y^`f_`IM&?o{;yQ5inf?bI%FSS`qX)<+L$yTOENgH z(H2N~?q4O*Bjny0i3F*2P%3&|6z|iYSrU#~OaHlLYgry|LAUOCx-9)* zRJnI_(xxSbTtSM6mfn)dO^~#jwP|vVk>=Ozs(5-;W*)QN%oqLU(w~0M#5cv1&$ zv^g6JgP^%7*a(%*N_$wOLaacnl)NJshoVzGQmABPNS5P*b>vZ;5E>XDVql2`ns0RS zINk?z_km$$$ozQBQOCudVOlCytg&(-7u~sm?**M%%nM)ceL=$|>}YqqR!+*g*70q= zM!Jk>qKcx9O5-%M1`tTeD#cmy#teDHlInlT*g2k7*(uppc9(pKpIuvw?>(!=T$Znv zMGKQluY3RUYFH2S~b+Dj8JLbv&t>qzs)Z zh{Y)mlxSp4vhozMlGI30p{3=AEm;u%^~6zI${EqYZ`ee^&a|kuVv5(y#S40rG>Ik! zYVZA`X2}&!r^r|RZIC*Lbx2;&!bn;cO5Stgc~w-Qs)|?OyZxUrwI`7$vWMKqsRkdy zQ6g7LjsJoe;(5YekmU9_oMq*xx>uS7_jHQ}vB0)_E=p9rJ3nTZtBSvte*ZezByrbM zTp?D{%#)`_-&`Cb&GP<~I8S*+g!n2;Mnt-(ERwyNl;f0%NwBUQdTfixC61(hqxT7T zn&ln(Q9j!)s)Y>EENY5sTVHK=!n#H9;zE;X;IT%#WQRJfL359#_G6U%V&Ia50=iev zoqi-we;&7;CJdpyvi|7yZjskFe5Fv+vkJI|7+ch+v+JHG;GG{996dBg+PmOVqol5` f?!!c}O?+_YJuBZLv$gFx#s&(EYJCOu=vYP?bgjt| diff --git a/test/torchaudio_unittest/functional/functional_impl.py b/test/torchaudio_unittest/functional/functional_impl.py index c3bebbf076..b355293235 100644 --- a/test/torchaudio_unittest/functional/functional_impl.py +++ b/test/torchaudio_unittest/functional/functional_impl.py @@ -319,16 +319,6 @@ def test_amplitude_to_DB_top_db_clamp(self, shape): f"No values were close to the limit. Did it over-clamp?\n{decibels}" ) - @parameterized.expand( - list(itertools.product([(1, 2, 1025, 400, 2), (1025, 400, 2)], [1, 2, 0.7])) - ) - def test_complex_norm(self, shape, power): - torch.random.manual_seed(42) - complex_tensor = torch.randn(*shape, dtype=self.dtype, device=self.device) - expected_norm_tensor = complex_tensor.pow(2).sum(-1).pow(power / 2) - norm_tensor = F.complex_norm(complex_tensor, power) - self.assertEqual(norm_tensor, expected_norm_tensor, atol=1e-5, rtol=1e-5) - @parameterized.expand( list(itertools.product([(2, 1025, 400), (1, 201, 100)], [100], [0., 30.], [1, 2])) ) diff --git a/test/torchaudio_unittest/functional/torchscript_consistency_impl.py b/test/torchaudio_unittest/functional/torchscript_consistency_impl.py index 4097b20e34..86719875c6 100644 --- a/test/torchaudio_unittest/functional/torchscript_consistency_impl.py +++ b/test/torchaudio_unittest/functional/torchscript_consistency_impl.py @@ -223,14 +223,6 @@ def func(tensor): tensor = torch.rand((1, 10)) self._assert_consistency(func, tensor) - def test_complex_norm(self): - def func(tensor): - power = 2. 
-            return F.complex_norm(tensor, power)
-
-        tensor = torch.randn(1, 2, 1025, 400, 2)
-        self._assert_consistency(func, tensor)
-
     def test_mask_along_axis(self):
         def func(tensor):
             mask_param = 100
diff --git a/test/torchaudio_unittest/transforms/torchscript_consistency_impl.py b/test/torchaudio_unittest/transforms/torchscript_consistency_impl.py
index 27f57adba1..39406549f8 100644
--- a/test/torchaudio_unittest/transforms/torchscript_consistency_impl.py
+++ b/test/torchaudio_unittest/transforms/torchscript_consistency_impl.py
@@ -86,10 +86,6 @@ def test_Resample(self):
         tensor = common_utils.get_whitenoise(sample_rate=sr1)
         self._assert_consistency(T.Resample(sr1, sr2), tensor)
 
-    def test_ComplexNorm(self):
-        tensor = torch.rand((1, 2, 201, 2))
-        self._assert_consistency(T.ComplexNorm(), tensor)
-
     def test_MuLawEncoding(self):
         tensor = common_utils.get_whitenoise()
         self._assert_consistency(T.MuLawEncoding(), tensor)
diff --git a/tools/convert_voxpopuli_models.py b/tools/convert_voxpopuli_models.py
new file mode 100755
index 0000000000..02ece3c683
--- /dev/null
+++ b/tools/convert_voxpopuli_models.py
@@ -0,0 +1,111 @@
+#!/usr/bin/env python3
+"""Convert the fairseq models available in the voxpopuli repo https://github.com/facebookresearch/voxpopuli
+
+The available checkpoints should open with fairseq, but the following error
+cannot be resolved with almost any version of fairseq:
+https://github.com/facebookresearch/voxpopuli/issues/29
+
+So this script manually parses the checkpoint file and reconstructs the model.
+
+Examples
+
+```
+python convert_voxpopuli_models.py \
+    --input-file wav2vec2_base_10k_ft_fr.pt \
+    --output-file wav2vec2_voxpopuli_base_10k_asr_fr.pt
+```
+"""
+
+
+def _parse_args():
+    import argparse
+    parser = argparse.ArgumentParser(
+        description=__doc__,
+        formatter_class=argparse.RawTextHelpFormatter,
+    )
+    parser.add_argument(
+        '--input-file', required=True,
+        help='Input checkpoint file.'
+    )
+    parser.add_argument(
+        '--output-file', required=False,
+        help='Output model file.'
+ ) + return parser.parse_args() + + +def _removeprefix(s, prefix): + if s.startswith(prefix): + return s[len(prefix):] + return s + + +def _load(input_file): + import torch + from omegaconf import OmegaConf + + data = torch.load(input_file) + cfg = OmegaConf.to_container(data['cfg']) + for key in list(cfg.keys()): + if key != 'model': + del cfg[key] + if 'w2v_args' in cfg['model']: + del cfg['model']['w2v_args'][key] + state_dict = {_removeprefix(k, 'w2v_encoder.'): v for k, v in data['model'].items()} + return cfg, state_dict + + +def _parse_model_param(cfg, state_dict): + key_mapping = { + "extractor_mode": "extractor_mode", + "conv_feature_layers": "extractor_conv_layer_config", + "conv_bias": "extractor_conv_bias", + "encoder_embed_dim": "encoder_embed_dim", + "dropout_input": "encoder_projection_dropout", + "conv_pos": "encoder_pos_conv_kernel", + "conv_pos_groups": "encoder_pos_conv_groups", + "encoder_layers": "encoder_num_layers", + "encoder_attention_heads": "encoder_num_heads", + "attention_dropout": "encoder_attention_dropout", + "encoder_ffn_embed_dim": "encoder_ff_interm_features", + "activation_dropout": "encoder_ff_interm_dropout", + "dropout": "encoder_dropout", + "layer_norm_first": "encoder_layer_norm_first", + "layerdrop": "encoder_layer_drop", + "encoder_layerdrop": "encoder_layer_drop", + } + params = {} + src_dicts = [cfg['model']] + if 'w2v_args' in cfg['model']: + src_dicts.append(cfg['model']['w2v_args']['model']) + + for src, tgt in key_mapping.items(): + for model_cfg in src_dicts: + if src in model_cfg: + params[tgt] = model_cfg[src] + break + if params["extractor_mode"] == "default": + params["extractor_mode"] = "group_norm" + # the following line is commented out to resolve lint warning; uncomment before running script + # params["extractor_conv_layer_config"] = eval(params["extractor_conv_layer_config"]) + assert len(params) == 15 + params['aux_num_out'] = state_dict['proj.bias'].numel() if 'proj.bias' in state_dict else None + return params + + +def _main(args): + import json + import torch + import torchaudio + from torchaudio.models.wav2vec2.utils.import_fairseq import _convert_state_dict as _convert + + cfg, state_dict = _load(args.input_file) + params = _parse_model_param(cfg, state_dict) + print(json.dumps(params, indent=4)) + model = torchaudio.models.wav2vec2_model(**params) + model.load_state_dict(_convert(state_dict)) + torch.save(model.state_dict(), args.output_file) + + +if __name__ == '__main__': + _main(_parse_args()) diff --git a/torchaudio/backend/soundfile_backend.py b/torchaudio/backend/soundfile_backend.py index d133356054..8b0b27c5cd 100644 --- a/torchaudio/backend/soundfile_backend.py +++ b/torchaudio/backend/soundfile_backend.py @@ -146,7 +146,7 @@ def load( * SPHERE By default (``normalize=True``, ``channels_first=True``), this function returns Tensor with - ``float32`` dtype and the shape of ``[channel, time]``. + ``float32`` dtype and the shape of `[channel, time]`. The samples are normalized to fit in the range of ``[-1.0, 1.0]``. When the input format is WAV with integer type, such as 32-bit signed integer, 16-bit @@ -182,16 +182,16 @@ def load( integer type. This argument has no effect for formats other than integer WAV type. channels_first (bool, optional): - When True, the returned Tensor has dimension ``[channel, time]``. - Otherwise, the returned Tensor's dimension is ``[time, channel]``. + When True, the returned Tensor has dimension `[channel, time]`. + Otherwise, the returned Tensor's dimension is `[time, channel]`. 
        format (str or None, optional):
            Not used. PySoundFile does not accept a format hint.

    Returns:
-        Tuple[torch.Tensor, int]: Resulting Tensor and sample rate.
+        (torch.Tensor, int): Resulting Tensor and sample rate.
         If the input file has integer wav format and normalization is off, then it has
         integer type, else ``float32`` type. If ``channels_first=True``, it has
-        ``[channel, time]`` else ``[time, channel]``.
+        `[channel, time]` else `[time, channel]`.
     """
     with soundfile.SoundFile(filepath, "r") as file_:
         if file_.format != "WAV" or normalize:
@@ -335,8 +335,8 @@ def save(
         filepath (str or pathlib.Path): Path to audio file.
         src (torch.Tensor): Audio data to save. Must be a 2D tensor.
         sample_rate (int): sampling rate
-        channels_first (bool, optional): If ``True``, the given tensor is interpreted as ``[channel, time]``,
-            otherwise ``[time, channel]``.
+        channels_first (bool, optional): If ``True``, the given tensor is interpreted as `[channel, time]`,
+            otherwise `[time, channel]`.
         compression (float or None, optional): Not used.
             It is here only for interface compatibility reason with "sox_io" backend.
         format (str or None, optional): Override the audio format.
diff --git a/torchaudio/backend/sox_io_backend.py b/torchaudio/backend/sox_io_backend.py
index d36609b7ee..b725bcc968 100644
--- a/torchaudio/backend/sox_io_backend.py
+++ b/torchaudio/backend/sox_io_backend.py
@@ -89,7 +89,7 @@ def load(
     and corresponding codec libraries such as ``libmad`` or ``libmp3lame`` etc.

     By default (``normalize=True``, ``channels_first=True``), this function returns Tensor with
-    ``float32`` dtype and the shape of ``[channel, time]``.
+    ``float32`` dtype and the shape of `[channel, time]`.
     The samples are normalized to fit in the range of ``[-1.0, 1.0]``.

     When the input format is WAV with integer type, such as 32-bit signed integer, 16-bit
@@ -131,18 +131,18 @@ def load(
             integer type. This argument has no effect for formats other than integer WAV type.
         channels_first (bool, optional):
-            When True, the returned Tensor has dimension ``[channel, time]``.
-            Otherwise, the returned Tensor's dimension is ``[time, channel]``.
+            When True, the returned Tensor has dimension `[channel, time]`.
+            Otherwise, the returned Tensor's dimension is `[time, channel]`.
         format (str or None, optional):
             Override the format detection with the given format.
             Providing the argument might help when libsox cannot infer the format
             from the header or extension.

     Returns:
-        Tuple[torch.Tensor, int]: Resulting Tensor and sample rate.
+        (torch.Tensor, int): Resulting Tensor and sample rate.
         If the input file has integer wav format and normalization is off, then it has
         integer type, else ``float32`` type. If ``channels_first=True``, it has
-        ``[channel, time]`` else ``[time, channel]``.
+        `[channel, time]` else `[time, channel]`.
     """
     if not torch.jit.is_scripting():
         if hasattr(filepath, 'read'):
@@ -172,8 +172,8 @@ def save(
         as ``str`` for TorchScript compiler compatibility.
         src (torch.Tensor): Audio data to save. Must be a 2D tensor.
         sample_rate (int): sampling rate
-        channels_first (bool, optional): If ``True``, the given tensor is interpreted as ``[channel, time]``,
-            otherwise ``[time, channel]``.
+        channels_first (bool, optional): If ``True``, the given tensor is interpreted as `[channel, time]`,
+            otherwise `[time, channel]`.
         compression (float or None, optional): Used for formats other than WAV.
             This corresponds to ``-C`` option of ``sox`` command.
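The pseudo-complex utilities whose tests are removed above (``F.complex_norm``, ``F.angle``, ``F.magphase``; their implementations are dropped from ``torchaudio/functional`` later in this patch) map directly onto PyTorch's native complex API, as the deprecation messages removed below suggest. A minimal migration sketch, assuming a pseudo-complex tensor with a trailing dimension of 2 (the tensor name and shape here are illustrative, not taken from the patch):

```python
import torch

# Pseudo-complex spectrogram of shape (..., 2), the layout the removed
# functions expected; the concrete shape is illustrative only.
specgram = torch.randn(1, 201, 400, 2)

# view_as_complex requires the trailing dimension to be 2 and contiguous.
cplx = torch.view_as_complex(specgram.contiguous())

power = 2.0
norm = cplx.abs().pow(power)           # replaces F.complex_norm(specgram, power)
phase = cplx.angle()                   # replaces F.angle(specgram)
mag, phase = cplx.abs(), cplx.angle()  # replaces F.magphase(specgram)
```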
diff --git a/torchaudio/datasets/cmuarctic.py b/torchaudio/datasets/cmuarctic.py index 445fa2caef..a01399198b 100644 --- a/torchaudio/datasets/cmuarctic.py +++ b/torchaudio/datasets/cmuarctic.py @@ -164,7 +164,7 @@ def __getitem__(self, n: int) -> Tuple[Tensor, int, str, str]: n (int): The index of the sample to be loaded Returns: - tuple: ``(waveform, sample_rate, transcript, utterance_id)`` + (Tensor, int, str, str): ``(waveform, sample_rate, transcript, utterance_id)`` """ line = self._walker[n] return load_cmuarctic_item(line, self._path, self._folder_audio, self._ext_audio) diff --git a/torchaudio/datasets/cmudict.py b/torchaudio/datasets/cmudict.py index 4964887a26..2c6acccf21 100644 --- a/torchaudio/datasets/cmudict.py +++ b/torchaudio/datasets/cmudict.py @@ -167,7 +167,7 @@ def __getitem__(self, n: int) -> Tuple[str, List[str]]: n (int): The index of the sample to be loaded. Returns: - tuple: The corresponding word and phonemes ``(word, [phonemes])``. + (str, List[str]): The corresponding word and phonemes ``(word, [phonemes])``. """ return self._dictionary[n] diff --git a/torchaudio/datasets/commonvoice.py b/torchaudio/datasets/commonvoice.py index 1aed80a745..20f9234f89 100644 --- a/torchaudio/datasets/commonvoice.py +++ b/torchaudio/datasets/commonvoice.py @@ -65,8 +65,8 @@ def __getitem__(self, n: int) -> Tuple[Tensor, int, Dict[str, str]]: n (int): The index of the sample to be loaded Returns: - tuple: ``(waveform, sample_rate, dictionary)``, where dictionary is built - from the TSV file with the following keys: ``client_id``, ``path``, ``sentence``, + (Tensor, int, Dict[str, str]): ``(waveform, sample_rate, dictionary)``, where dictionary + is built from the TSV file with the following keys: ``client_id``, ``path``, ``sentence``, ``up_votes``, ``down_votes``, ``age``, ``gender`` and ``accent``. 
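+
+        Example (an illustrative sketch; assumes ``root`` points at an
+        extracted CommonVoice directory containing the default ``train.tsv``):
+
+            >>> dataset = COMMONVOICE(root)
+            >>> waveform, sample_rate, dictionary = dataset[0]
+            >>> dictionary["sentence"]  # the transcript of the first clip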
""" line = self._walker[n] diff --git a/torchaudio/datasets/dr_vctk.py b/torchaudio/datasets/dr_vctk.py index a00615db86..bfd2e3e400 100644 --- a/torchaudio/datasets/dr_vctk.py +++ b/torchaudio/datasets/dr_vctk.py @@ -107,8 +107,9 @@ def __getitem__(self, n: int) -> Tuple[Tensor, int, Tensor, int, str, str, str, n (int): The index of the sample to be loaded Returns: - tuple: ``(waveform_clean, sample_rate_clean, waveform_noisy, sample_rate_noisy, speaker_id, utterance_id,\ - source, channel_id)`` + (Tensor, int, Tensor, int, str, str, str, int): + ``(waveform_clean, sample_rate_clean, waveform_noisy, sample_rate_noisy, speaker_id,\ + utterance_id, source, channel_id)`` """ filename = self._filename_list[n] return self._load_dr_vctk_item(filename) diff --git a/torchaudio/datasets/gtzan.py b/torchaudio/datasets/gtzan.py index 14f3962c85..b78104cacb 100644 --- a/torchaudio/datasets/gtzan.py +++ b/torchaudio/datasets/gtzan.py @@ -1102,7 +1102,7 @@ def __getitem__(self, n: int) -> Tuple[Tensor, int, str]: n (int): The index of the sample to be loaded Returns: - tuple: ``(waveform, sample_rate, label)`` + (Tensor, int, str): ``(waveform, sample_rate, label)`` """ fileid = self._walker[n] item = load_gtzan_item(fileid, self._path, self._ext_audio) diff --git a/torchaudio/datasets/librimix.py b/torchaudio/datasets/librimix.py index 32c2854f1e..ab3b3877dc 100644 --- a/torchaudio/datasets/librimix.py +++ b/torchaudio/datasets/librimix.py @@ -84,6 +84,6 @@ def __getitem__(self, key: int) -> SampleType: Args: key (int): The index of the sample to be loaded Returns: - tuple: ``(sample_rate, mix_waveform, list_of_source_waveforms)`` + (int, Tensor, List[Tensor]): ``(sample_rate, mix_waveform, list_of_source_waveforms)`` """ return self._load_sample(self.files[key]) diff --git a/torchaudio/datasets/librispeech.py b/torchaudio/datasets/librispeech.py index 090136831d..ad8a26493c 100644 --- a/torchaudio/datasets/librispeech.py +++ b/torchaudio/datasets/librispeech.py @@ -133,7 +133,8 @@ def __getitem__(self, n: int) -> Tuple[Tensor, int, str, int, int, int]: n (int): The index of the sample to be loaded Returns: - tuple: ``(waveform, sample_rate, transcript, speaker_id, chapter_id, utterance_id)`` + (Tensor, int, str, int, int, int): + ``(waveform, sample_rate, transcript, speaker_id, chapter_id, utterance_id)`` """ fileid = self._walker[n] return load_librispeech_item(fileid, self._path, self._ext_audio, self._ext_txt) diff --git a/torchaudio/datasets/libritts.py b/torchaudio/datasets/libritts.py index 9f0c38a751..2c978c426e 100644 --- a/torchaudio/datasets/libritts.py +++ b/torchaudio/datasets/libritts.py @@ -134,8 +134,8 @@ def __getitem__(self, n: int) -> Tuple[Tensor, int, str, str, int, int, str]: n (int): The index of the sample to be loaded Returns: - tuple: ``(waveform, sample_rate, original_text, normalized_text, speaker_id, - chapter_id, utterance_id)`` + (Tensor, int, str, str, str, int, int, str): + ``(waveform, sample_rate, original_text, normalized_text, speaker_id, chapter_id, utterance_id)`` """ fileid = self._walker[n] return load_libritts_item( diff --git a/torchaudio/datasets/ljspeech.py b/torchaudio/datasets/ljspeech.py index 1a519242c3..a0abcbb9ba 100644 --- a/torchaudio/datasets/ljspeech.py +++ b/torchaudio/datasets/ljspeech.py @@ -68,7 +68,8 @@ def __getitem__(self, n: int) -> Tuple[Tensor, int, str, str]: n (int): The index of the sample to be loaded Returns: - tuple: ``(waveform, sample_rate, transcript, normalized_transcript)`` + (Tensor, int, str, str): + ``(waveform, 
sample_rate, transcript, normalized_transcript)`` """ line = self._flist[n] fileid, transcript, normalized_transcript = line diff --git a/torchaudio/datasets/speechcommands.py b/torchaudio/datasets/speechcommands.py index 0fca1e4fb7..d92d6d44df 100644 --- a/torchaudio/datasets/speechcommands.py +++ b/torchaudio/datasets/speechcommands.py @@ -138,7 +138,8 @@ def __getitem__(self, n: int) -> Tuple[Tensor, int, str, str, int]: n (int): The index of the sample to be loaded Returns: - tuple: ``(waveform, sample_rate, label, speaker_id, utterance_number)`` + (Tensor, int, str, str, int): + ``(waveform, sample_rate, label, speaker_id, utterance_number)`` """ fileid = self._walker[n] return load_speechcommands_item(fileid, self._path) diff --git a/torchaudio/datasets/tedlium.py b/torchaudio/datasets/tedlium.py index 43e3b454f7..fe6222a46f 100644 --- a/torchaudio/datasets/tedlium.py +++ b/torchaudio/datasets/tedlium.py @@ -127,7 +127,8 @@ def _load_tedlium_item(self, fileid: str, line: int, path: str) -> Tuple[Tensor, path (str): Dataset root path Returns: - tuple: ``(waveform, sample_rate, transcript, talk_id, speaker_id, identifier)`` + (Tensor, int, str, int, int, int): + ``(waveform, sample_rate, transcript, talk_id, speaker_id, identifier)`` """ transcript_path = os.path.join(path, "stm", fileid) with open(transcript_path + ".stm") as f: diff --git a/torchaudio/datasets/utils.py b/torchaudio/datasets/utils.py index 11b10fad0b..fd76f43d09 100644 --- a/torchaudio/datasets/utils.py +++ b/torchaudio/datasets/utils.py @@ -151,7 +151,7 @@ def extract_archive(from_path: str, to_path: Optional[str] = None, overwrite: bo overwrite (bool, optional): overwrite existing files (Default: ``False``) Returns: - list: List of paths to extracted files even if not overwritten. + List[str]: List of paths to extracted files even if not overwritten. 
Examples: >>> url = 'http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/validation.tar.gz' diff --git a/torchaudio/datasets/vctk.py b/torchaudio/datasets/vctk.py index c7c77369e9..f0cdfdb410 100644 --- a/torchaudio/datasets/vctk.py +++ b/torchaudio/datasets/vctk.py @@ -134,7 +134,8 @@ def __getitem__(self, n: int) -> SampleType: n (int): The index of the sample to be loaded Returns: - tuple: ``(waveform, sample_rate, transcript, speaker_id, utterance_id)`` + (Tensor, int, str, str, str): + ``(waveform, sample_rate, transcript, speaker_id, utterance_id)`` """ speaker_id, utterance_id = self._sample_ids[n] return self._load_sample(speaker_id, utterance_id, self._mic_id) diff --git a/torchaudio/datasets/yesno.py b/torchaudio/datasets/yesno.py index 10b38a1260..f33c11852c 100644 --- a/torchaudio/datasets/yesno.py +++ b/torchaudio/datasets/yesno.py @@ -77,7 +77,7 @@ def __getitem__(self, n: int) -> Tuple[Tensor, int, List[int]]: n (int): The index of the sample to be loaded Returns: - tuple: ``(waveform, sample_rate, labels)`` + (Tensor, int, List[int]): ``(waveform, sample_rate, labels)`` """ fileid = self._walker[n] item = self._load_item(fileid, self._path) diff --git a/torchaudio/functional/__init__.py b/torchaudio/functional/__init__.py index fc3173cd21..2fc66df69e 100644 --- a/torchaudio/functional/__init__.py +++ b/torchaudio/functional/__init__.py @@ -1,7 +1,5 @@ from .functional import ( amplitude_to_DB, - angle, - complex_norm, compute_deltas, compute_kaldi_pitch, create_dct, @@ -12,7 +10,6 @@ detect_pitch_frequency, inverse_spectrogram, griffinlim, - magphase, mask_along_axis, mask_along_axis_iid, mu_law_encoding, @@ -54,8 +51,6 @@ __all__ = [ 'amplitude_to_DB', - 'angle', - 'complex_norm', 'compute_deltas', 'compute_kaldi_pitch', 'create_dct', @@ -65,7 +60,6 @@ 'DB_to_amplitude', 'detect_pitch_frequency', 'griffinlim', - 'magphase', 'mask_along_axis', 'mask_along_axis_iid', 'mu_law_encoding', diff --git a/torchaudio/functional/filtering.py b/torchaudio/functional/filtering.py index 3c0ba86cf7..0f5d24f67e 100644 --- a/torchaudio/functional/filtering.py +++ b/torchaudio/functional/filtering.py @@ -663,7 +663,7 @@ def filtfilt( Returns: Tensor: Waveform with dimension of either `(..., num_filters, time)` if ``a_coeffs`` and ``b_coeffs`` - are 2D Tensors, or `(..., time)` otherwise. + are 2D Tensors, or `(..., time)` otherwise. """ forward_filtered = lfilter(waveform, a_coeffs, b_coeffs, clamp=False, batching=True) backward_filtered = lfilter( @@ -987,7 +987,7 @@ def lfilter( Returns: Tensor: Waveform with dimension of either `(..., num_filters, time)` if ``a_coeffs`` and ``b_coeffs`` - are 2D Tensors, or `(..., time)` otherwise. + are 2D Tensors, or `(..., time)` otherwise. """ assert a_coeffs.size() == b_coeffs.size() assert a_coeffs.ndim <= 2 @@ -1474,7 +1474,7 @@ def vad( in the detector algorithm. (Default: 2000.0) Returns: - Tensor: Tensor of audio of dimension (..., time). + Tensor: Tensor of audio of dimension `(..., time)`. Reference: - http://sox.sourceforge.net/sox.html diff --git a/torchaudio/functional/functional.py b/torchaudio/functional/functional.py index fb1033078a..a973230d2a 100644 --- a/torchaudio/functional/functional.py +++ b/torchaudio/functional/functional.py @@ -28,9 +28,6 @@ "DB_to_amplitude", "mu_law_encoding", "mu_law_decoding", - "complex_norm", - "angle", - "magphase", "phase_vocoder", 'mask_along_axis', 'mask_along_axis_iid', @@ -263,7 +260,7 @@ def griffinlim( rand_init (bool): Initializes phase randomly if True, to zero otherwise. 
Returns: - torch.Tensor: waveform of `(..., time)`, where time equals the ``length`` parameter if given. + Tensor: waveform of `(..., time)`, where time equals the ``length`` parameter if given. """ assert momentum < 1, 'momentum={} > 1 can be unstable'.format(momentum) assert momentum >= 0, 'momentum={} < 0'.format(momentum) @@ -724,78 +721,6 @@ def mu_law_decoding( return x -@_mod_utils.deprecated( - "Please convert the input Tensor to complex type with `torch.view_as_complex` then " - "use `torch.abs`. " - "Please refer to https://github.com/pytorch/audio/issues/1337 " - "for more details about torchaudio's plan to migrate to native complex type.", - version="0.11", -) -def complex_norm( - complex_tensor: Tensor, - power: float = 1.0 -) -> Tensor: - r"""Compute the norm of complex tensor input. - - Args: - complex_tensor (Tensor): Tensor shape of `(..., complex=2)` - power (float, optional): Power of the norm. (Default: `1.0`). - - Returns: - Tensor: Power of the normed input tensor. Shape of `(..., )` - """ - - # Replace by torch.norm once issue is fixed - # https://github.com/pytorch/pytorch/issues/34279 - return complex_tensor.pow(2.).sum(-1).pow(0.5 * power) - - -@_mod_utils.deprecated( - "Please convert the input Tensor to complex type with `torch.view_as_complex` then " - "use `torch.angle`. " - "Please refer to https://github.com/pytorch/audio/issues/1337 " - "for more details about torchaudio's plan to migrate to native complex type.", - version="0.11", -) -def angle( - complex_tensor: Tensor -) -> Tensor: - r"""Compute the angle of complex tensor input. - - Args: - complex_tensor (Tensor): Tensor shape of `(..., complex=2)` - - Return: - Tensor: Angle of a complex tensor. Shape of `(..., )` - """ - return torch.atan2(complex_tensor[..., 1], complex_tensor[..., 0]) - - -@_mod_utils.deprecated( - "Please convert the input Tensor to complex type with `torch.view_as_complex` then " - "use `torch.abs` and `torch.angle`. " - "Please refer to https://github.com/pytorch/audio/issues/1337 " - "for more details about torchaudio's plan to migrate to native complex type.", - version="0.11", -) -def magphase( - complex_tensor: Tensor, - power: float = 1.0 -) -> Tuple[Tensor, Tensor]: - r"""Separate a complex-valued spectrogram with shape `(..., 2)` into its magnitude and phase. - - Args: - complex_tensor (Tensor): Tensor shape of `(..., complex=2)` - power (float, optional): Power of the norm. (Default: `1.0`) - - Returns: - (Tensor, Tensor): The magnitude and phase of the complex tensor - """ - mag = complex_norm(complex_tensor, power) - phase = angle(complex_tensor) - return mag, phase - - def phase_vocoder( complex_specgrams: Tensor, rate: float, @@ -1369,7 +1294,7 @@ def apply_codec( For more details see :py:func:`torchaudio.backend.sox_io_backend.save`. Returns: - torch.Tensor: Resulting Tensor. + Tensor: Resulting Tensor. If ``channels_first=True``, it has `(channel, time)` else `(time, channel)`. 
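+
+    Example
+        An illustrative sketch (requires the "sox_io" backend; the output
+        length may differ from the input because codecs such as MP3 pad
+        the waveform):
+
+        >>> waveform = torch.rand(1, 16000)  # `(channel, time)`, 16 kHz noise
+        >>> augmented = apply_codec(waveform, 16000, format="mp3")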
""" bytes = io.BytesIO() diff --git a/torchaudio/models/conv_tasnet.py b/torchaudio/models/conv_tasnet.py index 4c501cfba8..39d3dae02f 100644 --- a/torchaudio/models/conv_tasnet.py +++ b/torchaudio/models/conv_tasnet.py @@ -154,7 +154,7 @@ def forward(self, input: torch.Tensor) -> torch.Tensor: input (torch.Tensor): 3D Tensor with shape [batch, features, frames] Returns: - torch.Tensor: shape [batch, num_sources, features, frames] + Tensor: shape [batch, num_sources, features, frames] """ batch_size = input.shape[0] feats = self.input_norm(input) @@ -264,7 +264,7 @@ def _align_num_frames_with_strides( input (torch.Tensor): 3D Tensor with shape (batch_size, channels==1, frames) Returns: - torch.Tensor: Padded Tensor + Tensor: Padded Tensor int: Number of paddings performed """ batch_size, num_channels, num_frames = input.shape @@ -291,7 +291,7 @@ def forward(self, input: torch.Tensor) -> torch.Tensor: input (torch.Tensor): 3D Tensor with shape [batch, channel==1, frames] Returns: - torch.Tensor: 3D Tensor with shape [batch, channel==num_sources, frames] + Tensor: 3D Tensor with shape [batch, channel==num_sources, frames] """ if input.ndim != 3 or input.shape[1] != 1: raise ValueError( diff --git a/torchaudio/models/tacotron2.py b/torchaudio/models/tacotron2.py index 7f3cdf12c4..109d396a7f 100644 --- a/torchaudio/models/tacotron2.py +++ b/torchaudio/models/tacotron2.py @@ -1031,7 +1031,7 @@ def forward( mel_specgram_lengths (Tensor): The length of each mel spectrogram with shape `(n_batch, )`. Returns: - Tensor, Tensor, Tensor, and Tensor: + [Tensor, Tensor, Tensor, Tensor]: Tensor Mel spectrogram before Postnet with shape `(n_batch, n_mels, max of mel_specgram_lengths)`. Tensor diff --git a/torchaudio/models/wav2vec2/model.py b/torchaudio/models/wav2vec2/model.py index c2d194f600..a28742a239 100644 --- a/torchaudio/models/wav2vec2/model.py +++ b/torchaudio/models/wav2vec2/model.py @@ -8,7 +8,9 @@ class Wav2Vec2Model(Module): - """Encoder model used in *wav2vec 2.0* [:footcite:`baevski2020wav2vec`]. + """torchaudio.models.Wav2Vec2Model(feature_extractor: torch.nn.Module, encoder: torch.nn.Module, aux: Optional[torch.nn.Module] = None) + + Encoder model used in *wav2vec 2.0* [:footcite:`baevski2020wav2vec`]. Note: To build the model, please use one of the factory functions. @@ -23,7 +25,7 @@ class Wav2Vec2Model(Module): aux (torch.nn.Module or None, optional): Auxiliary module. If provided, the output from encoder is passed to this module. - """ + """ # noqa: E501 def __init__( self, feature_extractor: Module, @@ -90,7 +92,7 @@ def forward( lengths (Tensor or None, optional): Indicates the valid length of each audio in the batch. Shape: `(batch, )`. - When the ``waveforms`` contains audios with different duration, + When the ``waveforms`` contains audios with different durations, by providing ``lengths`` argument, the model will compute the corresponding valid output lengths and apply proper mask in transformer attention layer. @@ -104,7 +106,7 @@ def forward( Shape: `(batch, frames, num labels)`. Tensor or None If ``lengths`` argument was provided, a Tensor of shape `(batch, )` - is retuned. + is returned. It indicates the valid length in time axis of the output Tensor. 
""" x, lengths = self.feature_extractor(waveforms, lengths) @@ -132,7 +134,10 @@ def wav2vec2_model( encoder_layer_drop: float, aux_num_out: Optional[int], ) -> Wav2Vec2Model: - """Build a custom Wav2Vec2Model + # Overriding the signature so that the return type is correct on Sphinx + """wav2vec2_model(extractor_mode: str, extractor_conv_layer_config: Optional[List[Tuple[int, int, int]]], extractor_conv_bias: bool, encoder_embed_dim: int, encoder_projection_dropout: float, encoder_pos_conv_kernel: int, encoder_pos_conv_groups: int, encoder_num_layers: int, encoder_num_heads: int, encoder_attention_dropout: float, encoder_ff_interm_features: int, encoder_ff_interm_dropout: float, encoder_dropout: float, encoder_layer_norm_first: bool, encoder_layer_drop: float, aux_num_out: Optional[int]) -> torchaudio.models.Wav2Vec2Model + + Build a custom Wav2Vec2Model Note: The "feature extractor" below corresponds to @@ -287,7 +292,10 @@ def wav2vec2_base( encoder_layer_drop: float = 0.1, aux_num_out: Optional[int] = None, ) -> Wav2Vec2Model: - """Build Wav2Vec2Model with "base" architecture from *wav2vec 2.0* [:footcite:`baevski2020wav2vec`] + # Overriding the signature so that the return type is correct on Sphinx + """wav2vec2_base(encoder_projection_dropout: float = 0.1, encoder_attention_dropout: float = 0.1, encoder_ff_interm_dropout: float = 0.1, encoder_dropout: float = 0.1, encoder_layer_drop: float = 0.1, aux_num_out: Optional[int] = None) -> torchaudio.models.Wav2Vec2Model + + Build Wav2Vec2Model with "base" architecture from *wav2vec 2.0* [:footcite:`baevski2020wav2vec`] Args: encoder_projection_dropout (float): @@ -306,7 +314,7 @@ def wav2vec2_base( Returns: Wav2Vec2Model: The resulting model. - """ + """ # noqa: E501 return wav2vec2_model( extractor_mode="group_norm", extractor_conv_layer_config=None, @@ -335,7 +343,10 @@ def wav2vec2_large( encoder_layer_drop: float = 0.1, aux_num_out: Optional[int] = None, ) -> Wav2Vec2Model: - """Build Wav2Vec2Model with "large" architecture from *wav2vec 2.0* [:footcite:`baevski2020wav2vec`] + # Overriding the signature so that the return type is correct on Sphinx + """wav2vec2_large(encoder_projection_dropout: float = 0.1, encoder_attention_dropout: float = 0.1, encoder_ff_interm_dropout: float = 0.1, encoder_dropout: float = 0.1, encoder_layer_drop: float = 0.1, aux_num_out: Optional[int] = None) -> torchaudio.models.Wav2Vec2Model + + Build Wav2Vec2Model with "large" architecture from *wav2vec 2.0* [:footcite:`baevski2020wav2vec`] Args: encoder_projection_dropout (float): @@ -354,7 +365,7 @@ def wav2vec2_large( Returns: Wav2Vec2Model: The resulting model. 
- """ + """ # noqa: E501 return wav2vec2_model( extractor_mode="group_norm", extractor_conv_layer_config=None, @@ -383,7 +394,10 @@ def wav2vec2_large_lv60k( encoder_layer_drop: float = 0.1, aux_num_out: Optional[int] = None, ) -> Wav2Vec2Model: - """Build Wav2Vec2Model with "large lv-60k" architecture from *wav2vec 2.0* [:footcite:`baevski2020wav2vec`] + # Overriding the signature so that the return type is correct on Sphinx + """wav2vec2_large_lv60k( encoder_projection_dropout: float = 0.1, encoder_attention_dropout: float = 0.0, encoder_ff_interm_dropout: float = 0.1, encoder_dropout: float = 0.0, encoder_layer_drop: float = 0.1, aux_num_out: Optional[int] = None) -> torchaudio.models.Wav2Vec2Model + + Build Wav2Vec2Model with "large lv-60k" architecture from *wav2vec 2.0* [:footcite:`baevski2020wav2vec`] Args: encoder_projection_dropout (float): @@ -402,7 +416,7 @@ def wav2vec2_large_lv60k( Returns: Wav2Vec2Model: The resulting model. - """ + """ # noqa: E501 return wav2vec2_model( extractor_mode="layer_norm", extractor_conv_layer_config=None, @@ -431,7 +445,10 @@ def hubert_base( encoder_layer_drop: float = 0.05, aux_num_out: Optional[int] = None, ) -> Wav2Vec2Model: - """Build HuBERT model with "base" architecture from *HuBERT* [:footcite:`hsu2021hubert`] + # Overriding the signature so that the return type is correct on Sphinx + """hubert_base(encoder_projection_dropout: float = 0.1, encoder_attention_dropout: float = 0.1, encoder_ff_interm_dropout: float = 0.0, encoder_dropout: float = 0.1, encoder_layer_drop: float = 0.05, aux_num_out: Optional[int] = None) -> torchaudio.models.Wav2Vec2Model + + Build HuBERT model with "base" architecture from *HuBERT* [:footcite:`hsu2021hubert`] Args: encoder_projection_dropout (float): @@ -450,7 +467,7 @@ def hubert_base( Returns: Wav2Vec2Model: The resulting model. - """ + """ # noqa: E501 return wav2vec2_model( extractor_mode='group_norm', extractor_conv_layer_config=None, @@ -479,7 +496,10 @@ def hubert_large( encoder_layer_drop: float = 0.0, aux_num_out: Optional[int] = None, ) -> Wav2Vec2Model: - """Build HuBERT model with "large" architecture from *HuBERT* [:footcite:`hsu2021hubert`] + # Overriding the signature so that the return type is correct on Sphinx + """hubert_large(encoder_projection_dropout: float = 0.0, encoder_attention_dropout: float = 0.0, encoder_ff_interm_dropout: float = 0.0, encoder_dropout: float = 0.0, encoder_layer_drop: float = 0.0, aux_num_out: Optional[int] = None) -> torchaudio.models.Wav2Vec2Model + + Build HuBERT model with "large" architecture from *HuBERT* [:footcite:`hsu2021hubert`] Args: encoder_projection_dropout (float): @@ -498,7 +518,7 @@ def hubert_large( Returns: Wav2Vec2Model: The resulting model. 
- """ + """ # noqa: E501 return wav2vec2_model( extractor_mode='layer_norm', extractor_conv_layer_config=None, @@ -527,7 +547,10 @@ def hubert_xlarge( encoder_layer_drop: float = 0.0, aux_num_out: Optional[int] = None, ) -> Wav2Vec2Model: - """Build HuBERT model with "extra large" architecture from *HuBERT* [:footcite:`hsu2021hubert`] + # Overriding the signature so that the return type is correct on Sphinx + """hubert_xlarge(encoder_projection_dropout: float = 0.0, encoder_attention_dropout: float = 0.0, encoder_ff_interm_dropout: float = 0.0, encoder_dropout: float = 0.0, encoder_layer_drop: float = 0.0, aux_num_out: Optional[int] = None) -> torchaudio.models.Wav2Vec2Model + + Build HuBERT model with "extra large" architecture from *HuBERT* [:footcite:`hsu2021hubert`] Args: encoder_projection_dropout (float): @@ -546,7 +569,7 @@ def hubert_xlarge( Returns: Wav2Vec2Model: The resulting model. - """ + """ # noqa: E501 return wav2vec2_model( extractor_mode='layer_norm', extractor_conv_layer_config=None, diff --git a/torchaudio/models/wav2vec2/utils/import_fairseq.py b/torchaudio/models/wav2vec2/utils/import_fairseq.py index 03d002a16e..e285d3d1e1 100644 --- a/torchaudio/models/wav2vec2/utils/import_fairseq.py +++ b/torchaudio/models/wav2vec2/utils/import_fairseq.py @@ -126,7 +126,10 @@ def _convert_state_dict(state_dict): def import_fairseq_model(original: Module) -> Wav2Vec2Model: - """Build Wav2Vec2Model from pretrained parameters published by `fairseq`_. + # Overriding the signature so that the types are correct on Sphinx + """import_fairseq_model(original: torch.nn.Module) -> torchaudio.models.Wav2Vec2Model + + Build Wav2Vec2Model from the corresponding model object of `fairseq`_. Args: original (torch.nn.Module): diff --git a/torchaudio/models/wav2vec2/utils/import_huggingface.py b/torchaudio/models/wav2vec2/utils/import_huggingface.py index b7da3c22b3..c1a5c4133c 100644 --- a/torchaudio/models/wav2vec2/utils/import_huggingface.py +++ b/torchaudio/models/wav2vec2/utils/import_huggingface.py @@ -50,7 +50,9 @@ def _build(config, original): def import_huggingface_model(original: Module) -> Wav2Vec2Model: - """Import wav2vec2 model from Hugging Face's `Transformers`_. + """import_huggingface_model(original: torch.nn.Module) -> torchaudio.models.Wav2Vec2Model + + Build Wav2Vec2Model from the corresponding model object of Hugging Face's `Transformers`_. Args: original (torch.nn.Module): An instance of ``Wav2Vec2ForCTC`` from ``transformers``. diff --git a/torchaudio/models/wavernn.py b/torchaudio/models/wavernn.py index 88a35b98e7..9f92061906 100644 --- a/torchaudio/models/wavernn.py +++ b/torchaudio/models/wavernn.py @@ -283,7 +283,7 @@ def forward(self, waveform: Tensor, specgram: Tensor) -> Tensor: specgram: the input spectrogram to the WaveRNN layer (n_batch, 1, n_freq, n_time) Return: - Tensor shape: (n_batch, 1, (n_time - kernel_size + 1) * hop_length, n_classes) + Tensor: shape (n_batch, 1, (n_time - kernel_size + 1) * hop_length, n_classes) """ assert waveform.size(1) == 1, 'Require the input channel of waveform is 1' @@ -343,7 +343,7 @@ def infer(self, specgram: Tensor, lengths: Optional[Tensor] = None) -> Tuple[Ten lengths (Tensor or None, optional): Indicates the valid length of each audio in the batch. Shape: `(batch, )`. - When the ``specgram`` contains spectrograms with different duration, + When the ``specgram`` contains spectrograms with different durations, by providing ``lengths`` argument, the model will compute the corresponding valid output lengths. 
If ``None``, it is assumed that all the audio in ``waveforms`` @@ -356,7 +356,7 @@ def infer(self, specgram: Tensor, lengths: Optional[Tensor] = None) -> Tuple[Ten 1 stands for a single channel. Tensor or None If ``lengths`` argument was provided, a Tensor of shape `(batch, )` - is retuned. + is returned. It indicates the valid length in time axis of the output Tensor. """ diff --git a/torchaudio/pipelines/__init__.py b/torchaudio/pipelines/__init__.py index 40f251d79a..c348dc4cb0 100644 --- a/torchaudio/pipelines/__init__.py +++ b/torchaudio/pipelines/__init__.py @@ -1,4 +1,4 @@ -from ._wav2vec2 import ( +from ._wav2vec2.impl import ( Wav2Vec2Bundle, Wav2Vec2ASRBundle, WAV2VEC2_BASE, @@ -14,6 +14,8 @@ WAV2VEC2_ASR_LARGE_LV60K_100H, WAV2VEC2_ASR_LARGE_LV60K_960H, WAV2VEC2_XLSR53, + VOXPOPULI_ASR_BASE_10K_ES, + VOXPOPULI_ASR_BASE_10K_FR, HUBERT_BASE, HUBERT_LARGE, HUBERT_XLARGE, @@ -44,6 +46,8 @@ 'WAV2VEC2_ASR_LARGE_LV60K_100H', 'WAV2VEC2_ASR_LARGE_LV60K_960H', 'WAV2VEC2_XLSR53', + 'VOXPOPULI_ASR_BASE_10K_ES', + 'VOXPOPULI_ASR_BASE_10K_FR', 'HUBERT_BASE', 'HUBERT_LARGE', 'HUBERT_XLARGE', diff --git a/torchaudio/pipelines/_tts/impl.py b/torchaudio/pipelines/_tts/impl.py index 82fe17e768..d1873985c7 100644 --- a/torchaudio/pipelines/_tts/impl.py +++ b/torchaudio/pipelines/_tts/impl.py @@ -83,7 +83,7 @@ def __init__( def sample_rate(self): return self._sample_rate - def forward(self, mel_spec, lengths): + def forward(self, mel_spec, lengths=None): mel_spec = torch.exp(mel_spec) mel_spec = 20 * torch.log10(torch.clamp(mel_spec, min=1e-5)) if self._min_level_db is not None: @@ -120,7 +120,7 @@ def __init__(self): def sample_rate(self): return self._sample_rate - def forward(self, mel_spec, lengths): + def forward(self, mel_spec, lengths=None): mel_spec = torch.exp(mel_spec) mel_spec = mel_spec.clone().detach().requires_grad_(True) spec = self._inv_mel(mel_spec) @@ -242,6 +242,18 @@ class _Tacotron2GriffinLimPhoneBundle(_GriffinLimMixin, _Tacotron2Mixin, _PhoneM Your browser does not support the audio element. + +Example - "The examination and testimony of the experts enabled the Commission to conclude that five shots may have been fired," + + .. image:: https://download.pytorch.org/torchaudio/doc-assets/TACOTRON2_GRIFFINLIM_CHAR_LJSPEECH_v2.png + :alt: Spectrogram generated by Tacotron2 + + .. raw:: html + + ''') # noqa: E501 TACOTRON2_GRIFFINLIM_PHONE_LJSPEECH = _Tacotron2GriffinLimPhoneBundle( @@ -277,6 +289,19 @@ class _Tacotron2GriffinLimPhoneBundle(_GriffinLimMixin, _Tacotron2Mixin, _PhoneM Your browser does not support the audio element. + +Example - "The examination and testimony of the experts enabled the Commission to conclude that five shots may have been fired," + + .. image:: https://download.pytorch.org/torchaudio/doc-assets/TACOTRON2_GRIFFINLIM_PHONE_LJSPEECH_v2.png + :alt: Spectrogram generated by Tacotron2 + + .. raw:: html + + + ''') # noqa: E501 TACOTRON2_WAVERNN_CHAR_LJSPEECH = _Tacotron2WaveRNNCharBundle( @@ -313,6 +338,18 @@ class _Tacotron2GriffinLimPhoneBundle(_GriffinLimMixin, _Tacotron2Mixin, _PhoneM Your browser does not support the audio element. + +Example - "The examination and testimony of the experts enabled the Commission to conclude that five shots may have been fired," + + .. image:: https://download.pytorch.org/torchaudio/doc-assets/TACOTRON2_WAVERNN_CHAR_LJSPEECH_v2.png + :alt: Spectrogram generated by Tacotron2 + + .. 
raw:: html + ''') # noqa: E501 TACOTRON2_WAVERNN_PHONE_LJSPEECH = _Tacotron2WaveRNNPhoneBundle( @@ -353,4 +390,17 @@ class _Tacotron2GriffinLimPhoneBundle(_GriffinLimMixin, _Tacotron2Mixin, _PhoneM Your browser does not support the audio element. + + +Example - "The examination and testimony of the experts enabled the Commission to conclude that five shots may have been fired," + + .. image:: https://download.pytorch.org/torchaudio/doc-assets/TACOTRON2_WAVERNN_PHONE_LJSPEECH_v2.png + :alt: Spectrogram generated by Tacotron2 + + .. raw:: html + + ''') # noqa: E501 diff --git a/torchaudio/pipelines/_tts/interface.py b/torchaudio/pipelines/_tts/interface.py index 3b62b5a642..e78e78331d 100644 --- a/torchaudio/pipelines/_tts/interface.py +++ b/torchaudio/pipelines/_tts/interface.py @@ -7,13 +7,13 @@ class _TextProcessor(ABC): - """Interface of the text processing part of Tacotron2TTS pipeline""" - @property @abstractmethod def tokens(self): """The tokens that each value in the processed tensor represents. + See :func:`torchaudio.pipelines.Tacotron2TTSBundle.get_text_processor` for the usage. + :type: List[str] """ @@ -21,11 +21,13 @@ def tokens(self): def __call__(self, texts: Union[str, List[str]]) -> Tuple[Tensor, Tensor]: """Encode the given (batch of) texts into numerical tensors + See :func:`torchaudio.pipelines.Tacotron2TTSBundle.get_text_processor` for the usage. + Args: text (str or list of str): The input texts. Returns: - Tensor and Tensor: + (Tensor, Tensor): Tensor: The encoded texts. Shape: `(batch, max length)` Tensor: @@ -34,29 +36,32 @@ def __call__(self, texts: Union[str, List[str]]) -> Tuple[Tensor, Tensor]: class _Vocoder(ABC): - """Interface of the vocoder part of Tacotron2TTS pipeline""" - @property @abstractmethod def sample_rate(self): """The sample rate of the resulting waveform + See :func:`torchaudio.pipelines.Tacotron2TTSBundle.get_vocoder` for the usage. + :type: float """ @abstractmethod - def __call__(self, specgrams: Tensor, lengths: Optional[Tensor]) -> Tuple[Tensor, Optional[Tensor]]: + def __call__(self, specgrams: Tensor, lengths: Optional[Tensor] = None) -> Tuple[Tensor, Optional[Tensor]]: """Generate waveform from the given input, such as spectrogram + See :func:`torchaudio.pipelines.Tacotron2TTSBundle.get_vocoder` for the usage. + Args: specgrams (Tensor): The input spectrogram. Shape: `(batch, frequency bins, time)`. The expected shape depends on the implementation. lengths (Tensor, or None, optional): The valid length of each sample in the batch. Shape: `(batch, )`. + (Default: `None`) Returns: - Tensor and optional Tensor: + (Tensor, Optional[Tensor]): Tensor: The generated waveform. Shape: `(batch, max length)` Tensor or None: @@ -146,14 +151,21 @@ class Tacotron2TTSBundle(ABC): # new text processing and vocoder will be added in the future, so we want to make these # interfaces specific to this Tacotron2TTS pipeline. class TextProcessor(_TextProcessor): - pass + """Interface of the text processing part of Tacotron2TTS pipeline + + See :func:`torchaudio.pipelines.Tacotron2TTSBundle.get_text_processor` for the usage. + """ class Vocoder(_Vocoder): - pass + """Interface of the vocoder part of Tacotron2TTS pipeline + + See :func:`torchaudio.pipelines.Tacotron2TTSBundle.get_vocoder` for the usage.
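Taken together, the interfaces above compose into a text-to-speech chain roughly as follows; a sketch assuming the ``TACOTRON2_WAVERNN_CHAR_LJSPEECH`` bundle, with ``Tacotron2.infer`` also returning alignments that are ignored here:

    import torch
    import torchaudio

    bundle = torchaudio.pipelines.TACOTRON2_WAVERNN_CHAR_LJSPEECH
    processor = bundle.get_text_processor()
    tacotron2 = bundle.get_tacotron2()
    vocoder = bundle.get_vocoder()

    tokens, lengths = processor("Hello world!")
    with torch.no_grad():
        mel, mel_lengths, _ = tacotron2.infer(tokens, lengths)
        waveforms, _ = vocoder(mel, mel_lengths)
    torchaudio.save("output.wav", waveforms[0:1].cpu(), int(vocoder.sample_rate))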
+ """ @abstractmethod def get_text_processor(self, *, dl_kwargs=None) -> TextProcessor: - """get_text_processor(self, *, dl_kwargs=None) -> Tacotron2TTSBundle.TextProcessor: + # Overriding the signature so that the return type is correct on Sphinx + """get_text_processor(self, *, dl_kwargs=None) -> torchaudio.pipelines.Tacotron2TTSBundle.TextProcessor Create a text processor @@ -177,7 +189,7 @@ def get_text_processor(self, *, dl_kwargs=None) -> TextProcessor: Example - Character-based >>> text = [ - >>> "Hello, T T S !", + >>> "Hello World!", >>> "Text-to-speech!", >>> ] >>> bundle = torchaudio.pipelines.TACOTRON2_WAVERNN_CHAR_LJSPEECH @@ -192,9 +204,9 @@ def get_text_processor(self, *, dl_kwargs=None) -> TextProcessor: >>> print(lengths) tensor([12, 15], dtype=torch.int32) >>> - >>> print([processor.tokens[i] for i in input[0]]) - ['h', 'e', 'l', 'l', 'o', ' ', 'w', 'o', 'r', 'l', 'd', '!', '_', '_', '_'] - >>> print([processor.tokens[i] for i in input[1]]) + >>> print([processor.tokens[i] for i in input[0, :lengths[0]]]) + ['h', 'e', 'l', 'l', 'o', ' ', 'w', 'o', 'r', 'l', 'd', '!'] + >>> print([processor.tokens[i] for i in input[1, :lengths[1]]]) ['t', 'e', 'x', 't', '-', 't', 'o', '-', 's', 'p', 'e', 'e', 'c', 'h', '!'] Example - Phoneme-based @@ -224,7 +236,8 @@ def get_text_processor(self, *, dl_kwargs=None) -> TextProcessor: @abstractmethod def get_vocoder(self, *, dl_kwargs=None) -> Vocoder: - """get_vocoder(self, *, dl_kwargs=None) -> Tacotron2TTSBundle.Vocoder: + # Overriding the signature so that the return type is correct on Sphinx + """get_vocoder(self, *, dl_kwargs=None) -> torchaudio.pipelines.Tacotron2TTSBundle.Vocoder Create a vocoder module, based off of either WaveRNN or GriffinLim. @@ -244,7 +257,10 @@ def get_vocoder(self, *, dl_kwargs=None) -> Vocoder: @abstractmethod def get_tacotron2(self, *, dl_kwargs=None) -> Tacotron2: - """Create a Tacotron2 model with pre-trained weight. + # Overriding the signature so that the return type is correct on Sphinx + """get_tacotron2(self, *, dl_kwargs=None) -> torchaudio.models.Tacotron2 + + Create a Tacotron2 model with pre-trained weight. 
Args: dl_kwargs (dictionary of keyword arguments): diff --git a/torchaudio/pipelines/_tts/utils.py b/torchaudio/pipelines/_tts/utils.py index 4c0b27f5ba..54a5c9a42d 100644 --- a/torchaudio/pipelines/_tts/utils.py +++ b/torchaudio/pipelines/_tts/utils.py @@ -169,7 +169,9 @@ def _load_phonemizer(file, dl_kwargs): logger.setLevel(logging.INFO) try: url = f'https://public-asai-dl-models.s3.eu-central-1.amazonaws.com/DeepPhonemizer/{file}' - path = os.path.join(torch.hub.get_dir(), 'checkpoints', file) + directory = os.path.join(torch.hub.get_dir(), 'checkpoints') + os.makedirs(directory, exist_ok=True) + path = os.path.join(directory, file) if not os.path.exists(path): dl_kwargs = {} if dl_kwargs is None else dl_kwargs torch.hub.download_url_to_file(url, path, **dl_kwargs) diff --git a/torchaudio/pipelines/_wav2vec2/__init__.py b/torchaudio/pipelines/_wav2vec2/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/torchaudio/pipelines/_wav2vec2.py b/torchaudio/pipelines/_wav2vec2/impl.py similarity index 84% rename from torchaudio/pipelines/_wav2vec2.py rename to torchaudio/pipelines/_wav2vec2/impl.py index f97b9a4f2a..86e435cc81 100644 --- a/torchaudio/pipelines/_wav2vec2.py +++ b/torchaudio/pipelines/_wav2vec2/impl.py @@ -1,9 +1,12 @@ from dataclasses import dataclass from typing import Dict, Tuple, Any +import torch from torch.hub import load_state_dict_from_url from torchaudio.models import wav2vec2_model, Wav2Vec2Model +from . import utils + __all__ = [] @@ -52,7 +55,14 @@ def sample_rate(self) -> float: """ return self._sample_rate + def _get_state_dict(self, dl_kwargs): + url = f'https://download.pytorch.org/torchaudio/models/{self._path}' + dl_kwargs = {} if dl_kwargs is None else dl_kwargs + state_dict = load_state_dict_from_url(url, **dl_kwargs) + return state_dict + def get_model(self, *, dl_kwargs=None) -> Wav2Vec2Model: + # Overriding the signature so that the return type is correct on Sphinx """get_model(self, *, dl_kwargs=None) -> torchaudio.models.Wav2Vec2Model Construct the model and load the pretrained weight. @@ -64,10 +74,7 @@ def get_model(self, *, dl_kwargs=None) -> Wav2Vec2Model: dl_kwargs (dictionary of keyword arguments): Passed to :func:`torch.hub.load_state_dict_from_url`. """ model = wav2vec2_model(**self._params) - url = f'https://download.pytorch.org/torchaudio/models/{self._path}' - dl_kwargs = {} if dl_kwargs is None else dl_kwargs - state_dict = load_state_dict_from_url(url, **dl_kwargs) - model.load_state_dict(state_dict) + model.load_state_dict(self._get_state_dict(dl_kwargs)) model.eval() return model @@ -101,7 +108,7 @@ class Wav2Vec2ASRBundle(Wav2Vec2Bundle): >>> # Check the corresponding labels of the output. 
>>> labels = bundle.get_labels() >>> print(labels) - ('', '', '', '', '|', 'E', 'T', 'A', 'O', 'N', 'I', 'H', 'S', 'R', 'D', 'L', 'U', 'M', 'W', 'C', 'F', 'G', 'Y', 'P', 'B', 'V', 'K', "'", 'X', 'J', 'Q', 'Z') + ('-', '|', 'E', 'T', 'A', 'O', 'N', 'I', 'H', 'S', 'R', 'D', 'L', 'U', 'M', 'W', 'C', 'F', 'G', 'Y', 'P', 'B', 'V', 'K', "'", 'X', 'J', 'Q', 'Z') >>> >>> # Resample audio to the expected sampling rate >>> waveform = torchaudio.functional.resample(waveform, sample_rate, bundle.sample_rate) @@ -114,71 +121,50 @@ class Wav2Vec2ASRBundle(Wav2Vec2Bundle): >>> transcripts = ctc_decode(emissions, labels) """ # noqa: E501 _labels: Tuple[str] + _remove_aux_axis: Tuple[int] = (1, 2, 3) def get_labels( self, *, - bos: str = '', - pad: str = '', - eos: str = '', - unk: str = '', + blank: str = '-', ) -> Tuple[str]: """The output class labels (only applicable to fine-tuned bundles) - The first four tokens are BOS, padding, EOS and UNK tokens and they can be customized. + The first one is the blank token, and it is customizable. Args: - bos (str, optional): Beginning of sentence token. (default: ``''``) - pad (str, optional): Padding token. (default: ``''``) - eos (str, optional): End of sentence token. (default: ``''``) - unk (str, optional): Token for unknown class. (default: ``''``) + blank (str, optional): Blank token. (default: ``'-'``) Returns: - Tuple of strings: + Tuple[str]: For models fine-tuned on ASR, returns the tuple of strings representing the output class labels. Example >>> import torchaudio >>> torchaudio.pipelines.HUBERT_ASR_LARGE.get_labels() - ('', '', '', '', '|', 'E', 'T', 'A', 'O', 'N', 'I', 'H', 'S', 'R', 'D', 'L', 'U', 'M', 'W', 'C', 'F', 'G', 'Y', 'P', 'B', 'V', 'K', "'", 'X', 'J', 'Q', 'Z') + ('-', '|', 'E', 'T', 'A', 'O', 'N', 'I', 'H', 'S', 'R', 'D', 'L', 'U', 'M', 'W', 'C', 'F', 'G', 'Y', 'P', 'B', 'V', 'K', "'", 'X', 'J', 'Q', 'Z') """ # noqa: E501 - if self._labels is None: - raise ValueError('Pre-trained models do not have labels.') - return (bos, pad, eos, unk, *self._labels) - - -def _get_labels(): - return ( - '|', - 'E', - 'T', - 'A', - 'O', - 'N', - 'I', - 'H', - 'S', - 'R', - 'D', - 'L', - 'U', - 'M', - 'W', - 'C', - 'F', - 'G', - 'Y', - 'P', - 'B', - 'V', - 'K', - "'", - 'X', - 'J', - 'Q', - 'Z', - ) + return (blank, *self._labels) + + def _get_state_dict(self, dl_kwargs): + state_dict = super()._get_state_dict(dl_kwargs) + if self._remove_aux_axis: + # Remove the seemingly unnecessary axis + # For the ASR task, the pretrained weights originating from fairseq have unrelated dimensions at indices 1, 2, 3 + # They come from the Dictionary implementation of fairseq, which was intended for NLP tasks, + # but not used during the ASR training. + # https://github.com/pytorch/fairseq/blob/c5ff181125c7e6126b49a85e5ebdd5f5b6a07914/fairseq/data/dictionary.py#L21-L37 + # https://github.com/pytorch/fairseq/blob/c5ff181125c7e6126b49a85e5ebdd5f5b6a07914/fairseq/criterions/ctc.py#L126-L129 + # + # Also, some pretrained weights originating from voxpopuli have an extra dimension that is almost never used and + that appears to be a mistake.
+ # The label `1` shows up in the training dataset of German (1 out of 16M), + # English (1 / 28M), Spanish (1 / 9.4M), Romanian (1 / 4.7M) and Polish (6 / 5.8M) + for key in ['aux.weight', 'aux.bias']: + t = state_dict[key] + state_dict[key] = torch.stack([t[i] for i in range(t.size(0)) if i not in self._remove_aux_axis]) + return state_dict WAV2VEC2_BASE = Wav2Vec2Bundle( @@ -251,9 +237,9 @@ def _get_labels(): 'encoder_dropout': 0.1, 'encoder_layer_norm_first': False, 'encoder_layer_drop': 0.05, - "aux_num_out": 32, + "aux_num_out": 29, }, - _labels=_get_labels(), + _labels=utils._get_en_labels(), _sample_rate=16000, ) WAV2VEC2_ASR_BASE_10M.__doc__ = """Build "base" wav2vec2 model with an extra linear module @@ -297,9 +283,9 @@ def _get_labels(): 'encoder_dropout': 0.1, 'encoder_layer_norm_first': False, 'encoder_layer_drop': 0.05, - "aux_num_out": 32, + "aux_num_out": 29, }, - _labels=_get_labels(), + _labels=utils._get_en_labels(), _sample_rate=16000, ) @@ -343,9 +329,9 @@ def _get_labels(): "encoder_dropout": 0.1, "encoder_layer_norm_first": False, "encoder_layer_drop": 0.05, - "aux_num_out": 32, + "aux_num_out": 29, }, - _labels=_get_labels(), + _labels=utils._get_en_labels(), _sample_rate=16000, ) WAV2VEC2_ASR_BASE_960H.__doc__ = """Build "base" wav2vec2 model with an extra linear module @@ -432,9 +418,9 @@ def _get_labels(): "encoder_dropout": 0.0, "encoder_layer_norm_first": False, "encoder_layer_drop": 0.2, - "aux_num_out": 32, + "aux_num_out": 29, }, - _labels=_get_labels(), + _labels=utils._get_en_labels(), _sample_rate=16000, ) WAV2VEC2_ASR_LARGE_10M.__doc__ = """Build "large" wav2vec2 model with an extra linear module @@ -478,9 +464,9 @@ def _get_labels(): "encoder_dropout": 0.0, "encoder_layer_norm_first": False, "encoder_layer_drop": 0.2, - "aux_num_out": 32, + "aux_num_out": 29, }, - _labels=_get_labels(), + _labels=utils._get_en_labels(), _sample_rate=16000, ) WAV2VEC2_ASR_LARGE_100H.__doc__ = """Build "large" wav2vec2 model with an extra linear module @@ -524,9 +510,9 @@ def _get_labels(): "encoder_dropout": 0.0, "encoder_layer_norm_first": False, "encoder_layer_drop": 0.2, - "aux_num_out": 32, + "aux_num_out": 29, }, - _labels=_get_labels(), + _labels=utils._get_en_labels(), _sample_rate=16000, ) WAV2VEC2_ASR_LARGE_960H.__doc__ = """Build "large" wav2vec2 model with an extra linear module @@ -613,9 +599,9 @@ def _get_labels(): "encoder_dropout": 0.0, "encoder_layer_norm_first": True, "encoder_layer_drop": 0.0, - "aux_num_out": 32, + "aux_num_out": 29, }, - _labels=_get_labels(), + _labels=utils._get_en_labels(), _sample_rate=16000, ) WAV2VEC2_ASR_LARGE_LV60K_10M.__doc__ = """Build "large-lv60k" wav2vec2 model with an extra linear module @@ -659,9 +645,9 @@ def _get_labels(): "encoder_dropout": 0.0, "encoder_layer_norm_first": True, "encoder_layer_drop": 0.0, - "aux_num_out": 32, + "aux_num_out": 29, }, - _labels=_get_labels(), + _labels=utils._get_en_labels(), _sample_rate=16000, ) WAV2VEC2_ASR_LARGE_LV60K_100H.__doc__ = """Build "large-lv60k" wav2vec2 model with an extra linear module @@ -705,9 +691,9 @@ def _get_labels(): "encoder_dropout": 0.0, "encoder_layer_norm_first": True, "encoder_layer_drop": 0.0, - "aux_num_out": 32, + "aux_num_out": 29, }, - _labels=_get_labels(), + _labels=utils._get_en_labels(), _sample_rate=16000, ) WAV2VEC2_ASR_LARGE_LV60K_960H.__doc__ = """Build "large-lv60k" wav2vec2 model with an extra linear module @@ -931,9 +917,9 @@ def _get_labels(): 'encoder_dropout': 0.0, 'encoder_layer_norm_first': True, 'encoder_layer_drop': 0.1, - 
'aux_num_out': 32, + 'aux_num_out': 29, }, - _labels=_get_labels(), + _labels=utils._get_en_labels(), _sample_rate=16000, ) HUBERT_ASR_LARGE.__doc__ = """HuBERT model with "Large" configuration. @@ -978,9 +964,9 @@ def _get_labels(): 'encoder_dropout': 0.0, 'encoder_layer_norm_first': True, 'encoder_layer_drop': 0.1, - 'aux_num_out': 32, + 'aux_num_out': 29, }, - _labels=_get_labels(), + _labels=utils._get_en_labels(), _sample_rate=16000, ) HUBERT_ASR_XLARGE.__doc__ = """HuBERT model with "Extra Large" configuration. @@ -998,3 +984,95 @@ def _get_labels(): Please refer to :func:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage. """ # noqa: E501 + + +VOXPOPULI_ASR_BASE_10K_ES = Wav2Vec2ASRBundle( + 'wav2vec2_voxpopuli_base_10k_asr_es.pt', + { + "extractor_mode": "group_norm", + "extractor_conv_layer_config": [ + (512, 10, 5), + (512, 3, 2), + (512, 3, 2), + (512, 3, 2), + (512, 3, 2), + (512, 2, 2), + (512, 2, 2), + ], + "extractor_conv_bias": False, + "encoder_embed_dim": 768, + "encoder_projection_dropout": 0.0, + "encoder_pos_conv_kernel": 128, + "encoder_pos_conv_groups": 16, + "encoder_num_layers": 12, + "encoder_num_heads": 12, + "encoder_attention_dropout": 0.0, + "encoder_ff_interm_features": 3072, + "encoder_ff_interm_dropout": 0.1, + "encoder_dropout": 0.0, + "encoder_layer_norm_first": False, + "encoder_layer_drop": 0.1, + "aux_num_out": 35 + }, + _labels=utils._get_es_labels(), + _sample_rate=16000, + _remove_aux_axis=(1, 2, 3, 35), +) +VOXPOPULI_ASR_BASE_10K_ES.__doc__ = """wav2vec 2.0 model with "Base" configuration. + +Pre-trained on 10k hours of unlabeled audio from *VoxPopuli* dataset [:footcite:`voxpopuli`] +("10k" subset, consisting of 23 languages). +Fine-tuned for ASR on 166 hours of transcribed audio from "es" subset. + +Originally published by the authors of *VoxPopuli* [:footcite:`voxpopuli`] under CC BY-NC 4.0 and +redistributed with the same license. +[`License `__, +`Source `__] + +Please refer to :func:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage. +""" # noqa: E501 + +VOXPOPULI_ASR_BASE_10K_FR = Wav2Vec2ASRBundle( + 'wav2vec2_voxpopuli_base_10k_asr_fr.pt', + { + "extractor_mode": "group_norm", + "extractor_conv_layer_config": [ + (512, 10, 5), + (512, 3, 2), + (512, 3, 2), + (512, 3, 2), + (512, 3, 2), + (512, 2, 2), + (512, 2, 2), + ], + "extractor_conv_bias": False, + "encoder_embed_dim": 768, + "encoder_projection_dropout": 0.0, + "encoder_pos_conv_kernel": 128, + "encoder_pos_conv_groups": 16, + "encoder_num_layers": 12, + "encoder_num_heads": 12, + "encoder_attention_dropout": 0.0, + "encoder_ff_interm_features": 3072, + "encoder_ff_interm_dropout": 0.1, + "encoder_dropout": 0.0, + "encoder_layer_norm_first": False, + "encoder_layer_drop": 0.1, + "aux_num_out": 43 + }, + _labels=utils._get_fr_labels(), + _sample_rate=16000, +) +VOXPOPULI_ASR_BASE_10K_FR.__doc__ = """wav2vec 2.0 model with "Base" configuration. + +Pre-trained on 10k hours of unlabeled audio from *VoxPopuli* dataset [:footcite:`voxpopuli`] +("10k" subset, consisting of 23 languages). +Fine-tuned for ASR on 211 hours of transcribed audio from "fr" subset. + +Originally published by the authors of *VoxPopuli* [:footcite:`voxpopuli`] under CC BY-NC 4.0 and +redistributed with the same license. +[`License `__, +`Source `__] + +Please refer to :func:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage. 
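A usage sketch for the new VoxPopuli bundles, following the ``Wav2Vec2ASRBundle`` example above ("audio.wav" is a placeholder path, and the greedy argmax decode is a naive stand-in for a proper CTC decoder):

    import torch
    import torchaudio

    bundle = torchaudio.pipelines.VOXPOPULI_ASR_BASE_10K_FR
    model = bundle.get_model()
    labels = bundle.get_labels()                  # ('-', '|', 'e', 's', 'n', ...)

    waveform, sample_rate = torchaudio.load("audio.wav")
    waveform = torchaudio.functional.resample(waveform, sample_rate, bundle.sample_rate)

    with torch.no_grad():
        emissions, _ = model(waveform)

    indices = emissions[0].argmax(dim=-1).tolist()
    # collapse repeated predictions, then drop the blank token at index 0
    chars = [labels[i] for j, i in enumerate(indices) if (j == 0 or i != indices[j - 1]) and i != 0]
    print("".join(chars).replace("|", " "))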
+""" # noqa: E501 diff --git a/torchaudio/pipelines/_wav2vec2/utils.py b/torchaudio/pipelines/_wav2vec2/utils.py new file mode 100644 index 0000000000..723cd403ad --- /dev/null +++ b/torchaudio/pipelines/_wav2vec2/utils.py @@ -0,0 +1,117 @@ +def _get_en_labels(): + return ( + '|', + 'E', + 'T', + 'A', + 'O', + 'N', + 'I', + 'H', + 'S', + 'R', + 'D', + 'L', + 'U', + 'M', + 'W', + 'C', + 'F', + 'G', + 'Y', + 'P', + 'B', + 'V', + 'K', + "'", + 'X', + 'J', + 'Q', + 'Z', + ) + + +def _get_es_labels(): + return ( + "|", + "e", + "a", + "o", + "s", + "n", + "r", + "i", + "l", + "d", + "c", + "t", + "u", + "p", + "m", + "b", + "q", + "y", + "g", + "v", + "h", + "ó", + "f", + "í", + "á", + "j", + "z", + "ñ", + "é", + "x", + "ú", + "k", + "w", + "ü", + ) + + +def _get_fr_labels(): + return ( + "|", + "e", + "s", + "n", + "i", + "t", + "r", + "a", + "o", + "u", + "l", + "d", + "c", + "p", + "m", + "é", + "v", + "q", + "f", + "g", + "b", + "h", + "x", + "à", + "j", + "è", + "y", + "ê", + "z", + "ô", + "k", + "ç", + "œ", + "û", + "ù", + "î", + "â", + "w", + "ï", + "ë", + "ü", + "æ", + ) diff --git a/torchaudio/prototype/emformer.py b/torchaudio/prototype/emformer.py index f7f1241053..2d7c66f98a 100644 --- a/torchaudio/prototype/emformer.py +++ b/torchaudio/prototype/emformer.py @@ -276,20 +276,20 @@ def forward( M: number of memory elements. Args: - utterance (torch.Tensor): utterance frames, with shape (T, B, D). - lengths (torch.Tensor): with shape (B,) and i-th element representing + utterance (torch.Tensor): utterance frames, with shape `(T, B, D)`. + lengths (torch.Tensor): with shape `(B,)` and i-th element representing number of valid frames for i-th batch element in ``utterance``. - right_context (torch.Tensor): right context frames, with shape (R, B, D). - summary (torch.Tensor): summary elements, with shape (S, B, D). - mems (torch.Tensor): memory elements, with shape (M, B, D). + right_context (torch.Tensor): right context frames, with shape `(R, B, D)`. + summary (torch.Tensor): summary elements, with shape `(S, B, D)`. + mems (torch.Tensor): memory elements, with shape `(M, B, D)`. attention_mask (torch.Tensor): attention mask for underlying attention module. Returns: - torch.Tensor and torch.Tensor: - torch.Tensor - output frames corresponding to utterance and right_context, with shape (T + R, B, D). - torch.Tensor - updated memory elements, with shape (M, B, D). + (Tensor, Tensor): + Tensor + output frames corresponding to utterance and right_context, with shape `(T + R, B, D)`. + Tensor + updated memory elements, with shape `(M, B, D)`. """ output, output_mems, _, _ = self._forward_impl( utterance, lengths, right_context, summary, mems, attention_mask @@ -317,24 +317,24 @@ def infer( M: number of memory elements. Args: - utterance (torch.Tensor): utterance frames, with shape (T, B, D). - lengths (torch.Tensor): with shape (B,) and i-th element representing + utterance (torch.Tensor): utterance frames, with shape `(T, B, D)`. + lengths (torch.Tensor): with shape `(B,)` and i-th element representing number of valid frames for i-th batch element in ``utterance``. - right_context (torch.Tensor): right context frames, with shape (R, B, D). - summary (torch.Tensor): summary elements, with shape (S, B, D). - mems (torch.Tensor): memory elements, with shape (M, B, D). + right_context (torch.Tensor): right context frames, with shape `(R, B, D)`. + summary (torch.Tensor): summary elements, with shape `(S, B, D)`. + mems (torch.Tensor): memory elements, with shape `(M, B, D)`. 
left_context_key (torch.Tensor): left context attention key computed from preceding invocation. left_context_val (torch.Tensor): left context attention value computed from preceding invocation. Returns: - torch.Tensor, torch.Tensor, torch.Tensor, and torch.Tensor: - torch.Tensor - output frames corresponding to utterance and right_context, with shape (T + R, B, D). - torch.Tensor - updated memory elements, with shape (M, B, D). - torch.Tensor + (Tensor, Tensor, Tensor, and Tensor): + Tensor + output frames corresponding to utterance and right_context, with shape `(T + R, B, D)`. + Tensor + updated memory elements, with shape `(M, B, D)`. + Tensor attention key computed for left context and utterance. - torch.Tensor + Tensor attention value computed for left context and utterance. """ query_dim = right_context.size(0) + utterance.size(0) + summary.size(0) @@ -575,21 +575,21 @@ def forward( M: number of memory elements. Args: - utterance (torch.Tensor): utterance frames, with shape (T, B, D). - lengths (torch.Tensor): with shape (B,) and i-th element representing + utterance (torch.Tensor): utterance frames, with shape `(T, B, D)`. + lengths (torch.Tensor): with shape `(B,)` and i-th element representing number of valid frames for i-th batch element in ``utterance``. - right_context (torch.Tensor): right context frames, with shape (R, B, D). - mems (torch.Tensor): memory elements, with shape (M, B, D). + right_context (torch.Tensor): right context frames, with shape `(R, B, D)`. + mems (torch.Tensor): memory elements, with shape `(M, B, D)`. attention_mask (torch.Tensor): attention mask for underlying attention module. Returns: - torch.Tensor, torch.Tensor, and torch.Tensor: - torch.Tensor - encoded utterance frames, with shape (T, B, D). - torch.Tensor - updated right context frames, with shape (R, B, D). - torch.Tensor - updated memory elements, with shape (M, B, D). + (Tensor, Tensor, Tensor): + Tensor + encoded utterance frames, with shape `(T, B, D)`. + Tensor + updated right context frames, with shape `(R, B, D)`. + Tensor + updated memory elements, with shape `(M, B, D)`. """ ( layer_norm_utterance, @@ -625,25 +625,25 @@ def infer( M: number of memory elements. Args: - utterance (torch.Tensor): utterance frames, with shape (T, B, D). - lengths (torch.Tensor): with shape (B,) and i-th element representing + utterance (torch.Tensor): utterance frames, with shape `(T, B, D)`. + lengths (torch.Tensor): with shape `(B,)` and i-th element representing number of valid frames for i-th batch element in ``utterance``. - right_context (torch.Tensor): right context frames, with shape (R, B, D). + right_context (torch.Tensor): right context frames, with shape `(R, B, D)`. state (List[torch.Tensor] or None): list of tensors representing layer internal state generated in preceding invocation of ``infer``. - mems (torch.Tensor): memory elements, with shape (M, B, D). + mems (torch.Tensor): memory elements, with shape `(M, B, D)`. Returns: - torch.Tensor, torch.Tensor, List[torch.Tensor], and torch.Tensor: - torch.Tensor - encoded utterance frames, with shape (T, B, D). - torch.Tensor - updated right context frames, with shape (R, B, D). - List[torch.Tensor] + (Tensor, Tensor, List[torch.Tensor], Tensor): + Tensor + encoded utterance frames, with shape `(T, B, D)`. + Tensor + updated right context frames, with shape `(R, B, D)`. + List[Tensor] list of tensors representing layer internal state generated in current invocation of ``infer``. - torch.Tensor - updated memory elements, with shape (M, B, D). 
+ Tensor + updated memory elements, with shape `(M, B, D)`. """ ( layer_norm_utterance, @@ -851,16 +851,16 @@ def forward( Args: input (torch.Tensor): utterance frames right-padded with right context frames, with - shape (B, T, D). - lengths (torch.Tensor): with shape (B,) and i-th element representing + shape `(B, T, D)`. + lengths (torch.Tensor): with shape `(B,)` and i-th element representing number of valid frames for i-th batch element in ``input``. Returns: - torch.Tensor and torch.Tensor: - torch.Tensor - output frames, with shape (B, T - ``right_context_length``, D). - torch.Tensor - output lengths, with shape (B,) and i-th element representing + (Tensor, Tensor): + Tensor + output frames, with shape `(B, T - ``right_context_length``, D)`. + Tensor + output lengths, with shape `(B,)` and i-th element representing number of valid frames for i-th batch element in output frames. """ input = input.permute(1, 0, 2) @@ -894,20 +894,20 @@ def infer( Args: input (torch.Tensor): utterance frames right-padded with right context frames, with - shape (B, T, D). - lengths (torch.Tensor): with shape (B,) and i-th element representing + shape `(B, T, D)`. + lengths (torch.Tensor): with shape `(B,)` and i-th element representing number of valid frames for i-th batch element in ``input``. states (List[List[torch.Tensor]] or None, optional): list of lists of tensors representing Emformer internal state generated in preceding invocation of ``infer``. (Default: ``None``) Returns: - torch.Tensor, torch.Tensor, and List[List[torch.Tensor]]: - torch.Tensor - output frames, with shape (B, T - ``right_context_length``, D). - torch.Tensor - output lengths, with shape (B,) and i-th element representing + (Tensor, Tensor, List[List[Tensor]]): + Tensor + output frames, with shape `(B, T - ``right_context_length``, D)`. + Tensor + output lengths, with shape `(B,)` and i-th element representing number of valid frames for i-th batch element in output frames. - List[List[torch.Tensor]] + List[List[Tensor]] output states; list of lists of tensors representing Emformer internal state generated in current invocation of ``infer``. """ diff --git a/torchaudio/sox_effects/sox_effects.py b/torchaudio/sox_effects/sox_effects.py index 3223e7c301..c17c356859 100644 --- a/torchaudio/sox_effects/sox_effects.py +++ b/torchaudio/sox_effects/sox_effects.py @@ -73,10 +73,10 @@ def apply_effects_tensor( sample_rate (int): Sample rate effects (List[List[str]]): List of effects. channels_first (bool, optional): Indicates if the input Tensor's dimension is - ``[channels, time]`` or ``[time, channels]`` + `[channels, time]` or `[time, channels]` Returns: - Tuple[torch.Tensor, int]: Resulting Tensor and sample rate. + (Tensor, int): Resulting Tensor and sample rate. The resulting Tensor has the same ``dtype`` as the input Tensor, and the same channels order. The shape of the Tensor can be different based on the effects applied. Sample rate can also be different based on the effects applied. @@ -191,20 +191,20 @@ def apply_effects_file( If input file is integer WAV, giving ``False`` will change the resulting Tensor type to integer type. This argument has no effect for formats other than integer WAV type. - channels_first (bool, optional): When True, the returned Tensor has dimension ``[channel, time]``. - Otherwise, the returned Tensor's dimension is ``[time, channel]``. + channels_first (bool, optional): When True, the returned Tensor has dimension `[channel, time]`. + Otherwise, the returned Tensor's dimension is `[time, channel]`. 
format (str or None, optional): Override the format detection with the given format. Providing the argument might help when libsox can not infer the format from header or extension, Returns: - Tuple[torch.Tensor, int]: Resulting Tensor and sample rate. + (Tensor, int): Resulting Tensor and sample rate. If ``normalize=True``, the resulting Tensor is always ``float32`` type. If ``normalize=False`` and the input audio file is of integer WAV file, then the resulting Tensor has corresponding integer type. (Note 24 bit integer type is not supported) - If ``channels_first=True``, the resulting Tensor has dimension ``[channel, time]``, - otherwise ``[time, channel]``. + If ``channels_first=True``, the resulting Tensor has dimension `[channel, time]`, + otherwise `[time, channel]`. Example - Basic usage >>> diff --git a/torchaudio/transforms.py b/torchaudio/transforms.py index 0d9154d5f1..edfacd6186 100644 --- a/torchaudio/transforms.py +++ b/torchaudio/transforms.py @@ -26,7 +26,6 @@ 'MuLawEncoding', 'MuLawDecoding', 'Resample', - 'ComplexNorm', 'TimeStretch', 'Fade', 'FrequencyMasking', @@ -787,7 +786,7 @@ def forward(self, x: Tensor) -> Tensor: x (Tensor): A signal to be encoded. Returns: - x_mu (Tensor): An encoded signal. + Tensor: An encoded signal. """ return F.mu_law_encoding(x, self.quantization_channels) @@ -900,42 +899,6 @@ def forward(self, waveform: Tensor) -> Tensor: self.kernel, self.width) -class ComplexNorm(torch.nn.Module): - r"""Compute the norm of complex tensor input. - - Args: - power (float, optional): Power of the norm. (Default: to ``1.0``) - - Example - >>> complex_tensor = ... # Tensor shape of (…, complex=2) - >>> transform = transforms.ComplexNorm(power=2) - >>> complex_norm = transform(complex_tensor) - """ - __constants__ = ['power'] - - def __init__(self, power: float = 1.0) -> None: - warnings.warn( - 'torchaudio.transforms.ComplexNorm has been deprecated ' - 'and will be removed from future release.' - 'Please convert the input Tensor to complex type with `torch.view_as_complex` then ' - 'use `torch.abs` and `torch.angle`. ' - 'Please refer to https://github.com/pytorch/audio/issues/1337 ' - "for more details about torchaudio's plan to migrate to native complex type." - ) - super(ComplexNorm, self).__init__() - self.power = power - - def forward(self, complex_tensor: Tensor) -> Tensor: - r""" - Args: - complex_tensor (Tensor): Tensor shape of `(..., complex=2)`. - - Returns: - Tensor: norm of the input tensor, shape of `(..., )`. - """ - return F.complex_norm(complex_tensor, self.power) - - class ComputeDeltas(torch.nn.Module): r"""Compute delta coefficients of a tensor, usually a spectrogram. @@ -1629,7 +1592,7 @@ def forward(self, specgram: torch.Tensor, mask: Optional[torch.Tensor] = None): of dimension `(..., channel, freq, time)` if multi_mask is ``True`` Returns: - torch.Tensor: PSD matrix of the input STFT matrix. + Tensor: PSD matrix of the input STFT matrix. Tensor of dimension `(..., freq, channel, channel)` """ # outer product: @@ -1773,7 +1736,7 @@ def _get_updated_mvdr_vector( eps (float, optional): a value added to the denominator in mask normalization. (Default: 1e-8) Returns: - torch.Tensor: the mvdr beamforming weight matrix + Tensor: the mvdr beamforming weight matrix """ if self.multi_mask: # Averaging mask along channel dimension