From 15e3ef0347737f6644f8e2cab01ed308e8afb65a Mon Sep 17 00:00:00 2001
From: Lysandre Debut
Date: Mon, 11 Apr 2022 15:47:00 -0300
Subject: [PATCH] Build docker images for tokenizers main

Run the scheduled tests

Empty commit to trigger CI with tokenizers
---
 .github/workflows/build-docker-images.yml      | 58 +------------------
 .github/workflows/self-scheduled.yml           | 47 ++++++++++-----
 setup.py                                       |  5 +-
 src/transformers/dependency_versions_table.py  |  2 +-
 4 files changed, 39 insertions(+), 73 deletions(-)

diff --git a/.github/workflows/build-docker-images.yml b/.github/workflows/build-docker-images.yml
index 556e4ff0ebe07f..d4c93356231724 100644
--- a/.github/workflows/build-docker-images.yml
+++ b/.github/workflows/build-docker-images.yml
@@ -43,7 +43,7 @@ jobs:
           build-args: |
             REF=main
           push: true
-          tags: huggingface/transformers-all-latest-gpu-tokenizers-main
+          tags: huggingface/internal-transformers-all-latest-gpu-tokenizers-main
 
   latest-with-torch-nightly-docker:
     name: "Nightly PyTorch + Stable TensorFlow"
@@ -98,61 +98,7 @@ jobs:
           build-args: |
             REF=main
           push: true
-          tags: huggingface/transformers-pytorch-deepspeed-latest-gpu${{ inputs.image_postfix }}
-
-  nightly-torch-deepspeed-docker:
-    name: "Nightly PyTorch + DeepSpeed"
-    # Push CI doesn't need this image
-    if: inputs.image_postfix != '-push-ci'
-    runs-on: ubuntu-latest
-    steps:
-      -
-        name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v1
-      -
-        name: Check out code
-        uses: actions/checkout@v2
-      -
-        name: Login to DockerHub
-        uses: docker/login-action@v1
-        with:
-          username: ${{ secrets.DOCKERHUB_USERNAME }}
-          password: ${{ secrets.DOCKERHUB_PASSWORD }}
-      -
-        name: Build and push
-        uses: docker/build-push-action@v2
-        with:
-          context: ./docker/transformers-pytorch-deepspeed-nightly-gpu
-          build-args: |
-            REF=main
-          push: true
-          tags: huggingface/transformers-pytorch-deepspeed-nightly-gpu
-
-  doc-builder:
-    name: "Doc builder"
-    # Push CI doesn't need this image
-    if: inputs.image_postfix != '-push-ci'
-    runs-on: ubuntu-latest
-    steps:
-      -
-        name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v1
-      -
-        name: Check out code
-        uses: actions/checkout@v2
-      -
-        name: Login to DockerHub
-        uses: docker/login-action@v1
-        with:
-          username: ${{ secrets.DOCKERHUB_USERNAME }}
-          password: ${{ secrets.DOCKERHUB_PASSWORD }}
-      -
-        name: Build and push
-        uses: docker/build-push-action@v2
-        with:
-          context: ./docker/transformers-doc-builder
-          push: true
-          tags: huggingface/transformers-doc-builder
+          tags: huggingface/internal-transformers-pytorch-deepspeed-latest-gpu-tokenizers-main
 
   latest-pytorch:
     name: "Latest PyTorch [dev]"
diff --git a/.github/workflows/self-scheduled.yml b/.github/workflows/self-scheduled.yml
index 7de69a573e3852..4a1a3620bac7a7 100644
--- a/.github/workflows/self-scheduled.yml
+++ b/.github/workflows/self-scheduled.yml
@@ -7,6 +7,9 @@ name: Self-hosted runner (scheduled)
 # `docker/transformers-pytorch-deepspeed-latest-gpu/Dockerfile`
 
 on:
+  push:
+    branches:
+      - post-action-build-test-tokenizers-*
   repository_dispatch:
   schedule:
     - cron: "0 2 * * *"
@@ -57,8 +60,11 @@ jobs:
         machine_type: [single-gpu, multi-gpu]
     runs-on: ${{ format('{0}-{1}', matrix.machine_type, 'docker') }}
     container:
-      image: huggingface/transformers-all-latest-gpu
+      image: huggingface/internal-transformers-all-latest-gpu-tokenizers-main
       options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
+      credentials:
+        username: ${{ secrets.DOCKERHUB_USERNAME }}
+        password: ${{ secrets.DOCKERHUB_PASSWORD }}
     outputs:
       matrix: ${{ steps.set-matrix.outputs.matrix }}
     steps:
@@ -93,9 +99,12 @@ jobs:
         machine_type: [single-gpu]
     runs-on: ${{ format('{0}-{1}', matrix.machine_type, 'docker') }}
     container:
-      image: huggingface/transformers-all-latest-gpu
+      image: huggingface/internal-transformers-all-latest-gpu-tokenizers-main
       options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
-    needs: setup
+      credentials:
+        username: ${{ secrets.DOCKERHUB_USERNAME }}
+        password: ${{ secrets.DOCKERHUB_PASSWORD }}
+    needs: [run_check_runners, setup]
     steps:
       - name: Echo folder ${{ matrix.folders }}
         shell: bash
@@ -194,9 +203,12 @@ jobs:
     name: Examples directory
     runs-on: [self-hosted, single-gpu-docker]
     container:
-      image: huggingface/transformers-all-latest-gpu
+      image: huggingface/internal-transformers-all-latest-gpu-tokenizers-main
       options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
-    needs: setup
+      credentials:
+        username: ${{ secrets.DOCKERHUB_USERNAME }}
+        password: ${{ secrets.DOCKERHUB_PASSWORD }}
+    needs: [run_check_runners, setup]
     steps:
       - name: Update clone
         working-directory: /transformers
@@ -237,9 +249,12 @@ jobs:
         machine_type: [single-gpu, multi-gpu]
     runs-on: ${{ format('{0}-{1}', matrix.machine_type, 'docker') }}
     container:
-      image: huggingface/transformers-pytorch-gpu
-      options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
-    needs: setup
+      image: huggingface/internal-transformers-pytorch-gpu-tokenizers-main
+      options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
+      credentials:
+        username: ${{ secrets.DOCKERHUB_USERNAME }}
+        password: ${{ secrets.DOCKERHUB_PASSWORD }}
+    needs: [run_check_runners, setup]
     steps:
       - name: Update clone
         working-directory: /transformers
@@ -281,9 +296,12 @@ jobs:
         machine_type: [single-gpu, multi-gpu]
     runs-on: ${{ format('{0}-{1}', matrix.machine_type, 'docker') }}
     container:
-      image: huggingface/transformers-tensorflow-gpu
-      options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
-    needs: setup
+      image: huggingface/internal-transformers-tensorflow-gpu-tokenizers-main
+      options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
+      credentials:
+        username: ${{ secrets.DOCKERHUB_USERNAME }}
+        password: ${{ secrets.DOCKERHUB_PASSWORD }}
+    needs: [run_check_runners, setup]
     steps:
       - name: Update clone
         working-directory: /transformers
@@ -327,8 +345,11 @@ jobs:
     runs-on: ${{ format('{0}-{1}', matrix.machine_type, 'docker') }}
     needs: setup
     container:
-      image: huggingface/transformers-pytorch-deepspeed-latest-gpu
+      image: huggingface/internal-transformers-pytorch-deepspeed-latest-gpu-tokenizers-main
       options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
+      credentials:
+        username: ${{ secrets.DOCKERHUB_USERNAME }}
+        password: ${{ secrets.DOCKERHUB_PASSWORD }}
     steps:
       - name: Update clone
         working-directory: /workspace/transformers
@@ -400,7 +421,7 @@ jobs:
     env:
       CI_SLACK_BOT_TOKEN: ${{ secrets.CI_SLACK_BOT_TOKEN }}
       CI_SLACK_CHANNEL_ID: ${{ secrets.CI_SLACK_CHANNEL_ID }}
-      CI_SLACK_CHANNEL_ID_DAILY: ${{ secrets.CI_SLACK_CHANNEL_ID_DAILY }}
+      CI_SLACK_CHANNEL_ID_DAILY: ${{ secrets.CI_SLACK_CHANNEL_DUMMY_TESTS }}
       CI_SLACK_CHANNEL_DUMMY_TESTS: ${{ secrets.CI_SLACK_CHANNEL_DUMMY_TESTS }}
       CI_SLACK_REPORT_CHANNEL_ID: ${{ secrets.CI_SLACK_CHANNEL_ID_DAILY }}
       CI_EVENT: scheduled
diff --git a/setup.py b/setup.py
index 4d7b4e85382621..37c217a56b7cb1 100644
--- a/setup.py
+++ b/setup.py
@@ -160,7 +160,7 @@
     "tf2onnx",
     "timeout-decorator",
     "timm",
-    "tokenizers>=0.11.1,!=0.11.3,<0.14",
+    "tokenizers",
     "torch>=1.7,!=1.12.0",
     "torchaudio",
     "pyctcdecode>=0.3.0",
@@ -333,8 +333,7 @@ def run(self):
     extras["testing"]
     + extras["torch"]
     + extras["sentencepiece"]
-    + extras["tokenizers"]
-    + extras["torch-speech"]
+    + extras["tokenizers"] + extras["torch-speech"]
     + extras["vision"]
     + extras["integrations"]
     + extras["timm"]
diff --git a/src/transformers/dependency_versions_table.py b/src/transformers/dependency_versions_table.py
index bfcb0fc8699b1e..de510642356937 100644
--- a/src/transformers/dependency_versions_table.py
+++ b/src/transformers/dependency_versions_table.py
@@ -66,7 +66,7 @@
     "tf2onnx": "tf2onnx",
     "timeout-decorator": "timeout-decorator",
     "timm": "timm",
-    "tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
+    "tokenizers": "tokenizers",
     "torch": "torch>=1.7,!=1.12.0",
     "torchaudio": "torchaudio",
     "pyctcdecode": "pyctcdecode>=0.3.0",