-
Notifications
You must be signed in to change notification settings - Fork 73
195 lines (180 loc) · 7.6 KB
/
wheels_build.yml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
name: wheels_build
on:
  # Reusable workflow: called from other workflows with a build matrix entry.
  workflow_call:
    inputs:
      os:
        required: true
        type: string
      python:
        required: true
        type: string
      torch_version:
        required: true
        type: string
        description: "Example: 1.13.1"
      cuda_short_version:
        required: true
        type: string
        description: "Example: 117 for 11.7"
      cudnn_version_major:
        required: false
        default: '8'
        type: string
        # fixed: this description string was missing its closing quote
        description: "Example: 8"
  # Manual trigger with the same inputs as workflow_call.
  workflow_dispatch:
    inputs:
      os:
        required: true
        type: string
      python:
        required: true
        type: string
      torch_version:
        required: true
        type: string
        description: "Example: 1.13.1"
      cuda_short_version:
        required: true
        type: string
        description: "Example: 117 for 11.7"
      cudnn_version_major:
        required: false
        default: '8'
        type: string
        # fixed: this description string was missing its closing quote
        description: "Example: 8"
# this yaml file can be cleaned up using yaml anchors, but they're not supported in github actions yet
# https://github.com/actions/runner/issues/1182
env:
  # you need at least cuda 5.0 for some of the stuff compiled here.
  TORCH_CUDA_ARCH_LIST: "5.0+PTX 6.0 6.1 7.0 7.5 8.0+PTX"
  # Quoted so these stay strings: env vars are strings to consumers anyway,
  # and quoting prevents YAML from retyping them as integers.
  MAX_JOBS: "4"
  DISTUTILS_USE_SDK: "1"  # otherwise distutils will complain on windows about multiple versions of msvc
  SFAST_APPEND_VERSION: "1"
  TWINE_USERNAME: __token__
jobs:
  build_internal:
    name: ${{ inputs.os }}-py${{ inputs.python }}-torch${{ inputs.torch_version }}+cu${{ inputs.cuda_short_version }}
    runs-on: ${{ inputs.os }}
    env:
      # alias for the current python version
      # windows does not have per version binary, it is just 'python3'
      PY: python${{ contains(inputs.os, 'ubuntu') && inputs.python || '3' }}
    container: ${{ contains(inputs.os, 'ubuntu') && 'quay.io/pypa/manylinux2014_x86_64' || null }}
    timeout-minutes: 360
    defaults:
      run:
        shell: bash
    steps:
      # Map the short CUDA version (e.g. "117") to a full version, installer
      # URL, and the matching cuDNN PyPI package name.
      - id: cuda_info
        shell: python
        run: |
          import os
          import sys
          print(sys.version)
          cushort = "${{ inputs.cuda_short_version }}"
          # https://github.com/Jimver/cuda-toolkit/blob/master/src/links/linux-links.ts
          full_version, install_script = {
              "121": ("12.1.0", "https://developer.download.nvidia.com/compute/cuda/12.1.0/local_installers/cuda_12.1.0_530.30.02_linux.run"),
              "118": ("11.8.0", "https://developer.download.nvidia.com/compute/cuda/11.8.0/local_installers/cuda_11.8.0_520.61.05_linux.run"),
              "117": ("11.7.1", "https://developer.download.nvidia.com/compute/cuda/11.7.1/local_installers/cuda_11.7.1_515.65.01_linux.run"),
              "116": ("11.6.2", "https://developer.download.nvidia.com/compute/cuda/11.6.2/local_installers/cuda_11.6.2_510.47.03_linux.run"),
          }[cushort]
          cudnn_pypi_package = {
              "121": "nvidia-cudnn-cu12",
              "118": "nvidia-cudnn-cu11",
              "117": "nvidia-cudnn-cu11",
              "116": "nvidia-cudnn-cu11",
          }[cushort]
          # Append ("a") instead of "r+": "r+" writes from position 0 and can
          # clobber content already present in the outputs file.
          with open(os.environ['GITHUB_OUTPUT'], "a") as fp:
              fp.write("CUDA_VERSION=" + full_version + "\n")
              fp.write("CUDA_VERSION_SUFFIX=cu" + cushort + "\n")
              # fp.write("TORCH_ORG_S3_PATH=s3://pytorch/whl/" + cushort + "\n")
              # fp.write("PUBLISH_PYPI=0\n")
              fp.write("CUDA_INSTALL_SCRIPT=" + install_script + "\n")
              fp.write("CUDNN_PYPI_PACKAGE=" + cudnn_pypi_package + "\n")
      - run: echo "CUDA_VERSION_SUFFIX=${{ steps.cuda_info.outputs.CUDA_VERSION_SUFFIX }}"
      # - run: echo "TORCH_ORG_S3_PATH=${{ steps.cuda_info.outputs.TORCH_ORG_S3_PATH }}"
      # - run: echo "PUBLISH_PYPI=${{ steps.cuda_info.outputs.PUBLISH_PYPI }}"
      # sm_90 (H100) support requires nvcc 11.8+, which is everything in the
      # mapping above except 11.6 and 11.7. (Name fixed: was "11.08+".)
      - name: Add H100 if nvcc 11.8+
        shell: python
        run: |
          import os
          import sys
          print(sys.version)
          cuda_short_version = "${{ inputs.cuda_short_version }}"
          arch_list = os.environ["TORCH_CUDA_ARCH_LIST"]
          if cuda_short_version not in ["116", "117"]:
              arch_list += " 9.0"
          # Append rather than "r+" so earlier GITHUB_ENV entries are preserved.
          with open(os.environ['GITHUB_ENV'], "a") as fp:
              fp.write("TORCH_CUDA_ARCH_LIST=" + arch_list + "\n")
      - run: echo "${TORCH_CUDA_ARCH_LIST}"
      - if: runner.os == 'Linux'
        name: (Linux) install cuda
        run: >
          yum install wget git prename -y &&
          wget -q "${{ steps.cuda_info.outputs.CUDA_INSTALL_SCRIPT }}" -O cuda.run &&
          sh ./cuda.run --silent --toolkit &&
          rm ./cuda.run
      # NOTE(review): checkout@v3 / upload-artifact@v3 still work but are
      # superseded by v4 — upgrading changes artifact semantics, so confirm
      # with consumers before bumping.
      - name: Recursive checkout
        uses: actions/checkout@v3
        with:
          submodules: recursive
          path: "."
          fetch-depth: 0  # for tags
      - if: runner.os != 'Windows'
        name: (Linux) Setup venv for linux
        run: |
          $PY -m venv venv
          . ./venv/bin/activate
          which pip
          echo "PY=$(which python)" >> ${GITHUB_ENV}
          echo "PATH=$PATH" >> ${GITHUB_ENV}
          echo "MAX_JOBS=3" >> ${GITHUB_ENV}
      - name: Define version
        id: sfast_version
        env:
          VERSION_SOURCE: ${{ github.ref_type == 'tag' && 'tag' || 'dev' }}
        run: |
          set -Eeuo pipefail
          git config --global --add safe.directory "*"
          version=$(cat version.txt)
          # Set build version to x.x.x.devYYYYMMDD+torchxxxcu111
          torch_version_suffix=torch$(echo ${{ inputs.torch_version }} | sed 's/\.//g')
          cuda_version_suffix=${{ steps.cuda_info.outputs.CUDA_VERSION_SUFFIX }}
          nightly_tag=$([[ ${VERSION_SOURCE} == 'tag' ]] && echo '' || echo ".dev$(date +%Y%m%d)")
          echo "BUILD_VERSION=${version}${nightly_tag}+${torch_version_suffix}${cuda_version_suffix}" >> ${GITHUB_ENV}
          echo "BUILD_VERSION=${version}${nightly_tag}+${torch_version_suffix}${cuda_version_suffix}" >> ${GITHUB_OUTPUT}
          cat ${GITHUB_ENV}
      - run: echo "sfast-${BUILD_VERSION}"
      - run: echo "release version"
        if: ${{ !contains(steps.sfast_version.outputs.BUILD_VERSION, '.dev') }}
      - name: Setup proper pytorch dependency in "requirements.txt"
        run: |
          sed -i '/torch/d' ./requirements.txt
          echo "torch == ${{ inputs.torch_version }}" >> ./requirements.txt
          cat ./requirements.txt
      - if: runner.os == 'Windows'
        name: (Windows) Setup Runner
        uses: ./.github/actions/setup-windows-runner
        with:
          cuda: ${{ steps.cuda_info.outputs.CUDA_VERSION }}
          python: ${{ inputs.python }}
      - name: Install dependencies
        env:
          CUDNN_VERSION_MAJOR: ${{ inputs.cudnn_version_major }}
          CUDNN_PYPI_PACKAGE: ${{ steps.cuda_info.outputs.CUDNN_PYPI_PACKAGE }}
        run: |
          # Pin cuDNN to the requested major version: >= MAJOR.0.0.0, < (MAJOR+1).0.0.0
          cudnn_next_version_major=$((${CUDNN_VERSION_MAJOR} + 1))
          cudnn_package_name="${CUDNN_PYPI_PACKAGE}>=${CUDNN_VERSION_MAJOR}.0.0.0,<${cudnn_next_version_major}.0.0.0"
          $PY -m pip install wheel setuptools ninja twine "${cudnn_package_name}" -r requirements.txt --extra-index-url https://download.pytorch.org/whl/cu${{ inputs.cuda_short_version }} --no-cache-dir
      - name: Build wheel
        run: |
          $PY setup.py bdist_wheel -d dist/ -k $PLAT_ARG
        env:
          PLAT_ARG: ${{ contains(inputs.os, 'ubuntu') && '--plat-name manylinux2014_x86_64' || '' }}
      - run: du -h dist/*
      - uses: actions/upload-artifact@v3
        with:
          name: ${{ inputs.os }}-py${{ inputs.python }}-torch${{ inputs.torch_version }}+cu${{ inputs.cuda_short_version }}
          path: dist/*.whl
      # Note: it might be helpful to have additional steps that test if the built wheels actually work