diff --git a/Dockerfile.opencl.amd b/Dockerfile.opencl.amd
index 03daecf..5366373 100644
--- a/Dockerfile.opencl.amd
+++ b/Dockerfile.opencl.amd
@@ -3,7 +3,7 @@ FROM ubuntu:22.04
 ARG DEBIAN_FRONTEND=noninteractive
 
 ARG PYTORCH_OCL_VERSION=0.2.0
-ARG TORCH_VERSION=2.5
+ARG TORCH_VERSION=2.4
 ARG PYTHON_VERSION=cp312
 ARG PLATFORM=linux_x86_64
 
diff --git a/Dockerfile.opencl.nvidia b/Dockerfile.opencl.nvidia
index 219c042..e0f1702 100644
--- a/Dockerfile.opencl.nvidia
+++ b/Dockerfile.opencl.nvidia
@@ -1,30 +1,36 @@
 FROM python:3.12.7
 
-ENV DEBIAN_FRONTEND="noninteractive"
+ARG PYTORCH_OCL_VERSION=0.2.0
+ARG TORCH_VERSION=2.4
+ARG PYTHON_VERSION=cp312
+ARG PLATFORM=linux_x86_64
+
+ENV WHL_FILE=pytorch_ocl-${PYTORCH_OCL_VERSION}+torch${TORCH_VERSION}-${PYTHON_VERSION}-none-${PLATFORM}.whl
+ENV WHL_URL=https://github.com/artyom-beilis/pytorch_dlprim/releases/download/${PYTORCH_OCL_VERSION}/${WHL_FILE}
+
+ENV NVIDIA_VISIBLE_DEVICES all
+ENV NVIDIA_DRIVER_CAPABILITIES compute,utility
 
 WORKDIR /app
 
 RUN apt update && \
     apt full-upgrade -y && \
-    apt install python3 python3-dev git ocl-icd-opencl-dev opencl-clhpp-headers opencl-c-headers ocl-icd-libopencl1 clinfo -y && \
-    pip3 install --upgrade pip
+    apt install python3 python3-full python3-pip python3-venv git wget ocl-icd-opencl-dev opencl-clhpp-headers opencl-c-headers opencl-headers ocl-icd-libopencl1 clinfo -y && \
+    python3 -m venv /app/venv && \
+    /app/venv/bin/pip install --upgrade pip
 
-COPY requirements.txt /app/requirements.txt
-RUN pip3 install --no-cache-dir -r requirements.txt --extra-index-url https://download.pytorch.org/whl/cpu
-
-# Install OpenCL backend for PyTorch
-RUN wget https://github.com/artyom-beilis/pytorch_dlprim/releases/download/0.2.0/pytorch_ocl-0.2.0+torch2.5-cp312-none-linux_x86_64.whl && \
-    pip3 install pytorch_ocl-0.2.0+torch2.5-cp312-none-linux_x86_64.whl && \
-    rm pytorch_ocl-0.2.0+torch2.5-cp312-none-linux_x86_64.whl
-
-# Install Nvidia OpenCL drivers
+# Configure OpenCL ICD loaders
 RUN mkdir -p /etc/OpenCL/vendors && \
     echo "libnvidia-opencl.so.1" > /etc/OpenCL/vendors/nvidia.icd
 
-ENV NVIDIA_VISIBLE_DEVICES all
-ENV NVIDIA_DRIVER_CAPABILITIES compute,utility,display
-RUN rm -f /etc/OpenCL/vendors/mesa.icd
+COPY requirements.txt /app/requirements.txt
+RUN /app/venv/bin/pip install --no-cache-dir -r requirements.txt --extra-index-url https://download.pytorch.org/whl/cpu
+
+# Install OpenCL backend for PyTorch
+RUN wget ${WHL_URL} && \
+    /app/venv/bin/pip install ${WHL_FILE} && \
+    rm ${WHL_FILE}
 
 COPY . /app
 
-CMD ["fastapi", "run", "main.py", "--proxy-headers", "--host", "0.0.0.0", "--port", "8000"]
\ No newline at end of file
+CMD ["/app/venv/bin/python", "-m", "fastapi", "run", "main.py", "--proxy-headers", "--host", "0.0.0.0", "--port", "8000"]
\ No newline at end of file
diff --git a/README.md b/README.md
index 37475e1..fbd274e 100644
--- a/README.md
+++ b/README.md
@@ -86,11 +86,12 @@ services:
 ```yml
 services:
   image_video_classification_cuda:
-    image: ghcr.io/doppeltilde/image_video_classification:latest-cuda
+    image: ghcr.io/doppeltilde/image_video_classification:latest-opencl-amd
     ports:
       - "8000:8000"
     volumes:
       - ./models:/root/.cache/huggingface/hub:rw
+      - /tmp/.X11-unix:/tmp/.X11-unix
     environment:
       - DEFAULT_MODEL_NAME
       - BATCH_SIZE
@@ -99,13 +100,14 @@ services:
       - USE_API_KEYS
       - API_KEYS
     restart: unless-stopped
-    deploy:
-      resources:
-        reservations:
-          devices:
-            - driver: nvidia
-              count: all
-              capabilities: [ gpu ]
+    devices:
+      - /dev/kfd
+      - /dev/dri
+    security_opt:
+      - seccomp:unconfined
+    group_add:
+      - "39"
+      - "109"
 ```
 
 - Create a `.env` file and set the preferred values.
diff --git a/docker-compose.yml b/docker-compose.yml
index e4be069..4bef8ce 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -13,25 +13,3 @@ services:
       - USE_API_KEYS
       - API_KEYS
     restart: unless-stopped
-
-  cuda:
-    image: ghcr.io/doppeltilde/image_video_classification:latest-cuda
-    ports:
-      - "8000:8000"
-    volumes:
-      - ./models:/root/.cache/huggingface/hub:rw
-    environment:
-      - DEFAULT_MODEL_NAME
-      - BATCH_SIZE
-      - ACCESS_TOKEN
-      - DEFAULT_SCORE
-      - USE_API_KEYS
-      - API_KEYS
-    restart: unless-stopped
-    deploy:
-      resources:
-        reservations:
-          devices:
-            - driver: nvidia
-              count: all
-              capabilities: [ gpu ]
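
Note (not part of the diff): a minimal sketch of how the reworked `Dockerfile.opencl.nvidia` image might be built and smoke-tested. The tag `image_video_classification:opencl-nvidia` is illustrative; the commands assume the repository root as build context and the NVIDIA Container Toolkit on the host, so that `--gpus all` exposes the driver's OpenCL library to the ICD entry configured in the image.

```sh
# Build the NVIDIA OpenCL variant (illustrative tag, repository root as build context).
docker build -f Dockerfile.opencl.nvidia -t image_video_classification:opencl-nvidia .

# clinfo is installed by the Dockerfile; running it here overrides CMD and should
# list an NVIDIA OpenCL platform if the ICD entry and --gpus passthrough work.
docker run --rm --gpus all image_video_classification:opencl-nvidia clinfo

# Start the FastAPI service, caching Hugging Face models on the host
# the same way docker-compose.yml does.
docker run --rm --gpus all -p 8000:8000 \
  -v "$PWD/models:/root/.cache/huggingface/hub:rw" \
  image_video_classification:opencl-nvidia
```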