This example shows how to convert a Stable Diffusion model and run inference with the OpenVINO runtime.
# Create an isolated Python 3.10 environment for this example.
conda create -n stable_diffusion python=3.10
conda activate stable_diffusion
# --force-reinstall replaces any already-installed packages with the pinned versions.
pip install -r requirements.txt --force-reinstall
# Git LFS is needed to fetch the large model weight files.
sudo apt-get install git-lfs
# Fix: initialize Git LFS for the current user. Without this step the clone
# below downloads only small LFS pointer files instead of the real weights.
git lfs install
git clone https://huggingface.co/runwayml/stable-diffusion-v1-5
Convert the PyTorch model to an OpenVINO FP32 model
# Export the PyTorch pipeline as an OpenVINO IR with FP32 precision.
python ../tools/convert.py \
  --precision FP32 \
  --model_id stable-diffusion-v1-5 \
  --output_dir stable-diffusion-v1-5-ov
Convert the PyTorch model to an OpenVINO FP16 model
# Export the PyTorch pipeline as an OpenVINO IR with FP16 precision.
python ../tools/convert.py \
  --precision FP16 \
  --model_id stable-diffusion-v1-5 \
  --output_dir stable-diffusion-v1-5-ov
Convert the PyTorch model to an OpenVINO INT8 model with weight-only compression
# Export with FP16 precision and apply weight-only INT8 compression on top.
python ../tools/convert.py \
  --precision FP16 \
  --compress_weights \
  --model_id stable-diffusion-v1-5 \
  --output_dir stable-diffusion-v1-5-ov
Run the Stable Diffusion OpenVINO FP32 model on an Intel CPU
# Text-to-image generation with the FP32 IR, targeting the CPU device.
python run_sd.py \
  -d CPU \
  -c stable-diffusion-v1-5-ov/FP32 \
  -p "A cute cat"
Run the Stable Diffusion OpenVINO FP16 model on an Intel iGPU with static shapes, using the model cache
# Text-to-image generation with the FP16 IR on the integrated GPU (GPU.0).
# Static shapes plus a cache_dir lets OpenVINO reuse compiled model blobs.
python run_sd.py \
  -d GPU.0 \
  -c stable-diffusion-v1-5-ov/FP16 \
  -p "A cute cat" \
  --static_shape \
  --cache_dir model_cache
Run the Stable Diffusion OpenVINO INT8 model on an Intel dGPU with static shapes, using the model cache
# Text-to-image generation with the INT8 IR on the discrete GPU (GPU.1).
# Static shapes plus a cache_dir lets OpenVINO reuse compiled model blobs.
python run_sd.py \
  -d GPU.1 \
  -c stable-diffusion-v1-5-ov/INT8 \
  -p "A cute cat" \
  --static_shape \
  --cache_dir model_cache