# infer_style_sd15.py
import torch
from diffusers import StableDiffusionPipeline, UniPCMultistepScheduler
from PIL import Image
from ip_adapter import IPAdapter
base_model_path = "sd-legacy/stable-diffusion-v1-5"
image_encoder_path = "models/image_encoder"
ip_ckpt = "models/ip-adapter_sd15.bin"
device = "cuda"
# load SD 1.5 pipeline
pipe = StableDiffusionPipeline.from_pretrained(
base_model_path,
torch_dtype=torch.float16,
)
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
pipe.enable_vae_tiling()
# load ip-adapter
# target_blocks=["block"] for the original IP-Adapter
# target_blocks=["up_blocks.1"] for style blocks only (experimental; the effect is less pronounced than with SDXL)
# target_blocks=["down_blocks.2", "mid_block", "up_blocks.1"] for style+layout blocks (experimental; the effect is less pronounced than with SDXL)
ip_model = IPAdapter(pipe, image_encoder_path, ip_ckpt, device, target_blocks=["block"])
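# A minimal sketch of the alternative configurations listed above (assuming the same
# IPAdapter constructor signature); use one of these in place of the line above:
# ip_model = IPAdapter(pipe, image_encoder_path, ip_ckpt, device, target_blocks=["up_blocks.1"])  # style only
# ip_model = IPAdapter(pipe, image_encoder_path, ip_ckpt, device, target_blocks=["down_blocks.2", "mid_block", "up_blocks.1"])  # style + layout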
image = "./assets/3.jpg"
image = Image.open(image)
image = image.resize((512, 512))
# set negative content
neg_content = "a girl"
neg_content_scale = 0.8
if neg_content is not None:
    from transformers import CLIPTextModelWithProjection, CLIPTokenizer

    text_encoder = CLIPTextModelWithProjection.from_pretrained("laion/CLIP-ViT-H-14-laion2B-s32B-b79K").to(pipe.device, dtype=pipe.dtype)
    tokenizer = CLIPTokenizer.from_pretrained("laion/CLIP-ViT-H-14-laion2B-s32B-b79K")
    tokens = tokenizer([neg_content], return_tensors='pt').to(pipe.device)
    neg_content_emb = text_encoder(**tokens).text_embeds
    neg_content_emb *= neg_content_scale
else:
    neg_content_emb = None
# generate image with content subtraction
images = ip_model.generate(
    pil_image=image,
    prompt="a cat, masterpiece, best quality, high quality",
    negative_prompt="text, watermark, lowres, low quality, worst quality, deformed, glitch, low contrast, noisy, saturation, blurry",
    scale=1.0,
    guidance_scale=5,
    num_samples=1,
    num_inference_steps=30,
    seed=42,
    neg_content_emb=neg_content_emb,
)
images[0].save("result.png")
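# A minimal follow-up sketch: generate() returns a list of PIL images, so with
# num_samples > 1 each sample can be saved separately (hypothetical filenames):
# for i, img in enumerate(images):
#     img.save(f"result_{i}.png")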