Skip to content

Commit

Permalink
[CLEANUP]
Browse files Browse the repository at this point in the history
  • Loading branch information
Kye committed Apr 16, 2024
1 parent b6e153d commit 445ff8d
Showing 1 changed file with 48 additions and 36 deletions.
84 changes: 48 additions & 36 deletions text_to_video.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,6 @@
allow_headers=["*"],
)


def text_to_video(
task: str,
model_name: str = "ByteDance/AnimateDiff-Lightning",
Expand All @@ -55,43 +54,56 @@ def text_to_video(
Returns:
str: The path to the exported GIF file.
"""
try:

device = "cuda"
dtype = torch.float16

repo = model_name
ckpt = f"animatediff_lightning_{inference_steps}step_diffusers.safetensors"
base = "emilianJR/epiCRealism" # Choose your favorite base model.
adapter = MotionAdapter().to(device, dtype)
adapter.load_state_dict(load_file(hf_hub_download(repo, ckpt), device=device))

pipe = AnimateDiffPipeline.from_pretrained(
base, motion_adapter=adapter, torch_dtype=dtype
).to(device)

logger.info(f"Initialized Model: {model_name}")


pipe.scheduler = EulerDiscreteScheduler.from_config(
pipe.scheduler.config,
timestep_spacing="trailing",
beta_schedule="linear",
)

device = "cuda"
dtype = torch.float16

repo = model_name
ckpt = f"animatediff_lightning_{inference_steps}step_diffusers.safetensors"
base = "emilianJR/epiCRealism" # Choose your favorite base model.
adapter = MotionAdapter().to(device, dtype)
adapter.load_state_dict(load_file(hf_hub_download(repo, ckpt), device=device))

pipe = AnimateDiffPipeline.from_pretrained(
base, motion_adapter=adapter, torch_dtype=dtype
).to(device)

pipe.scheduler = EulerDiscreteScheduler.from_config(
pipe.scheduler.config,
timestep_spacing="trailing",
beta_schedule="linear",
)
# outputs = []
# for i in range(n):
# output = pipe(
# prompt=task,
# guidance_scale=guidance_scale,
# num_inference_steps=inference_steps,
# )
# outputs.append(output)
# out = export_to_gif([output], f"{output_path}_{i}.gif")
# else:
# out = export_to_video([output], f"{output_path}_{i}.mp4")
output = pipe(
prompt = task,
guidance_scale = guidance_scale,
num_inference_steps = inference_steps
)

logger.info(f"Output ready: {output}")

out = export_to_gif(output.frames[0], output_path)
logger.info(f"Exported to GIF: {out}")
return out
except Exception as e:
logger.error(f"Error: {e}")
return None

# outputs = []
# for i in range(n):
# output = pipe(
# prompt=task,
# guidance_scale=guidance_scale,
# num_inference_steps=inference_steps,
# )
# outputs.append(output)
# if output_type == ".gif":
# out = export_to_gif([output], f"{output_path}_{i}.gif")
# else:
# out = export_to_video([output], f"{output_path}_{i}.mp4")
output = pipe(
prompt=task, guidance_scale=guidance_scale, num_inference_steps=inference_steps
)
output = export_to_gif(output.frames[0], output_path)
return output


@app.post("/v1/chat/completions", response_model=TextToVideoResponse)
Expand Down

0 comments on commit 445ff8d

Please sign in to comment.