From c35ab173751ca02334bcba7dd90559e85cc28960 Mon Sep 17 00:00:00 2001
From: Dhruv Nair
Date: Tue, 26 Dec 2023 22:39:46 +0530
Subject: [PATCH] Update Animatediff docs (#6341)

* update

* update

* update
---
 docs/source/en/api/pipelines/animatediff.md | 56 +++++++++++++++------
 1 file changed, 40 insertions(+), 16 deletions(-)

diff --git a/docs/source/en/api/pipelines/animatediff.md b/docs/source/en/api/pipelines/animatediff.md
index 422d345b90578..fb38687e882e0 100644
--- a/docs/source/en/api/pipelines/animatediff.md
+++ b/docs/source/en/api/pipelines/animatediff.md
@@ -38,16 +38,21 @@ The following example demonstrates how to use a *MotionAdapter* checkpoint with
 
 ```python
 import torch
-from diffusers import MotionAdapter, AnimateDiffPipeline, DDIMScheduler
+from diffusers import AnimateDiffPipeline, DDIMScheduler, MotionAdapter
 from diffusers.utils import export_to_gif
 
 # Load the motion adapter
-adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2")
+adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16)
 # load SD 1.5 based finetuned model
 model_id = "SG161222/Realistic_Vision_V5.1_noVAE"
-pipe = AnimateDiffPipeline.from_pretrained(model_id, motion_adapter=adapter)
+pipe = AnimateDiffPipeline.from_pretrained(model_id, motion_adapter=adapter, torch_dtype=torch.float16)
 scheduler = DDIMScheduler.from_pretrained(
-    model_id, subfolder="scheduler", clip_sample=False, timestep_spacing="linspace", steps_offset=1
+    model_id,
+    subfolder="scheduler",
+    clip_sample=False,
+    timestep_spacing="linspace",
+    beta_schedule="linear",
+    steps_offset=1,
 )
 pipe.scheduler = scheduler
 
@@ -70,6 +75,7 @@ output = pipe(
 )
 frames = output.frames[0]
 export_to_gif(frames, "animation.gif")
+
 ```
 
 Here are some sample outputs:
@@ -88,7 +94,7 @@ Here are some sample outputs:
 
 <Tip>
 
-AnimateDiff tends to work better with finetuned Stable Diffusion models. If you plan on using a scheduler that can clip samples, make sure to disable it by setting `clip_sample=False` in the scheduler as this can also have an adverse effect on generated samples.
+AnimateDiff tends to work better with finetuned Stable Diffusion models. If you plan on using a scheduler that can clip samples, make sure to disable it by setting `clip_sample=False` in the scheduler as this can also have an adverse effect on generated samples. Additionally, the AnimateDiff checkpoints can be sensitive to the beta schedule of the scheduler. We recommend setting this to `linear`.
 
 </Tip>
 
@@ -98,18 +104,25 @@ Motion LoRAs are a collection of LoRAs that work with the `guoyww/animatediff-mo
 
 ```python
 import torch
-from diffusers import MotionAdapter, AnimateDiffPipeline, DDIMScheduler
+from diffusers import AnimateDiffPipeline, DDIMScheduler, MotionAdapter
 from diffusers.utils import export_to_gif
 
 # Load the motion adapter
-adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2")
+adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16)
 # load SD 1.5 based finetuned model
 model_id = "SG161222/Realistic_Vision_V5.1_noVAE"
-pipe = AnimateDiffPipeline.from_pretrained(model_id, motion_adapter=adapter)
-pipe.load_lora_weights("guoyww/animatediff-motion-lora-zoom-out", adapter_name="zoom-out")
+pipe = AnimateDiffPipeline.from_pretrained(model_id, motion_adapter=adapter, torch_dtype=torch.float16)
+pipe.load_lora_weights(
+    "guoyww/animatediff-motion-lora-zoom-out", adapter_name="zoom-out"
+)
 
 scheduler = DDIMScheduler.from_pretrained(
-    model_id, subfolder="scheduler", clip_sample=False, timestep_spacing="linspace", steps_offset=1
+    model_id,
+    subfolder="scheduler",
+    clip_sample=False,
+    beta_schedule="linear",
+    timestep_spacing="linspace",
+    steps_offset=1,
 )
 pipe.scheduler = scheduler
 
@@ -132,6 +145,7 @@ output = pipe(
 )
 frames = output.frames[0]
 export_to_gif(frames, "animation.gif")
+
 ```
 
 
@@ -160,21 +174,30 @@ Then you can use the following code to combine Motion LoRAs.
 
 ```python
 import torch
-from diffusers import MotionAdapter, AnimateDiffPipeline, DDIMScheduler
+from diffusers import AnimateDiffPipeline, DDIMScheduler, MotionAdapter
 from diffusers.utils import export_to_gif
 
 # Load the motion adapter
-adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2")
+adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16)
 # load SD 1.5 based finetuned model
 model_id = "SG161222/Realistic_Vision_V5.1_noVAE"
-pipe = AnimateDiffPipeline.from_pretrained(model_id, motion_adapter=adapter)
+pipe = AnimateDiffPipeline.from_pretrained(model_id, motion_adapter=adapter, torch_dtype=torch.float16)
 
-pipe.load_lora_weights("diffusers/animatediff-motion-lora-zoom-out", adapter_name="zoom-out")
-pipe.load_lora_weights("diffusers/animatediff-motion-lora-pan-left", adapter_name="pan-left")
+pipe.load_lora_weights(
+    "diffusers/animatediff-motion-lora-zoom-out", adapter_name="zoom-out",
+)
+pipe.load_lora_weights(
+    "diffusers/animatediff-motion-lora-pan-left", adapter_name="pan-left",
+)
 pipe.set_adapters(["zoom-out", "pan-left"], adapter_weights=[1.0, 1.0])
 
 scheduler = DDIMScheduler.from_pretrained(
-    model_id, subfolder="scheduler", clip_sample=False, timestep_spacing="linspace", steps_offset=1
+    model_id,
+    subfolder="scheduler",
+    clip_sample=False,
+    timestep_spacing="linspace",
+    beta_schedule="linear",
+    steps_offset=1,
 )
 pipe.scheduler = scheduler
 
@@ -197,6 +220,7 @@ output = pipe(
 )
 frames = output.frames[0]
 export_to_gif(frames, "animation.gif")
+
 ```
 
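
Note for reviewers: a quick way to sanity-check the settings this patch standardizes on (fp16 weights, plus `clip_sample=False` and `beta_schedule="linear"` on the DDIM scheduler) is the condensed sketch below. It reuses the checkpoints from the hunks above; the prompt, seed, and the optional `enable_vae_slicing()`/`enable_model_cpu_offload()` calls are illustrative additions and not part of this patch.

```python
import torch
from diffusers import AnimateDiffPipeline, DDIMScheduler, MotionAdapter
from diffusers.utils import export_to_gif

# Same checkpoints as the patched docs, loaded in fp16 as the patch recommends.
adapter = MotionAdapter.from_pretrained(
    "guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16
)
pipe = AnimateDiffPipeline.from_pretrained(
    "SG161222/Realistic_Vision_V5.1_noVAE",
    motion_adapter=adapter,
    torch_dtype=torch.float16,
)

# The two scheduler settings the updated Tip calls out: no sample clipping
# and a linear beta schedule.
pipe.scheduler = DDIMScheduler.from_config(
    pipe.scheduler.config,
    clip_sample=False,
    timestep_spacing="linspace",
    beta_schedule="linear",
    steps_offset=1,
)

# Optional memory savers; supported by the pipeline but not part of this patch.
pipe.enable_vae_slicing()
pipe.enable_model_cpu_offload()

output = pipe(
    prompt="masterpiece, best quality, sunset over a calm ocean",  # placeholder prompt
    negative_prompt="bad quality, worse quality",
    num_frames=16,
    guidance_scale=7.5,
    num_inference_steps=25,
    generator=torch.Generator("cpu").manual_seed(42),
)
export_to_gif(output.frames[0], "animation.gif")
```

Building the scheduler with `from_config(pipe.scheduler.config, ...)` should be equivalent to the `from_pretrained(model_id, subfolder="scheduler", ...)` form used in the docs; both apply the same overrides to the same checkpoint config.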
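One more note: the combined-LoRA hunk uses equal `adapter_weights=[1.0, 1.0]`, but `set_adapters` accepts arbitrary floats, which is useful when one motion dominates the output. A one-line sketch, assuming `pipe` and the two adapters were set up exactly as in the last hunk; the 0.6 weight is a hypothetical value for illustration:

```python
# Favor the zoom-out motion over the pan by down-weighting the second adapter.
pipe.set_adapters(["zoom-out", "pan-left"], adapter_weights=[1.0, 0.6])
```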