From 76c1007ae01eaf7cd434f7ffe30d57447c0030ef Mon Sep 17 00:00:00 2001
From: Dhruv Nair
Date: Tue, 26 Dec 2023 14:42:14 +0000
Subject: [PATCH 1/3] update

---
 docs/source/en/api/pipelines/animatediff.md | 79 ++++++++++++++++++---
 1 file changed, 69 insertions(+), 10 deletions(-)

diff --git a/docs/source/en/api/pipelines/animatediff.md b/docs/source/en/api/pipelines/animatediff.md
index 422d345b9057..ca3cc44a7533 100644
--- a/docs/source/en/api/pipelines/animatediff.md
+++ b/docs/source/en/api/pipelines/animatediff.md
@@ -38,7 +38,7 @@ The following example demonstrates how to use a *MotionAdapter* checkpoint with
 
 ```python
 import torch
-from diffusers import MotionAdapter, AnimateDiffPipeline, DDIMScheduler
+from diffusers import AnimateDiffPipeline, DDIMScheduler, MotionAdapter
 from diffusers.utils import export_to_gif
 
 # Load the motion adapter
@@ -47,7 +47,49 @@ adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-
 model_id = "SG161222/Realistic_Vision_V5.1_noVAE"
 pipe = AnimateDiffPipeline.from_pretrained(model_id, motion_adapter=adapter)
 scheduler = DDIMScheduler.from_pretrained(
-    model_id, subfolder="scheduler", clip_sample=False, timestep_spacing="linspace", steps_offset=1
+    model_id,
+    subfolder="scheduler",
+    clip_sample=False,
+    timestep_spacing="linspace",
+    import torch
+from diffusers import AnimateDiffPipeline, DDIMScheduler, MotionAdapter
+from diffusers.utils import export_to_gif
+
+# Load the motion adapter
+adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2")
+# load SD 1.5 based finetuned model
+model_id = "SG161222/Realistic_Vision_V5.1_noVAE"
+pipe = AnimateDiffPipeline.from_pretrained(model_id, motion_adapter=adapter)
+scheduler = DDIMScheduler.from_pretrained(
+    model_id,
+    subfolder="scheduler",
+    clip_sample=False,
+    timestep_spacing="linspace",
+    beta_schedule="linear",
+    steps_offset=1,
+)
+pipe.scheduler = scheduler
+
+# enable memory savings
+pipe.enable_vae_slicing()
+pipe.enable_model_cpu_offload()
+
+output = pipe(
+    prompt=(
+        "masterpiece, bestquality, highlydetailed, ultradetailed, sunset, "
+        "orange sky, warm lighting, fishing boats, ocean waves seagulls, "
+        "rippling water, wharf, silhouette, serene atmosphere, dusk, evening glow, "
+        "golden hour, coastal landscape, seaside scenery"
+    ),
+    negative_prompt="bad quality, worse quality",
+    num_frames=16,
+    guidance_scale=7.5,
+    num_inference_steps=25,
+    generator=torch.Generator("cpu").manual_seed(42),
+)
+frames = output.frames[0]
+export_to_gif(frames, "animation.gif")
+    steps_offset=1,
 )
 pipe.scheduler = scheduler
@@ -88,7 +130,7 @@ Here are some sample outputs:
 
 <Tip>
 
-AnimateDiff tends to work better with finetuned Stable Diffusion models. If you plan on using a scheduler that can clip samples, make sure to disable it by setting `clip_sample=False` in the scheduler as this can also have an adverse effect on generated samples.
+AnimateDiff tends to work better with finetuned Stable Diffusion models. If you plan on using a scheduler that can clip samples, make sure to disable it by setting `clip_sample=False` in the scheduler as this can also have an adverse effect on generated samples. Additionally, the AnimateDiff checkpoints can be sensitive to the scheduler beta scheduler. We recommend setting this to `linear`.
 
 </Tip>
 
@@ -98,7 +140,7 @@ Motion LoRAs are a collection of LoRAs that work with the `guoyww/animatediff-mo
 
 ```python
 import torch
-from diffusers import MotionAdapter, AnimateDiffPipeline, DDIMScheduler
+from diffusers import AnimateDiffPipeline, DDIMScheduler, MotionAdapter
 from diffusers.utils import export_to_gif
 
 # Load the motion adapter
@@ -106,10 +148,17 @@ adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-
 # load SD 1.5 based finetuned model
 model_id = "SG161222/Realistic_Vision_V5.1_noVAE"
 pipe = AnimateDiffPipeline.from_pretrained(model_id, motion_adapter=adapter)
-pipe.load_lora_weights("guoyww/animatediff-motion-lora-zoom-out", adapter_name="zoom-out")
+pipe.load_lora_weights(
+    "guoyww/animatediff-motion-lora-zoom-out", adapter_name="zoom-out"
+)
 
 scheduler = DDIMScheduler.from_pretrained(
-    model_id, subfolder="scheduler", clip_sample=False, timestep_spacing="linspace", steps_offset=1
+    model_id,
+    subfolder="scheduler",
+    clip_sample=False,
+    beta_schedule="linear",
+    timestep_spacing="linspace",
+    steps_offset=1,
 )
 pipe.scheduler = scheduler
@@ -132,6 +181,7 @@ output = pipe(
 )
 frames = output.frames[0]
 export_to_gif(frames, "animation.gif")
+
 ```
 
@@ -160,7 +210,7 @@ Then you can use the following code to combine Motion LoRAs.
 
 ```python
 import torch
-from diffusers import MotionAdapter, AnimateDiffPipeline, DDIMScheduler
+from diffusers import AnimateDiffPipeline, DDIMScheduler, MotionAdapter
 from diffusers.utils import export_to_gif
 
 # Load the motion adapter
@@ -169,12 +219,20 @@ adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-
 model_id = "SG161222/Realistic_Vision_V5.1_noVAE"
 pipe = AnimateDiffPipeline.from_pretrained(model_id, motion_adapter=adapter)
 
-pipe.load_lora_weights("diffusers/animatediff-motion-lora-zoom-out", adapter_name="zoom-out")
-pipe.load_lora_weights("diffusers/animatediff-motion-lora-pan-left", adapter_name="pan-left")
+pipe.load_lora_weights(
+    "diffusers/animatediff-motion-lora-zoom-out", adapter_name="zoom-out"
+)
+pipe.load_lora_weights(
+    "diffusers/animatediff-motion-lora-pan-left", adapter_name="pan-left"
+)
 pipe.set_adapters(["zoom-out", "pan-left"], adapter_weights=[1.0, 1.0])
 
 scheduler = DDIMScheduler.from_pretrained(
-    model_id, subfolder="scheduler", clip_sample=False, timestep_spacing="linspace", steps_offset=1
+    model_id,
+    subfolder="scheduler",
+    clip_sample=False,
+    timestep_spacing="linspace",
+    steps_offset=1,
 )
 pipe.scheduler = scheduler
@@ -197,6 +255,7 @@ output = pipe(
 )
 frames = output.frames[0]
 export_to_gif(frames, "animation.gif")
+
 ```
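The scheduler settings this first patch standardizes on (`clip_sample=False`, `beta_schedule="linear"`, `timestep_spacing="linspace"`, `steps_offset=1`) can also be applied to an already-constructed pipeline. The following is a minimal sketch of that pattern, not part of the patch itself; it reuses the checkpoints from the examples above and the `DDIMScheduler.from_config` helper rather than the patch's `from_pretrained(..., subfolder="scheduler")` call:

```python
import torch
from diffusers import AnimateDiffPipeline, DDIMScheduler, MotionAdapter

# Build the pipeline as in the docs above.
adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2")
pipe = AnimateDiffPipeline.from_pretrained(
    "SG161222/Realistic_Vision_V5.1_noVAE", motion_adapter=adapter
)

# Swap in a DDIM scheduler with the recommended settings, reusing the
# pipeline's existing scheduler config instead of re-downloading it.
pipe.scheduler = DDIMScheduler.from_config(
    pipe.scheduler.config,
    clip_sample=False,            # sample clipping degrades AnimateDiff output
    beta_schedule="linear",       # AnimateDiff checkpoints are sensitive to this
    timestep_spacing="linspace",
    steps_offset=1,
)
```

`from_config` accepts keyword overrides on top of the existing config, so the result should broadly match the scheduler the patch builds with `from_pretrained`.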
From 40ea8ce23e8006fe38dfc7e91ace75faa27830d5 Mon Sep 17 00:00:00 2001
From: Dhruv Nair
Date: Tue, 26 Dec 2023 14:45:13 +0000
Subject: [PATCH 2/3] update

---
 docs/source/en/api/pipelines/animatediff.md | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/docs/source/en/api/pipelines/animatediff.md b/docs/source/en/api/pipelines/animatediff.md
index ca3cc44a7533..b87c3c017efe 100644
--- a/docs/source/en/api/pipelines/animatediff.md
+++ b/docs/source/en/api/pipelines/animatediff.md
@@ -56,10 +56,10 @@ from diffusers import AnimateDiffPipeline, DDIMScheduler, MotionAdapter
 from diffusers.utils import export_to_gif
 
 # Load the motion adapter
-adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2")
+adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16)
 # load SD 1.5 based finetuned model
 model_id = "SG161222/Realistic_Vision_V5.1_noVAE"
-pipe = AnimateDiffPipeline.from_pretrained(model_id, motion_adapter=adapter)
+pipe = AnimateDiffPipeline.from_pretrained(model_id, motion_adapter=adapter, torch_dtype=torch.float16)
 scheduler = DDIMScheduler.from_pretrained(
     model_id,
     subfolder="scheduler",
@@ -144,12 +144,12 @@ from diffusers import AnimateDiffPipeline, DDIMScheduler, MotionAdapter
 from diffusers.utils import export_to_gif
 
 # Load the motion adapter
-adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2")
+adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16)
 # load SD 1.5 based finetuned model
 model_id = "SG161222/Realistic_Vision_V5.1_noVAE"
-pipe = AnimateDiffPipeline.from_pretrained(model_id, motion_adapter=adapter)
+pipe = AnimateDiffPipeline.from_pretrained(model_id, motion_adapter=adapter, torch_dtype=torch.float16)
 pipe.load_lora_weights(
-    "guoyww/animatediff-motion-lora-zoom-out", adapter_name="zoom-out"
+    "guoyww/animatediff-motion-lora-zoom-out", adapter_name="zoom-out", torch_dtype=torch.float16
 )
 
 scheduler = DDIMScheduler.from_pretrained(
@@ -214,16 +214,16 @@ from diffusers import AnimateDiffPipeline, DDIMScheduler, MotionAdapter
 from diffusers.utils import export_to_gif
 
 # Load the motion adapter
-adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2")
+adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16)
 # load SD 1.5 based finetuned model
 model_id = "SG161222/Realistic_Vision_V5.1_noVAE"
-pipe = AnimateDiffPipeline.from_pretrained(model_id, motion_adapter=adapter)
+pipe = AnimateDiffPipeline.from_pretrained(model_id, motion_adapter=adapter, torch_dtype=torch.float16)
 
 pipe.load_lora_weights(
-    "diffusers/animatediff-motion-lora-zoom-out", adapter_name="zoom-out"
+    "diffusers/animatediff-motion-lora-zoom-out", adapter_name="zoom-out", torch_dtype=torch.float16
 )
 pipe.load_lora_weights(
-    "diffusers/animatediff-motion-lora-pan-left", adapter_name="pan-left"
+    "diffusers/animatediff-motion-lora-pan-left", adapter_name="pan-left", torch_dtype=torch.float16
 )
 pipe.set_adapters(["zoom-out", "pan-left"], adapter_weights=[1.0, 1.0])
 

From 5095916699f5b14c7f4811efdb17b66a417cd104 Mon Sep 17 00:00:00 2001
From: Dhruv Nair
Date: Tue, 26 Dec 2023 15:36:38 +0000
Subject: [PATCH 3/3] update

---
 docs/source/en/api/pipelines/animatediff.md | 45 +++++-------
 1 file changed, 5 insertions(+), 40 deletions(-)

diff --git a/docs/source/en/api/pipelines/animatediff.md b/docs/source/en/api/pipelines/animatediff.md
index b87c3c017efe..fb38687e882e 100644
--- a/docs/source/en/api/pipelines/animatediff.md
+++ b/docs/source/en/api/pipelines/animatediff.md
@@ -41,20 +41,6 @@ import torch
 from diffusers import AnimateDiffPipeline, DDIMScheduler, MotionAdapter
 from diffusers.utils import export_to_gif
 
-# Load the motion adapter
-adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2")
-# load SD 1.5 based finetuned model
-model_id = "SG161222/Realistic_Vision_V5.1_noVAE"
-pipe = AnimateDiffPipeline.from_pretrained(model_id, motion_adapter=adapter)
-scheduler = DDIMScheduler.from_pretrained(
-    model_id,
-    subfolder="scheduler",
-    clip_sample=False,
-    timestep_spacing="linspace",
-    import torch
-from diffusers import AnimateDiffPipeline, DDIMScheduler, MotionAdapter
-from diffusers.utils import export_to_gif
-
 # Load the motion adapter
 adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16)
 # load SD 1.5 based finetuned model
@@ -89,29 +75,7 @@ output = pipe(
 )
 frames = output.frames[0]
 export_to_gif(frames, "animation.gif")
-    steps_offset=1,
-)
-pipe.scheduler = scheduler
-
-# enable memory savings
-pipe.enable_vae_slicing()
-pipe.enable_model_cpu_offload()
-
-output = pipe(
-    prompt=(
-        "masterpiece, bestquality, highlydetailed, ultradetailed, sunset, "
-        "orange sky, warm lighting, fishing boats, ocean waves seagulls, "
-        "rippling water, wharf, silhouette, serene atmosphere, dusk, evening glow, "
-        "golden hour, coastal landscape, seaside scenery"
-    ),
-    negative_prompt="bad quality, worse quality",
-    num_frames=16,
-    guidance_scale=7.5,
-    num_inference_steps=25,
-    generator=torch.Generator("cpu").manual_seed(42),
-)
-frames = output.frames[0]
-export_to_gif(frames, "animation.gif")
 ```
 
 Here are some sample outputs:
@@ -130,7 +94,7 @@ Here are some sample outputs:
 
 <Tip>
 
-AnimateDiff tends to work better with finetuned Stable Diffusion models. If you plan on using a scheduler that can clip samples, make sure to disable it by setting `clip_sample=False` in the scheduler as this can also have an adverse effect on generated samples. Additionally, the AnimateDiff checkpoints can be sensitive to the scheduler beta scheduler. We recommend setting this to `linear`.
+AnimateDiff tends to work better with finetuned Stable Diffusion models. If you plan on using a scheduler that can clip samples, make sure to disable it by setting `clip_sample=False` in the scheduler as this can also have an adverse effect on generated samples. Additionally, the AnimateDiff checkpoints can be sensitive to the beta schedule of the scheduler. We recommend setting this to `linear`.
 
 </Tip>
 
@@ -149,7 +113,7 @@ adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-
 model_id = "SG161222/Realistic_Vision_V5.1_noVAE"
 pipe = AnimateDiffPipeline.from_pretrained(model_id, motion_adapter=adapter, torch_dtype=torch.float16)
 pipe.load_lora_weights(
-    "guoyww/animatediff-motion-lora-zoom-out", adapter_name="zoom-out", torch_dtype=torch.float16
+    "guoyww/animatediff-motion-lora-zoom-out", adapter_name="zoom-out"
 )
 
 scheduler = DDIMScheduler.from_pretrained(
@@ -220,10 +184,10 @@ model_id = "SG161222/Realistic_Vision_V5.1_noVAE"
 pipe = AnimateDiffPipeline.from_pretrained(model_id, motion_adapter=adapter, torch_dtype=torch.float16)
 
 pipe.load_lora_weights(
-    "diffusers/animatediff-motion-lora-zoom-out", adapter_name="zoom-out", torch_dtype=torch.float16
+    "diffusers/animatediff-motion-lora-zoom-out", adapter_name="zoom-out",
 )
 pipe.load_lora_weights(
-    "diffusers/animatediff-motion-lora-pan-left", adapter_name="pan-left", torch_dtype=torch.float16
+    "diffusers/animatediff-motion-lora-pan-left", adapter_name="pan-left",
 )
 pipe.set_adapters(["zoom-out", "pan-left"], adapter_weights=[1.0, 1.0])
 
@@ -232,6 +196,7 @@ scheduler = DDIMScheduler.from_pretrained(
     subfolder="scheduler",
     clip_sample=False,
     timestep_spacing="linspace",
+    beta_schedule="linear",
     steps_offset=1,
 )
 pipe.scheduler = scheduler
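Taken together, the series leaves the Motion LoRA examples loading the motion adapter and pipeline in fp16 while passing no `torch_dtype` to `load_lora_weights`. Below is a minimal sketch of that final shape, with an illustrative re-weighting of the two adapters; the `0.6`/`1.0` split is an assumption for demonstration, not a value from the patches:

```python
import torch
from diffusers import AnimateDiffPipeline, MotionAdapter

# fp16 for the motion adapter and base pipeline, as the final docs show.
adapter = MotionAdapter.from_pretrained(
    "guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16
)
pipe = AnimateDiffPipeline.from_pretrained(
    "SG161222/Realistic_Vision_V5.1_noVAE",
    motion_adapter=adapter,
    torch_dtype=torch.float16,
)

# LoRA loading takes no torch_dtype argument after this series.
pipe.load_lora_weights("diffusers/animatediff-motion-lora-zoom-out", adapter_name="zoom-out")
pipe.load_lora_weights("diffusers/animatediff-motion-lora-pan-left", adapter_name="pan-left")

# Relative motion strengths can be rebalanced at any time without
# reloading; this 0.6/1.0 split is illustrative only.
pipe.set_adapters(["zoom-out", "pan-left"], adapter_weights=[0.6, 1.0])
```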