Make sure DDPM and diffusers can be used without Transformers (huggingface#5668)

* fix: import bug

* fix

* fix

* fix import utils for lcm

* fix: pixart alpha init

* Fix

---------

Co-authored-by: Patrick von Platen <[email protected]>
2 people authored and Jimmy committed Apr 26, 2024
1 parent 76d2002 commit 881989b
Showing 3 changed files with 82 additions and 11 deletions.
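The intent of the patch can be smoke-tested in an environment where transformers is not installed. The snippet below is a hedged check, not part of the commit: it imports the package and the Transformers-free DDPM pipeline, which should now succeed at import time.

```python
# Hedged smoke test (not part of this commit): run in a virtualenv without
# transformers installed to confirm that importing diffusers and the DDPM
# pipeline (which has no text encoder) no longer fails at import time.
import importlib.util

import diffusers
from diffusers import DDPMPipeline

print("diffusers version:", diffusers.__version__)
print("transformers installed:", importlib.util.find_spec("transformers") is not None)
print("DDPMPipeline imported:", DDPMPipeline.__name__)
```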
6 changes: 3 additions & 3 deletions src/diffusers/loaders.py
@@ -2390,7 +2390,7 @@ def unfuse_text_encoder_lora(text_encoder):
     def set_adapters_for_text_encoder(
         self,
         adapter_names: Union[List[str], str],
-        text_encoder: Optional[PreTrainedModel] = None,
+        text_encoder: Optional["PreTrainedModel"] = None,  # noqa: F821
         text_encoder_weights: List[float] = None,
     ):
         """
@@ -2429,7 +2429,7 @@ def process_weights(adapter_names, weights):
             )
         set_weights_and_activate_adapters(text_encoder, adapter_names, text_encoder_weights)
 
-    def disable_lora_for_text_encoder(self, text_encoder: Optional[PreTrainedModel] = None):
+    def disable_lora_for_text_encoder(self, text_encoder: Optional["PreTrainedModel"] = None):
         """
         Disables the LoRA layers for the text encoder.
 
@@ -2446,7 +2446,7 @@ def disable_lora_for_text_encoder(self, text_encoder: Optional[PreTrainedModel]
             raise ValueError("Text Encoder not found.")
         set_adapter_layers(text_encoder, enabled=False)
 
-    def enable_lora_for_text_encoder(self, text_encoder: Optional[PreTrainedModel] = None):
+    def enable_lora_for_text_encoder(self, text_encoder: Optional["PreTrainedModel"] = None):
         """
         Enables the LoRA layers for the text encoder.
 
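Quoting the annotation turns `PreTrainedModel` into a forward reference that is never evaluated at runtime, so `loaders.py` can be imported without transformers installed; `# noqa: F821` silences flake8's undefined-name warning for the string. A minimal, self-contained sketch of the pattern (names simplified, not the diffusers code itself):

```python
# Minimal sketch of the quoted-annotation pattern (not the diffusers code itself).
from typing import TYPE_CHECKING, Optional

if TYPE_CHECKING:
    # Only evaluated by static type checkers; transformers is never imported at runtime.
    from transformers import PreTrainedModel


def disable_lora_for_text_encoder(text_encoder: Optional["PreTrainedModel"] = None) -> None:
    # At runtime the annotation is just a string, so defining and calling this
    # function works even when transformers is not installed.
    if text_encoder is None:
        raise ValueError("Text Encoder not found.")
    print(f"Would disable LoRA layers on {type(text_encoder).__name__}")
```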
38 changes: 31 additions & 7 deletions src/diffusers/pipelines/latent_consistency_models/__init__.py
@@ -1,19 +1,40 @@
 from typing import TYPE_CHECKING
 
 from ...utils import (
     DIFFUSERS_SLOW_IMPORT,
+    OptionalDependencyNotAvailable,
     _LazyModule,
+    get_objects_from_module,
+    is_torch_available,
+    is_transformers_available,
 )
 
 
-_import_structure = {
-    "pipeline_latent_consistency_img2img": ["LatentConsistencyModelImg2ImgPipeline"],
-    "pipeline_latent_consistency_text2img": ["LatentConsistencyModelPipeline"],
-}
+_dummy_objects = {}
+_import_structure = {}
 
 
-if TYPE_CHECKING:
-    from .pipeline_latent_consistency_img2img import LatentConsistencyModelImg2ImgPipeline
-    from .pipeline_latent_consistency_text2img import LatentConsistencyModelPipeline
+try:
+    if not (is_transformers_available() and is_torch_available()):
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    from ...utils import dummy_torch_and_transformers_objects  # noqa F403
+
+    _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
+else:
+    _import_structure["pipeline_latent_consistency_img2img"] = ["LatentConsistencyModelImg2ImgPipeline"]
+    _import_structure["pipeline_latent_consistency_text2img"] = ["LatentConsistencyModelPipeline"]
+
+if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
+    try:
+        if not (is_transformers_available() and is_torch_available()):
+            raise OptionalDependencyNotAvailable()
+
+    except OptionalDependencyNotAvailable:
+        from ...utils.dummy_torch_and_transformers_objects import *
+    else:
+        from .pipeline_latent_consistency_img2img import LatentConsistencyModelImg2ImgPipeline
+        from .pipeline_latent_consistency_text2img import LatentConsistencyModelPipeline
+
 else:
     import sys
@@ -24,3 +45,6 @@
         _import_structure,
         module_spec=__spec__,
     )
+
+    for name, value in _dummy_objects.items():
+        setattr(sys.modules[__name__], name, value)
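The guarded import registers the real pipeline classes for lazy loading only when both torch and transformers are present; otherwise placeholder "dummy" objects are collected and exposed in their place, so failure is deferred to the point of use with an install hint rather than happening at import time. A simplified, self-contained sketch of that guard (hedged; the real helpers live in diffusers.utils and are more general):

```python
# Simplified sketch of the guarded-import / dummy-object pattern used above
# (names and helpers simplified; not the diffusers implementation itself).
import importlib.util

_dummy_objects = {}
_import_structure = {}

if importlib.util.find_spec("transformers") is not None:
    # Backend available: advertise the real class for lazy loading.
    _import_structure["pipeline_latent_consistency_text2img"] = ["LatentConsistencyModelPipeline"]
else:
    class LatentConsistencyModelPipeline:  # placeholder "dummy object"
        def __init__(self, *args, **kwargs):
            raise ImportError(
                "LatentConsistencyModelPipeline requires the transformers library: "
                "pip install transformers"
            )

    _dummy_objects["LatentConsistencyModelPipeline"] = LatentConsistencyModelPipeline
```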
49 changes: 48 additions & 1 deletion src/diffusers/pipelines/pixart_alpha/__init__.py
@@ -1 +1,48 @@
-from .pipeline_pixart_alpha import PixArtAlphaPipeline
+from typing import TYPE_CHECKING
+
+from ...utils import (
+    DIFFUSERS_SLOW_IMPORT,
+    OptionalDependencyNotAvailable,
+    _LazyModule,
+    get_objects_from_module,
+    is_torch_available,
+    is_transformers_available,
+)
+
+
+_dummy_objects = {}
+_import_structure = {}
+
+
+try:
+    if not (is_transformers_available() and is_torch_available()):
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    from ...utils import dummy_torch_and_transformers_objects  # noqa F403
+
+    _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
+else:
+    _import_structure["pipeline_pixart_alpha"] = ["PixArtAlphaPipeline"]
+
+if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
+    try:
+        if not (is_transformers_available() and is_torch_available()):
+            raise OptionalDependencyNotAvailable()
+
+    except OptionalDependencyNotAvailable:
+        from ...utils.dummy_torch_and_transformers_objects import *
+    else:
+        from .pipeline_pixart_alpha import PixArtAlphaPipeline
+
+else:
+    import sys
+
+    sys.modules[__name__] = _LazyModule(
+        __name__,
+        globals()["__file__"],
+        _import_structure,
+        module_spec=__spec__,
+    )
+
+    for name, value in _dummy_objects.items():
+        setattr(sys.modules[__name__], name, value)
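From the user's side the effect is that `import diffusers` no longer requires transformers; attribute access still resolves (to a dummy object when the backend is missing), and only actually using the pipeline fails with an informative error. A hedged usage sketch, assuming the top-level package applies the same guard:

```python
# Hedged usage sketch: with the guards above, the failure is deferred from
# import time to the point where the pipeline is actually instantiated.
import importlib.util

import diffusers  # succeeds even without transformers installed

if importlib.util.find_spec("transformers") is None:
    try:
        diffusers.PixArtAlphaPipeline()  # dummy object raises on use
    except Exception as err:  # expected: an informative missing-backend error
        print("Deferred failure:", err)
else:
    print("transformers is installed; the real PixArtAlphaPipeline is available.")
```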
