chore: apply black formatter (#153)
This commit applies the Black formatter to the codebase to ensure consistent code formatting.
rickstaa authored Aug 13, 2024
1 parent 9e8725e commit 915c050
Showing 9 changed files with 55 additions and 26 deletions.
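
For context, the changes below are purely mechanical rewrites produced by Black, which rewraps any statement longer than its default 88-character line limit and adds a trailing comma when it explodes a call or import onto multiple lines. The sketch below reproduces that behaviour with Black's Python API; it is illustrative only and not part of this commit, and the exact invocation used on the codebase (for example `python -m black runner/`) is an assumption.

# Minimal sketch (assumes the `black` package is installed; not part of this commit).
# Black parses the source, sees that the call exceeds its 88-character default line
# length, and explodes it one argument per line with a trailing comma -- the same
# rewrite shown in the audio_to_text.py hunk below.
import black

src = (
    "model = AutoModelForSpeechSeq2Seq.from_pretrained("
    "model_id, low_cpu_mem_usage=True, use_safetensors=True, "
    "cache_dir=get_model_dir(), **kwargs).to(torch_device)\n"
)

print(black.format_str(src, mode=black.Mode()))
# Expected output:
# model = AutoModelForSpeechSeq2Seq.from_pretrained(
#     model_id,
#     low_cpu_mem_usage=True,
#     use_safetensors=True,
#     cache_dir=get_model_dir(),
#     **kwargs,
# ).to(torch_device)
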
6 changes: 5 additions & 1 deletion runner/app/pipelines/audio_to_text.py
@@ -45,7 +45,11 @@ def __init__(self, model_id: str):
kwargs["torch_dtype"] = torch.bfloat16

model = AutoModelForSpeechSeq2Seq.from_pretrained(
model_id, low_cpu_mem_usage=True, use_safetensors=True, cache_dir=get_model_dir(), **kwargs
model_id,
low_cpu_mem_usage=True,
use_safetensors=True,
cache_dir=get_model_dir(),
**kwargs,
).to(torch_device)

processor = AutoProcessor.from_pretrained(model_id, cache_dir=get_model_dir())
22 changes: 15 additions & 7 deletions runner/app/pipelines/image_to_image.py
@@ -6,13 +6,21 @@
import PIL
import torch
from app.pipelines.base import Pipeline
from app.pipelines.utils import (SafetyChecker, get_model_dir,
get_torch_device, is_lightning_model,
is_turbo_model)
from diffusers import (AutoPipelineForImage2Image,
EulerAncestralDiscreteScheduler, EulerDiscreteScheduler,
StableDiffusionInstructPix2PixPipeline,
StableDiffusionXLPipeline, UNet2DConditionModel)
from app.pipelines.utils import (
SafetyChecker,
get_model_dir,
get_torch_device,
is_lightning_model,
is_turbo_model,
)
from diffusers import (
AutoPipelineForImage2Image,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
StableDiffusionInstructPix2PixPipeline,
StableDiffusionXLPipeline,
UNet2DConditionModel,
)
from huggingface_hub import file_download, hf_hub_download
from PIL import ImageFile
from safetensors.torch import load_file
3 changes: 1 addition & 2 deletions runner/app/pipelines/optim/sfast.py
@@ -5,8 +5,7 @@

import logging

from sfast.compilers.diffusion_pipeline_compiler import (CompilationConfig,
compile)
from sfast.compilers.diffusion_pipeline_compiler import CompilationConfig, compile

logger = logging.getLogger(__name__)

2 changes: 1 addition & 1 deletion runner/app/pipelines/text_to_image.py
@@ -17,10 +17,10 @@
from diffusers import (
AutoPipelineForText2Image,
EulerDiscreteScheduler,
FluxPipeline,
StableDiffusion3Pipeline,
StableDiffusionXLPipeline,
UNet2DConditionModel,
FluxPipeline,
)
from diffusers.models import AutoencoderKL
from huggingface_hub import file_download, hf_hub_download
10 changes: 7 additions & 3 deletions runner/app/pipelines/upscale.py
@@ -5,9 +5,13 @@
import PIL
import torch
from app.pipelines.base import Pipeline
from app.pipelines.utils import (SafetyChecker, get_model_dir,
get_torch_device, is_lightning_model,
is_turbo_model)
from app.pipelines.utils import (
SafetyChecker,
get_model_dir,
get_torch_device,
is_lightning_model,
is_turbo_model,
)
from diffusers import StableDiffusionUpscalePipeline
from huggingface_hub import file_download
from PIL import ImageFile
14 changes: 10 additions & 4 deletions runner/app/pipelines/utils/__init__.py
@@ -1,6 +1,12 @@
"""This module contains several utility functions that are used across the pipelines module."""

from app.pipelines.utils.utils import (SafetyChecker, get_model_dir,
get_model_path, get_torch_device,
is_lightning_model, is_turbo_model,
split_prompt, validate_torch_device)
from app.pipelines.utils.utils import (
SafetyChecker,
get_model_dir,
get_model_path,
get_torch_device,
is_lightning_model,
is_turbo_model,
split_prompt,
validate_torch_device,
)
11 changes: 7 additions & 4 deletions runner/app/pipelines/utils/utils.py
@@ -4,15 +4,14 @@
import os
import re
from pathlib import Path
from typing import Optional
from typing import Dict, Optional

import numpy as np
import torch
from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
from PIL import Image
from torch import dtype as TorchDtype
from transformers import CLIPImageProcessor
from typing import Dict

logger = logging.getLogger(__name__)

@@ -99,10 +98,14 @@ def split_prompt(
Returns:
Dict[str, str]: A dictionary of all prompts, including the main prompt.
"""
prompts = [prompt.strip() for prompt in input_prompt.split(separator, max_splits) if prompt.strip()]
prompts = [
prompt.strip()
for prompt in input_prompt.split(separator, max_splits)
if prompt.strip()
]
if not prompts:
return {}

start_index = max(1, len(prompts) - max_splits) if max_splits >= 0 else 1

prompt_dict = {f"{key_prefix}": prompts[0]}
10 changes: 8 additions & 2 deletions runner/gen_openapi.py
@@ -5,8 +5,14 @@

import yaml
from app.main import app, use_route_names_as_operation_ids
from app.routes import (audio_to_text, health, image_to_image, image_to_video,
text_to_image, upscale)
from app.routes import (
audio_to_text,
health,
image_to_image,
image_to_video,
text_to_image,
upscale,
)
from fastapi.openapi.utils import get_openapi

# Specify Endpoints for OpenAPI schema generation.
3 changes: 1 addition & 2 deletions runner/modal_app.py
@@ -2,8 +2,7 @@
import os
from pathlib import Path

from app.main import (config_logging, load_route,
use_route_names_as_operation_ids)
from app.main import config_logging, load_route, use_route_names_as_operation_ids
from app.routes import health
from modal import Image, Secret, Stub, Volume, asgi_app, enter, method
