diff --git a/README.md b/README.md
index d5f68be..4701231 100644
--- a/README.md
+++ b/README.md
@@ -4,7 +4,7 @@
 
 FastAPI powered API for [Fooocus](https://github.com/lllyasviel/Fooocus)
 
-Currently loaded Fooocus version: 2.1.741
+Currently loaded Fooocus version: 2.1.755
 
 ### Run with Replicate
 Now you can use Fooocus-API by Replicate, the model is in [konieshadow/fooocus-api](https://replicate.com/konieshadow/fooocus-api).
diff --git a/docs/openapi.json b/docs/openapi.json
index 8f216a1..a74d8b8 100644
--- a/docs/openapi.json
+++ b/docs/openapi.json
@@ -548,38 +548,6 @@
             ],
             "title": "AllModelNamesResponse"
         },
-        "AspectRatio": {
-            "type": "string",
-            "enum": [
-                "704×1408",
-                "704×1344",
-                "768×1344",
-                "768×1280",
-                "832×1216",
-                "832×1152",
-                "896×1152",
-                "896×1088",
-                "960×1088",
-                "960×1024",
-                "1024×1024",
-                "1024×960",
-                "1088×960",
-                "1088×896",
-                "1152×896",
-                "1152×832",
-                "1216×832",
-                "1280×768",
-                "1344×768",
-                "1344×704",
-                "1408×704",
-                "1472×704",
-                "1536×640",
-                "1600×640",
-                "1664×576",
-                "1728×576"
-            ],
-            "title": "AspectRatio"
-        },
         "AsyncJobResponse": {
             "properties": {
                 "job_id": {
@@ -674,7 +642,7 @@
                 "negative_prompt": {
                     "type": "string",
                     "title": "Negative Prompt",
-                    "default": ""
+                    "default": "(embedding:unaestheticXLv31:0.8), low quality, watermark"
                 },
                 "style_selections": {
                     "items": {
@@ -685,7 +653,11 @@
                     "description": "Fooocus style selections, seperated by comma",
                     "default": [
                         "Fooocus V2",
-                        "Default (Slightly Cinematic)"
+                        "Fooocus Masterpiece",
+                        "SAI Anime",
+                        "SAI Digital Art",
+                        "SAI Enhance",
+                        "SAI Fantasy Art"
                     ]
                 },
                 "performance_selection": {
@@ -697,12 +669,9 @@
                     "default": "Speed"
                 },
                 "aspect_ratios_selection": {
-                    "allOf": [
-                        {
-                            "$ref": "#/components/schemas/AspectRatio"
-                        }
-                    ],
-                    "default": "1152×896"
+                    "type": "string",
+                    "title": "Aspect Ratios Selection",
+                    "default": "896×1152"
                 },
                 "image_number": {
                     "type": "integer",
@@ -735,12 +704,12 @@
                 "base_model_name": {
                     "type": "string",
                     "title": "Base Model Name",
-                    "default": "sd_xl_base_1.0_0.9vae.safetensors"
+                    "default": "bluePencilXL_v050.safetensors"
                 },
                 "refiner_model_name": {
                     "type": "string",
                     "title": "Refiner Model Name",
-                    "default": "sd_xl_refiner_1.0_0.9vae.safetensors"
+                    "default": "DreamShaper_8_pruned.safetensors"
                 },
                 "l1": {
                     "anyOf": [
@@ -1034,7 +1003,7 @@
                 "negative_prompt": {
                     "type": "string",
                     "title": "Negative Prompt",
-                    "default": ""
+                    "default": "(embedding:unaestheticXLv31:0.8), low quality, watermark"
                 },
                 "style_selections": {
                     "items": {
@@ -1045,7 +1014,11 @@
                     "description": "Fooocus style selections, seperated by comma",
                     "default": [
                         "Fooocus V2",
-                        "Default (Slightly Cinematic)"
+                        "Fooocus Masterpiece",
+                        "SAI Anime",
+                        "SAI Digital Art",
+                        "SAI Enhance",
+                        "SAI Fantasy Art"
                     ]
                 },
                 "performance_selection": {
@@ -1057,12 +1030,9 @@
                     "default": "Speed"
                 },
                 "aspect_ratios_selection": {
-                    "allOf": [
-                        {
-                            "$ref": "#/components/schemas/AspectRatio"
-                        }
-                    ],
-                    "default": "1152×896"
+                    "type": "string",
+                    "title": "Aspect Ratios Selection",
+                    "default": "896×1152"
                 },
                 "image_number": {
                     "type": "integer",
@@ -1095,12 +1065,12 @@
                 "base_model_name": {
                     "type": "string",
                     "title": "Base Model Name",
-                    "default": "sd_xl_base_1.0_0.9vae.safetensors"
+                    "default": "bluePencilXL_v050.safetensors"
                 },
                 "refiner_model_name": {
                     "type": "string",
                     "title": "Refiner Model Name",
-                    "default": "sd_xl_refiner_1.0_0.9vae.safetensors"
+                    "default": "DreamShaper_8_pruned.safetensors"
                 },
                 "l1": {
                     "anyOf": [
@@ -1228,7 +1198,7 @@
                 "negative_prompt": {
                     "type": "string",
                     "title": "Negative Prompt",
-                    "default": ""
+                    "default": "(embedding:unaestheticXLv31:0.8), low quality, watermark"
                 },
                 "style_selections": {
                     "items": {
@@ -1239,7 +1209,11 @@
                     "description": "Fooocus style selections, seperated by comma",
                     "default": [
                         "Fooocus V2",
-                        "Default (Slightly Cinematic)"
+                        "Fooocus Masterpiece",
+                        "SAI Anime",
+                        "SAI Digital Art",
+                        "SAI Enhance",
+                        "SAI Fantasy Art"
                     ]
                 },
                 "performance_selection": {
@@ -1251,12 +1225,9 @@
                     "default": "Speed"
                 },
                 "aspect_ratios_selection": {
-                    "allOf": [
-                        {
-                            "$ref": "#/components/schemas/AspectRatio"
-                        }
-                    ],
-                    "default": "1152×896"
+                    "type": "string",
+                    "title": "Aspect Ratios Selection",
+                    "default": "896×1152"
                 },
                 "image_number": {
                     "type": "integer",
@@ -1289,12 +1260,12 @@
                 "base_model_name": {
                     "type": "string",
                     "title": "Base Model Name",
-                    "default": "sd_xl_base_1.0_0.9vae.safetensors"
+                    "default": "bluePencilXL_v050.safetensors"
                 },
                 "refiner_model_name": {
                     "type": "string",
                     "title": "Refiner Model Name",
-                    "default": "sd_xl_refiner_1.0_0.9vae.safetensors"
+                    "default": "DreamShaper_8_pruned.safetensors"
                 },
                 "l1": {
                     "anyOf": [
@@ -1557,7 +1528,7 @@
                 "negative_prompt": {
                     "type": "string",
                     "title": "Negative Prompt",
-                    "default": ""
+                    "default": "(embedding:unaestheticXLv31:0.8), low quality, watermark"
                 },
                 "style_selections": {
                     "items": {
@@ -1567,7 +1538,11 @@
                     "title": "Style Selections",
                     "default": [
                         "Fooocus V2",
-                        "Default (Slightly Cinematic)"
+                        "Fooocus Masterpiece",
+                        "SAI Anime",
+                        "SAI Digital Art",
+                        "SAI Enhance",
+                        "SAI Fantasy Art"
                     ]
                 },
                 "performance_selection": {
@@ -1579,12 +1554,9 @@
                     "default": "Speed"
                 },
                 "aspect_ratios_selection": {
-                    "allOf": [
-                        {
-                            "$ref": "#/components/schemas/AspectRatio"
-                        }
-                    ],
-                    "default": "1152×896"
+                    "type": "string",
+                    "title": "Aspect Ratios Selection",
+                    "default": "896×1152"
                 },
                 "image_number": {
                     "type": "integer",
@@ -1617,12 +1589,12 @@
                 "base_model_name": {
                     "type": "string",
                     "title": "Base Model Name",
-                    "default": "sd_xl_base_1.0_0.9vae.safetensors"
+                    "default": "bluePencilXL_v050.safetensors"
                 },
                 "refiner_model_name": {
                     "type": "string",
                     "title": "Refiner Model Name",
-                    "default": "sd_xl_refiner_1.0_0.9vae.safetensors"
+                    "default": "DreamShaper_8_pruned.safetensors"
                 },
                 "loras": {
                     "items": {
diff --git a/fooocus_api_version.py b/fooocus_api_version.py
index 41522f1..ffef7d2 100644
--- a/fooocus_api_version.py
+++ b/fooocus_api_version.py
@@ -1 +1 @@
-version = '0.3.6'
\ No newline at end of file
+version = '0.3.7'
\ No newline at end of file
diff --git a/fooocusapi/api_utils.py b/fooocusapi/api_utils.py
index 6343e11..cc21d2c 100644
--- a/fooocusapi/api_utils.py
+++ b/fooocusapi/api_utils.py
@@ -8,8 +8,8 @@
 from PIL import Image
 from fooocusapi.file_utils import get_file_serve_url, output_file_to_base64img, output_file_to_bytesimg
 from fooocusapi.models import AsyncJobResponse, AsyncJobStage, GeneratedImageResult, GenerationFinishReason, ImgInpaintOrOutpaintRequest, ImgPromptRequest, ImgUpscaleOrVaryRequest, Text2ImgRequest
-from fooocusapi.parameters import ImageGenerationParams, ImageGenerationResult
-from fooocusapi.task_queue import QueueTask, TaskType
+from fooocusapi.parameters import ImageGenerationParams, ImageGenerationResult, available_aspect_ratios, default_aspect_ratio
+from fooocusapi.task_queue import QueueTask
 import modules.flags as flags
 from modules.sdxl_styles import legal_style_names
 
@@ -50,7 +50,7 @@ def req_to_params(req: Text2ImgRequest) -> ImageGenerationParams:
     style_selections = [
         s for s in req.style_selections if s in legal_style_names]
     performance_selection = req.performance_selection.value
-    aspect_ratios_selection = req.aspect_ratios_selection.value
+    aspect_ratios_selection = req.aspect_ratios_selection
     image_number = req.image_number
     image_seed = None if req.image_seed == -1 else req.image_seed
     sharpness = req.sharpness
@@ -64,6 +64,13 @@ def req_to_params(req: Text2ImgRequest) -> ImageGenerationParams:
         req, ImgUpscaleOrVaryRequest) else req.uov_method.value
     outpaint_selections = [] if not isinstance(req, ImgInpaintOrOutpaintRequest) else [
         s.value for s in req.outpaint_selections]
+
+    if aspect_ratios_selection not in available_aspect_ratios:
+        print(f"Invalid aspect ratios selection, using default: {default_aspect_ratio}")
+        aspect_ratios_selection = default_aspect_ratio
+
+    if refiner_model_name == '':
+        refiner_model_name = 'None'
 
     inpaint_input_image = None
     if isinstance(req, ImgInpaintOrOutpaintRequest):
diff --git a/fooocusapi/models.py b/fooocusapi/models.py
index 5f10a38..3cd2ce2 100644
--- a/fooocusapi/models.py
+++ b/fooocusapi/models.py
@@ -6,7 +6,7 @@
 from enum import Enum
 from pydantic_core import InitErrorDetails
 
-from fooocusapi.parameters import GenerationFinishReason, defualt_styles, default_base_model_name, default_refiner_model_name, default_lora, default_lora_weight, default_cfg_scale, default_prompt_negative
+from fooocusapi.parameters import GenerationFinishReason, defualt_styles, default_base_model_name, default_refiner_model_name, default_lora_name, default_lora_weight, default_cfg_scale, default_prompt_negative, default_aspect_ratio
 from fooocusapi.task_queue import TaskType
 
 import modules.flags as flags
@@ -24,36 +24,6 @@ class PerfomanceSelection(str, Enum):
     speed = 'Speed'
     quality = 'Quality'
 
-
-class AspectRatio(str, Enum):
-    a_0_5 = '704×1408'
-    a_0_52 = '704×1344'
-    a_0_57 = '768×1344'
-    a_0_6 = '768×1280'
-    a_0_68 = '832×1216'
-    a_0_72 = '832×1152'
-    a_0_78 = '896×1152'
-    a_0_82 = '896×1088'
-    a_0_88 = '960×1088'
-    a_0_94 = '960×1024'
-    a_1_0 = '1024×1024'
-    a_1_07 = '1024×960'
-    a_1_13 = '1088×960'
-    a_1_21 = '1088×896'
-    a_1_29 = '1152×896'
-    a_1_38 = '1152×832'
-    a_1_46 = '1216×832'
-    a_1_67 = '1280×768'
-    a_1_75 = '1344×768'
-    a_1_91 = '1344×704'
-    a_2_0 = '1408×704'
-    a_2_09 = '1472×704'
-    a_2_4 = '1536×640'
-    a_2_5 = '1600×640'
-    a_2_89 = '1664×576'
-    a_3_0 = '1728×576'
-
-
 class UpscaleOrVaryMethod(str, Enum):
     subtle_variation = 'Vary (Subtle)'
     strong_variation = 'Vary (Strong)'
@@ -88,7 +58,7 @@ class Text2ImgRequest(BaseModel):
     negative_prompt: str = default_prompt_negative
     style_selections: List[str] = defualt_styles
     performance_selection: PerfomanceSelection = PerfomanceSelection.speed
-    aspect_ratios_selection: AspectRatio = AspectRatio.a_1_29
+    aspect_ratios_selection: str = default_aspect_ratio
     image_number: int = Field(
         default=1, description="Image number", min=1, max=32)
     image_seed: int = Field(default=-1, description="Seed to generate image, -1 for random")
@@ -97,7 +67,7 @@ class Text2ImgRequest(BaseModel):
     base_model_name: str = default_base_model_name
     refiner_model_name: str = default_refiner_model_name
     loras: List[Lora] = Field(default=[
-        Lora(model_name=default_lora, weight=default_lora_weight)])
+        Lora(model_name=default_lora_name, weight=default_lora_weight)])
     require_base64: bool = Field(default=False, description="Return base64 data of generated image")
     async_process: bool = Field(default=False, description="Set to true will run async and return job info for retrieve generataion result later")
 
@@ -114,8 +84,7 @@ def as_form(cls, input_image: UploadFile = Form(description="Init image for upsa
                 style_selections: List[str] = Form(defualt_styles, description="Fooocus style selections, seperated by comma"),
                 performance_selection: PerfomanceSelection = Form(
                     PerfomanceSelection.speed),
-                aspect_ratios_selection: AspectRatio = Form(
-                    AspectRatio.a_1_29),
+                aspect_ratios_selection: str = Form(default_aspect_ratio),
                 image_number: int = Form(
                     default=1, description="Image number", ge=1, le=32),
                 image_seed: int = Form(default=-1, description="Seed to generate image, -1 for random"),
@@ -123,7 +92,7 @@ def as_form(cls, input_image: UploadFile = Form(description="Init image for upsa
                 guidance_scale: float = Form(default=default_cfg_scale, ge=1.0, le=30.0),
                 base_model_name: str = Form(default_base_model_name),
                 refiner_model_name: str = Form(default_refiner_model_name),
-                l1: str | None = Form(default_lora),
+                l1: str | None = Form(default_lora_name),
                 w1: float = Form(default=default_lora_weight, ge=-2, le=2),
                 l2: str | None = Form(None),
                 w2: float = Form(default=default_lora_weight, ge=-2, le=2),
@@ -173,8 +142,7 @@ def as_form(cls, input_image: UploadFile = Form(description="Init image for inpa
                 style_selections: List[str] = Form(defualt_styles, description="Fooocus style selections, seperated by comma"),
                 performance_selection: PerfomanceSelection = Form(
                     PerfomanceSelection.speed),
-                aspect_ratios_selection: AspectRatio = Form(
-                    AspectRatio.a_1_29),
+                aspect_ratios_selection: str = Form(default_aspect_ratio),
                 image_number: int = Form(
                     default=1, description="Image number", ge=1, le=32),
                 image_seed: int = Form(default=-1, description="Seed to generate image, -1 for random"),
@@ -182,7 +150,7 @@ def as_form(cls, input_image: UploadFile = Form(description="Init image for inpa
                 guidance_scale: float = Form(default=default_cfg_scale, ge=1.0, le=30.0),
                 base_model_name: str = Form(default_base_model_name),
                 refiner_model_name: str = Form(default_refiner_model_name),
-                l1: str | None = Form(default_lora),
+                l1: str | None = Form(default_lora_name),
                 w1: float = Form(default=default_lora_weight, ge=-2, le=2),
                 l2: str | None = Form(None),
                 w2: float = Form(default=default_lora_weight, ge=-2, le=2),
@@ -272,8 +240,7 @@ def as_form(cls, cn_img1: UploadFile = Form(File(None), description="Input image
                 style_selections: List[str] = Form(defualt_styles, description="Fooocus style selections, seperated by comma"),
                 performance_selection: PerfomanceSelection = Form(
                     PerfomanceSelection.speed),
-                aspect_ratios_selection: AspectRatio = Form(
-                    AspectRatio.a_1_29),
+                aspect_ratios_selection: str = Form(default_aspect_ratio),
                 image_number: int = Form(
                     default=1, description="Image number", ge=1, le=32),
                 image_seed: int = Form(default=-1, description="Seed to generate image, -1 for random"),
@@ -281,7 +248,7 @@ def as_form(cls, cn_img1: UploadFile = Form(File(None), description="Input image
                 guidance_scale: float = Form(default=default_cfg_scale, ge=1.0, le=30.0),
                 base_model_name: str = Form(default_base_model_name),
                 refiner_model_name: str = Form(default_refiner_model_name),
-                l1: str | None = Form(default_lora),
+                l1: str | None = Form(default_lora_name),
                 w1: float = Form(default=default_lora_weight, ge=-2, le=2),
                 l2: str | None = Form(None),
                 w2: float = Form(default=default_lora_weight, ge=-2, le=2),
@@ -368,4 +335,8 @@ class JobQueueInfo(BaseModel):
 
 class AllModelNamesResponse(BaseModel):
     model_filenames: List[str]
-    lora_filenames: List[str]
\ No newline at end of file
+    lora_filenames: List[str]
+
+    model_config = ConfigDict(
+        protected_namespaces=('protect_me_', 'also_protect_')
+    )
\ No newline at end of file
diff --git a/fooocusapi/parameters.py b/fooocusapi/parameters.py
index 580f1e9..a86def7 100644
--- a/fooocusapi/parameters.py
+++ b/fooocusapi/parameters.py
@@ -11,13 +11,14 @@
 defualt_styles = ['Fooocus V2', 'Fooocus Enhance', 'Fooocus Sharp']
 default_base_model_name = 'sd_xl_base_1.0_0.9vae.safetensors'
 default_refiner_model_name = 'sd_xl_refiner_1.0_0.9vae.safetensors'
-default_lora = 'sd_xl_offset_example-lora_1.0.safetensors'
+default_lora_name = 'sd_xl_offset_example-lora_1.0.safetensors'
 default_lora_weight = 0.5
 default_cfg_scale = 7.0
 default_prompt_negative = ''
+default_aspect_ratio = '1152×896'
 
 
-aspect_ratios = [
+available_aspect_ratios = [
     '704×1408',
     '704×1344',
     '768×1344',
diff --git a/fooocusapi/repositories_versions.py b/fooocusapi/repositories_versions.py
index 38742c5..0af2e41 100644
--- a/fooocusapi/repositories_versions.py
+++ b/fooocusapi/repositories_versions.py
@@ -1,5 +1,5 @@
 import os
 
-fooocus_version = '2.1.741'
+fooocus_version = '2.1.755'
 fooocus_commit_hash = os.environ.get(
-    'FOOOCUS_COMMIT_HASH', "01b1e98d378e7ceed08171c0397b2e5d89ea0047")
+    'FOOOCUS_COMMIT_HASH', "759bfadefacb1d7827843c35cf22f5996508bb2e")
diff --git a/fooocusapi/worker.py b/fooocusapi/worker.py
index c9232e8..ea03d5c 100644
--- a/fooocusapi/worker.py
+++ b/fooocusapi/worker.py
@@ -34,7 +34,7 @@ def process_generate(queue_task: QueueTask, params: ImageGenerationParams) -> Li
     import modules.constants as constants
     import fooocus_extras.preprocessors as preprocessors
     import fooocus_extras.ip_adapter as ip_adapter
-    from modules.util import join_prompts, remove_empty_str, resize_image, HWC3, set_image_shape_ceil, get_image_shape_ceil, get_shape_ceil
+    from modules.util import join_prompts, remove_empty_str, resize_image, HWC3, set_image_shape_ceil, get_image_shape_ceil, get_shape_ceil, resample_image
     from modules.private_logger import log
     from modules.upscaler import perform_upscale
     from modules.expansion import safe_str
@@ -106,6 +106,7 @@ def make_results_from_outputs():
     guidance_scale = params.guidance_scale
     base_model_name = params.base_model_name
     refiner_model_name = params.refiner_model_name
+    refiner_switch = path.default_refiner_switch
     loras = params.loras
     input_image_checkbox = params.uov_input_image is not None or params.inpaint_input_image is not None or len(params.image_prompts) > 0
     current_tab = 'uov' if params.uov_method != flags.disabled else 'inpaint' if params.inpaint_input_image is not None else 'ip' if len(params.image_prompts) > 0 else None
@@ -205,10 +206,8 @@ def build_advanced_parameters():
 
     if performance_selection == 'Speed':
         steps = 30
-        switch = 20
     else:
         steps = 60
-        switch = 40
 
     sampler_name = advanced_parameters.sampler_name
     scheduler_name = advanced_parameters.scheduler_name
@@ -229,10 +228,8 @@ def build_advanced_parameters():
         else:
             if performance_selection == 'Speed':
                 steps = 18
-                switch = 12
             else:
                 steps = 36
-                switch = 24
             progressbar(1, 'Downloading upscale models ...')
             path.downloading_upscale_model()
     if (current_tab == 'inpaint' or (current_tab == 'ip' and advanced_parameters.mixing_image_prompt_and_inpaint))\
@@ -264,6 +261,8 @@ def build_advanced_parameters():
         pipeline.refresh_controlnets([controlnet_canny_path, controlnet_cpds_path])
         ip_adapter.load_ip_adapter(clip_vision_path, ip_negative_path, ip_adapter_path)
 
+    switch = int(round(steps * refiner_switch))
+
     if advanced_parameters.overwrite_step > 0:
         steps = advanced_parameters.overwrite_step
 
@@ -283,12 +282,16 @@ def build_advanced_parameters():
 
 
     if not skip_prompt_processing:
-        prompts = remove_empty_str([safe_str(p) for p in prompt.split('\n')], default='')
-        negative_prompts = remove_empty_str([safe_str(p) for p in negative_prompt.split('\n')], default='')
+        prompts = remove_empty_str([safe_str(p) for p in prompt.splitlines()], default='')
+        negative_prompts = remove_empty_str([safe_str(p) for p in negative_prompt.splitlines()], default='')
 
         prompt = prompts[0]
         negative_prompt = negative_prompts[0]
 
+        if prompt == '':
+            # disable expansion when empty since it is not meaningful and influences image prompt
+            use_expansion = False
+
         extra_positive_prompts = prompts[1:] if len(prompts) > 1 else []
         extra_negative_prompts = negative_prompts[1:] if len(negative_prompts) > 1 else []
 
@@ -312,8 +315,8 @@ def build_advanced_parameters():
             if use_style:
                 for s in style_selections:
                     p, n = apply_style(s, positive=task_prompt)
-                    positive_basic_workloads.append(p)
-                    negative_basic_workloads.append(n)
+                    positive_basic_workloads = positive_basic_workloads + p
+                    negative_basic_workloads = negative_basic_workloads + n
             else:
                 positive_basic_workloads.append(task_prompt)
 
@@ -402,10 +405,14 @@ def build_advanced_parameters():
 
             f = 1.0
         shape_ceil = get_shape_ceil(H * f, W * f)
+
        if shape_ceil < 1024:
             print(f'[Upscale] Image is resized because it is too small.')
+            uov_input_image = set_image_shape_ceil(uov_input_image, 1024)
             shape_ceil = 1024
-        uov_input_image = set_image_shape_ceil(uov_input_image, shape_ceil)
+        else:
+            uov_input_image = resample_image(uov_input_image, width=W * f, height=H * f)
+
         image_is_super_large = shape_ceil > 2800
 
         if 'fast' in uov_method:
diff --git a/main.py b/main.py
index e05925e..f131282 100644
--- a/main.py
+++ b/main.py
@@ -1,5 +1,4 @@
 import argparse
-import json
 import os
 import re
 import shutil
@@ -248,32 +247,6 @@ def prepare_environments(args) -> bool:
 
     if not skip_sync_repo:
         download_repositories()
-    preset_json = None
-    if args.preset is not None:
-        # Remove and copy preset folder
-        origin_preset_folder = os.path.abspath(os.path.join(script_path, dir_repos, fooocus_name, 'presets'))
-        preset_folder = os.path.abspath(os.path.join(script_path, 'presets'))
-        if os.path.exists(preset_folder):
-            shutil.rmtree(preset_folder)
-        shutil.copytree(origin_preset_folder, preset_folder)
-
-        preset_config = os.path.join(preset_folder, f"{args.preset}.json")
-        if os.path.exists(preset_config) and os.path.isfile(preset_config):
-            with open(preset_config, "r", encoding="utf-8") as json_file:
-                preset_json = json.load(json_file)
-            print(f"Using preset: {args.preset}")
-
-            import fooocusapi.parameters as parameters
-            parameters.defualt_styles = preset_json['default_styles']
-            parameters.default_base_model_name = preset_json['default_model']
-            parameters.default_refiner_model_name = preset_json['default_refiner']
-            parameters.default_lora = preset_json['default_lora']
-            parameters.default_lora_weight = preset_json['default_lora_weight']
-            parameters.default_cfg_scale = preset_json['default_cfg_scale']
-            parameters.default_prompt_negative = preset_json['default_prompt_negative']
-            if parameters.default_refiner_model_name == '':
-                parameters.default_refiner_model_name = 'None'
-
     import fooocusapi.worker as worker
     worker.task_queue.queue_size = args.queue_size
     worker.task_queue.history_size = args.queue_history
@@ -294,13 +267,32 @@ def prepare_environments(args) -> bool:
     backend_path = os.path.join(fooocus_path, 'backend', 'headless')
     if backend_path not in sys.path:
         sys.path.append(backend_path)
-        os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
+    os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
 
     sys.argv = [sys.argv[0]]
 
-    if preset_json is not None:
+    if args.preset is not None:
+        # Remove and copy preset folder
+        origin_preset_folder = os.path.abspath(os.path.join(script_path, dir_repos, fooocus_name, 'presets'))
+        preset_folder = os.path.abspath(os.path.join(script_path, 'presets'))
+        if os.path.exists(preset_folder):
+            shutil.rmtree(preset_folder)
+        shutil.copytree(origin_preset_folder, preset_folder)
+
         sys.argv.append('--preset')
         sys.argv.append(args.preset)
+    import modules.path as path
+    import fooocusapi.parameters as parameters
+    parameters.defualt_styles = path.default_styles
+    parameters.default_base_model_name = path.default_base_model_name
+    parameters.default_refiner_model_name = path.default_refiner_model_name
+    parameters.default_lora_name = path.default_lora_name
+    parameters.default_lora_weight = path.default_lora_weight
+    parameters.default_cfg_scale = path.default_cfg_scale
+    parameters.default_prompt_negative = path.default_prompt_negative
+    parameters.default_aspect_ratio = path.default_aspect_ratio.replace('*', '×')
+    parameters.available_aspect_ratios = [a.replace('*', '×') for a in path.available_aspect_ratios]
+
     ini_cbh_args()
 
     download_models()
diff --git a/predict.py b/predict.py
index 17dadf0..5a2e4fc 100644
--- a/predict.py
+++ b/predict.py
@@ -6,7 +6,7 @@
 from typing import List
 
 from cog import BasePredictor, Input, Path
-from fooocusapi.parameters import GenerationFinishReason, ImageGenerationParams, aspect_ratios, uov_methods, outpaint_expansions, defualt_styles, default_base_model_name, default_refiner_model_name, default_lora, default_lora_weight, default_cfg_scale, default_prompt_negative
+from fooocusapi.parameters import GenerationFinishReason, ImageGenerationParams, available_aspect_ratios, uov_methods, outpaint_expansions, defualt_styles, default_base_model_name, default_refiner_model_name, default_lora_name, default_lora_weight, default_cfg_scale, default_prompt_negative
 from fooocusapi.task_queue import TaskType
 from fooocusapi.worker import process_generate, task_queue
 from fooocusapi.file_utils import output_dir
@@ -31,7 +31,7 @@ def predict(
         performance_selection: str = Input(
             default='Speed', description="Performance selection", choices=['Speed', 'Quality']),
         aspect_ratios_selection: str = Input(
-            default='1152×896', description="The generated image's size", choices=aspect_ratios),
+            default='1152×896', description="The generated image's size", choices=available_aspect_ratios),
         image_number: int = Input(
             default=1, description="How many image to generate", ge=1, le=8),
         image_seed: int = Input(
@@ -86,7 +86,7 @@ def predict(
 
         base_model_name = default_base_model_name
         refiner_model_name = default_refiner_model_name
-        loras = [(default_lora, default_lora_weight)]
+        loras = [(default_lora_name, default_lora_weight)]
 
         style_selections_arr = []
        for s in style_selections.strip().split(','):
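
Reviewer note: the two behavioural changes in this patch can be summarised with a minimal sketch (illustrative only, not part of the patch; the helper function names below are hypothetical, while the imported names come from fooocusapi/parameters.py as modified above). An aspect-ratio string outside the known list now falls back to the default instead of failing enum validation, and the refiner switch step is derived from the step count rather than hard-coded.

    # Sketch only: mirrors the check added in fooocusapi/api_utils.py and the
    # switch calculation added in fooocusapi/worker.py.
    from fooocusapi.parameters import available_aspect_ratios, default_aspect_ratio


    def resolve_aspect_ratio(selection: str) -> str:
        # Unknown selections fall back to the default instead of raising a validation error.
        if selection not in available_aspect_ratios:
            print(f"Invalid aspect ratios selection, using default: {default_aspect_ratio}")
            return default_aspect_ratio
        return selection


    def refiner_switch_step(steps: int, refiner_switch: float) -> int:
        # refiner_switch is read from modules.path.default_refiner_switch in the worker;
        # this replaces the previously hard-coded 20/40 (and 12/24 for upscale) switch steps.
        return int(round(steps * refiner_switch))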