From c7a10b3cb9c6869a12e75cbadac7900f270c3be7 Mon Sep 17 00:00:00 2001
From: Konie
Date: Wed, 25 Oct 2023 16:50:42 +0800
Subject: [PATCH] Add url field for generation apis

---
 README.md                |  6 ++++-
 fooocus_api_version.py   |  2 +-
 fooocusapi/api.py        | 39 ++++++++++++++++++-----------
 fooocusapi/api_utils.py  | 21 +++++++++++-----
 fooocusapi/file_utils.py | 54 ++++++++++++++++++++++++++++++++++++++++
 fooocusapi/models.py     | 17 ++++++++-----
 fooocusapi/parameters.py |  2 +-
 fooocusapi/worker.py     |  9 +++++--
 main.py                  | 12 +++++++--
 9 files changed, 129 insertions(+), 33 deletions(-)
 create mode 100644 fooocusapi/file_utils.py

diff --git a/README.md b/README.md
index 77e6342..2997132 100644
--- a/README.md
+++ b/README.md
@@ -62,7 +62,11 @@ You can import it in [Swagger-UI](https://swagger.io/tools/swagger-ui/) editor.
 
 All the generation api support for response in PNG bytes directly when request's 'Accept' header is 'image/png'.
 
-All the generation api support async process by pass parameter `async_process`` to true. And then use query job api to retrieve progress and generation results.
+All the generation APIs support async processing: pass the parameter `async_process` as true, then use the query job API to retrieve progress and generation results.
+
+Breaking changes in v0.3.0:
+* The generation APIs no longer return the `base64` field unless the request sets `require_base64` to true.
+* The generation APIs return a `url` field from which the generated image can be requested as a static file.
 
 #### Text to Image
 > POST /v1/generation/text-to-image

diff --git a/fooocus_api_version.py b/fooocus_api_version.py
index 7acb9cd..d5b63fd 100644
--- a/fooocus_api_version.py
+++ b/fooocus_api_version.py
@@ -1 +1 @@
-version = '0.2.5'
\ No newline at end of file
+version = '0.3.0'
\ No newline at end of file
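The snippet below illustrates the new response contract described in the README change. It is a client-side sketch, not part of the patch: it assumes a server running at the default http://127.0.0.1:8888, the third-party `requests` package, and a `prompt` field from the existing request model.

```python
import requests

# Hedged client sketch: `prompt` is assumed from the existing request model,
# `require_base64` is the new field introduced by this patch.
resp = requests.post(
    "http://127.0.0.1:8888/v1/generation/text-to-image",
    json={"prompt": "a lighthouse at dusk", "require_base64": False},
)
for item in resp.json():
    # From v0.3.0 each result carries a static-file `url`;
    # `base64` is null unless require_base64 was set to true.
    print(item["seed"], item["finish_reason"], item["url"])
```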
diff --git a/fooocusapi/api.py b/fooocusapi/api.py
index d5673d6..fe30d87 100644
--- a/fooocusapi/api.py
+++ b/fooocusapi/api.py
@@ -1,9 +1,11 @@
 from typing import List, Optional
 from fastapi import Depends, FastAPI, Header, Query, Response, UploadFile
 from fastapi.params import File
+from fastapi.staticfiles import StaticFiles
 import uvicorn
 from fooocusapi.api_utils import generation_output, req_to_params
-from fooocusapi.models import AllModelNamesResponse, AsyncJobResponse, GeneratedImageBase64, ImgInpaintOrOutpaintRequest, ImgPromptRequest, ImgUpscaleOrVaryRequest, JobQueueInfo, Text2ImgRequest
+import fooocusapi.file_utils as file_utils
+from fooocusapi.models import AllModelNamesResponse, AsyncJobResponse, GeneratedImageResult, ImgInpaintOrOutpaintRequest, ImgPromptRequest, ImgUpscaleOrVaryRequest, JobQueueInfo, Text2ImgRequest
 from fooocusapi.parameters import GenerationFinishReason, ImageGenerationResult
 from fooocusapi.task_queue import TaskType
 from fooocusapi.worker import process_generate, task_queue
@@ -11,7 +13,8 @@
 
 app = FastAPI()
 
-work_executor = ThreadPoolExecutor(max_workers=task_queue.queue_size*2, thread_name_prefix="worker_")
+work_executor = ThreadPoolExecutor(
+    max_workers=task_queue.queue_size*2, thread_name_prefix="worker_")
 
 img_generate_responses = {
     "200": {
@@ -37,6 +40,7 @@
     }
 }
 
+
 def call_worker(req: Text2ImgRequest, accept: str):
     task_type = TaskType.text_2_img
     if isinstance(req, ImgUpscaleOrVaryRequest):
@@ -47,12 +51,13 @@ def call_worker(req: Text2ImgRequest, accept: str):
         task_type = TaskType.img_prompt
 
     params = req_to_params(req)
-    queue_task = task_queue.add_task(task_type, {'params': params.__dict__, 'accept': accept})
+    queue_task = task_queue.add_task(
+        task_type, {'params': params.__dict__, 'accept': accept, 'require_base64': req.require_base64})
 
     if queue_task is None:
         print("[Task Queue] The task queue has reached limit")
         results = [ImageGenerationResult(im=None, seed=0,
-                   finish_reason=GenerationFinishReason.queue_is_full)]
+                                         finish_reason=GenerationFinishReason.queue_is_full)]
     elif req.async_process:
         work_executor.submit(process_generate, queue_task, params)
         results = queue_task
@@ -61,12 +66,13 @@ def call_worker(req: Text2ImgRequest, accept: str):
     return results
 
+
 @app.get("/")
 def home():
     return Response(content='Swagger-UI to: /docs', media_type="text/html")
 
 
-@app.post("/v1/generation/text-to-image", response_model=List[GeneratedImageBase64] | AsyncJobResponse, responses=img_generate_responses)
+@app.post("/v1/generation/text-to-image", response_model=List[GeneratedImageResult] | AsyncJobResponse, responses=img_generate_responses)
 def text2img_generation(req: Text2ImgRequest, accept: str = Header(None),
                         accept_query: str | None = Query(None, alias='accept', description="Parameter to overvide 'Accept' header, 'image/png' for output bytes")):
     if accept_query is not None and len(accept_query) > 0:
@@ -80,10 +86,10 @@ def text2img_generation(req: Text2ImgRequest, accept: str = Header(None),
         streaming_output = False
 
     results = call_worker(req, accept)
-    return generation_output(results, streaming_output)
+    return generation_output(results, streaming_output, req.require_base64)
 
 
-@app.post("/v1/generation/image-upscale-vary", response_model=List[GeneratedImageBase64] | AsyncJobResponse, responses=img_generate_responses)
+@app.post("/v1/generation/image-upscale-vary", response_model=List[GeneratedImageResult] | AsyncJobResponse, responses=img_generate_responses)
 def img_upscale_or_vary(input_image: UploadFile, req: ImgUpscaleOrVaryRequest = Depends(ImgUpscaleOrVaryRequest.as_form),
                         accept: str = Header(None),
                         accept_query: str | None = Query(None, alias='accept', description="Parameter to overvide 'Accept' header, 'image/png' for output bytes")):
@@ -98,10 +104,10 @@ def img_upscale_or_vary(input_image: UploadFile, req: ImgUpscaleOrVaryRequest =
         streaming_output = False
 
     results = call_worker(req, accept)
-    return generation_output(results, streaming_output)
+    return generation_output(results, streaming_output, req.require_base64)
 
 
-@app.post("/v1/generation/image-inpait-outpaint", response_model=List[GeneratedImageBase64] | AsyncJobResponse, responses=img_generate_responses)
+@app.post("/v1/generation/image-inpait-outpaint", response_model=List[GeneratedImageResult] | AsyncJobResponse, responses=img_generate_responses)
 def img_inpaint_or_outpaint(input_image: UploadFile, req: ImgInpaintOrOutpaintRequest = Depends(ImgInpaintOrOutpaintRequest.as_form),
                             accept: str = Header(None),
                             accept_query: str | None = Query(None, alias='accept', description="Parameter to overvide 'Accept' header, 'image/png' for output bytes")):
@@ -116,10 +122,10 @@ def img_inpaint_or_outpaint(input_image: UploadFile, req: ImgInpaintOrOutpaintRe
         streaming_output = False
 
     results = call_worker(req, accept)
-    return generation_output(results, streaming_output)
+    return generation_output(results, streaming_output, req.require_base64)
 
 
-@app.post("/v1/generation/image-prompt", response_model=List[GeneratedImageBase64] | AsyncJobResponse, responses=img_generate_responses)
+@app.post("/v1/generation/image-prompt", response_model=List[GeneratedImageResult] | AsyncJobResponse, responses=img_generate_responses)
 def img_prompt(cn_img1: Optional[UploadFile] = File(None),
                req: ImgPromptRequest = Depends(ImgPromptRequest.as_form),
               accept: str = Header(None),
@@ -135,7 +141,7 @@ def img_prompt(cn_img1: Optional[UploadFile] = File(None),
         streaming_output = False
 
     results = call_worker(req, accept)
-    return generation_output(results, streaming_output)
+    return generation_output(results, streaming_output, req.require_base64)
 
 
 @app.get("/v1/generation/query-job", response_model=AsyncJobResponse, description="Query async generation job")
 def query_job(job_id: int):
     queue_task = task_queue.get_task(job_id, True)
     if queue_task is None:
         return Response(content="Job not found", status_code=404)
@@ -143,8 +149,8 @@ def query_job(job_id: int):
-
-    return generation_output(queue_task, False)
+
+    return generation_output(queue_task, False, False)
 
 
 @app.get("/v1/generation/job-queue", response_model=JobQueueInfo, description="Query job queue info")
@@ -170,6 +176,11 @@ def all_styles():
     from modules.sdxl_styles import legal_style_names
     return legal_style_names
 
+
+app.mount("/files", StaticFiles(directory=file_utils.output_dir), name="files")
+
+
 def start_app(args):
+    file_utils.static_serve_base_url = args.base_url + "/files/"
     uvicorn.run("fooocusapi.api:app", host=args.host, port=args.port,
                 log_level=args.log_level)
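Since every endpoint above now forwards `require_base64` and can run asynchronously, a typical async round trip looks roughly like the sketch below. It is illustrative only: it assumes `requests` and a local server, the JSON field names follow the AsyncJobResponse and GeneratedImageResult models from this patch, and the loop assumes the job succeeds (a production client should also inspect `job_stage`).

```python
import time
import requests

BASE = "http://127.0.0.1:8888"

# Submit an async job (the prompt field is assumed from the existing request model).
job = requests.post(
    f"{BASE}/v1/generation/text-to-image",
    json={"prompt": "a watercolor fox", "async_process": True},
).json()

# Poll query-job until a result is attached to the job.
while True:
    state = requests.get(f"{BASE}/v1/generation/query-job",
                         params={"job_id": job["job_id"]}).json()
    if state["job_result"]:   # populated once the job has finished
        break
    time.sleep(1)

# query-job always passes require_base64=False, so fetch images via their url.
for item in state["job_result"]:
    print(item["url"])
```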
diff --git a/fooocusapi/api_utils.py b/fooocusapi/api_utils.py
index 969aa33..09baeeb 100644
--- a/fooocusapi/api_utils.py
+++ b/fooocusapi/api_utils.py
@@ -6,7 +6,8 @@
 import numpy as np
 from fastapi import Response, UploadFile
 from PIL import Image
-from fooocusapi.models import AsyncJobResponse, AsyncJobStage, GeneratedImageBase64, GenerationFinishReason, ImgInpaintOrOutpaintRequest, ImgPromptRequest, ImgUpscaleOrVaryRequest, Text2ImgRequest
+from fooocusapi.file_utils import get_file_serve_url, output_file_to_base64img, output_file_to_bytesimg
+from fooocusapi.models import AsyncJobResponse, AsyncJobStage, GeneratedImageResult, GenerationFinishReason, ImgInpaintOrOutpaintRequest, ImgPromptRequest, ImgUpscaleOrVaryRequest, Text2ImgRequest
 from fooocusapi.parameters import ImageGenerationParams, ImageGenerationResult
 from fooocusapi.task_queue import QueueTask, TaskType
 import modules.flags as flags
@@ -104,7 +105,7 @@ def req_to_params(req: Text2ImgRequest) -> ImageGenerationParams:
     )
 
 
-def generation_output(results: QueueTask | List[ImageGenerationResult], streaming_output: bool) -> Response | List[GeneratedImageBase64] | AsyncJobResponse:
+def generation_output(results: QueueTask | List[ImageGenerationResult], streaming_output: bool, require_base64: bool) -> Response | List[GeneratedImageResult] | AsyncJobResponse:
     if isinstance(results, QueueTask):
         task = results
         job_stage = AsyncJobStage.running
@@ -117,7 +118,11 @@ def generation_output(results: QueueTask | List[ImageGenerationResult], streamin
         else:
             if task.task_result != None:
                 job_stage = AsyncJobStage.success
-                job_result = generation_output(task.task_result, False)
+                task_result_require_base64 = False
+                if 'require_base64' in task.req_param and task.req_param['require_base64']:
+                    task_result_require_base64 = True
+
+                job_result = generation_output(task.task_result, False, task_result_require_base64)
         return AsyncJobResponse(job_id=task.seq,
                                 job_type=task.type,
                                 job_stage=job_stage,
@@ -128,11 +133,15 @@ def generation_output(results: QueueTask | List[ImageGenerationResult], streamin
     if streaming_output:
         if len(results) == 0 or results[0].finish_reason != GenerationFinishReason.success:
             return Response(status_code=500)
-        bytes = narray_to_bytesimg(results[0].im)
+        bytes = output_file_to_bytesimg(results[0].im)
         return Response(bytes, media_type='image/png')
     else:
-        results = [GeneratedImageBase64(base64=narray_to_base64img(
-            item.im), seed=item.seed, finish_reason=item.finish_reason) for item in results]
+        results = [GeneratedImageResult(
+            base64=output_file_to_base64img(
+                item.im) if require_base64 else None,
+            url=get_file_serve_url(item.im),
+            seed=item.seed,
+            finish_reason=item.finish_reason) for item in results]
         return results
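For clients, the practical consequence of the new generation_output signature is that `base64` is optional while `url` is always populated on success. A minimal, standard-library-only sketch of handling one serialized GeneratedImageResult dict returned by the endpoints above:

```python
import base64

def save_result(item: dict, path: str = "result.png") -> None:
    # base64 is only present when the request set require_base64=true.
    if item.get("base64"):
        with open(path, "wb") as f:
            f.write(base64.b64decode(item["base64"]))
    else:
        # Otherwise the image has to be fetched from the static file url.
        print("fetch from:", item["url"])
```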
diff --git a/fooocusapi/file_utils.py b/fooocusapi/file_utils.py
new file mode 100644
index 0000000..8c127a6
--- /dev/null
+++ b/fooocusapi/file_utils.py
@@ -0,0 +1,54 @@
+import base64
+import datetime
+from io import BytesIO
+import os
+import numpy as np
+from PIL import Image
+import uuid
+
+output_dir = os.path.abspath(os.path.join(
+    os.path.dirname(__file__), '..', 'outputs', 'files'))
+os.makedirs(output_dir, exist_ok=True)
+
+static_serve_base_url = 'http://127.0.0.1:8888/files/'
+
+
+def save_output_file(img: np.ndarray) -> str:
+    current_time = datetime.datetime.now()
+    date_string = current_time.strftime("%Y-%m-%d")
+
+    filename = os.path.join(date_string, str(uuid.uuid4()) + '.png')
+    file_path = os.path.join(output_dir, filename)
+
+    os.makedirs(os.path.dirname(file_path), exist_ok=True)
+    Image.fromarray(img).save(file_path)
+    return filename
+
+
+def output_file_to_base64img(filename: str) -> str:
+    file_path = os.path.join(output_dir, filename)
+    if not os.path.exists(file_path) or not os.path.isfile(file_path):
+        return None
+
+    img = Image.open(file_path)
+    output_buffer = BytesIO()
+    img.save(output_buffer, format='PNG')
+    byte_data = output_buffer.getvalue()
+    base64_str = base64.b64encode(byte_data)
+    return base64_str
+
+
+def output_file_to_bytesimg(filename: str) -> bytes:
+    file_path = os.path.join(output_dir, filename)
+    if not os.path.exists(file_path) or not os.path.isfile(file_path):
+        return None
+
+    img = Image.open(file_path)
+    output_buffer = BytesIO()
+    img.save(output_buffer, format='PNG')
+    byte_data = output_buffer.getvalue()
+    return byte_data
+
+
+def get_file_serve_url(filename: str) -> str:
+    return static_serve_base_url + filename
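Each saved file lives under outputs/files/&lt;YYYY-MM-DD&gt;/&lt;uuid&gt;.png and is served through the /files mount added in api.py, so the `url` field can be fetched with any HTTP client. A small standard-library sketch (the url value is whatever a generation response returned):

```python
import urllib.request

def download_image(url: str, dest: str) -> None:
    # url is the `url` field from a generation response, e.g.
    # http://127.0.0.1:8888/files/<YYYY-MM-DD>/<uuid>.png
    with urllib.request.urlopen(url) as resp, open(dest, "wb") as out:
        out.write(resp.read())
```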
diff --git a/fooocusapi/models.py b/fooocusapi/models.py
index bab55a3..66340a5 100644
--- a/fooocusapi/models.py
+++ b/fooocusapi/models.py
@@ -98,6 +98,7 @@ class Text2ImgRequest(BaseModel):
     refiner_model_name: str = 'sd_xl_refiner_1.0_0.9vae.safetensors'
     loras: List[Lora] = Field(default=[
         Lora(model_name='sd_xl_offset_example-lora_1.0.safetensors', weight=0.5)])
+    require_base64: bool = Field(default=False, description="Return base64 data of generated image")
     async_process: bool = Field(default=False, description="Set to true will run async and return job info for retrieve generataion result later")
 
 
@@ -135,6 +136,7 @@ def as_form(cls, input_image: UploadFile = Form(description="Init image for upsa
                 w4: float = Form(default=0.5, ge=-2, le=2),
                 l5: str | None = Form(None),
                 w5: float = Form(default=0.5, ge=-2, le=2),
+                require_base64: bool = Form(default=False, description="Return base64 data of generated image"),
                 async_process: bool = Form(default=False, description="Set to true will run async and return job info for retrieve generataion result later"),
                 ):
         style_selection_arr: List[str] = []
@@ -155,7 +157,7 @@ def as_form(cls, input_image: UploadFile = Form(description="Init image for upsa
                    performance_selection=performance_selection, aspect_ratios_selection=aspect_ratios_selection,
                    image_number=image_number, image_seed=image_seed, sharpness=sharpness, guidance_scale=guidance_scale,
                    base_model_name=base_model_name, refiner_model_name=refiner_model_name,
-                   loras=loras, async_process=async_process)
+                   loras=loras, require_base64=require_base64, async_process=async_process)
 
 
 class ImgInpaintOrOutpaintRequest(Text2ImgRequest):
@@ -196,6 +198,7 @@ def as_form(cls, input_image: UploadFile = Form(description="Init image for inpa
                 w4: float = Form(default=0.5, ge=-2, le=2),
                 l5: str | None = Form(None),
                 w5: float = Form(default=0.5, ge=-2, le=2),
+                require_base64: bool = Form(default=False, description="Return base64 data of generated image"),
                 async_process: bool = Form(default=False, description="Set to true will run async and return job info for retrieve generataion result later"),
                 ):
 
@@ -232,7 +235,7 @@ def as_form(cls, input_image: UploadFile = Form(description="Init image for inpa
                    performance_selection=performance_selection, aspect_ratios_selection=aspect_ratios_selection,
                    image_number=image_number, image_seed=image_seed, sharpness=sharpness, guidance_scale=guidance_scale,
                    base_model_name=base_model_name, refiner_model_name=refiner_model_name,
-                   loras=loras, async_process=async_process)
+                   loras=loras, require_base64=require_base64, async_process=async_process)
 
 
 class ImgPromptRequest(Text2ImgRequest):
@@ -297,6 +300,7 @@ def as_form(cls, cn_img1: UploadFile = Form(File(None), description="Input image
                 w4: float = Form(default=0.5, ge=-2, le=2),
                 l5: str | None = Form(None),
                 w5: float = Form(default=0.5, ge=-2, le=2),
+                require_base64: bool = Form(default=False, description="Return base64 data of generated image"),
                 async_process: bool = Form(default=False, description="Set to true will run async and return job info for retrieve generataion result later"),
                 ):
         if isinstance(cn_img1, File):
@@ -338,12 +342,13 @@ def as_form(cls, cn_img1: UploadFile = Form(File(None), description="Input image
                    performance_selection=performance_selection, aspect_ratios_selection=aspect_ratios_selection,
                    image_number=image_number, image_seed=image_seed, sharpness=sharpness, guidance_scale=guidance_scale,
                    base_model_name=base_model_name, refiner_model_name=refiner_model_name,
-                   loras=loras, async_process=async_process)
+                   loras=loras, require_base64=require_base64, async_process=async_process)
 
 
-class GeneratedImageBase64(BaseModel):
+class GeneratedImageResult(BaseModel):
     base64: str | None = Field(
-        description="Image encoded in base64, or null if finishReasen is not 'SUCCESS'")
+        description="Image encoded in base64, or null if finishReason is not 'SUCCESS'; only returned when the request sets require_base64 to true")
+    url: str | None = Field(description="Static file URL of the generated image, or null if finishReason is not 'SUCCESS'")
     seed: int = Field(description="The seed associated with this image")
     finish_reason: GenerationFinishReason
 
@@ -361,7 +366,7 @@ class AsyncJobResponse(BaseModel):
     job_stage: AsyncJobStage
     job_progess: int
     job_status: str | None
-    job_result: List[GeneratedImageBase64] | None
+    job_result: List[GeneratedImageResult] | None
 
 
 class JobQueueInfo(BaseModel):

diff --git a/fooocusapi/parameters.py b/fooocusapi/parameters.py
index a083855..0401486 100644
--- a/fooocusapi/parameters.py
+++ b/fooocusapi/parameters.py
@@ -58,7 +58,7 @@ class GenerationFinishReason(str, Enum):
 
 
 class ImageGenerationResult(object):
-    def __init__(self, im: np.ndarray | None, seed: int, finish_reason: GenerationFinishReason):
+    def __init__(self, im: str | None, seed: int, finish_reason: GenerationFinishReason):
         self.im = im
         self.seed = seed
         self.finish_reason = finish_reason
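The form-based endpoints gain the same `require_base64` switch through their as_form constructors. A rough multipart sketch (assumes `requests`; only the fields touched by this patch are shown, and the other required form fields of the upscale/vary request are omitted for brevity):

```python
import requests

# Hypothetical upscale/vary call showing only the new form fields from this patch.
with open("input.png", "rb") as f:
    resp = requests.post(
        "http://127.0.0.1:8888/v1/generation/image-upscale-vary",
        files={"input_image": ("input.png", f, "image/png")},
        data={"require_base64": "true", "async_process": "false"},
    )
print(resp.status_code)
```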
diff --git a/fooocusapi/worker.py b/fooocusapi/worker.py
index b8c322d..11b307d 100644
--- a/fooocusapi/worker.py
+++ b/fooocusapi/worker.py
@@ -4,6 +4,7 @@
 import numpy as np
 import torch
 from typing import List
+from fooocusapi.file_utils import save_output_file
 from fooocusapi.parameters import inpaint_model_version, GenerationFinishReason, ImageGenerationParams, ImageGenerationResult
 from fooocusapi.task_queue import QueueTask, TaskQueue
 
@@ -44,7 +45,8 @@ def make_results_from_outputs():
                 if item[0] == 'results':
                     for im in item[1]:
                         if isinstance(im, np.ndarray):
-                            results.append(ImageGenerationResult(im=im, seed=seed, finish_reason=GenerationFinishReason.success))
+                            img_filename = save_output_file(im)
+                            results.append(ImageGenerationResult(im=img_filename, seed=seed, finish_reason=GenerationFinishReason.success))
             queue_task.set_result(results, False)
             task_queue.finish_task(queue_task.seq)
             print(f"[Task Queue] Finish task, seq={queue_task.seq}")
@@ -593,6 +595,7 @@ def callback(step, x0, x, total_steps, y):
         if inpaint_worker.current_task is not None:
             imgs = [inpaint_worker.current_task.post_process(x) for x in imgs]
 
+        img_filenames = []
         for x in imgs:
             d = [
                 ('Prompt', task['log_positive_prompt']),
@@ -615,11 +618,13 @@
                 d.append((f'LoRA [{n}] weight', w))
             if save_log:
                 log(x, d, single_line_number=3)
+            img_filename = save_output_file(x)
+            img_filenames.append(img_filename)
 
         # Fooocus async_worker.py code end
 
         results.append(ImageGenerationResult(
-            im=imgs[0], seed=task['task_seed'], finish_reason=GenerationFinishReason.success))
+            im=img_filenames[0], seed=task['task_seed'], finish_reason=GenerationFinishReason.success))
     except Exception as e:
         print('Process error:', e)
         results.append(ImageGenerationResult(
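Both worker code paths above now persist each numpy image before building ImageGenerationResult, so `im` carries a relative filename rather than pixel data. A standalone sketch of that save-then-serve flow using the helpers added in fooocusapi/file_utils.py (run from the project root so the package imports resolve; the dummy array is only for illustration):

```python
import numpy as np
from fooocusapi.file_utils import save_output_file, get_file_serve_url

dummy = np.zeros((64, 64, 3), dtype=np.uint8)   # stand-in for a generated image
filename = save_output_file(dummy)              # "<YYYY-MM-DD>/<uuid>.png" under outputs/files
print(get_file_serve_url(filename))             # static_serve_base_url + filename
```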
diff --git a/main.py b/main.py
index 3beb6ea..0ff6c1a 100644
--- a/main.py
+++ b/main.py
@@ -253,6 +253,12 @@ def prepare_environments(args) -> bool:
     if not skip_sync_repo:
         download_repositories()
 
+    if args.base_url is None or len(args.base_url.strip()) == 0:
+        host = args.host
+        if host == '0.0.0.0':
+            host = '127.0.0.1'
+        args.base_url = f"http://{host}:{args.port}"
+
     # Add dependent repositories to import path
     sys.path.append(script_path)
     fooocus_path = os.path.join(script_path, dir_repos, fooocus_name)
@@ -275,11 +281,12 @@ def prepare_environments(args) -> bool:
 
 def pre_setup(skip_sync_repo: bool=False, disable_private_log: bool=False, load_all_models: bool=False, preload_pipeline: bool=False):
     class Args(object):
+        base_url = None
         sync_repo = None
         disable_private_log = False
         preload_pipeline = False
         queue_size = 3
-        queue_history = 6
+        queue_history = 100
 
     print("[Pre Setup] Prepare environments")
@@ -316,6 +323,7 @@ def ini_cbh_args():
                         help="Set the listen port, default: 8888")
     parser.add_argument("--host", type=str, default='127.0.0.1',
                         help="Set the listen host, default: 127.0.0.1")
+    parser.add_argument("--base-url", type=str, default=None, help="Set the base URL for external access, default: http://host:port")
     parser.add_argument("--log-level", type=str, default='info',
                         help="Log info for Uvicorn, default: info")
     parser.add_argument("--sync-repo", default=None,
@@ -323,7 +331,7 @@ def ini_cbh_args():
     parser.add_argument("--disable-private-log", default=False, action="store_true", help="Disable Fooocus private log, won't save output files (include generated image files)")
     parser.add_argument("--preload-pipeline", default=False, action="store_true", help="Preload pipeline before start http server")
     parser.add_argument("--queue-size", type=int, default=3, help="Working queue size, default: 3, generation requests exceeding working queue size will return failure")
-    parser.add_argument("--queue-history", type=int, default=6, help="Finished jobs reserve in memory size, default: 6")
+    parser.add_argument("--queue-history", type=int, default=100, help="Number of finished jobs kept in memory, default: 100")
 
     args = parser.parse_args()
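With these defaults, a deployment behind a public hostname would typically start the server with the new flag (for example `python main.py --host 0.0.0.0 --port 8888 --base-url http://my-server:8888`, hostname hypothetical) so that the returned `url` values are reachable from outside. A quick sanity check of the larger in-memory job history (assumes `requests`; JobQueueInfo's fields are not shown in this diff, so the JSON is printed as-is):

```python
import requests

info = requests.get("http://127.0.0.1:8888/v1/generation/job-queue").json()
print(info)   # queue/history counters as defined by JobQueueInfo
```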