Add url field for generation apis
konieshadow committed Oct 25, 2023
1 parent da5d102 commit c7a10b3
Showing 9 changed files with 129 additions and 33 deletions.
6 changes: 5 additions & 1 deletion README.md
@@ -62,7 +62,11 @@ You can import it in [Swagger-UI](https://swagger.io/tools/swagger-ui/) editor.

All the generation APIs can return the response as PNG bytes directly when the request's 'Accept' header is 'image/png'.

All the generation APIs support async processing: pass the parameter `async_process` as true, then use the query job API to retrieve progress and generation results.

Breaking changes from v0.3.0:
* The generation APIs no longer return the `base64` field unless the request parameter `require_base64` is set to true.
* The generation APIs return a `url` field from which the generated image can be fetched as a static file (see the client sketch below).
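
Below is a minimal client sketch of the new behavior (not part of the repository). It assumes the API is served at the default `http://127.0.0.1:8888`, that the request body accepts a `prompt` field, and that `finish_reason` uses the `'SUCCESS'` string referenced in the model descriptions:

```python
import requests

BASE_URL = "http://127.0.0.1:8888"  # assumed default host and port

# Synchronous call; with require_base64 left at its new default (False),
# each result item carries a `url` field instead of inline base64 data.
results = requests.post(
    f"{BASE_URL}/v1/generation/text-to-image",
    json={"prompt": "a forest at dawn", "require_base64": False},
).json()

for item in results:
    if item["finish_reason"] == "SUCCESS":
        # Download the generated PNG from the static file URL added in v0.3.0.
        with open("generated.png", "wb") as f:
            f.write(requests.get(item["url"]).content)

# Raw PNG bytes can still be streamed directly by overriding the Accept
# header through the `accept` query parameter.
png_bytes = requests.post(
    f"{BASE_URL}/v1/generation/text-to-image",
    params={"accept": "image/png"},
    json={"prompt": "a forest at dawn"},
).content
```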

#### Text to Image
> POST /v1/generation/text-to-image
2 changes: 1 addition & 1 deletion fooocus_api_version.py
@@ -1 +1 @@
version = '0.2.5'
version = '0.3.0'
39 changes: 25 additions & 14 deletions fooocusapi/api.py
@@ -1,17 +1,20 @@
from typing import List, Optional
from fastapi import Depends, FastAPI, Header, Query, Response, UploadFile
from fastapi.params import File
from fastapi.staticfiles import StaticFiles
import uvicorn
from fooocusapi.api_utils import generation_output, req_to_params
from fooocusapi.models import AllModelNamesResponse, AsyncJobResponse, GeneratedImageBase64, ImgInpaintOrOutpaintRequest, ImgPromptRequest, ImgUpscaleOrVaryRequest, JobQueueInfo, Text2ImgRequest
import fooocusapi.file_utils as file_utils
from fooocusapi.models import AllModelNamesResponse, AsyncJobResponse, GeneratedImageResult, ImgInpaintOrOutpaintRequest, ImgPromptRequest, ImgUpscaleOrVaryRequest, JobQueueInfo, Text2ImgRequest
from fooocusapi.parameters import GenerationFinishReason, ImageGenerationResult
from fooocusapi.task_queue import TaskType
from fooocusapi.worker import process_generate, task_queue
from concurrent.futures import ThreadPoolExecutor

app = FastAPI()

work_executor = ThreadPoolExecutor(max_workers=task_queue.queue_size*2, thread_name_prefix="worker_")
work_executor = ThreadPoolExecutor(
max_workers=task_queue.queue_size*2, thread_name_prefix="worker_")

img_generate_responses = {
"200": {
@@ -37,6 +40,7 @@
}
}


def call_worker(req: Text2ImgRequest, accept: str):
task_type = TaskType.text_2_img
if isinstance(req, ImgUpscaleOrVaryRequest):
@@ -47,12 +51,13 @@ def call_worker(req: Text2ImgRequest, accept: str):
task_type = TaskType.img_prompt

params = req_to_params(req)
queue_task = task_queue.add_task(task_type, {'params': params.__dict__, 'accept': accept})
queue_task = task_queue.add_task(
task_type, {'params': params.__dict__, 'accept': accept, 'require_base64': req.require_base64})

if queue_task is None:
print("[Task Queue] The task queue has reached limit")
results = [ImageGenerationResult(im=None, seed=0,
finish_reason=GenerationFinishReason.queue_is_full)]
finish_reason=GenerationFinishReason.queue_is_full)]
elif req.async_process:
work_executor.submit(process_generate, queue_task, params)
results = queue_task
@@ -61,12 +66,13 @@ def call_worker(req: Text2ImgRequest, accept: str):

return results


@app.get("/")
def home():
return Response(content='Swagger-UI to: <a href="/docs">/docs</a>', media_type="text/html")


@app.post("/v1/generation/text-to-image", response_model=List[GeneratedImageBase64] | AsyncJobResponse, responses=img_generate_responses)
@app.post("/v1/generation/text-to-image", response_model=List[GeneratedImageResult] | AsyncJobResponse, responses=img_generate_responses)
def text2img_generation(req: Text2ImgRequest, accept: str = Header(None),
accept_query: str | None = Query(None, alias='accept', description="Parameter to overvide 'Accept' header, 'image/png' for output bytes")):
if accept_query is not None and len(accept_query) > 0:
@@ -80,10 +86,10 @@ def text2img_generation(req: Text2ImgRequest, accept: str = Header(None),
streaming_output = False

results = call_worker(req, accept)
return generation_output(results, streaming_output)
return generation_output(results, streaming_output, req.require_base64)


@app.post("/v1/generation/image-upscale-vary", response_model=List[GeneratedImageBase64] | AsyncJobResponse, responses=img_generate_responses)
@app.post("/v1/generation/image-upscale-vary", response_model=List[GeneratedImageResult] | AsyncJobResponse, responses=img_generate_responses)
def img_upscale_or_vary(input_image: UploadFile, req: ImgUpscaleOrVaryRequest = Depends(ImgUpscaleOrVaryRequest.as_form),
accept: str = Header(None),
accept_query: str | None = Query(None, alias='accept', description="Parameter to overvide 'Accept' header, 'image/png' for output bytes")):
@@ -98,10 +104,10 @@ def img_upscale_or_vary(input_image: UploadFile, req: ImgUpscaleOrVaryRequest =
streaming_output = False

results = call_worker(req, accept)
return generation_output(results, streaming_output)
return generation_output(results, streaming_output, req.require_base64)


@app.post("/v1/generation/image-inpait-outpaint", response_model=List[GeneratedImageBase64] | AsyncJobResponse, responses=img_generate_responses)
@app.post("/v1/generation/image-inpait-outpaint", response_model=List[GeneratedImageResult] | AsyncJobResponse, responses=img_generate_responses)
def img_inpaint_or_outpaint(input_image: UploadFile, req: ImgInpaintOrOutpaintRequest = Depends(ImgInpaintOrOutpaintRequest.as_form),
accept: str = Header(None),
accept_query: str | None = Query(None, alias='accept', description="Parameter to overvide 'Accept' header, 'image/png' for output bytes")):
@@ -116,10 +122,10 @@ def img_inpaint_or_outpaint(input_image: UploadFile, req: ImgInpaintOrOutpaintRe
streaming_output = False

results = call_worker(req, accept)
return generation_output(results, streaming_output)
return generation_output(results, streaming_output, req.require_base64)


@app.post("/v1/generation/image-prompt", response_model=List[GeneratedImageBase64] | AsyncJobResponse, responses=img_generate_responses)
@app.post("/v1/generation/image-prompt", response_model=List[GeneratedImageResult] | AsyncJobResponse, responses=img_generate_responses)
def img_prompt(cn_img1: Optional[UploadFile] = File(None),
req: ImgPromptRequest = Depends(ImgPromptRequest.as_form),
accept: str = Header(None),
@@ -135,16 +141,16 @@ def img_prompt(cn_img1: Optional[UploadFile] = File(None),
streaming_output = False

results = call_worker(req, accept)
return generation_output(results, streaming_output)
return generation_output(results, streaming_output, req.require_base64)


@app.get("/v1/generation/query-job", response_model=AsyncJobResponse, description="Query async generation job")
def query_job(job_id: int):
queue_task = task_queue.get_task(job_id, True)
if queue_task is None:
return Response(content="Job not found", status_code=404)
return generation_output(queue_task, False)

return generation_output(queue_task, False, False)
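
A rough sketch of how a client might drive the async path end to end, pairing `async_process` with this query-job endpoint (the host, the `prompt` field, and the job-stage string are assumptions, not taken from the repository):

```python
import time
import requests

BASE_URL = "http://127.0.0.1:8888"  # assumed default

# Submit an async job; the API answers with an AsyncJobResponse containing job_id.
job = requests.post(
    f"{BASE_URL}/v1/generation/text-to-image",
    json={"prompt": "a forest at dawn", "async_process": True},
).json()

# Poll /v1/generation/query-job until the job leaves the running stage
# (the exact stage strings come from AsyncJobStage and are assumed here).
while True:
    status = requests.get(
        f"{BASE_URL}/v1/generation/query-job",
        params={"job_id": job["job_id"]},
    ).json()
    if status["job_stage"] != "RUNNING":
        break
    time.sleep(2)

# On success, job_result is a list of GeneratedImageResult items with url and seed.
for item in status.get("job_result") or []:
    print(item["url"], item["seed"])
```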


@app.get("/v1/generation/job-queue", response_model=JobQueueInfo, description="Query job queue info")
@@ -170,6 +176,11 @@ def all_styles():
from modules.sdxl_styles import legal_style_names
return legal_style_names


app.mount("/files", StaticFiles(directory=file_utils.output_dir), name="files")


def start_app(args):
file_utils.static_serve_base_url = args.base_url + "/files/"
uvicorn.run("fooocusapi.api:app", host=args.host,
port=args.port, log_level=args.log_level)
21 changes: 15 additions & 6 deletions fooocusapi/api_utils.py
@@ -6,7 +6,8 @@
import numpy as np
from fastapi import Response, UploadFile
from PIL import Image
from fooocusapi.models import AsyncJobResponse, AsyncJobStage, GeneratedImageBase64, GenerationFinishReason, ImgInpaintOrOutpaintRequest, ImgPromptRequest, ImgUpscaleOrVaryRequest, Text2ImgRequest
from fooocusapi.file_utils import get_file_serve_url, output_file_to_base64img, output_file_to_bytesimg
from fooocusapi.models import AsyncJobResponse, AsyncJobStage, GeneratedImageResult, GenerationFinishReason, ImgInpaintOrOutpaintRequest, ImgPromptRequest, ImgUpscaleOrVaryRequest, Text2ImgRequest
from fooocusapi.parameters import ImageGenerationParams, ImageGenerationResult
from fooocusapi.task_queue import QueueTask, TaskType
import modules.flags as flags
@@ -104,7 +105,7 @@ def req_to_params(req: Text2ImgRequest) -> ImageGenerationParams:
)


def generation_output(results: QueueTask | List[ImageGenerationResult], streaming_output: bool) -> Response | List[GeneratedImageBase64] | AsyncJobResponse:
def generation_output(results: QueueTask | List[ImageGenerationResult], streaming_output: bool, require_base64: bool) -> Response | List[GeneratedImageResult] | AsyncJobResponse:
if isinstance(results, QueueTask):
task = results
job_stage = AsyncJobStage.running
@@ -117,7 +118,11 @@ def generation_output(results: QueueTask | List[ImageGenerationResult], streamin
else:
if task.task_result != None:
job_stage = AsyncJobStage.success
job_result = generation_output(task.task_result, False)
task_result_require_base64 = False
if 'require_base64' in task.req_param and task.req_param['require_base64']:
task_result_require_base64 = True

job_result = generation_output(task.task_result, False, task_result_require_base64)
return AsyncJobResponse(job_id=task.seq,
job_type=task.type,
job_stage=job_stage,
@@ -128,11 +133,15 @@ def generation_output(results: QueueTask | List[ImageGenerationResult], streamin
if streaming_output:
if len(results) == 0 or results[0].finish_reason != GenerationFinishReason.success:
return Response(status_code=500)
bytes = narray_to_bytesimg(results[0].im)
bytes = output_file_to_bytesimg(results[0].im)
return Response(bytes, media_type='image/png')
else:
results = [GeneratedImageBase64(base64=narray_to_base64img(
item.im), seed=item.seed, finish_reason=item.finish_reason) for item in results]
results = [GeneratedImageResult(
base64=output_file_to_base64img(
item.im) if require_base64 else None,
url=get_file_serve_url(item.im),
seed=item.seed,
finish_reason=item.finish_reason) for item in results]
return results


54 changes: 54 additions & 0 deletions fooocusapi/file_utils.py
@@ -0,0 +1,54 @@
import base64
import datetime
from io import BytesIO
import os
import numpy as np
from PIL import Image
import uuid

output_dir = os.path.abspath(os.path.join(
os.path.dirname(__file__), '..', 'outputs', 'files'))
os.makedirs(output_dir, exist_ok=True)

static_serve_base_url = 'http://127.0.0.1:8888/files/'


def save_output_file(img: np.ndarray) -> str:
current_time = datetime.datetime.now()
date_string = current_time.strftime("%Y-%m-%d")

filename = os.path.join(date_string, str(uuid.uuid4()) + '.png')
file_path = os.path.join(output_dir, filename)

os.makedirs(os.path.dirname(file_path), exist_ok=True)
Image.fromarray(img).save(file_path)
return filename


def output_file_to_base64img(filename: str) -> str:
file_path = os.path.join(output_dir, filename)
if not os.path.exists(file_path) or not os.path.isfile(file_path):
return None

img = Image.open(file_path)
output_buffer = BytesIO()
img.save(output_buffer, format='PNG')
byte_data = output_buffer.getvalue()
base64_str = base64.b64encode(byte_data)
return base64_str


def output_file_to_bytesimg(filename: str) -> bytes:
file_path = os.path.join(output_dir, filename)
if not os.path.exists(file_path) or not os.path.isfile(file_path):
return None

img = Image.open(file_path)
output_buffer = BytesIO()
img.save(output_buffer, format='PNG')
byte_data = output_buffer.getvalue()
return byte_data


def get_file_serve_url(filename: str) -> str:
return static_serve_base_url + filename
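
A small local sketch of how these helpers fit together (the dummy image and printed values are illustrative only):

```python
import numpy as np

import fooocusapi.file_utils as file_utils

# Save a dummy 64x64 RGB image; the helper returns a date-prefixed relative
# filename such as "2023-10-25/<uuid>.png" under outputs/files.
dummy = np.zeros((64, 64, 3), dtype=np.uint8)
filename = file_utils.save_output_file(dummy)

# The public URL is simply static_serve_base_url + filename, which the API
# exposes through the "/files" StaticFiles mount shown above.
print(file_utils.get_file_serve_url(filename))

# The base64/bytes helpers read the saved file back, returning None if it is missing.
assert file_utils.output_file_to_base64img(filename) is not None
assert file_utils.output_file_to_bytesimg(filename) is not None
```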
17 changes: 11 additions & 6 deletions fooocusapi/models.py
@@ -98,6 +98,7 @@ class Text2ImgRequest(BaseModel):
refiner_model_name: str = 'sd_xl_refiner_1.0_0.9vae.safetensors'
loras: List[Lora] = Field(default=[
Lora(model_name='sd_xl_offset_example-lora_1.0.safetensors', weight=0.5)])
require_base64: bool = Field(default=False, description="Return base64 data of generated image")
async_process: bool = Field(default=False, description="Set to true will run async and return job info for retrieve generataion result later")


@@ -135,6 +136,7 @@ def as_form(cls, input_image: UploadFile = Form(description="Init image for upsa
w4: float = Form(default=0.5, ge=-2, le=2),
l5: str | None = Form(None),
w5: float = Form(default=0.5, ge=-2, le=2),
require_base64: bool = Form(default=False, description="Return base64 data of generated image"),
async_process: bool = Form(default=False, description="Set to true will run async and return job info for retrieve generataion result later"),
):
style_selection_arr: List[str] = []
@@ -155,7 +157,7 @@ def as_form(cls, input_image: UploadFile = Form(description="Init image for upsa
performance_selection=performance_selection, aspect_ratios_selection=aspect_ratios_selection,
image_number=image_number, image_seed=image_seed, sharpness=sharpness, guidance_scale=guidance_scale,
base_model_name=base_model_name, refiner_model_name=refiner_model_name,
loras=loras, async_process=async_process)
loras=loras, require_base64=require_base64, async_process=async_process)


class ImgInpaintOrOutpaintRequest(Text2ImgRequest):
@@ -196,6 +198,7 @@ def as_form(cls, input_image: UploadFile = Form(description="Init image for inpa
w4: float = Form(default=0.5, ge=-2, le=2),
l5: str | None = Form(None),
w5: float = Form(default=0.5, ge=-2, le=2),
require_base64: bool = Form(default=False, description="Return base64 data of generated image"),
async_process: bool = Form(default=False, description="Set to true will run async and return job info for retrieve generataion result later"),
):

@@ -232,7 +235,7 @@ def as_form(cls, input_image: UploadFile = Form(description="Init image for inpa
performance_selection=performance_selection, aspect_ratios_selection=aspect_ratios_selection,
image_number=image_number, image_seed=image_seed, sharpness=sharpness, guidance_scale=guidance_scale,
base_model_name=base_model_name, refiner_model_name=refiner_model_name,
loras=loras, async_process=async_process)
loras=loras, require_base64=require_base64, async_process=async_process)


class ImgPromptRequest(Text2ImgRequest):
@@ -297,6 +300,7 @@ def as_form(cls, cn_img1: UploadFile = Form(File(None), description="Input image
w4: float = Form(default=0.5, ge=-2, le=2),
l5: str | None = Form(None),
w5: float = Form(default=0.5, ge=-2, le=2),
require_base64: bool = Form(default=False, description="Return base64 data of generated image"),
async_process: bool = Form(default=False, description="Set to true will run async and return job info for retrieve generataion result later"),
):
if isinstance(cn_img1, File):
@@ -338,12 +342,13 @@ def as_form(cls, cn_img1: UploadFile = Form(File(None), description="Input image
performance_selection=performance_selection, aspect_ratios_selection=aspect_ratios_selection,
image_number=image_number, image_seed=image_seed, sharpness=sharpness, guidance_scale=guidance_scale,
base_model_name=base_model_name, refiner_model_name=refiner_model_name,
loras=loras, async_process=async_process)
loras=loras, require_base64=require_base64, async_process=async_process)


class GeneratedImageBase64(BaseModel):
class GeneratedImageResult(BaseModel):
base64: str | None = Field(
description="Image encoded in base64, or null if finishReasen is not 'SUCCESS'")
description="Image encoded in base64, or null if finishReasen is not 'SUCCESS', only return when request require base64")
url: str | None = Field(description="Image file static serve url, or null if finishReasen is not 'SUCCESS'")
seed: int = Field(description="The seed associated with this image")
finish_reason: GenerationFinishReason
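
For reference, a single `GeneratedImageResult` item serialized in a JSON response might look like the following (all values, including the finish reason string, are made up for illustration):

```python
# Hypothetical serialized item; base64 is only populated when the request
# sets require_base64 to true, otherwise it is null.
example_item = {
    "base64": None,
    "url": "http://127.0.0.1:8888/files/2023-10-25/example.png",
    "seed": 1234567890,
    "finish_reason": "SUCCESS",
}
```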

@@ -361,7 +366,7 @@ class AsyncJobResponse(BaseModel):
job_stage: AsyncJobStage
job_progess: int
job_status: str | None
job_result: List[GeneratedImageBase64] | None
job_result: List[GeneratedImageResult] | None


class JobQueueInfo(BaseModel):
2 changes: 1 addition & 1 deletion fooocusapi/parameters.py
@@ -58,7 +58,7 @@ class GenerationFinishReason(str, Enum):


class ImageGenerationResult(object):
def __init__(self, im: np.ndarray | None, seed: int, finish_reason: GenerationFinishReason):
def __init__(self, im: str | None, seed: int, finish_reason: GenerationFinishReason):
self.im = im
self.seed = seed
self.finish_reason = finish_reason
9 changes: 7 additions & 2 deletions fooocusapi/worker.py
@@ -4,6 +4,7 @@
import numpy as np
import torch
from typing import List
from fooocusapi.file_utils import save_output_file
from fooocusapi.parameters import inpaint_model_version, GenerationFinishReason, ImageGenerationParams, ImageGenerationResult
from fooocusapi.task_queue import QueueTask, TaskQueue

@@ -44,7 +45,8 @@ def make_results_from_outputs():
if item[0] == 'results':
for im in item[1]:
if isinstance(im, np.ndarray):
results.append(ImageGenerationResult(im=im, seed=seed, finish_reason=GenerationFinishReason.success))
img_filename = save_output_file(im)
results.append(ImageGenerationResult(im=img_filename, seed=seed, finish_reason=GenerationFinishReason.success))
queue_task.set_result(results, False)
task_queue.finish_task(queue_task.seq)
print(f"[Task Queue] Finish task, seq={queue_task.seq}")
@@ -593,6 +595,7 @@ def callback(step, x0, x, total_steps, y):
if inpaint_worker.current_task is not None:
imgs = [inpaint_worker.current_task.post_process(x) for x in imgs]

img_filenames = []
for x in imgs:
d = [
('Prompt', task['log_positive_prompt']),
@@ -615,11 +618,13 @@ def callback(step, x0, x, total_steps, y):
d.append((f'LoRA [{n}] weight', w))
if save_log:
log(x, d, single_line_number=3)
img_filename = save_output_file(x)
img_filenames.append(img_filename)

# Fooocus async_worker.py code end

results.append(ImageGenerationResult(
im=imgs[0], seed=task['task_seed'], finish_reason=GenerationFinishReason.success))
im=img_filenames[0], seed=task['task_seed'], finish_reason=GenerationFinishReason.success))
except Exception as e:
print('Process error:', e)
results.append(ImageGenerationResult(