Skip to content

Commit

Permalink
Merge pull request #109 from ototadana/feature/use-upscaler
Browse files Browse the repository at this point in the history
add upscaler option
  • Loading branch information
ototadana authored Jul 2, 2023
2 parents 6a3c0f5 + 4bfe12d commit aeefdc4
Show file tree
Hide file tree
Showing 6 changed files with 60 additions and 20 deletions.
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -353,7 +353,7 @@ The `Blur` face processor is employed here, which, as the name suggests, applies
As a result, the entire area of each detected face gets blurred. This can be useful in situations where you need to anonymize faces in images for privacy reasons.


#### Example 6: Different Processing Based on Face Position
### Example 6: Different Processing Based on Face Position

This workflow employs the `RetinaFace` face detector and applies different processing depending on the position of the detected faces in the image.

Expand Down
13 changes: 9 additions & 4 deletions scripts/entities/face.py
Original file line number Diff line number Diff line change
@@ -1,12 +1,14 @@
import cv2
import numpy as np
from modules import images
from PIL import Image

from scripts.entities.option import Option
from scripts.entities.rect import Rect


class Face:
def __init__(self, entire_image: np.ndarray, face_area: Rect, face_margin: float, face_size: int):
def __init__(self, entire_image: np.ndarray, face_area: Rect, face_margin: float, face_size: int, upscaler: str):
self.face_area = face_area
self.center = face_area.center
left, top, right, bottom = face_area.to_square()
Expand All @@ -18,7 +20,7 @@ def __init__(self, entire_image: np.ndarray, face_area: Rect, face_margin: float
self.width = self.right - self.left
self.height = self.bottom - self.top

self.image = self.__crop_face_image(entire_image, face_size)
self.image = self.__crop_face_image(entire_image, face_size, upscaler)
self.face_area_on_image = self.__get_face_area_on_image(face_size)

def __get_face_area_on_image(self, face_size: int):
Expand All @@ -30,9 +32,12 @@ def __get_face_area_on_image(self, face_size: int):
int((self.face_area.bottom - self.top) * scaleFactor),
)

def __crop_face_image(self, entire_image: np.ndarray, face_size: int, upscaler: str):
    """Crop this face's square region from *entire_image* and scale it to face_size x face_size.

    When a real upscaler name is given (anything other than the "None" default),
    the WebUI upscaler pipeline is used for the enlargement; otherwise a plain
    OpenCV resize is performed.
    """
    region = entire_image[self.top : self.bottom, self.left : self.right, :]
    wants_upscaler = bool(upscaler) and upscaler != Option.DEFAULT_UPSCALER
    if wants_upscaler:
        # resize mode 0 = plain resize to the exact target dimensions.
        return images.resize_image(0, Image.fromarray(region), face_size, face_size, upscaler)
    # No upscaler selected: fall back to a direct OpenCV resize.
    return Image.fromarray(cv2.resize(region, dsize=(face_size, face_size)))

def __ensure_margin(self, left: int, top: int, right: int, bottom: int, entire_image: np.ndarray, margin: float):
entire_height, entire_width = entire_image.shape[:2]
Expand Down
5 changes: 5 additions & 0 deletions scripts/entities/option.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@ class Option:
DEFAULT_IGNORE_LARGER_FACES = True
DEFAULT_AFFECTED_AREAS = ["Face"]
DEFAULT_WORKFLOW = open(os.path.join(workflows_dir, "default.json")).read()
DEFAULT_UPSCALER = "None"

def __init__(self, *args) -> None:
self.extra_options: Dict[str, Dict[str, str]] = {}
Expand All @@ -44,6 +45,7 @@ def __init__(self, *args) -> None:
self.affected_areas = Option.DEFAULT_AFFECTED_AREAS
self.show_original_image = Option.DEFAULT_SHOW_ORIGINAL_IMAGE
self.workflow = Option.DEFAULT_WORKFLOW
self.upscaler = Option.DEFAULT_UPSCALER

if len(args) > 0 and isinstance(args[0], dict):
self.update_by_dict(args[0])
Expand Down Expand Up @@ -78,6 +80,7 @@ def update_by_list(self, args: tuple) -> None:
self.affected_areas = args[15] if arg_len > 15 and isinstance(args[15], list) else self.affected_areas
self.show_original_image = args[16] if arg_len > 16 and isinstance(args[16], bool) else self.show_original_image
self.workflow = args[17] if arg_len > 17 and isinstance(args[17], str) else self.workflow
self.upscaler = args[18] if arg_len > 18 and isinstance(args[18], str) else self.upscaler

def update_by_dict(self, params: dict) -> None:
self.face_margin = params.get("face_margin", self.face_margin)
Expand All @@ -98,6 +101,7 @@ def update_by_dict(self, params: dict) -> None:
self.affected_areas = params.get("affected_areas", self.affected_areas)
self.show_original_image = params.get("show_original_image", self.show_original_image)
self.workflow = params.get("workflow", self.workflow)
self.upscaler = params.get("upscaler", self.upscaler)

for k, v in params.items():
if isinstance(v, dict):
Expand All @@ -121,6 +125,7 @@ def to_dict(self) -> dict:
Option.add_prefix("ignore_larger_faces"): self.ignore_larger_faces,
Option.add_prefix("affected_areas"): str.join(";", self.affected_areas),
Option.add_prefix("workflow"): self.workflow,
Option.add_prefix("upscaler"): self.upscaler,
}

for option_group_name, options in self.extra_options.items():
Expand Down
11 changes: 11 additions & 0 deletions scripts/ui/ui_builder.py
Original file line number Diff line number Diff line change
Expand Up @@ -108,6 +108,16 @@ def __build(self, workflow_selector: gr.Dropdown):
)
self.infotext_fields.append((ignore_larger_faces, Option.add_prefix("ignore_larger_faces")))

# Build the upscaler choice list, ensuring the "None" default is always a
# selectable entry even when it is missing from the WebUI's registered upscalers.
upscalers = [upscaler.name for upscaler in shared.sd_upscalers]
if Option.DEFAULT_UPSCALER not in upscalers:
    upscalers.append(Option.DEFAULT_UPSCALER)
upscaler = gr.Dropdown(
    label="Upscaler",
    # BUG FIX: previously rebuilt from shared.sd_upscalers directly, which
    # discarded the appended default and could leave the dropdown's initial
    # value absent from its choices.
    choices=upscalers,
    value=Option.DEFAULT_UPSCALER,
)
self.infotext_fields.append((upscaler, Option.add_prefix("upscaler")))

with gr.Accordion("(3) Recreate the Faces", open=False):
strength1 = gr.Slider(
minimum=0.1,
Expand Down Expand Up @@ -162,6 +172,7 @@ def __build(self, workflow_selector: gr.Dropdown):
affected_areas,
show_original_image,
workflow,
upscaler,
]


Expand Down
37 changes: 28 additions & 9 deletions scripts/use_cases/image_processor.py
Original file line number Diff line number Diff line change
Expand Up @@ -128,10 +128,14 @@ def proc_image(
if wildcards_script is not None:
p.prompt = self.__apply_wildcards(wildcards_script, p.prompt, i)

jobs = self.workflow.select_jobs(faces, i, entire_width, entire_height)
rule = self.workflow.select_rule(faces, i, entire_width, entire_height)

if len(jobs) == 0:
if rule is None or len(rule.then) == 0:
continue
jobs = rule.then

if option.show_intermediate_steps:
original_face = np.array(face.image.copy())

proc_image = self.workflow.process(jobs, face, p, option)
if proc_image is None:
Expand All @@ -144,13 +148,20 @@ def proc_image(
mask_image = self.workflow.generate_mask(jobs, face_image, face, option)

if option.show_intermediate_steps:
feature = self.__get_feature(p.prompt, entire_prompt)
coverage = MaskGenerator.calculate_mask_coverage(mask_image) * 100
mask_info = f"size:{option.mask_size}, blur:{option.mask_blur}, cov:{coverage:.0f}%"
tag = f"{face.face_area.tag} ({face.face_area.width}x{face.face_area.height})"
upscaler_name = option.upscaler if option.upscaler != Option.DEFAULT_UPSCALER else ""
output_images.append(
Image.fromarray(self.__add_comment(self.__add_comment(face_image, feature), tag, True))
Image.fromarray(self.__add_comment(self.__add_comment(original_face, upscaler_name), tag, True))
)

feature = self.__get_feature(p.prompt, entire_prompt)
criteria = rule.when.criteria if rule.when is not None and rule.when.criteria is not None else ""
output_images.append(
Image.fromarray(self.__add_comment(self.__add_comment(face_image, feature), criteria, True))
)

coverage = MaskGenerator.calculate_mask_coverage(mask_image) * 100
mask_info = f"size:{option.mask_size}, blur:{option.mask_blur}, cov:{coverage:.0f}%"
output_images.append(
Image.fromarray(self.__add_comment(self.__to_masked_image(mask_image, face_image), mask_info))
)
Expand Down Expand Up @@ -314,16 +325,24 @@ def __to_masked_image(self, mask_image: np.ndarray, image: np.ndarray) -> np.nda

def __crop_face(self, image: Image, option: Option) -> List[Face]:
    """Detect faces in *image* and return them cropped according to *option*."""
    detected_areas = self.workflow.detect_faces(image, option)
    return self.__crop(
        image,
        detected_areas,
        option.face_margin,
        option.face_size,
        option.ignore_larger_faces,
        option.upscaler,
    )

def __crop(
self, image: Image, face_areas: List[Rect], face_margin: float, face_size: int, ignore_larger_faces: bool
self,
image: Image,
face_areas: List[Rect],
face_margin: float,
face_size: int,
ignore_larger_faces: bool,
upscaler: str,
) -> List[Face]:
image = np.array(image, dtype=np.uint8)

areas: List[Face] = []
for face_area in face_areas:
face = Face(image, face_area, face_margin, face_size)
face = Face(image, face_area, face_margin, face_size, upscaler)
if ignore_larger_faces and face.width > face_size:
continue
areas.append(face)
Expand Down
12 changes: 6 additions & 6 deletions scripts/use_cases/workflow_manager.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
from modules.processing import StableDiffusionProcessingImg2Img
from PIL import Image

from scripts.entities.definitions import Condition, Job, Workflow
from scripts.entities.definitions import Condition, Job, Rule, Workflow
from scripts.entities.face import Face
from scripts.entities.option import Option
from scripts.entities.rect import Rect
Expand Down Expand Up @@ -44,20 +44,20 @@ def detect_faces(self, image: Image, option: Option) -> List[Rect]:

return results

def select_rule(self, faces: List[Face], index: int, width: int, height: int) -> Rule:
    """Return the first workflow rule matching the face at *index*, or None.

    A rule with no `when` condition matches unconditionally; otherwise both
    the tag condition and the positional criteria must match.
    """
    face = faces[index]
    if face.face_area is None:
        return None

    for rule in self.workflow.rules:
        condition = rule.when
        if condition is None:
            # Unconditional rule: always applies.
            return rule
        tag_ok = self.__is_tag_match(condition, face)
        if tag_ok and self.__is_criteria_match(condition, faces, index, width, height):
            return rule

    return None

def __is_tag_match(self, condition: Condition, face: Face) -> bool:
if condition.tag is None or len(condition.tag) == 0:
Expand Down

0 comments on commit aeefdc4

Please sign in to comment.