diff --git a/README.md b/README.md index 51f5377..32b84aa 100644 --- a/README.md +++ b/README.md @@ -353,7 +353,7 @@ The `Blur` face processor is employed here, which, as the name suggests, applies As a result, the entire area of each detected face gets blurred. This can be useful in situations where you need to anonymize faces in images for privacy reasons. -#### Example 6: Different Processing Based on Face Position +### Example 6: Different Processing Based on Face Position This workflow employs the `RetinaFace` face detector and applies different processing depending on the position of the detected faces in the image. diff --git a/scripts/entities/face.py b/scripts/entities/face.py index ca77421..a3f162b 100644 --- a/scripts/entities/face.py +++ b/scripts/entities/face.py @@ -1,12 +1,14 @@ import cv2 import numpy as np +from modules import images from PIL import Image +from scripts.entities.option import Option from scripts.entities.rect import Rect class Face: - def __init__(self, entire_image: np.ndarray, face_area: Rect, face_margin: float, face_size: int): + def __init__(self, entire_image: np.ndarray, face_area: Rect, face_margin: float, face_size: int, upscaler: str): self.face_area = face_area self.center = face_area.center left, top, right, bottom = face_area.to_square() @@ -18,7 +20,7 @@ def __init__(self, entire_image: np.ndarray, face_area: Rect, face_margin: float self.width = self.right - self.left self.height = self.bottom - self.top - self.image = self.__crop_face_image(entire_image, face_size) + self.image = self.__crop_face_image(entire_image, face_size, upscaler) self.face_area_on_image = self.__get_face_area_on_image(face_size) def __get_face_area_on_image(self, face_size: int): @@ -30,9 +32,12 @@ def __get_face_area_on_image(self, face_size: int): int((self.face_area.bottom - self.top) * scaleFactor), ) - def __crop_face_image(self, entire_image: np.ndarray, face_size: int): + def __crop_face_image(self, entire_image: np.ndarray, face_size: 
int, upscaler: str): cropped = entire_image[self.top : self.bottom, self.left : self.right, :] - return Image.fromarray(cv2.resize(cropped, dsize=(face_size, face_size))) + if upscaler and upscaler != Option.DEFAULT_UPSCALER: + return images.resize_image(0, Image.fromarray(cropped), face_size, face_size, upscaler) + else: + return Image.fromarray(cv2.resize(cropped, dsize=(face_size, face_size))) def __ensure_margin(self, left: int, top: int, right: int, bottom: int, entire_image: np.ndarray, margin: float): entire_height, entire_width = entire_image.shape[:2] diff --git a/scripts/entities/option.py b/scripts/entities/option.py index 5b8df46..f19a0f4 100644 --- a/scripts/entities/option.py +++ b/scripts/entities/option.py @@ -23,6 +23,7 @@ class Option: DEFAULT_IGNORE_LARGER_FACES = True DEFAULT_AFFECTED_AREAS = ["Face"] DEFAULT_WORKFLOW = open(os.path.join(workflows_dir, "default.json")).read() + DEFAULT_UPSCALER = "None" def __init__(self, *args) -> None: self.extra_options: Dict[str, Dict[str, str]] = {} @@ -44,6 +45,7 @@ def __init__(self, *args) -> None: self.affected_areas = Option.DEFAULT_AFFECTED_AREAS self.show_original_image = Option.DEFAULT_SHOW_ORIGINAL_IMAGE self.workflow = Option.DEFAULT_WORKFLOW + self.upscaler = Option.DEFAULT_UPSCALER if len(args) > 0 and isinstance(args[0], dict): self.update_by_dict(args[0]) @@ -78,6 +80,7 @@ def update_by_list(self, args: tuple) -> None: self.affected_areas = args[15] if arg_len > 15 and isinstance(args[15], list) else self.affected_areas self.show_original_image = args[16] if arg_len > 16 and isinstance(args[16], bool) else self.show_original_image self.workflow = args[17] if arg_len > 17 and isinstance(args[17], str) else self.workflow + self.upscaler = args[18] if arg_len > 18 and isinstance(args[18], str) else self.upscaler def update_by_dict(self, params: dict) -> None: self.face_margin = params.get("face_margin", self.face_margin) @@ -98,6 +101,7 @@ def update_by_dict(self, params: dict) -> None: 
self.affected_areas = params.get("affected_areas", self.affected_areas) self.show_original_image = params.get("show_original_image", self.show_original_image) self.workflow = params.get("workflow", self.workflow) + self.upscaler = params.get("upscaler", self.upscaler) for k, v in params.items(): if isinstance(v, dict): @@ -121,6 +125,7 @@ def to_dict(self) -> dict: Option.add_prefix("ignore_larger_faces"): self.ignore_larger_faces, Option.add_prefix("affected_areas"): str.join(";", self.affected_areas), Option.add_prefix("workflow"): self.workflow, + Option.add_prefix("upscaler"): self.upscaler, } for option_group_name, options in self.extra_options.items(): diff --git a/scripts/ui/ui_builder.py b/scripts/ui/ui_builder.py index fab6b53..39a8a4f 100644 --- a/scripts/ui/ui_builder.py +++ b/scripts/ui/ui_builder.py @@ -108,6 +108,16 @@ def __build(self, workflow_selector: gr.Dropdown): ) self.infotext_fields.append((ignore_larger_faces, Option.add_prefix("ignore_larger_faces"))) + upscalers = [upscaler.name for upscaler in shared.sd_upscalers] + if Option.DEFAULT_UPSCALER not in upscalers: + upscalers.append(Option.DEFAULT_UPSCALER) + upscaler = gr.Dropdown( + label="Upscaler", + choices=upscalers, + value=Option.DEFAULT_UPSCALER, + ) + self.infotext_fields.append((upscaler, Option.add_prefix("upscaler"))) + with gr.Accordion("(3) Recreate the Faces", open=False): strength1 = gr.Slider( minimum=0.1, @@ -162,6 +172,7 @@ def __build(self, workflow_selector: gr.Dropdown): affected_areas, show_original_image, workflow, + upscaler, ] diff --git a/scripts/use_cases/image_processor.py b/scripts/use_cases/image_processor.py index 3bc3c6f..91fb269 100644 --- a/scripts/use_cases/image_processor.py +++ b/scripts/use_cases/image_processor.py @@ -128,10 +128,14 @@ def proc_image( if wildcards_script is not None: p.prompt = self.__apply_wildcards(wildcards_script, p.prompt, i) - jobs = self.workflow.select_jobs(faces, i, entire_width, 
entire_height) + rule = self.workflow.select_rule(faces, i, entire_width, entire_height) - if len(jobs) == 0: + if rule is None or len(rule.then) == 0: continue + jobs = rule.then + + if option.show_intermediate_steps: + original_face = np.array(face.image.copy()) proc_image = self.workflow.process(jobs, face, p, option) if proc_image is None: @@ -144,13 +148,20 @@ def proc_image( mask_image = self.workflow.generate_mask(jobs, face_image, face, option) if option.show_intermediate_steps: - feature = self.__get_feature(p.prompt, entire_prompt) - coverage = MaskGenerator.calculate_mask_coverage(mask_image) * 100 - mask_info = f"size:{option.mask_size}, blur:{option.mask_blur}, cov:{coverage:.0f}%" tag = f"{face.face_area.tag} ({face.face_area.width}x{face.face_area.height})" + upscaler_name = option.upscaler if option.upscaler != Option.DEFAULT_UPSCALER else "" output_images.append( - Image.fromarray(self.__add_comment(self.__add_comment(face_image, feature), tag, True)) + Image.fromarray(self.__add_comment(self.__add_comment(original_face, upscaler_name), tag, True)) ) + + feature = self.__get_feature(p.prompt, entire_prompt) + criteria = rule.when.criteria if rule.when is not None and rule.when.criteria is not None else "" + output_images.append( + Image.fromarray(self.__add_comment(self.__add_comment(face_image, feature), criteria, True)) + ) + + coverage = MaskGenerator.calculate_mask_coverage(mask_image) * 100 + mask_info = f"size:{option.mask_size}, blur:{option.mask_blur}, cov:{coverage:.0f}%" output_images.append( Image.fromarray(self.__add_comment(self.__to_masked_image(mask_image, face_image), mask_info)) ) @@ -314,16 +325,24 @@ def __to_masked_image(self, mask_image: np.ndarray, image: np.ndarray) -> np.nda def __crop_face(self, image: Image, option: Option) -> List[Face]: face_areas = self.workflow.detect_faces(image, option) - return self.__crop(image, face_areas, option.face_margin, option.face_size, option.ignore_larger_faces) + return self.__crop( + 
image, face_areas, option.face_margin, option.face_size, option.ignore_larger_faces, option.upscaler + ) def __crop( - self, image: Image, face_areas: List[Rect], face_margin: float, face_size: int, ignore_larger_faces: bool + self, + image: Image, + face_areas: List[Rect], + face_margin: float, + face_size: int, + ignore_larger_faces: bool, + upscaler: str, ) -> List[Face]: image = np.array(image, dtype=np.uint8) areas: List[Face] = [] for face_area in face_areas: - face = Face(image, face_area, face_margin, face_size) + face = Face(image, face_area, face_margin, face_size, upscaler) if ignore_larger_faces and face.width > face_size: continue areas.append(face) diff --git a/scripts/use_cases/workflow_manager.py b/scripts/use_cases/workflow_manager.py index 8ab49f6..da7626d 100644 --- a/scripts/use_cases/workflow_manager.py +++ b/scripts/use_cases/workflow_manager.py @@ -5,7 +5,7 @@ from modules.processing import StableDiffusionProcessingImg2Img from PIL import Image -from scripts.entities.definitions import Condition, Job, Workflow +from scripts.entities.definitions import Condition, Job, Rule, Workflow from scripts.entities.face import Face from scripts.entities.option import Option from scripts.entities.rect import Rect @@ -44,20 +44,20 @@ def detect_faces(self, image: Image, option: Option) -> List[Rect]: return results - def select_jobs(self, faces: List[Face], index: int, width: int, height: int) -> List[Job]: + def select_rule(self, faces: List[Face], index: int, width: int, height: int) -> Rule: if faces[index].face_area is None: - return [] + return None for rule in self.workflow.rules: if rule.when is None: - return rule.then + return rule if self.__is_tag_match(rule.when, faces[index]) and self.__is_criteria_match( rule.when, faces, index, width, height ): - return rule.then + return rule - return [] + return None def __is_tag_match(self, condition: Condition, face: Face) -> bool: if condition.tag is None or len(condition.tag) == 0: