diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
new file mode 100644
index 0000000..6712a62
--- /dev/null
+++ b/.github/workflows/main.yml
@@ -0,0 +1,49 @@
+name: Python CI/CD Workflow
+
+on:
+  push:
+    branches:
+      - feature/ci
+  pull_request:
+    branches:
+      - feature/ci
+
+jobs:
+  build_macos:
+    runs-on: macos-latest
+
+    steps:
+      # Restore the .git directory from the cache
+      - name: Cache repository
+        uses: actions/cache@v3
+        with:
+          path: .git
+          key: ${{ runner.os }}-git-${{ hashFiles('.git/HEAD') }}
+          restore-keys: |
+            ${{ runner.os }}-git-
+
+      # Check out the repository without LFS support
+      - name: Check out repository
+        uses: actions/checkout@v3
+        with:
+          lfs: false  # Disable Git LFS
+
+      # Set up Python 3.11
+      - name: Set up Python 3.11
+        uses: actions/setup-python@v2
+        with:
+          python-version: 3.11
+
+      # Install dependencies
+      - name: Install dependencies
+        run: |
+          brew update
+          brew install pkg-config autoconf automake autoconf-archive
+
+      # Run build_dev.sh script
+      - name: Run build_dev.sh script
+        run: |
+          chmod +x build_dev.sh
+          ./build_dev.sh
+
diff --git a/environment.yml b/environment.yml
new file mode 100644
index 0000000..750ff8d
--- /dev/null
+++ b/environment.yml
@@ -0,0 +1,81 @@
+name: OpenVPCal
+channels:
+  - conda-forge
+  - defaults
+  - pytorch-nightly
+dependencies:
+  - python=3.11
+  - conda-forge::openimageio=2.5.9.0
+  - conda-forge::py-openimageio=2.5.9.0
+  - pip
+  - pip:
+    - alabaster==0.7.13
+    - altgraph==0.17.4
+    - astroid==3.0.2
+    - Babel==2.14.0
+    - certifi==2024.7.4
+    - charset-normalizer==3.3.2
+    - colour-checker-detection==0.1.5
+    - colour-science==0.4.3
+    - contourpy==1.2.0
+    - coverage==7.3.3
+    - cycler==0.12.1
+    - dill==0.3.7
+    - docutils==0.20.1
+    - execnet==2.0.2
+    - flake8==6.1.0
+    - fonttools==4.46.0
+    - idna==3.7
+    - imageio==2.33.1
+    - imagesize==1.4.1
+    - importlib-resources==6.1.1
+    - iniconfig==2.0.0
+    - isort==5.13.2
+    - Jinja2==3.1.4
+    - kiwisolver==1.4.5
+    - lazy-object-proxy==1.9.0
+    - macholib==1.16.3
+    - MarkupSafe==2.1.3
+    - matplotlib==3.8.2
+    - mccabe==0.7.0
+    - numpy==1.26.2
+    - opencolorio==2.3.1
+    - opencv-python==4.8.1.78
+    - packaging==23.2
+    - pillow==10.3.0
+    - platformdirs==4.1.0
+    - pluggy==1.3.0
+    - pycodestyle==2.11.1
+    - pyflakes==3.1.0
+    - Pygments==2.17.2
+    - pyinstaller==6.3.0
+    - pyinstaller-hooks-contrib==2023.10
+    - pylint==3.0.3
+    - pyparsing==3.1.1
+    - pyqtgraph==0.13.3
+    - PySide6==6.5.3
+    - PySide6-Addons==6.5.3
+    - PySide6-Essentials==6.5.3
+    - pytest==7.4.3
+    - pytest-xdist==3.5.0
+    - python-dateutil==2.8.2
+    - requests==2.32.0
+    - scipy==1.11.4
+    - shiboken6==6.5.3
+    - six==1.16.0
+    - snowballstemmer==2.2.0
+    - Sphinx==7.2.6
+    - sphinx-rtd-theme==2.0.0
+    - sphinxcontrib-applehelp==1.0.7
+    - sphinxcontrib-devhelp==1.0.5
+    - sphinxcontrib-htmlhelp==2.0.4
+    - sphinxcontrib-jquery==4.1
+    - sphinxcontrib-jsmath==1.0.1
+    - sphinxcontrib-qthelp==1.0.6
+    - sphinxcontrib-serializinghtml==1.1.9
+    - tomli==2.0.1
+    - tomlkit==0.12.3
+    - typing_extensions==4.9.0
+    - urllib3==2.2.2
+    - wrapt==1.16.0
+    - zipp==3.19.1
\ No newline at end of file
diff --git a/src/open_vp_cal/__init__.py b/src/open_vp_cal/__init__.py
index 488842d..30d954f 100644
--- a/src/open_vp_cal/__init__.py
+++ b/src/open_vp_cal/__init__.py
@@ -15,7 +15,7 @@
 Init Module defines a few module level variables
 """
 
-__version__ = "1.1.0"
+__version__ = "1.2.0"
 __authors__ = [
     "Adam Davis", "Adrian Pueyo", "Carol Payne", "Francesco Luigi Giardiello", "Daniel Heckenberg"
 ]
diff --git a/src/open_vp_cal/framework/auto_roi.py b/src/open_vp_cal/framework/auto_roi.py
index 86de718..0c2d019 100644
--- a/src/open_vp_cal/framework/auto_roi.py
+++ b/src/open_vp_cal/framework/auto_roi.py
@@ -19,10 +19,11 @@
 import sys
 from typing import List
 
+
 from open_vp_cal.core.utils import clamp
 from open_vp_cal.led_wall_settings import LedWallSettings
-
+from open_vp_cal.imaging import imaging_utils
 from open_vp_cal.framework.sample_patch import BaseSamplePatch
 from open_vp_cal.framework.identify_separation import SeparationResults
 from open_vp_cal.core import constants
@@ -32,6 +33,7 @@ class AutoROIResults:
     """
     Class to store the results of the roi detection
     """
+
    def __init__(self):
        """
        Initialize an instance of AutoROIResults.
        """
@@ -144,14 +146,17 @@ class AutoROI(BaseSamplePatch):
     The main class which deals with identifying the region of interest within the image sequence which we want to extract
     """
-    def __init__(self, led_wall_settings: LedWallSettings, separation_results: SeparationResults):
+
+    def __init__(self, led_wall_settings: LedWallSettings,
+                 separation_results: SeparationResults):
         """
         Initialize an instance of AutoROI
 
         Args:
             led_wall_settings: The LED wall we want to detect the roi for
             separation_results: The results of the separation detection for the LED wall sequence
         """
-        super().__init__(led_wall_settings, separation_results, constants.PATCHES.DISTORT_AND_ROI)
+        super().__init__(led_wall_settings, separation_results,
+                         constants.PATCHES.DISTORT_AND_ROI)
 
     def run(self) -> AutoROIResults:
         """
@@ -166,12 +171,22 @@ def run(self) -> AutoROIResults:
         if first_patch_frame > self.led_wall.sequence_loader.end_frame:
             return results
 
-        frame = self.led_wall.sequence_loader.get_frame(first_patch_frame + self.trim_frames)
+        frame = self.led_wall.sequence_loader.get_frame(
+            first_patch_frame + self.trim_frames)
+
         pixel_buffer = 5
         detection_threshold = 1.7
-        for y_pos in range(frame.image_buf.spec().height):
-            for x_pos in range(frame.image_buf.spec().width):
-                pixel = frame.image_buf.getpixel(x_pos, y_pos)
+
+        # Create the white balance matrix
+        white_balance_matrix = self.get_white_balance_matrix_from_slate()
+
+        # Apply the white balance matrix to the frame
+        balanced_image = imaging_utils.apply_matrix_to_img_buf(
+            frame.image_buf, white_balance_matrix
+        )
+        for y_pos in range(balanced_image.spec().height):
+            for x_pos in range(balanced_image.spec().width):
+                pixel = balanced_image.getpixel(x_pos, y_pos)
                 red = clamp(pixel[0], 0, sys.float_info.max)
                 green = clamp(pixel[1], 0, sys.float_info.max)
                 blue = clamp(pixel[2], 0, sys.float_info.max)
@@ -183,12 +198,14 @@ def run(self) -> AutoROIResults:
 
                 if green > results.green_value:
                     if green > max(red, blue) * detection_threshold:
-                        results.green_pixel = (x_pos - pixel_buffer, y_pos + pixel_buffer)
+                        results.green_pixel = (
+                            x_pos - pixel_buffer, y_pos + pixel_buffer)
                         results.green_value = green
 
                 if blue > results.blue_value:
                     if blue > max(red, green) * detection_threshold:
-                        results.blue_pixel = (x_pos + pixel_buffer, y_pos - pixel_buffer)
+                        results.blue_pixel = (
+                            x_pos + pixel_buffer, y_pos - pixel_buffer)
                         results.blue_value = blue
 
                 white = (red + green + blue) / 3
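
As an illustration of the auto_roi.py change above: the white balance matrix is diagonal, so applying it to a pixel is a per-channel multiply that scales red and blue towards green before the brightest red, green and blue patches are searched for. A minimal NumPy-only sketch of that per-pixel effect, using made-up slate averages rather than the OpenImageIO buffers the real code operates on:

    import numpy as np

    # Hypothetical average RGB of the slate frame's center region
    average_rgb = np.array([0.42, 0.50, 0.61])

    # Diagonal gains: scale red and blue towards green, leave green untouched
    white_balance_matrix = np.diag([
        average_rgb[1] / average_rgb[0],  # red gain
        1.0,                              # green gain
        average_rgb[1] / average_rgb[2],  # blue gain
    ])

    # Applying the matrix to a row-vector pixel is a per-channel multiply
    pixel = np.array([0.30, 0.55, 0.70])
    balanced = pixel @ white_balance_matrix
    print(balanced)  # ~[0.357 0.55 0.574]: the channels now sit on a comparable scale
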
diff --git a/src/open_vp_cal/framework/generation.py b/src/open_vp_cal/framework/generation.py
index b7ece57..9929d88 100644
--- a/src/open_vp_cal/framework/generation.py
+++ b/src/open_vp_cal/framework/generation.py
@@ -931,7 +931,7 @@ def _add_slate_inner_squares(
         """
         patch_roi = Oiio.ROI(start_x, start_x + patch_width, start_y, start_y + patch_height)
         inner_edge_roi = self.reduce_roi(patch_roi, 1)
-        inner_patch_roi = self.reduce_roi(inner_edge_roi, 1)
+        inner_patch_roi = self.reduce_roi(inner_edge_roi, 0.5)
         Oiio.ImageBufAlgo.fill(patch, (self.percent_18_lum, self.percent_18_lum, self.percent_18_lum),
                                roi=patch_roi)
         Oiio.ImageBufAlgo.fill(patch, (0.0, 0.0, 0.0), roi=inner_edge_roi)
         Oiio.ImageBufAlgo.fill(patch, (self.percent_18_lum, self.percent_18_lum, self.percent_18_lum),
diff --git a/src/open_vp_cal/framework/identify_separation.py b/src/open_vp_cal/framework/identify_separation.py
index d978a33..061881a 100644
--- a/src/open_vp_cal/framework/identify_separation.py
+++ b/src/open_vp_cal/framework/identify_separation.py
@@ -127,10 +127,22 @@ def _find_first_red_and_green_frames(self) -> None:
         distances = []
         previous_mean_frame = None
+
+        slate_frame = self.led_wall.sequence_loader.get_frame(
+            self.led_wall.sequence_loader.start_frame
+        )
+        white_balance_matrix = imaging_utils.calculate_white_balance_matrix_from_img_buf(
+            slate_frame.image_buf)
+
         for frame in self.led_wall.sequence_loader:
             # Load the image from the frame
             image = frame.extract_roi(self.led_wall.roi)
 
+            # Apply the white balance matrix to the frame
+            image = imaging_utils.apply_matrix_to_img_buf(
+                image, white_balance_matrix
+            )
+
             # Compute the average for all the values which are above the initial average
             mean_color, _ = imaging_utils.get_average_value_above_average(image)
             distance = 0
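
For context on the identify_separation.py change above: the white balance matrix is built once from the slate frame by the calculate_white_balance_matrix_from_img_buf helper added to imaging_utils.py later in this patch, then reused for every frame's ROI. A rough standalone NumPy sketch of that derivation (the function name and the random stand-in slate below are illustrative only; the real helper works on an OpenImageIO buffer):

    import numpy as np

    def white_balance_matrix_from_image(image: np.ndarray,
                                        remove_percent: float = 0.20) -> np.ndarray:
        """Crop a border off the image, average the center, and build diagonal gains."""
        height, width, _ = image.shape
        crop_h = int(height * remove_percent)
        crop_w = int(width * remove_percent)

        # Average only the center section so the border does not skew the result
        center = image[crop_h:height - crop_h, crop_w:width - crop_w]
        average_rgb = np.mean(center, axis=(0, 1))

        # Gains that bring red and blue onto the green channel's scale
        return np.diag([average_rgb[1] / average_rgb[0],
                        1.0,
                        average_rgb[1] / average_rgb[2]])

    # Stand-in for a slate frame: a 100 x 200 image with a warm (red-heavy) cast
    rng = np.random.default_rng(0)
    slate = rng.uniform(0.1, 0.9, size=(100, 200, 3)) * [1.3, 1.0, 0.8]
    print(np.round(white_balance_matrix_from_image(slate), 3))
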
diff --git a/src/open_vp_cal/framework/sample_patch.py b/src/open_vp_cal/framework/sample_patch.py
index aa0ab2a..bfdfbad 100644
--- a/src/open_vp_cal/framework/sample_patch.py
+++ b/src/open_vp_cal/framework/sample_patch.py
@@ -17,9 +17,10 @@
 """
 import threading
 import numpy as np
-from colour_checker_detection.detection.segmentation import \
-    detect_colour_checkers_segmentation
+from colour_checker_detection.detection.segmentation import (
+    detect_colour_checkers_segmentation)
 
+from open_vp_cal.core.utils import find_factors_pairs
 from open_vp_cal.imaging import imaging_utils
 from open_vp_cal.core.structures import SamplePatchResults
 from open_vp_cal.framework.identify_separation import SeparationResults
@@ -81,6 +82,20 @@ def calculate_first_and_last_patch_frame(self) -> tuple[int, int]:
         self.trim_frames = trim_frames
         return first_patch_frame, last_patch_frame
 
+    def get_white_balance_matrix_from_slate(self) -> np.ndarray:
+        """ Get the white balance matrix from the slate frame
+
+        Returns:
+            np.ndarray: The white balance matrix
+
+        """
+        slate_frame = self.led_wall.sequence_loader.get_frame(
+            self.led_wall.sequence_loader.start_frame
+        )
+        white_balance_matrix = imaging_utils.calculate_white_balance_matrix_from_img_buf(
+            slate_frame.image_buf)
+        return white_balance_matrix
+
 
 class SamplePatch(BaseSamplePatch):
     """
@@ -239,21 +254,60 @@ def _sample_patch(self) -> None:
         # We trim a number of frames off either side of the patch to ensure we remove multiplexing
         sample_results = SamplePatchResults()
         samples = []
+        white_balance_matrix = self.get_white_balance_matrix_from_slate()
         for frame_num in range(first_patch_frame + self.trim_frames, (last_patch_frame - self.trim_frames) + 1):
             frame = self.led_wall.sequence_loader.get_frame(frame_num)
-            section = frame.extract_roi(self.led_wall.roi)
             sample_results.frames.append(frame)
-            section_np_array = imaging_utils.image_buf_to_np_array(section)
-            for colour_checker_swatches_data in detect_colour_checkers_segmentation(
-                    section_np_array, additional_data=True):
-                swatch_colours, _, _ = (
-                    colour_checker_swatches_data.values)
+            # Extract our region
+            section_orig = frame.extract_roi(self.led_wall.roi)
+
+            # White balance the images so we increase the detection likelihood of
+            # success
+            section_orig = imaging_utils.apply_matrix_to_img_buf(
+                section_orig, white_balance_matrix
+            )
+
+            section_display_np_array = imaging_utils.image_buf_to_np_array(section_orig)
+            imaging_utils.apply_color_converstion_to_np_array(
+                section_display_np_array,
+                str(self.led_wall.input_plate_gamut),
+                "ACEScct",
+            )
+
+            # Run the detections
+            detections = detect_colour_checkers_segmentation(
+                section_display_np_array, additional_data=True)
+
+            for colour_checker_swatches_data in detections:
+                # Get the swatch colours
+                swatch_colours, _, _ = colour_checker_swatches_data.values
+                swatch_colours = np.array(swatch_colours, dtype=np.float32)
+
+                # Reshape the number of swatches from a 24, 3 array to an x, y, 3 array
+                num_swatches = swatch_colours.shape[0]
+                factor_pairs = find_factors_pairs(num_swatches)
+                x, y = factor_pairs[0]
+                array_x_y_3 = swatch_colours.reshape(x, y, 3)
+
+                # Convert the colours back to the input plate gamut
+                imaging_utils.apply_color_converstion_to_np_array(
+                    array_x_y_3,
+                    "ACEScct",
+                    str(self.led_wall.input_plate_gamut))
+
+                # Inverse the white balance back to the original values
+                inv_wb_matrix = np.linalg.inv(white_balance_matrix)
+                array_x_y_3 = array_x_y_3 @ inv_wb_matrix
+
+                # Reshape the array back to a 24, 3 array
+                swatch_colours = array_x_y_3.reshape(num_swatches, 3)
                 samples.append(swatch_colours)
 
-        # Compute the mean for each tuple index across all tuples, if the detection fails and we get nans, then we
-        # replace the nans with black patches as these are not used in the calibration directly
+        # Compute the mean for each tuple index across all tuples, if the
+        # detection fails, and we get nans, then we replace the nans with black patches
+        # as these are not used in the calibration directly
         averaged_tuple = np.mean(np.array(samples), axis=0)
         if not np.isnan(averaged_tuple).any():
             sample_results.samples = averaged_tuple.tolist()
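
On the _sample_patch changes above: the detected swatches come back as an N x 3 array, are reshaped to an x, y, 3 "image" so the colour conversion helper can operate on them, and are then converted back to the input plate gamut and un-white-balanced with the inverse matrix before being flattened back to N x 3. A hedged NumPy-only sketch of just the reshape and inverse-white-balance round trip, assuming 24 swatches factor as 4 x 6 (find_factors_pairs is an OpenVPCal helper and is not re-implemented here) and skipping the colour space conversion:

    import numpy as np

    num_swatches = 24
    swatch_colours = np.linspace(0.05, 0.95, num_swatches * 3,
                                 dtype=np.float32).reshape(num_swatches, 3)

    # Illustrative white balance gains, standing in for the slate-derived matrix
    white_balance_matrix = np.diag([1.19, 1.0, 0.82])

    # The samples were taken from a white-balanced image, so simulate that here
    balanced = swatch_colours @ white_balance_matrix

    # Reshape N x 3 -> x, y, 3 so image-oriented helpers can treat it as an image
    x, y = 4, 6  # assumed factor pair for 24 swatches
    as_image = balanced.reshape(x, y, 3)

    # Undo the white balance and flatten back to N x 3
    inv_wb_matrix = np.linalg.inv(white_balance_matrix)
    restored = (as_image @ inv_wb_matrix).reshape(num_swatches, 3)

    print(np.allclose(restored, swatch_colours, atol=1e-6))  # True
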
diff --git a/src/open_vp_cal/framework/validation.py b/src/open_vp_cal/framework/validation.py
index 6e968fb..13f4f35 100644
--- a/src/open_vp_cal/framework/validation.py
+++ b/src/open_vp_cal/framework/validation.py
@@ -74,7 +74,7 @@ def exposure_validation(calibration_results: Dict) -> ValidationResult:
     if measured_18_percent < quarter_stop_down_18_percent or measured_18_percent > quarter_stop_up_18_percent:
         result.status = ValidationStatus.FAIL
         result.message = (
-            f"The Measured Exposure: {measured_18_percent}\n"
+            f"The Exposure of the 18% Patch is measured at {round(measured_18_percent * 100, 1)}%\n"
             "It seems that you have not exposed the calibration patches correctly. "
             "Please ensure to expose the first 18% patch correctly using the camera false colour or light meter."
         )
@@ -87,8 +87,7 @@ def exposure_validation(calibration_results: Dict) -> ValidationResult:
     else:
         result.status = ValidationStatus.WARNING
         result.message = (
-            f"The Measured Exposure: {measured_18_percent} is not ideal\n"
-            "It seems that you have not exposed the calibration patches correctly. "
+            f"The Exposure of the 18% Patch is measured at {round(measured_18_percent * 100, 1)}%, which is not ideal.\n"
             "Please ensure to expose the first 18% patch correctly using the camera false colour or light meter."
         )
@@ -202,7 +201,7 @@ def check_scaled_18_percent(calibration_results: Dict) -> ValidationResult:
     if not is_between:
         result.status = ValidationStatus.FAIL
         result.message = (
-            f"When scaled the measured 18 percent patch is not within a reasonable range: {scaled_18_percent_nits}."
+            f"When scaled, the measured 18 percent patch is not within a reasonable range: {round(scaled_18_percent_nits, 1)} nits."
             " Please check that the wall settings match the actual peak luminance of your wall also check your "
             "imaging chain from content engine to LED processor and re shoot the plates"
         )
diff --git a/src/open_vp_cal/imaging/imaging_utils.py b/src/open_vp_cal/imaging/imaging_utils.py
index 8a5b7f0..682084d 100644
--- a/src/open_vp_cal/imaging/imaging_utils.py
+++ b/src/open_vp_cal/imaging/imaging_utils.py
@@ -933,3 +933,56 @@ def is_within_range(value: float, target: float, x: float) -> bool:
         bool: True if value is within x units of target, False otherwise.
     """
     return abs(value - target) <= x
+
+
+def calculate_white_balance_matrix_from_img_buf(
+        image_buf: Oiio.ImageBuf, remove_percent: float = 0.20) -> np.array:
+    """ Takes a given image buffer, removes the given percentage of pixels
+    (20% by default) from each edge of the image, and returns a white balance
+    matrix based on the average RGB values of the center section
+
+    Args:
+        image_buf: The image buffer to calculate the white balance matrix from
+        remove_percent: The percentage of pixels to remove from each edge of the image
+
+    Returns:
+        np.array: The white balance matrix
+
+    """
+    image_np_array = image_buf_to_np_array(image_buf)
+    height, width, _ = image_np_array.shape
+    remove_height = int(height * remove_percent)
+    remove_width = int(width * remove_percent)
+
+    # Slice the array to remove the given percentage from each side
+    center_section = image_np_array[
+        remove_height:height - remove_height,
+        remove_width:width - remove_width
+    ]
+
+    average_rgb = np.mean(center_section, axis=(0, 1))
+    red_mult_val = average_rgb[1] / average_rgb[0]
+    blue_mult_val = average_rgb[1] / average_rgb[2]
+    white_balance_matrix = np.asarray(
+        [[red_mult_val, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, blue_mult_val]])
+    return white_balance_matrix
+
+
+def apply_matrix_to_img_buf(
+        image_buf: Oiio.ImageBuf, matrix: np.array) -> Oiio.ImageBuf:
+    """ Applies the given matrix to the image buffer, for instance a white balancing
+    matrix
+
+    Args:
+        image_buf: The image buffer to apply the matrix to
+        matrix: The matrix to apply to the image buffer
+
+    Returns:
+        Oiio.ImageBuf: The image buffer with the matrix applied
+
+    """
+    frame_np_array = image_buf_to_np_array(image_buf)
+    image_reshaped = frame_np_array.reshape((-1, 3))
+    white_balanced_image = image_reshaped @ matrix
+    white_balanced_image = white_balanced_image.reshape(frame_np_array.shape)
+    return img_buf_from_numpy_array(white_balanced_image)
diff --git a/src/open_vp_cal/led_wall_settings.py b/src/open_vp_cal/led_wall_settings.py
index 5e7fee5..272537e 100644
--- a/src/open_vp_cal/led_wall_settings.py
+++ b/src/open_vp_cal/led_wall_settings.py
@@ -60,7 +60,7 @@ def __init__(self, project_settings: "ProjectSettings", name="Wall1"):
             constants.LedWallSettingsKeys.WHITE_POINT_OFFSET_SOURCE: "",
             constants.LedWallSettingsKeys.IS_VERIFICATION_WALL: False,
             constants.LedWallSettingsKeys.VERIFICATION_WALL: "",
-            constants.LedWallSettingsKeys.AVOID_CLIPPING: True
+            constants.LedWallSettingsKeys.AVOID_CLIPPING: False
         }
 
         self._led_settings = copy.deepcopy(self._default_led_settings)
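
A note on the exposure messages in validation.py above: judging by the scaling to a percentage in the f-string, measured_18_percent is stored as a normalised fraction, so the rounding has to happen after multiplying by 100; rounding the fraction first would quantise the reported value to 10% steps. A small worked example (the measured value is hypothetical):

    measured_18_percent = 0.183  # hypothetical measured value, stored as a fraction

    # Round after scaling: keeps one decimal place of the percentage
    print(f"{round(measured_18_percent * 100, 1)}%")  # 18.3%

    # Rounding before scaling would quantise the readout to 10% steps
    print(f"{round(measured_18_percent, 1) * 100}%")  # 20.0%
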
diff --git a/tests/test_open_vp_cal/resources/export/patches/OpenVPCal_Wall1_ITU_R_BT_2020_ST_2084/OpenVPCal_Wall1_ITU_R_BT_2020_ST_2084.000000.exr b/tests/test_open_vp_cal/resources/export/patches/OpenVPCal_Wall1_ITU_R_BT_2020_ST_2084/OpenVPCal_Wall1_ITU_R_BT_2020_ST_2084.000000.exr
index 32ee471..9c76aaa 100644
--- a/tests/test_open_vp_cal/resources/export/patches/OpenVPCal_Wall1_ITU_R_BT_2020_ST_2084/OpenVPCal_Wall1_ITU_R_BT_2020_ST_2084.000000.exr
+++ b/tests/test_open_vp_cal/resources/export/patches/OpenVPCal_Wall1_ITU_R_BT_2020_ST_2084/OpenVPCal_Wall1_ITU_R_BT_2020_ST_2084.000000.exr
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:08ca6cb664101923e74c82804e81d197d3ba04e20006c2ff203b2a47715b3f6c
+oid sha256:6cffcf4c732702a14fd5dc4829dc27c0d0fd6e3ff88d9b4eaf22ffeda402ba9f
 size 49801538