diff --git a/calibrators/intrinsic_camera_calibrator/intrinsic_camera_calibrator/config/intrinsics_calibrator.yaml b/calibrators/intrinsic_camera_calibrator/intrinsic_camera_calibrator/config/intrinsics_calibrator.yaml index 740178bd..51a73517 100644 --- a/calibrators/intrinsic_camera_calibrator/intrinsic_camera_calibrator/config/intrinsics_calibrator.yaml +++ b/calibrators/intrinsic_camera_calibrator/intrinsic_camera_calibrator/config/intrinsics_calibrator.yaml @@ -1,5 +1,51 @@ -board_type: dot_board +# yamllint disable-line rule:truthy +board_type: chess_board board_parameters: cols: 8 rows: 6 cell_size: 0.1 + +calibration_parameters: + use_ransac_pre_rejection: true # yamllint disable-line rule:truthy + pre_rejection_iterations: 100 + pre_rejection_min_hypotheses: 6 + pre_rejection_max_rms_error: 0.35 + max_calibration_samples: 80 + max_fast_calibration_samples: 20 + use_entropy_maximization_subsampling: true # yamllint disable-line rule:truthy + subsampling_pixel_cells: 16 + subsampling_tilt_resolution: 15.0 + subsampling_max_tilt_deg: 45.0 + use_post_rejection: true # yamllint disable-line rule:truthy + post_rejection_max_rms_error: 0.35 + plot_calibration_data_statistics: true # yamllint disable-line rule:truthy + plot_calibration_results_statistics: true # yamllint disable-line rule:truthy + viz_pixel_cells: 16 + viz_tilt_resolution: 15.0 + viz_max_tilt_deg: 45.0 + viz_z_cells: 12 + radial_distortion_coefficients: 5 + +calibrator_type: opencv + +data_collector: + max_samples: 500 + decorrelate_eval_samples: 5 # cspell:disable-line + max_allowed_tilt: 45.0 + filter_by_speed: true # yamllint disable-line rule:truthy + max_allowed_pixel_speed: 10.0 + max_allowed_speed: 0.1 + filter_by_reprojection_error: true # yamllint disable-line rule:truthy + max_allowed_max_reprojection_error: 0.5 + max_allowed_rms_reprojection_error: 0.3 + filter_by_2d_redundancy: true # yamllint disable-line rule:truthy + min_normalized_2d_center_difference: 0.05 + min_normalized_skew_difference: 0.05 + min_normalized_2d_size_difference: 0.05 + filter_by_3d_redundancy: true # yamllint disable-line rule:truthy + min_3d_center_difference: 1.0 + min_tilt_difference: 15.0 + heatmap_cells: 16 + rotation_heatmap_angle_res: 10 + point_2d_hist_bins: 20 + point_3d_hist_bins: 20 diff --git a/calibrators/intrinsic_camera_calibrator/intrinsic_camera_calibrator/intrinsic_camera_calibrator/board_detections/array_board_detection.py b/calibrators/intrinsic_camera_calibrator/intrinsic_camera_calibrator/intrinsic_camera_calibrator/board_detections/array_board_detection.py index dfbd8672..d201a9cc 100644 --- a/calibrators/intrinsic_camera_calibrator/intrinsic_camera_calibrator/intrinsic_camera_calibrator/board_detections/array_board_detection.py +++ b/calibrators/intrinsic_camera_calibrator/intrinsic_camera_calibrator/intrinsic_camera_calibrator/board_detections/array_board_detection.py @@ -62,23 +62,55 @@ def squared_error(p, p1, p2): p = p - p1 p2 = p2 - p1 p2 /= np.linalg.norm(p2) - return np.abs(np.power(np.linalg.norm(p), 2) - np.power(np.dot(p, p2), 2)) + squared_distance = np.abs(np.power(np.linalg.norm(p), 2) - np.power(np.dot(p, p2), 2)) + return squared_distance - if self._cached_linear_error_rms is not None: - return self._cached_linear_error_rms + if ( + self._cached_linear_error_rows_rms is not None + and self._cached_linear_error_cols_rms is not None + ): + return self._cached_linear_error_rows_rms, self._cached_linear_error_cols_rms - error = 0 + error_rows = 0 + pct_err_rows = 0.0 for j in range(self.rows): p1 = 
self.image_points[j, 0] p2 = self.image_points[j, -1] + points_dist = np.linalg.norm(p2 - p1) for i in range(1, self.cols - 1): p = self.image_points[j, i] - error += squared_error(p, p1, p2) + sq_error = squared_error(p, p1, p2) + error_rows += sq_error + pct_err_rows += np.sqrt(sq_error) / points_dist - self._cached_linear_error_rms = np.sqrt(error / (self.rows * (self.cols - 2))) - return self._cached_linear_error_rms + self._cached_linear_error_rows_rms = np.sqrt(error_rows / (self.rows * (self.cols - 2))) + pct_err_rows = pct_err_rows / (self.rows * (self.cols - 2)) + + error_cols = 0 + pct_err_cols = 0.0 + + for j in range(self.cols): + p1 = self.image_points[0, j] + p2 = self.image_points[-1, j] + points_dist = np.linalg.norm(p2 - p1) + + for i in range(1, self.rows - 1): + p = self.image_points[i, j] + sq_error = squared_error(p, p1, p2) + error_cols += sq_error + pct_err_cols += np.sqrt(sq_error) / points_dist + + self._cached_linear_error_cols_rms = np.sqrt(error_cols / (self.cols * (self.rows - 2))) + pct_err_cols = pct_err_cols / (self.cols * (self.rows - 2)) + + return ( + self._cached_linear_error_rows_rms, + self._cached_linear_error_cols_rms, + pct_err_rows, + pct_err_cols, + ) def get_flattened_cell_sizes(self): if self._cached_flattened_cell_sizes is not None: @@ -96,3 +128,27 @@ def get_flattened_cell_sizes(self): self._cached_flattened_cell_sizes = cell_sizes.flatten() return self._cached_flattened_cell_sizes + + def get_aspect_ratio_pattern(self) -> float: + """Get the aspect ratio using the calibration pattern, which should be square.""" + tilt, pan = self.get_rotation_angles() + acceptance_angle = 10 + + # don't update if the detection has large angles; the calculation will not be accurate + if np.abs(tilt) > acceptance_angle or np.abs(pan) > acceptance_angle: + return 0.0 + # Calculate distances between adjacent corners + aspect_ratio = 0 + count = 0 + for j in range(self.rows - 1): + for i in range(self.cols - 1): + p = self.image_points[j, i] + point_col = self.image_points[j + 1, i] + point_row = self.image_points[j, i + 1] + horizontal_distance = np.linalg.norm(p - point_row) + vertical_distance = np.linalg.norm(p - point_col) + aspect_ratio = aspect_ratio + (horizontal_distance / vertical_distance) + count += 1 + aspect_ratio = aspect_ratio / ((self.rows - 1) * (self.cols - 1)) + + return aspect_ratio diff --git a/calibrators/intrinsic_camera_calibrator/intrinsic_camera_calibrator/intrinsic_camera_calibrator/board_detections/board_detection.py b/calibrators/intrinsic_camera_calibrator/intrinsic_camera_calibrator/intrinsic_camera_calibrator/board_detections/board_detection.py index bb02b831..9e993441 100644 --- a/calibrators/intrinsic_camera_calibrator/intrinsic_camera_calibrator/intrinsic_camera_calibrator/board_detections/board_detection.py +++ b/calibrators/intrinsic_camera_calibrator/intrinsic_camera_calibrator/intrinsic_camera_calibrator/board_detections/board_detection.py @@ -44,6 +44,8 @@ def __init__( self._cached_normalized_skew = None self._cached_normalized_size = None self._cached_linear_error_rms = None + self._cached_linear_error_rows_rms = None + self._cached_linear_error_cols_rms = None self._cached_flattened_cell_sizes = None self._cached_center_2d = None @@ -100,6 +102,14 @@ def get_linear_error_rms(self) -> float: """Return RMS error product of the projection of the lines of each row of the detection into the line produced by the first and line point of each row.""" raise NotImplementedError + def get_aspect_ratio_pattern(self) -> float: + """Return
aspect ratio obtained from the mean of the horizontal and vertical distances of the calibration pattern.""" + raise NotImplementedError + + def restart_linearity_heatmap(self): + """Restart linearity heatmap.""" + raise NotImplementedError + def get_center_2d(self) -> np.array: """Return the center of detection in the image.""" if self._cached_center_2d is not None: diff --git a/calibrators/intrinsic_camera_calibrator/intrinsic_camera_calibrator/intrinsic_camera_calibrator/calibrators/opencv_calibrator.py b/calibrators/intrinsic_camera_calibrator/intrinsic_camera_calibrator/intrinsic_camera_calibrator/calibrators/opencv_calibrator.py index 86ce559b..6d58cb23 100644 --- a/calibrators/intrinsic_camera_calibrator/intrinsic_camera_calibrator/intrinsic_camera_calibrator/calibrators/opencv_calibrator.py +++ b/calibrators/intrinsic_camera_calibrator/intrinsic_camera_calibrator/intrinsic_camera_calibrator/calibrators/opencv_calibrator.py @@ -38,6 +38,8 @@ def __init__(self, lock: threading.RLock, cfg: Dict = {}): ) self.fix_principal_point = Parameter(bool, value=False, min_value=False, max_value=True) self.fix_aspect_ratio = Parameter(bool, value=False, min_value=False, max_value=True) + self.use_lu_decomposition = Parameter(bool, value=False, min_value=False, max_value=True) + self.use_qr_decomposition = Parameter(bool, value=False, min_value=False, max_value=True) self.set_parameters(**cfg) @@ -54,6 +56,8 @@ def _calibration_impl(self, detections: List[BoardDetection]) -> CameraModel: flags |= cv2.CALIB_FIX_K3 if self.radial_distortion_coefficients.value < 3 else 0 flags |= cv2.CALIB_FIX_K2 if self.radial_distortion_coefficients.value < 2 else 0 flags |= cv2.CALIB_FIX_K1 if self.radial_distortion_coefficients.value < 1 else 0 + flags |= cv2.CALIB_USE_LU if self.use_lu_decomposition.value else 0 + flags |= cv2.CALIB_USE_QR if self.use_qr_decomposition.value else 0 height = detections[0].get_image_height() width = detections[0].get_image_width() diff --git a/calibrators/intrinsic_camera_calibrator/intrinsic_camera_calibrator/intrinsic_camera_calibrator/camera_calibrator.py b/calibrators/intrinsic_camera_calibrator/intrinsic_camera_calibrator/intrinsic_camera_calibrator/camera_calibrator.py index f77df4d9..3814c6ee 100644 --- a/calibrators/intrinsic_camera_calibrator/intrinsic_camera_calibrator/intrinsic_camera_calibrator/camera_calibrator.py +++ b/calibrators/intrinsic_camera_calibrator/intrinsic_camera_calibrator/intrinsic_camera_calibrator/camera_calibrator.py @@ -123,6 +123,7 @@ def __init__(self, cfg): self.image_view_mode = ImageViewMode.SOURCE_UNRECTIFIED self.paused = False + self.last_detection = None for calibrator_type in CalibratorEnum: calibrator_cfg = defaultdict() @@ -165,41 +166,6 @@ def __init__(self, cfg): self.right_menu_widget.setFixedWidth(300) self.right_menu_layout = QVBoxLayout(self.right_menu_widget) self.right_menu_layout.setAlignment(Qt.AlignTop) - - # Mode group - self.make_mode_group() - - # Calibration group - self.make_calibration_group() - - # Detector group - self.make_detector_group() - - # Detections group - self.make_detection_group() - - # Data collection group - self.make_data_collection_group() - - # Visualization group - self.make_visualization_group() - - # self.menu_layout.addWidget(label) - self.left_menu_layout.addWidget(self.calibration_group) - self.left_menu_layout.addWidget(self.detector_options_group) - self.left_menu_layout.addWidget(self.raw_detection_results_group) - self.left_menu_layout.addWidget(self.single_shot_detection_results_group) - - 
self.right_menu_layout.addWidget(self.mode_options_group) - self.right_menu_layout.addWidget(self.data_collection_group) - self.right_menu_layout.addWidget(self.visualization_options_group) - - self.layout.addWidget(self.graphics_view) - - self.layout.addWidget(self.left_menu_widget) - self.layout.addWidget(self.right_menu_widget) - - self.show() self.setEnabled(False) self.initialization_view = InitializationView(self, cfg) @@ -455,13 +421,13 @@ def make_detection_group(self): self.raw_detection_results_group = QGroupBox("Detection results") self.raw_detection_results_group.setFlat(True) - self.single_shot_detection_results_group = QGroupBox( - "Single-shot calibration detection results" - ) + self.single_shot_detection_results_group = QGroupBox("Single-shot detection results") self.single_shot_detection_results_group.setFlat(True) self.raw_detection_label = QLabel("Detected:") - self.raw_linear_error_rms_label = QLabel("Linear error (rms):") + self.raw_linear_error_rows_rms_label = QLabel("Linear error rows (rms):") + self.raw_linear_error_cols_rms_label = QLabel("Linear error cols (rms):") + self.aspect_ratio_label = QLabel("Aspect Ratio:") self.rough_tilt_label = QLabel("Rough tilt:") self.rough_angles_label = QLabel("Rough angles:") self.rough_position_label = QLabel("Rough position:") @@ -484,7 +450,9 @@ def make_detection_group(self): raw_detection_results_layout.addWidget(self.rough_position_label) raw_detection_results_layout.addWidget(self.skew_label) raw_detection_results_layout.addWidget(self.relative_area_label) - raw_detection_results_layout.addWidget(self.raw_linear_error_rms_label) + raw_detection_results_layout.addWidget(self.raw_linear_error_rows_rms_label) + raw_detection_results_layout.addWidget(self.raw_linear_error_cols_rms_label) + raw_detection_results_layout.addWidget(self.aspect_ratio_label) single_shot_detection_results_layout.addWidget( self.single_shot_reprojection_error_max_label @@ -598,10 +566,28 @@ def draw_evaluation_heatmap_callback(value): self.image_view.set_draw_evaluation_heatmap(value == Qt.Checked) self.should_process_image.emit() + def draw_linearity_heatmap_callback(value): + self.image_view.set_draw_linearity_heatmap(value == Qt.Checked) + self.should_process_image.emit() + + def on_restart_linearity_heatmap_clicked(): + print("restart_lin_heatmap", flush=True) + self.data_collector.restart_linearity_heatmap() + + self.restart_linearity_heatmap_button = QPushButton("Clear heatmap linearity") + self.restart_linearity_heatmap_button.clicked.connect(on_restart_linearity_heatmap_clicked) + self.draw_evaluation_heatmap_checkbox = QCheckBox("Draw evaluation occupancy") self.draw_evaluation_heatmap_checkbox.setChecked(False) self.draw_evaluation_heatmap_checkbox.stateChanged.connect(draw_evaluation_heatmap_callback) + self.draw_linearity_heatmap_checkbox = QCheckBox("Draw linearity error") + self.draw_linearity_heatmap_checkbox.setChecked(False) + self.draw_linearity_heatmap_checkbox.stateChanged.connect(draw_linearity_heatmap_callback) + + self.draw_indicators_checkbox = QCheckBox("Draw indicators") + self.draw_indicators_checkbox.setChecked(False) + rendering_alpha_label = QLabel("Drawings alpha:") self.rendering_alpha_spinbox = QDoubleSpinBox() @@ -621,8 +607,17 @@ def draw_evaluation_heatmap_callback(value): self.undistortion_alpha_spinbox.valueChanged.connect( lambda: self.should_process_image.emit() ) + self.undistortion_alpha_spinbox.valueChanged.connect(on_restart_linearity_heatmap_clicked) self.undistortion_alpha_spinbox.setEnabled(False) + 
indicators_alpha_label = QLabel("Indicators alpha:") + + self.indicators_alpha_spinbox = QDoubleSpinBox() + self.indicators_alpha_spinbox.setDecimals(2) + self.indicators_alpha_spinbox.setRange(0.0, 1.0) + self.indicators_alpha_spinbox.setSingleStep(0.05) + self.indicators_alpha_spinbox.setValue(1.0) + visualization_options_layout = QVBoxLayout() visualization_options_layout.setAlignment(Qt.AlignTop) visualization_options_layout.addWidget(draw_detection_checkbox) @@ -630,10 +625,15 @@ def draw_evaluation_heatmap_callback(value): visualization_options_layout.addWidget(self.draw_evaluation_points_checkbox) visualization_options_layout.addWidget(self.draw_training_heatmap_checkbox) visualization_options_layout.addWidget(self.draw_evaluation_heatmap_checkbox) + visualization_options_layout.addWidget(self.draw_linearity_heatmap_checkbox) + visualization_options_layout.addWidget(self.draw_indicators_checkbox) visualization_options_layout.addWidget(rendering_alpha_label) visualization_options_layout.addWidget(self.rendering_alpha_spinbox) visualization_options_layout.addWidget(undistortion_alpha_label) visualization_options_layout.addWidget(self.undistortion_alpha_spinbox) + visualization_options_layout.addWidget(indicators_alpha_label) + visualization_options_layout.addWidget(self.indicators_alpha_spinbox) + visualization_options_layout.addWidget(self.restart_linearity_heatmap_button) self.visualization_options_group.setLayout(visualization_options_layout) def start( @@ -649,9 +649,56 @@ def start( self.board_type = board_type self.board_parameters = board_parameters self.current_camera_model = initial_intrinsics + + # Creating the UI elements after selecting CALIBRATION or EVALUATION + # Mode group + self.make_mode_group() + + # Calibration group + if self.operation_mode == OperationMode.CALIBRATION: + self.make_calibration_group() + + # Detector group + if self.operation_mode == OperationMode.CALIBRATION: + self.make_detector_group() + + # Detections group + self.make_detection_group() + + # Data collection group + if self.operation_mode == OperationMode.CALIBRATION: + self.make_data_collection_group() + + # Visualization group + self.make_visualization_group() + + # self.menu_layout.addWidget(label) + if self.operation_mode == OperationMode.CALIBRATION: + self.left_menu_layout.addWidget(self.calibration_group) + self.left_menu_layout.addWidget(self.detector_options_group) + self.left_menu_layout.addWidget(self.raw_detection_results_group) + self.left_menu_layout.addWidget(self.single_shot_detection_results_group) + + self.right_menu_layout.addWidget(self.mode_options_group) + if self.operation_mode == OperationMode.CALIBRATION: + self.right_menu_layout.addWidget(self.data_collection_group) + self.right_menu_layout.addWidget(self.visualization_options_group) + + self.layout.addWidget(self.graphics_view) + + self.layout.addWidget(self.left_menu_widget) + self.layout.addWidget(self.right_menu_widget) + self.show() self.setEnabled(True) - self.setWindowTitle(f"Camera intrinsics calibrator ({self.data_source.get_camera_name()})") + if self.operation_mode == OperationMode.CALIBRATION: + self.setWindowTitle( + f"Camera intrinsics calibrator ({self.data_source.get_camera_name()})" + ) + if self.operation_mode == OperationMode.EVALUATION: + self.setWindowTitle( + f"Camera intrinsics Evaluation Mode ({self.data_source.get_camera_name()})" + ) logging.info("Init") logging.info(f"\tmode : {mode}") @@ -668,7 +715,25 @@ def start( ) if self.operation_mode == OperationMode.EVALUATION: - 
self.calibration_button.setEnabled(False) + # Initial state of the elements on evaluation mode + self.calibrated_camera_model = self.current_camera_model + self.image_view_type_combobox.setEnabled(True) + self.undistortion_alpha_spinbox.setEnabled(True) + self.draw_evaluation_heatmap_checkbox.setEnabled(False) + self.draw_evaluation_points_checkbox.setEnabled(False) + self.draw_training_points_checkbox.setEnabled(False) + self.draw_training_heatmap_checkbox.setEnabled(False) + self.training_sample_slider.setEnabled(False) + self.evaluation_sample_slider.setEnabled(False) + self.image_view_type_combobox.clear() + # Order of of how items are added to the combobox matters, + # default index is 0, so rectified image is added first to be default view + self.image_view_type_combobox.addItem( + ImageViewMode.SOURCE_RECTIFIED.value, ImageViewMode.SOURCE_RECTIFIED + ) + self.image_view_type_combobox.addItem( + ImageViewMode.SOURCE_UNRECTIFIED.value, ImageViewMode.SOURCE_UNRECTIFIED + ) self.detector.moveToThread(self.detector_thread) self.detector.detection_results_signal.connect(self.process_detection_results) @@ -787,6 +852,22 @@ def process_evaluation_results( def on_consumed(self): self.data_source.consumed() + def save_parameters(self, filename): + data_coll_params = self.data_collector.parameters_value() + board_params = self.board_parameters.parameters_value() + detector_params = self.detector.parameters_value() + calibrator_type = self.calibrator_type_combobox.currentData() + print(calibrator_type.value["name"], flush=True) + calib_params = self.calibrator_dict[calibrator_type].parameters_value() + with open(filename, "w") as file: + yaml.dump({"board_parameters": board_params}, file, default_flow_style=False) + yaml.dump( + {"calibrator_type": calibrator_type.value["name"]}, file, default_flow_style=False + ) + yaml.dump({"calibration_parameters": calib_params}, file, default_flow_style=False) + yaml.dump({"data_collector": data_coll_params}, file, default_flow_style=False) + yaml.dump({"detector_params": detector_params}, file, default_flow_style=False) + def on_save_clicked(self): output_folder = QFileDialog.getExistingDirectory( None, @@ -807,6 +888,8 @@ def on_save_clicked(self): os.path.join(output_folder, f"{self.data_source.get_camera_name()}_info.yaml"), ) + self.save_parameters(os.path.join(output_folder, "parameters.yaml")) + training_folder = os.path.join(output_folder, "training_images") evaluation_folder = os.path.join(output_folder, "evaluation_images") @@ -817,9 +900,25 @@ def on_save_clicked(self): for index, image in enumerate(self.data_collector.get_training_images()): cv2.imwrite(os.path.join(training_folder, f"{index:04d}.jpg"), image) # noqa E231 + np.savetxt( + os.path.join(training_folder, f"{index:04d}_training_img_points.txt"), + self.data_collector.get_training_detection(index).get_flattened_image_points(), + ) + np.savetxt( + os.path.join(training_folder, f"{index:04d}_training_obj_points.txt"), + self.data_collector.get_training_detection(index).get_flattened_object_points(), + ) for index, image in enumerate(self.data_collector.get_evaluation_images()): cv2.imwrite(os.path.join(evaluation_folder, f"{index:04d}.jpg"), image) # noqa E231 + np.savetxt( + os.path.join(evaluation_folder, f"{index:04d}_eval_img_points.txt"), + self.data_collector.get_evaluation_detection(index).get_flattened_image_points(), + ) + np.savetxt( + os.path.join(evaluation_folder, f"{index:04d}_eval_obj_points.txt"), + 
self.data_collector.get_evaluation_detection(index).get_flattened_object_points(), + ) def process_detection_results(self, img: np.array, detection: BoardDetection, img_stamp: float): """Process the results from an object detection.""" @@ -842,7 +941,9 @@ def process_detection_results(self, img: np.array, detection: BoardDetection, im self.image_view.set_detection_ordered_points(None) self.raw_detection_label.setText("Detected: False") - self.raw_linear_error_rms_label.setText("Linear error rms:") + self.raw_linear_error_rows_rms_label.setText("Linear error rows rms:") + self.raw_linear_error_cols_rms_label.setText("Linear error cols rms:") + self.aspect_ratio_label.setText("Aspect Ratio:") self.rough_tilt_label.setText("Rough tilt:") self.rough_angles_label.setText("Rough angles:") self.rough_position_label.setText("Rough position:") @@ -852,6 +953,22 @@ def process_detection_results(self, img: np.array, detection: BoardDetection, im self.single_shot_reprojection_error_max_label.setText("Reprojection error (max):") self.single_shot_reprojection_error_avg_label.setText("Reprojection error (avg):") self.single_shot_reprojection_error_rms_label.setText("Reprojection error (rms):") + board_speed = None + self.image_view.set_draw_indicators( + board_speed, + self.data_collector.max_allowed_pixel_speed.value, + self.data_collector.get_skew_percentage(), + self.data_collector.get_size_percentage(), + 0, + 0, # rows cols linear error + 0, + 0, # rows cols percentage linear error + 0.0, # aspect ratio + 0, + 0, + self.indicators_alpha_spinbox.value(), + False, + ) else: camera_model = ( @@ -870,6 +987,17 @@ def process_detection_results(self, img: np.array, detection: BoardDetection, im else: filter_result = CollectionStatus.NOT_EVALUATED + if ( + self.operation_mode == OperationMode.EVALUATION + and self.image_view_type_combobox.currentData() == ImageViewMode.SOURCE_RECTIFIED + ): + self.data_collector.process_detection_eval_mode( + image=img, + detection=detection, + camera_model=camera_model, + mode=self.operation_mode, + ) + # For each new sample that is accepted we try to update the current (partial) calibration if ( filter_result == CollectionStatus.ACCEPTED @@ -885,12 +1013,13 @@ def process_detection_results(self, img: np.array, detection: BoardDetection, im } self.image_view.set_draw_detection_color(filter_result_color_dict[filter_result]) - self.data_collection_training_label.setText( - f"Training samples: {self.data_collector.get_num_training_samples()}" - ) - self.data_collection_evaluation_label.setText( - f"Evaluation samples: {self.data_collector.get_num_evaluation_samples()}" - ) + if self.operation_mode == OperationMode.CALIBRATION: + self.data_collection_training_label.setText( + f"Training samples: {self.data_collector.get_num_training_samples()}" + ) + self.data_collection_evaluation_label.setText( + f"Evaluation samples: {self.data_collector.get_num_evaluation_samples()}" + ) # object_points = detection.get_object_points() ordered_image_points = detection.get_ordered_image_points() @@ -915,8 +1044,17 @@ def process_detection_results(self, img: np.array, detection: BoardDetection, im rough_angles = detection.get_rotation_angles(camera_model) self.raw_detection_label.setText("Detected: True") - self.raw_linear_error_rms_label.setText( - f"Linear error rms: {detection.get_linear_error_rms():.2f} px" # noqa E231 + err_rms_rows, err_rms_cols, pct_err_rows, pct_err_cols = ( + detection.get_linear_error_rms() + ) + self.raw_linear_error_rows_rms_label.setText( + f"Linear error rows 
rms: {err_rms_rows:.2f} px" # noqa E231 + ) + self.raw_linear_error_cols_rms_label.setText( + f"Linear error cols rms: {err_rms_cols:.2f} px" # noqa E231 + ) + self.aspect_ratio_label.setText( + f"Aspect Ratio: {detection.get_aspect_ratio_pattern():.2f} px" # noqa E231 ) self.rough_tilt_label.setText( f"Rough tilt: {detection.get_tilt():.2f} degrees" # noqa E231 @@ -942,11 +1080,33 @@ def process_detection_results(self, img: np.array, detection: BoardDetection, im f"Reprojection error (rms): {reprojection_error_rms:.3f} px ({100.0 * reprojection_error_rms_relative:.2f}%)" # noqa E231 ) - self.training_occupancy_rate_label.setText( - f"Training occupancy: {100.0*self.data_collector.get_training_occupancy_rate():.2f}" # noqa E231 + if self.operation_mode == OperationMode.CALIBRATION: + self.training_occupancy_rate_label.setText( + f"Training occupancy: {100.0*self.data_collector.get_training_occupancy_rate():.2f}" # noqa E231 + ) + self.evaluation_occupancy_rate_label.setText( + f"Evaluation occupancy: {100.0*self.data_collector.get_evaluation_occupancy_rate():.2f}" # noqa E231 + ) + + board_speed = ( + 0 if self.last_detection is None else detection.get_speed(self.last_detection) ) - self.evaluation_occupancy_rate_label.setText( - f"Evaluation occupancy: {100.0*self.data_collector.get_evaluation_occupancy_rate():.2f}" # noqa E231 + self.last_detection = detection + pan, tilt = detection.get_rotation_angles() + self.image_view.set_draw_indicators( + board_speed, + self.data_collector.max_allowed_pixel_speed.value, + self.data_collector.get_skew_percentage(), + self.data_collector.get_size_percentage(), + err_rms_rows, + err_rms_cols, + pct_err_rows, + pct_err_cols, + detection.get_aspect_ratio_pattern(), + pan, + tilt, + self.indicators_alpha_spinbox.value(), + self.draw_indicators_checkbox.isChecked(), ) # Draw training / evaluation points @@ -957,6 +1117,8 @@ def process_detection_results(self, img: np.array, detection: BoardDetection, im self.draw_evaluation_heatmap_checkbox.isChecked() ) + self.image_view.set_draw_linearity_heatmap(self.draw_linearity_heatmap_checkbox.isChecked()) + if self.draw_training_points_checkbox.isChecked(): self.image_view.set_training_points( self.data_collector.get_flattened_image_training_points() @@ -977,17 +1139,21 @@ def process_detection_results(self, img: np.array, detection: BoardDetection, im self.data_collector.get_evaluation_occupancy_heatmap() ) - if ( - self.data_collector.get_num_training_samples() > 0 - and not self.calibration_button.isEnabled() - ): - self.calibration_button.setEnabled(True) + if self.draw_linearity_heatmap_checkbox.isChecked(): + self.image_view.set_linearity_heatmap(self.data_collector.get_linearity_heatmap()) - if ( - self.data_collector.get_num_evaluation_samples() > 0 - and not self.evaluation_button.isEnabled() - ): - self.evaluation_button.setEnabled(True) + if self.operation_mode == OperationMode.CALIBRATION: + if ( + self.data_collector.get_num_training_samples() > 0 + and not self.calibration_button.isEnabled() + ): + self.calibration_button.setEnabled(True) + + if ( + self.data_collector.get_num_evaluation_samples() > 0 + and not self.evaluation_button.isEnabled() + ): + self.evaluation_button.setEnabled(True) # Set drawing image self.image_view.set_image(img) diff --git a/calibrators/intrinsic_camera_calibrator/intrinsic_camera_calibrator/intrinsic_camera_calibrator/camera_model.py b/calibrators/intrinsic_camera_calibrator/intrinsic_camera_calibrator/intrinsic_camera_calibrator/camera_model.py index 
1c3bba0e..8bf37bd6 100644 --- a/calibrators/intrinsic_camera_calibrator/intrinsic_camera_calibrator/intrinsic_camera_calibrator/camera_model.py +++ b/calibrators/intrinsic_camera_calibrator/intrinsic_camera_calibrator/intrinsic_camera_calibrator/camera_model.py @@ -218,8 +218,8 @@ def from_dict(self, d): self.k = np.array(d["camera_matrix"]["data"]).reshape( d["camera_matrix"]["rows"], d["camera_matrix"]["cols"] ) - self.d = np.array(d["distortion_model"]["data"]).reshape( - d["distortion_model"]["rows"], d["distortion_model"]["cols"] + self.d = np.array(d["distortion_coefficients"]["data"]).reshape( + d["distortion_coefficients"]["rows"], d["distortion_coefficients"]["cols"] ) diff --git a/calibrators/intrinsic_camera_calibrator/intrinsic_camera_calibrator/intrinsic_camera_calibrator/data_collector.py b/calibrators/intrinsic_camera_calibrator/intrinsic_camera_calibrator/intrinsic_camera_calibrator/data_collector.py index 4c3b1dce..af2dd16e 100644 --- a/calibrators/intrinsic_camera_calibrator/intrinsic_camera_calibrator/intrinsic_camera_calibrator/data_collector.py +++ b/calibrators/intrinsic_camera_calibrator/intrinsic_camera_calibrator/intrinsic_camera_calibrator/data_collector.py @@ -263,6 +263,7 @@ def __init__(self, cfg: dict = dict(), **kwargs): # noqa C408 self.training_heatmap = np.zeros((self.heatmap_cells.value, self.heatmap_cells.value)) self.evaluation_heatmap = np.zeros((self.heatmap_cells.value, self.heatmap_cells.value)) + self.linearity_heatmap = np.zeros((self.heatmap_cells.value, self.heatmap_cells.value)) self.training_occupancy_rate = 0.0 self.evaluation_occupancy_rate = 0.0 @@ -362,6 +363,10 @@ def get_training_occupancy_heatmap(self) -> np.array: """Return the training heatmap, which defines the parts of the pixel space that have image points in it.""" return self.training_heatmap + def get_linearity_heatmap(self) -> np.array: + """Return the linearity map to evaluate the image rectification.""" + return self.linearity_heatmap + def get_evaluation_occupancy_heatmap(self) -> np.array: """Return the evaluation heatmap, which defines the parts of the pixel space that have image points in it.""" return self.evaluation_heatmap @@ -400,6 +405,53 @@ def update_collection_heatmap(self, heatmap: np.array, detection: BoardDetection occupied = float(np.count_nonzero(heatmap > 0)) / np.prod(heatmap.shape) return occupied + def update_linearity_heatmap(self, heatmap: np.array, detection: BoardDetection) -> float: + """Update a heatmap with a single detection's image points.""" + + def squared_error(p, p1, p2): + p = p - p1 + p2 = p2 - p1 + p2 /= np.linalg.norm(p2) + squared_distance = np.abs(np.power(np.linalg.norm(p), 2) - np.power(np.dot(p, p2), 2)) + return squared_distance + + image_points = detection.get_ordered_image_points() + max_pct_error_tolerance = 0.04 + + for j in range(detection.rows): + p1 = image_points[j][0] + p2 = image_points[j][-1] + points_dist = np.linalg.norm(p2 - p1) + for i in range(1, detection.cols - 1): + p = image_points[j][i] + dist_error = np.sqrt(squared_error(p, p1, p2)) + if dist_error / points_dist > max_pct_error_tolerance: + # if distance is too big most likely is a miss detection + dist_error = 0 + x = int(heatmap.shape[1] * p[0] / detection.width) + y = int(heatmap.shape[0] * p[1] / detection.height) + if heatmap[y, x] < dist_error: + heatmap[y, x] = 1 * dist_error + + for j in range(detection.cols): + p1 = image_points[0][j] + p2 = image_points[-1][j] + points_dist = np.linalg.norm(p2 - p1) + for i in range(1, detection.rows - 1): + p = 
image_points[i][j] + dist_error = np.sqrt(squared_error(p, p1, p2)) + if dist_error / points_dist > max_pct_error_tolerance: + # if the error is too large, it is most likely a misdetection + dist_error = 0 + x = int(heatmap.shape[1] * p[0] / detection.width) + y = int(heatmap.shape[0] * p[1] / detection.height) + if heatmap[y, x] < dist_error: + heatmap[y, x] = 1 * dist_error + + def restart_linearity_heatmap(self): + """Restart the linearity heatmap.""" + self.linearity_heatmap = np.zeros((self.heatmap_cells.value, self.heatmap_cells.value)) + def evaluate_redundancy( self, normalized_2d_center_x_distance: float, @@ -463,11 +515,14 @@ def process_detection( """Evaluate if a detection should be added to either the training or evaluation dataset.""" accepted = True + # process detections without filtering, only to get linearity heatmap + self.update_linearity_heatmap(self.linearity_heatmap, detection) + if self.filter_by_speed.value: speed = 0 if self.last_detection is None else detection.get_speed(self.last_detection) self.last_detection = detection - accepted &= speed < self.max_allowed_speed + accepted &= speed < self.max_allowed_pixel_speed.value if self.filter_by_reprojection_error: reprojection_errors = detection.get_reprojection_errors() @@ -530,3 +585,62 @@ def process_detection( return CollectionStatus.ACCEPTED return CollectionStatus.REDUNDANT + + def process_detection_eval_mode( + self, + image: np.array, + detection: BoardDetection, + camera_model: Optional[CameraModel] = None, + mode: OperationMode = OperationMode.CALIBRATION, + ) -> CollectionStatus: + """Evaluate detections made in evaluation mode.""" + # process the detection without filtering + self.update_linearity_heatmap(self.linearity_heatmap, detection) + + def get_skew_coverage(self): + """Get skew percentage covered from a defined range for the indicators.""" + # Define the number of intervals + num_intervals = 50 + interval_size = 1 / num_intervals + # Create a unique set to store covered intervals + covered_intervals = set() + # range in radians. ToDo: define the range + skew_range = np.array([0, 1.04]) + + for detection in self.training_data.get_detections(): + if skew_range[0] <= detection.get_normalized_skew() < skew_range[1]: + interval_index = int(detection.get_normalized_skew() / interval_size) + covered_intervals.add(interval_index) + + # Calculate the percentage of covered intervals + percentage_coverage = len(covered_intervals) / num_intervals # * 100 + + return percentage_coverage + + def get_skew_percentage(self): + """Get the skew percentage under a more descriptive name. ToDo: refactor.""" + return self.get_skew_coverage() + + def get_size_coverage(self): + """Get board size percentage covered from a defined range for the indicators.""" + # Define the number of intervals + num_intervals = 20 + interval_size = 1 / num_intervals + # Create a set to store covered intervals + covered_intervals = set() + # range for board size. ToDo: define the range + size_range = np.array([0.08, 0.21]) + + for detection in self.training_data.get_detections(): + if size_range[0] <= detection.get_normalized_size() < size_range[1]: + interval_index = int(detection.get_normalized_size() / interval_size) + covered_intervals.add(interval_index) + + # Calculate the percentage of covered intervals + percentage_coverage = len(covered_intervals) / num_intervals # * 100 + + return percentage_coverage + + def get_size_percentage(self): + """Get the size percentage under a more descriptive name. ToDo: refactor.""" + return self.get_size_coverage() diff --git 
a/calibrators/intrinsic_camera_calibrator/intrinsic_camera_calibrator/intrinsic_camera_calibrator/parameter.py b/calibrators/intrinsic_camera_calibrator/intrinsic_camera_calibrator/intrinsic_camera_calibrator/parameter.py index df27a7f3..1399d2f3 100644 --- a/calibrators/intrinsic_camera_calibrator/intrinsic_camera_calibrator/intrinsic_camera_calibrator/parameter.py +++ b/calibrators/intrinsic_camera_calibrator/intrinsic_camera_calibrator/intrinsic_camera_calibrator/parameter.py @@ -89,3 +89,14 @@ def parameters(self) -> dict: p_dict[k] = v return p_dict + + def parameters_value(self) -> dict: + """Return the Parameter objects' names and values from this class as a dictionary.""" + with self.lock: + p_dict = {} + + for k, v in vars(self).items(): + if isinstance(v, Parameter): + p_dict[k] = v.value + + return p_dict diff --git a/calibrators/intrinsic_camera_calibrator/intrinsic_camera_calibrator/intrinsic_camera_calibrator/views/image_view.py b/calibrators/intrinsic_camera_calibrator/intrinsic_camera_calibrator/intrinsic_camera_calibrator/views/image_view.py index 73d64641..d1bdcdb1 100644 --- a/calibrators/intrinsic_camera_calibrator/intrinsic_camera_calibrator/intrinsic_camera_calibrator/views/image_view.py +++ b/calibrators/intrinsic_camera_calibrator/intrinsic_camera_calibrator/intrinsic_camera_calibrator/views/image_view.py @@ -23,6 +23,7 @@ from PySide2.QtCore import Qt from PySide2.QtGui import QBrush from PySide2.QtGui import QColor +from PySide2.QtGui import QFont from PySide2.QtGui import QImage from PySide2.QtGui import QPainter from PySide2.QtGui import QPen @@ -174,6 +175,10 @@ def set_draw_evaluation_heatmap(self, value: bool): """Set the flag of wether or not to draw the occupancy heatmap of the evaluation dataset.""" self.is_draw_evaluation_heatmap = value + def set_draw_linearity_heatmap(self, value: bool): + """Set the flag of wether or not to draw the linearity heatmap.""" + self.is_draw_linearity_heatmap = value + def set_detection_ordered_points(self, points_list: List[np.array]): """Set the detection points to draw. 
The points are expected to be in a list of rows, where each row is rendered in a different color.""" self.points_list = points_list @@ -194,6 +199,10 @@ def set_evaluation_heatmap(self, value: np.array): """Set the occupancy heatmap to draw from the evaluation dataset.""" self.evaluation_heatmap = value + def set_linearity_heatmap(self, value: np.array): + """Set the linearity heatmap to draw.""" + self.linearity_heatmap = value + def set_grid_size_pixels(self, cell_size_pixels): """Set the size in which to draw the detection's corners.""" self.cell_size_pixels = cell_size_pixels @@ -202,6 +211,162 @@ def set_rendering_alpha(self, value: float): """Set the alpha channel of the drawings.""" self.rendering_alpha = value + def set_draw_indicators( + self, + board_speed: float, + max_allowed_board_speed: float, + skew_percentage: float, + board_size_percentage: float, + rows_linear_error: float, + cols_linear_error: float, + pct_err_rows: float, + pct_err_cols: float, + aspect_ratio: float, + pan: float, + tilt: float, + alpha_indicators: float, + value: bool, + ): + """Set values for indicators.""" + self.current_board_speed = board_speed + self.max_board_allowed_speed = max_allowed_board_speed + self.skew_percentage = skew_percentage + self.board_size_percentage = board_size_percentage + self.rows_linear_error = rows_linear_error + self.cols_linear_error = cols_linear_error + self.pct_err_rows = pct_err_rows + self.pct_err_cols = pct_err_cols + self.aspect_ratio = aspect_ratio + self.pan = pan + self.tilt = tilt + self.alpha_indicators = alpha_indicators + self.is_draw_indicators = value + + def draw_indicators(self, painter: QPainter, display_size): + """Draw indicators for speed, skew, size aspect ratio, angles of the detected board.""" + color_green = QColor(0.0, 255, 0.0, int(255 * self.alpha_indicators)) + color_red = QColor(255, 0.0, 0.0, int(255 * self.alpha_indicators)) + # Change color according tot he speed + if self.current_board_speed < self.max_board_allowed_speed: + pen = QPen(color_green) + brush = QBrush(color_green) + else: + pen = QPen(color_red) + brush = QBrush(color_red) + painter.setPen(pen) + painter.setBrush(brush) + speed_indicator = QRectF( + QPointF(0, 0), QSize(display_size.width(), int(display_size.height() * 0.04)) + ) + # Draw the rectangle for speed indication + painter.drawRect(speed_indicator) + + # Set the font according to the window size + font_size = max(10, display_size.height() / 40) + font = QFont("Arial", font_size) + painter.setFont(font) + position_text_speed = QPointF( + int(display_size.width() * 0.01), int(display_size.height() * 0.10) + ) + painter.drawText(position_text_speed, "Speed") + + # ToDo: define percentage to change skew and pct size to change to green + threshold_to_be_green = 0.3 + if self.skew_percentage < threshold_to_be_green: + pen_skew = QPen(color_red) + brush_skew = QBrush(color_red) + else: + pen_skew = QPen(color_green) + brush_skew = QBrush(color_green) + painter.setPen(pen_skew) + painter.setBrush(brush_skew) + + # Draw Skew text + position_text_skew = QPointF( + int(display_size.width() * 0.01), int(display_size.height() * 0.88) + ) + painter.drawText(position_text_skew, "Skew " + str(int(self.skew_percentage * 100)) + "%") + + skew_indicator = QRectF( + QPointF(int(display_size.width() * 0.12), int(display_size.height() * 0.85)), + QSize( + display_size.width() * 0.08 * self.skew_percentage, + int(display_size.height() * 0.03), + ), + ) + # Draw the skew progress bar + painter.drawRect(skew_indicator) + + # ToDo: define 
percentage to change skew and pct size to change to green + threshold_to_be_green = 0.2 + if self.board_size_percentage < threshold_to_be_green: + pen_size_board = QPen(color_red) + brush_size_board = QBrush(color_red) + else: + pen_size_board = QPen(color_green) + brush_size_board = QBrush(color_green) + + painter.setPen(pen_size_board) + painter.setBrush(brush_size_board) + + # Draw board size text + position_text_size = QPointF( + int(display_size.width() * 0.01), int(display_size.height() * 0.93) + ) + painter.drawText( + position_text_size, "Size " + str(int(self.board_size_percentage * 100)) + "%" + ) + board_size_indicator = QRectF( + QPointF(int(display_size.width() * 0.12), int(display_size.height() * 0.90)), + QSize( + display_size.width() * 0.08 * self.board_size_percentage, + int(display_size.height() * 0.03), + ), + ) + # Draw the board size progress bar + painter.drawRect(board_size_indicator) + + # Draw board pan tilt angle text + deg_sign = "\N{DEGREE SIGN}" + position_text_pan = QPointF( + int(display_size.width() * 0.01), int(display_size.height() * 0.98) + ) + painter.drawText( + position_text_pan, + "Pan " + str(int(self.pan)) + deg_sign + " Tilt " + str(int(self.tilt)) + deg_sign, + ) + + # Draw Linear errors text + position_text_err_rows = QPointF( + int(display_size.width() * 0.80), int(display_size.height() * 0.88) + ) + painter.drawText( + position_text_err_rows, + "ErrRows " + + str(round(self.rows_linear_error, 1)) + + "px " + + str(round(self.pct_err_rows * 100, 1)) + + "%", + ) + + position_text_err_cols = QPointF( + int(display_size.width() * 0.80), int(display_size.height() * 0.93) + ) + painter.drawText( + position_text_err_cols, + "ErrCols " + + str(round(self.cols_linear_error, 1)) + + "px " + + str(round(self.pct_err_cols * 100, 1)) + + "%", + ) + + # Draw the aspect ratio text indicator + position_text_aspect_ratio = QPointF( + int(display_size.width() * 0.80), int(display_size.height() * 0.98) + ) + painter.drawText(position_text_aspect_ratio, "AspectR " + str(round(self.aspect_ratio, 2))) + def pixmap(self) -> QPixmap: """Return the rendering QPixmap.""" return self.raw_pixmap @@ -441,6 +606,12 @@ def paint(self, painter: QPainter, option, widget): if self.is_draw_evaluation_heatmap and self.evaluation_heatmap is not None: self.draw_heatmap(painter, self.evaluation_heatmap, display_size) + if self.is_draw_linearity_heatmap and self.linearity_heatmap is not None: + self.draw_heatmap(painter, self.linearity_heatmap, display_size) + + if self.is_draw_indicators and self.current_board_speed is not None: + self.draw_indicators(painter, display_size) + def boundingRect(self): """Return the size of the Widget to let other widgets adjust correctly.""" if self.display_pixmap.size().width() == 0: diff --git a/calibrators/intrinsic_camera_calibrator/intrinsic_camera_calibrator/intrinsic_camera_calibrator/views/initialization_view.py b/calibrators/intrinsic_camera_calibrator/intrinsic_camera_calibrator/intrinsic_camera_calibrator/views/initialization_view.py index 450e80d0..92116079 100644 --- a/calibrators/intrinsic_camera_calibrator/intrinsic_camera_calibrator/intrinsic_camera_calibrator/views/initialization_view.py +++ b/calibrators/intrinsic_camera_calibrator/intrinsic_camera_calibrator/intrinsic_camera_calibrator/views/initialization_view.py @@ -136,6 +136,9 @@ def load_intrinsics_button_callback(): ) self.initial_intrinsics = load_intrinsics(intrinsics_path) self.evaluation_radio_button.setEnabled(True) + # self.training_radio_button.setChecked(False) + 
self.training_radio_button.setEnabled(False) + self.evaluation_radio_button.setChecked(True) self.load_intrinsics_button.clicked.connect(load_intrinsics_button_callback)
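# --- Reviewer sketch (not part of the diff): the point-to-line distance behind
# squared_error() and update_linearity_heatmap(). Each inner corner of a row or
# column is compared against the straight line through that row's/column's first
# and last corners, and the per-point distances are aggregated into an RMS value
# (the diff additionally normalizes them by the row/column length). Only numpy is
# assumed; all names below are illustrative, not taken from the calibrator code.
import numpy as np


def point_to_line_distance(p, p1, p2):
    """Distance in pixels from point p to the line through p1 and p2.

    Same idea as squared_error() above: subtract the projection of (p - p1)
    onto the unit direction (p2 - p1) and apply Pythagoras to the residual.
    """
    p = np.asarray(p, dtype=float) - np.asarray(p1, dtype=float)
    d = np.asarray(p2, dtype=float) - np.asarray(p1, dtype=float)
    d /= np.linalg.norm(d)
    squared_distance = max(np.dot(p, p) - np.dot(p, d) ** 2, 0.0)
    return np.sqrt(squared_distance)


# One board row whose middle corner sits 0.05 px off the ideal line.
row = [np.array([0.0, 0.0]), np.array([1.0, 0.05]), np.array([2.0, 0.0])]
errors = [point_to_line_distance(p, row[0], row[-1]) for p in row[1:-1]]
rms = np.sqrt(np.mean(np.square(errors)))
print(f"per-point errors: {errors}, rms: {rms:.3f} px")  # -> rms: 0.050 px

# In a well-rectified image these distances stay near zero, which is why the diff
# also resets the linearity heatmap whenever the undistortion alpha changes.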
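# --- Reviewer sketch (not part of the diff): the aspect-ratio indicator.
# get_aspect_ratio_pattern() averages the ratio of horizontal to vertical distances
# between adjacent corners, so a square-cell board seen nearly fronto-parallel in a
# correctly rectified image should report a value close to 1.0. The corner grid
# below is synthetic and exists only for illustration.
import numpy as np

# Perfectly rectified 6x8 corner grid with 100 px square cells (rows x cols x 2).
rows, cols = 6, 8
ys, xs = np.mgrid[0:rows, 0:cols]
image_points = np.stack([xs * 100.0, ys * 100.0], axis=-1)

# Same averaging as get_aspect_ratio_pattern(): horizontal / vertical distance
# between adjacent corners, averaged over the whole grid.
ratios = []
for j in range(rows - 1):
    for i in range(cols - 1):
        p = image_points[j, i]
        horizontal = np.linalg.norm(p - image_points[j, i + 1])
        vertical = np.linalg.norm(p - image_points[j + 1, i])
        ratios.append(horizontal / vertical)

print(f"aspect ratio: {np.mean(ratios):.2f}")  # ~1.00 for a square-cell board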