From b207d599689d6de4ce7799ad8a28212d9607b6ec Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Mon, 25 Dec 2023 20:15:31 +0000
Subject: [PATCH] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 .../classification/procedures/classification_procedures.py   | 1 -
 .../classification/procedures/deprec/procedures.py           | 1 -
 .../data/classification/imagenet/tools/extract_categories.py | 1 -
 neodroidvision/data/classification/nlet/pair_dataset.py      | 3 ++-
 neodroidvision/data/detection/voc/voc_evaluation.py          | 1 -
 .../neodroid_environments/neodroid_camera_observation.py     | 1 -
 neodroidvision/data/segmentation/clouds.py                   | 1 -
 .../data/synthesis/conversion/mnist/convert_mnist_to_png.py  | 2 +-
 .../data/synthesis/conversion/mnist/threed/to_nrrd.py        | 1 -
 .../data/synthesis/conversion/mnist/threed/voxel_grid.py     | 1 -
 .../single_stage/ssd/architecture/backbones/efficient_net.py | 1 -
 .../ssd/architecture/nms_box_heads/box_predictor.py          | 2 +-
 .../ssd/architecture/nms_box_heads/ssd_box_head.py           | 2 +-
 neodroidvision/detection/single_stage/ssd/multi_box_loss.py  | 3 ++-
 .../detection/two_stage/mask_rcnn/maskrcnn_engine.py         | 2 --
 .../vae/architectures/disentangled/conditional_vae.py        | 1 -
 .../regression/vae/architectures/flow/architectures.py       | 3 ++-
 neodroidvision/regression/vae/architectures/flow/vae_flow.py | 3 ++-
 .../opencv_utilities/calibrators/hough_circle_calibration.py | 5 -----
 .../opencv_utilities/calibrators/hough_line_calibration.py   | 3 ---
 .../utilities/opencv_utilities/images/add_watermark.py       | 2 --
 .../output_activation/ops/non_maximum_suppression.py         | 2 --
 .../utilities/torch_utilities/patches/ndim/patching.py       | 1 -
 .../utilities/visualisation/bounding_box_visualisation.py    | 1 -
 neodroidvision/utilities/visualisation/plot_kernel.py        | 3 ++-
 samples/classification/mnist_retrain.py                      | 1 -
 samples/classification/pair_siamese_training.py              | 1 -
 samples/classification/ram/ram_train.py                      | 1 -
 samples/classification/ram/tests/stest_model.py              | 1 -
 .../detection/two_stage/maskrcnn_neodroid_pose_dectection.py | 2 +-
 samples/misc/data/synthesis/mnist/mnist_dect_vis.py          | 1 -
 .../kivy_demo_app/opencv_face_tracking.py                    | 2 +-
 .../graphical_interfaces/pygame_demo_app/face_detection.py   | 4 +---
 .../autocapture/dlib_hog_examples/draw_regions.py            | 4 ++--
 .../autocapture/dlib_hog_examples/landmark_demo.py           | 4 ++--
 samples/misc/opencv_samples/color_quantisation.py            | 1 -
 samples/misc/opencv_samples/opencv_checkerboard.py           | 1 -
 samples/regression/ae/mnist/mae.py                           | 2 --
 samples/regression/ae/mnist/unet_mnist.py                    | 1 -
 samples/regression/gan/dual_gan.py                           | 1 -
 samples/regression/vae/beta/train_bvae.py                    | 1 -
 samples/regression/vae/conditional/train_cvae.py             | 2 --
 samples/regression/vae/flow/sample_generator.py              | 1 -
 samples/regression/vae/flow/train_flow_vae.py                | 1 -
 samples/segmentation/misc/estimate_gmm_sklearn.py            | 3 ---
 samples/segmentation/penn_fudan/seg_traced_export.py         | 1 -
 46 files changed, 20 insertions(+), 63 deletions(-)

diff --git a/neodroidvision/classification/procedures/classification_procedures.py b/neodroidvision/classification/procedures/classification_procedures.py
index b4b9bce1..6dbbdd45 100644
--- a/neodroidvision/classification/procedures/classification_procedures.py
+++ b/neodroidvision/classification/procedures/classification_procedures.py
@@ -180,7 +180,6 @@ def pred_target_train_model(
     for phase in [SplitEnum.training, SplitEnum.validation]:
         if phase == SplitEnum.training:
             with TorchTrainSession(model):
-
                 input, true_label = zip(*next(train_iterator))
 
                 rgb_imgs = torch_vision_normalize_batch_nchw(
diff --git a/neodroidvision/classification/procedures/deprec/procedures.py b/neodroidvision/classification/procedures/deprec/procedures.py
index 118304bb..58cb37af 100644
--- a/neodroidvision/classification/procedures/deprec/procedures.py
+++ b/neodroidvision/classification/procedures/deprec/procedures.py
@@ -177,7 +177,6 @@ def predictor_response_train_model_neodroid_observations(
     for phase in [SplitEnum.training, SplitEnum.validation]:
         if phase == SplitEnum.training:
             with TorchTrainSession(model):
-
                 input, true_label = zip(*next(train_iterator))
 
                 rgb_imgs = torch_vision_normalize_batch_nchw(
diff --git a/neodroidvision/data/classification/imagenet/tools/extract_categories.py b/neodroidvision/data/classification/imagenet/tools/extract_categories.py
index 67882ca6..42635cbb 100644
--- a/neodroidvision/data/classification/imagenet/tools/extract_categories.py
+++ b/neodroidvision/data/classification/imagenet/tools/extract_categories.py
@@ -11,7 +11,6 @@
 from pathlib import Path
 
 if __name__ == "__main__":
-
     with open(str(Path.home() / "Downloads" / "imagenet_class_index.json")) as f:
         with open("../imagenet_2012_names.py", "w") as sfn:
             with open("../imagenet_2012_id.py", "w") as sfi:
diff --git a/neodroidvision/data/classification/nlet/pair_dataset.py b/neodroidvision/data/classification/nlet/pair_dataset.py
index 3ece7f20..8ca94057 100644
--- a/neodroidvision/data/classification/nlet/pair_dataset.py
+++ b/neodroidvision/data/classification/nlet/pair_dataset.py
@@ -29,7 +29,8 @@ class PairDataset(
 ):  # TODO: Extract image specificity of class to a subclass and move this super pair class to a
     # general torch lib.
     """
-    # This dataset generates a pair of images. 0 for geniune pair and 1 for imposter pair"""
+    # This dataset generates a pair of images. 0 for geniune pair and 1 for imposter pair
+    """
 
     @passes_kws_to(DictImageFolder.__init__)
     @drop_unused_kws
diff --git a/neodroidvision/data/detection/voc/voc_evaluation.py b/neodroidvision/data/detection/voc/voc_evaluation.py
index 97918067..3206b293 100644
--- a/neodroidvision/data/detection/voc/voc_evaluation.py
+++ b/neodroidvision/data/detection/voc/voc_evaluation.py
@@ -235,7 +235,6 @@ def calc_detection_voc_prec_rec(
     ) in six.moves.zip(
         pred_bboxes, pred_labels, pred_scores, gt_bboxes, gt_labels, gt_difficults
     ):
-
         if gt_difficult is None:
             gt_difficult = numpy.zeros(gt_bbox.shape[0], dtype=bool)
 
diff --git a/neodroidvision/data/neodroid_environments/neodroid_camera_observation.py b/neodroidvision/data/neodroid_environments/neodroid_camera_observation.py
index 2cc05b13..1ecb5724 100644
--- a/neodroidvision/data/neodroid_environments/neodroid_camera_observation.py
+++ b/neodroidvision/data/neodroid_environments/neodroid_camera_observation.py
@@ -75,7 +75,6 @@ def __getitem__(self, index):
 
 
 if __name__ == "__main__":
-
     import torch
     from matplotlib import pyplot
 
diff --git a/neodroidvision/data/segmentation/clouds.py b/neodroidvision/data/segmentation/clouds.py
index 5a85aa15..3d7aacc9 100644
--- a/neodroidvision/data/segmentation/clouds.py
+++ b/neodroidvision/data/segmentation/clouds.py
@@ -83,7 +83,6 @@ def __init__(
         N_FOLDS=10,
         SEED=246232,
     ):
-
         self.transp = transp
 
         if subset != subset.testing:
diff --git a/neodroidvision/data/synthesis/conversion/mnist/convert_mnist_to_png.py b/neodroidvision/data/synthesis/conversion/mnist/convert_mnist_to_png.py
index 2ea0ad81..11cebe6a 100644
--- a/neodroidvision/data/synthesis/conversion/mnist/convert_mnist_to_png.py
+++ b/neodroidvision/data/synthesis/conversion/mnist/convert_mnist_to_png.py
@@ -75,7 +75,7 @@ def write_dataset(labels, data, size, rows, cols, output_dir) -> None:
             os.makedirs(dir)
 
     import png  # pip install pypng
-    for (i, label) in enumerate(labels):
+    for i, label in enumerate(labels):
         output_filename = output_dirs[label] / f"{str(i)}.png"
         print(f"writing {output_filename}")
         with open(output_filename, "wb") as h:
diff --git a/neodroidvision/data/synthesis/conversion/mnist/threed/to_nrrd.py b/neodroidvision/data/synthesis/conversion/mnist/threed/to_nrrd.py
index 433a17b5..440e05ac 100644
--- a/neodroidvision/data/synthesis/conversion/mnist/threed/to_nrrd.py
+++ b/neodroidvision/data/synthesis/conversion/mnist/threed/to_nrrd.py
@@ -53,7 +53,6 @@ def save_dataset(X, y, voxel, output, shape=(28, 28)):
 
 
 if __name__ == "__main__":
-
     with gzip.open(PROJECT_APP_PATH.user_data / "mnist.pkl.gz", "rb") as f:
         train_set, valid_set, test_set = pickle.load(f, encoding="iso-8859-1")
 
diff --git a/neodroidvision/data/synthesis/conversion/mnist/threed/voxel_grid.py b/neodroidvision/data/synthesis/conversion/mnist/threed/voxel_grid.py
index 9cb0b35e..9feeca57 100644
--- a/neodroidvision/data/synthesis/conversion/mnist/threed/voxel_grid.py
+++ b/neodroidvision/data/synthesis/conversion/mnist/threed/voxel_grid.py
@@ -118,7 +118,6 @@ def plot(self, d=2, cmap="Oranges", show_axis: bool = False):
         """
 
         if d == 2:
-
             fig, axes = pyplot.subplots(
                 int(numpy.ceil(self.n_z / 4)), 4, figsize=(8, 8)
             )
diff --git a/neodroidvision/detection/single_stage/ssd/architecture/backbones/efficient_net.py b/neodroidvision/detection/single_stage/ssd/architecture/backbones/efficient_net.py
index 31e03314..df43127d 100644
--- a/neodroidvision/detection/single_stage/ssd/architecture/backbones/efficient_net.py
+++ b/neodroidvision/detection/single_stage/ssd/architecture/backbones/efficient_net.py
@@ -104,7 +104,6 @@ def __init__(self, size, model_name, blocks_args=None, global_params=None):
         # Build blocks
         self._blocks = nn.ModuleList([])
         for block_args in self._blocks_args:
-
             # Update block input and output filters based on depth multiplier.
             block_args = block_args._replace(
                 input_filters=round_filters(
diff --git a/neodroidvision/detection/single_stage/ssd/architecture/nms_box_heads/box_predictor.py b/neodroidvision/detection/single_stage/ssd/architecture/nms_box_heads/box_predictor.py
index 52bd72ab..a51f1581 100644
--- a/neodroidvision/detection/single_stage/ssd/architecture/nms_box_heads/box_predictor.py
+++ b/neodroidvision/detection/single_stage/ssd/architecture/nms_box_heads/box_predictor.py
@@ -28,7 +28,7 @@ def __init__(self, boxes_per_location, out_channels, num_categories):
         self.cls_headers = nn.ModuleList()
         self.reg_headers = nn.ModuleList()
 
-        for (level_i, (num_boxes, num_channels)) in enumerate(
+        for level_i, (num_boxes, num_channels) in enumerate(
             zip(boxes_per_location, self.out_channels)
         ):
             self.cls_headers.append(
diff --git a/neodroidvision/detection/single_stage/ssd/architecture/nms_box_heads/ssd_box_head.py b/neodroidvision/detection/single_stage/ssd/architecture/nms_box_heads/ssd_box_head.py
index e9885cd5..362d2b53 100644
--- a/neodroidvision/detection/single_stage/ssd/architecture/nms_box_heads/ssd_box_head.py
+++ b/neodroidvision/detection/single_stage/ssd/architecture/nms_box_heads/ssd_box_head.py
@@ -118,7 +118,7 @@ def forward(self, features: torch.Tensor) -> SSDOut:
 
         categori_logits, bbox_pred = self.predictor(features)
         results = []
-        for (scores, boxes) in zip(
+        for scores, boxes in zip(
             functional.log_softmax(
                 categori_logits, dim=-1
             ),  # TODO:Check dim maybe it should be 1
diff --git a/neodroidvision/detection/single_stage/ssd/multi_box_loss.py b/neodroidvision/detection/single_stage/ssd/multi_box_loss.py
index 9af286b1..4affd90f 100644
--- a/neodroidvision/detection/single_stage/ssd/multi_box_loss.py
+++ b/neodroidvision/detection/single_stage/ssd/multi_box_loss.py
@@ -46,7 +46,8 @@ def forward(
             confidence (batch_size, num_priors, num_categories): class predictions.
             predicted_locations (batch_size, num_priors, 4): predicted locations.
             labels (batch_size, num_priors): real labels of all the priors.
+ """ with torch.no_grad(): # derived from cross_entropy=sum(log(p)) diff --git a/neodroidvision/detection/two_stage/mask_rcnn/maskrcnn_engine.py b/neodroidvision/detection/two_stage/mask_rcnn/maskrcnn_engine.py index df61d69e..c9c0454a 100644 --- a/neodroidvision/detection/two_stage/mask_rcnn/maskrcnn_engine.py +++ b/neodroidvision/detection/two_stage/mask_rcnn/maskrcnn_engine.py @@ -50,7 +50,6 @@ def maskrcnn_train_single_epoch( :return:""" model.to(device) with TorchTrainSession(model): - for images, targets in progress_bar(data_loader, description="Batch #"): images = [img.to(device) for img in images] targets = [{k: v.to(device) for k, v in t.items()} for t in targets] @@ -111,7 +110,6 @@ def maskrcnn_evaluate( with torch.no_grad(): with TorchEvalSession(model): - for image, targets in progress_bar(data_loader): image = [img.to(device) for img in image] targets = [{k: v.to(device) for k, v in t.items()} for t in targets] diff --git a/neodroidvision/regression/vae/architectures/disentangled/conditional_vae.py b/neodroidvision/regression/vae/architectures/disentangled/conditional_vae.py index d97cc25f..1b8a782a 100644 --- a/neodroidvision/regression/vae/architectures/disentangled/conditional_vae.py +++ b/neodroidvision/regression/vae/architectures/disentangled/conditional_vae.py @@ -57,7 +57,6 @@ class Decoder(nn.Module): def __init__( self, layer_sizes: Sequence[int], latent_size: int, num_conditions: int ): - super().__init__() self.MLP = nn.Sequential() diff --git a/neodroidvision/regression/vae/architectures/flow/architectures.py b/neodroidvision/regression/vae/architectures/flow/architectures.py index 9cfbf18b..35539b5b 100644 --- a/neodroidvision/regression/vae/architectures/flow/architectures.py +++ b/neodroidvision/regression/vae/architectures/flow/architectures.py @@ -43,7 +43,8 @@ def forward(self, input: torch.Tensor) -> torch.Tensor: class Generator(nn.Module): """ - Bernoulli model parameterized by a generative network with Gaussian latents for MNIST.""" + Bernoulli model parameterized by a generative network with Gaussian latents for MNIST. + """ def __init__(self, latent_size, data_size): super().__init__() diff --git a/neodroidvision/regression/vae/architectures/flow/vae_flow.py b/neodroidvision/regression/vae/architectures/flow/vae_flow.py index 6b0bb657..ed6ab2fb 100644 --- a/neodroidvision/regression/vae/architectures/flow/vae_flow.py +++ b/neodroidvision/regression/vae/architectures/flow/vae_flow.py @@ -106,7 +106,8 @@ class MADE(nn.Module): Follows https://arxiv.org/abs/1502.03509 - This is used to build MAF: Masked Autoregressive Flow (https://arxiv.org/abs/1705.07057).""" + This is used to build MAF: Masked Autoregressive Flow (https://arxiv.org/abs/1705.07057). 
+ """ def __init__(self, num_input, num_output, num_hidden, num_context): super().__init__() diff --git a/neodroidvision/utilities/opencv_utilities/calibrators/hough_circle_calibration.py b/neodroidvision/utilities/opencv_utilities/calibrators/hough_circle_calibration.py index 631c57a3..c07d1ec3 100644 --- a/neodroidvision/utilities/opencv_utilities/calibrators/hough_circle_calibration.py +++ b/neodroidvision/utilities/opencv_utilities/calibrators/hough_circle_calibration.py @@ -104,13 +104,11 @@ def hough_circle_calibrator( hi = cv2.getTrackbarPos(hi_label, canny_frame_window_label) if lo != lo_prev or hi != hi_prev: # --------------------------= RE-SYNC - a_canny_refresh_flag = True # --------------------------= FLAG lo_prev = lo hi_prev = hi else: - a_canny_refresh_flag = False # --------------------------= Un-FLAG dp = cv2.getTrackbarPos(dp_label, canny_hough_circle_window_label) @@ -134,7 +132,6 @@ def hough_circle_calibrator( or min_radius != min_radius_prev or max_radius != max_radius_prev ): # ----------------------------------------------= RE-SYNC - a_hough_refresh_flag = True # --------------------------= FLAG dp_prev = dp @@ -144,7 +141,6 @@ def hough_circle_calibrator( min_radius_prev = min_radius max_radius_prev = max_radius else: - a_hough_refresh_flag = False # --------------------------= Un-FLAG if ( @@ -155,7 +151,6 @@ def hough_circle_calibrator( cv2.imshow(canny_frame_window_label, edges) if a_canny_refresh_flag or a_hough_refresh_flag: - circles = cv2.HoughCircles( edges, cv2.HOUGH_GRADIENT, diff --git a/neodroidvision/utilities/opencv_utilities/calibrators/hough_line_calibration.py b/neodroidvision/utilities/opencv_utilities/calibrators/hough_line_calibration.py index fd97f9b7..b1cc9e0b 100644 --- a/neodroidvision/utilities/opencv_utilities/calibrators/hough_line_calibration.py +++ b/neodroidvision/utilities/opencv_utilities/calibrators/hough_line_calibration.py @@ -139,7 +139,6 @@ def hough_line_calibrator( lo_prev = lo hi_prev = hi else: - a_canny_refresh_flag = False # --------------------------= Un-FLAG threshold = cv2.getTrackbarPos(threshold_label, canny_hough_lines_window_label) @@ -171,7 +170,6 @@ def hough_line_calibrator( or min_theta != min_theta_prev or max_theta != max_theta_prev ): # ----------------------------------------------= RE-SYNC - a_hough_refresh_flag = True # --------------------------= FLAG rho_prev = rho @@ -182,7 +180,6 @@ def hough_line_calibrator( min_theta_prev = min_theta max_theta_prev = max_theta else: - a_hough_refresh_flag = False # --------------------------= Un-FLAG if ( diff --git a/neodroidvision/utilities/opencv_utilities/images/add_watermark.py b/neodroidvision/utilities/opencv_utilities/images/add_watermark.py index e005bb36..0d793831 100644 --- a/neodroidvision/utilities/opencv_utilities/images/add_watermark.py +++ b/neodroidvision/utilities/opencv_utilities/images/add_watermark.py @@ -45,7 +45,6 @@ def __init__( rgb_weight=(0, 1, 1.5), input_frame_shape=None, ) -> None: - logo_image = cv2.imread(logo_path, cv2.IMREAD_UNCHANGED) h, w, c = logo_image.shape if angle % 360 != 0: @@ -77,7 +76,6 @@ def __init__( self.logo_image[:, :, 0] = self.logo_image[:, :, 0] * self.rgb_weight[2] if input_frame_shape is not None: - logo_w = input_frame_shape[1] * self.size ratio = logo_w / self.ori_shape[1] logo_h = int(ratio * self.ori_shape[0]) diff --git a/neodroidvision/utilities/torch_utilities/output_activation/ops/non_maximum_suppression.py b/neodroidvision/utilities/torch_utilities/output_activation/ops/non_maximum_suppression.py index 
index 0dab7616..f2380482 100644
--- a/neodroidvision/utilities/torch_utilities/output_activation/ops/non_maximum_suppression.py
+++ b/neodroidvision/utilities/torch_utilities/output_activation/ops/non_maximum_suppression.py
@@ -20,10 +20,8 @@
 if int(torchvision.__version__.split(".")[1]) >= int("0.3.0".split(".")[1]):
     nms_support = torchvision.ops.nms
 else:
-
     print(f"torchvision version: {torchvision.__version__}" "\n nms not supported")
 
 try:
-
     import ssd_torch_extension
     nms_support = ssd_torch_extension.nms  # non_maximum_suppression
diff --git a/neodroidvision/utilities/torch_utilities/patches/ndim/patching.py b/neodroidvision/utilities/torch_utilities/patches/ndim/patching.py
index bc7279f2..f83e92c5 100644
--- a/neodroidvision/utilities/torch_utilities/patches/ndim/patching.py
+++ b/neodroidvision/utilities/torch_utilities/patches/ndim/patching.py
@@ -80,7 +80,6 @@ def suahd():
     patch_size = 8
 
     if show_2d:
-
         from cv2 import circle
         from matplotlib import pyplot
 
diff --git a/neodroidvision/utilities/visualisation/bounding_box_visualisation.py b/neodroidvision/utilities/visualisation/bounding_box_visualisation.py
index 17c755e9..6de20ccb 100644
--- a/neodroidvision/utilities/visualisation/bounding_box_visualisation.py
+++ b/neodroidvision/utilities/visualisation/bounding_box_visualisation.py
@@ -344,7 +344,6 @@ def draw_bounding_box_on_image(
     text_left = left
 
     if top > total_display_str_height:
-
         if label_inside:
             text_bottom = top + total_display_str_height
         else:
diff --git a/neodroidvision/utilities/visualisation/plot_kernel.py b/neodroidvision/utilities/visualisation/plot_kernel.py
index 52ef43b0..1ecaf878 100644
--- a/neodroidvision/utilities/visualisation/plot_kernel.py
+++ b/neodroidvision/utilities/visualisation/plot_kernel.py
@@ -57,7 +57,8 @@ def plot_kernels(
 
     number_cols: number of columns to be displayed
     m_interpolation: interpolation methods matplotlib. See in:
-    https://matplotlib.org/gallery/images_contours_and_fields/interpolation_methods.html"""
+    https://matplotlib.org/gallery/images_contours_and_fields/interpolation_methods.html
+    """
 
     number_kernels = tensor.shape[0]
     number_rows = 1 + number_kernels // number_cols
diff --git a/samples/classification/mnist_retrain.py b/samples/classification/mnist_retrain.py
index d11ad293..421a098a 100644
--- a/samples/classification/mnist_retrain.py
+++ b/samples/classification/mnist_retrain.py
@@ -101,7 +101,6 @@ def predictor_response_train_model(
     for phase in [SplitEnum.training, SplitEnum.validation]:
         if phase == SplitEnum.training:
             with TorchTrainSession(model):
-
                 img, true_label = next(train_iterator)
 
                 rgb_imgs = to_tensor(
diff --git a/samples/classification/pair_siamese_training.py b/samples/classification/pair_siamese_training.py
index 84ada09a..5b98757d 100644
--- a/samples/classification/pair_siamese_training.py
+++ b/samples/classification/pair_siamese_training.py
@@ -166,7 +166,6 @@ def train_siamese(
             train_loss = loss_contrastive.cpu().item()
             writer.scalar("train_loss", train_loss, batch_i)
             if batch_counter.__next__() % validation_interval == 0:
-
                 with TorchEvalSession(model):
                     valid_loss = 0
                     valid_accuracy = []
diff --git a/samples/classification/ram/ram_train.py b/samples/classification/ram/ram_train.py
index 4edb0b7d..c3160b19 100644
--- a/samples/classification/ram/ram_train.py
+++ b/samples/classification/ram/ram_train.py
@@ -175,7 +175,6 @@ def train(self, *, writer):
         )
 
         for epoch in range(self.start_epoch, self.epochs):
-
             print(
                 f"\nEpoch: {epoch + 1}/{self.epochs} - LR: {self.optimiser.param_groups[0]['lr']:.6f}"
             )
diff --git a/samples/classification/ram/tests/stest_model.py b/samples/classification/ram/tests/stest_model.py
index adc13967..90bed66b 100644
--- a/samples/classification/ram/tests/stest_model.py
+++ b/samples/classification/ram/tests/stest_model.py
@@ -18,7 +18,6 @@
 import torch
 
 if __name__ == "__main__":
-
     config = get_ram_config()
 
     # load images
diff --git a/samples/detection/two_stage/maskrcnn_neodroid_pose_dectection.py b/samples/detection/two_stage/maskrcnn_neodroid_pose_dectection.py
index 4884ccfe..bf5e6c33 100644
--- a/samples/detection/two_stage/maskrcnn_neodroid_pose_dectection.py
+++ b/samples/detection/two_stage/maskrcnn_neodroid_pose_dectection.py
@@ -74,7 +74,7 @@ def show_preds(img, pred):
     drawdot = lambda x, y, r=3, fill="red": draw.ellipse(
         (x - r, y - r, x + r, y + r), fill=fill
     )
-    for (box, kpts) in pred:
+    for box, kpts in pred:
         for kpt in kpts:
             if kpt[2] == 1:
                 drawdot(kpt[0], kpt[1])
diff --git a/samples/misc/data/synthesis/mnist/mnist_dect_vis.py b/samples/misc/data/synthesis/mnist/mnist_dect_vis.py
index 343dfbd1..a8ce3725 100644
--- a/samples/misc/data/synthesis/mnist/mnist_dect_vis.py
+++ b/samples/misc/data/synthesis/mnist/mnist_dect_vis.py
@@ -38,7 +38,6 @@ def read_labels(label_path: pathlib.Path) -> Tuple[ndarray, ndarray]:
 
 
 if __name__ == "__main__":
-
     from draugr.opencv_utilities import draw_bounding_boxes
 
     base_path = pathlib.Path(
diff --git a/samples/misc/graphical_interfaces/kivy_demo_app/opencv_face_tracking.py b/samples/misc/graphical_interfaces/kivy_demo_app/opencv_face_tracking.py
index 109fa1af..e750870d 100644
--- a/samples/misc/graphical_interfaces/kivy_demo_app/opencv_face_tracking.py
+++ b/samples/misc/graphical_interfaces/kivy_demo_app/opencv_face_tracking.py
@@ -147,7 +147,7 @@ def update(self, dt):
                 gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30)
             )
 
-            for (x, y, w, h) in faces:
+            for x, y, w, h in faces:
                 cv2.rectangle(rgb, (x, y), (x + w, y + h), (0, 255, 0), 2)
         except Exception as e:
             print(e)
diff --git a/samples/misc/graphical_interfaces/pygame_demo_app/face_detection.py b/samples/misc/graphical_interfaces/pygame_demo_app/face_detection.py
index fc8b0664..3619efd4 100644
--- a/samples/misc/graphical_interfaces/pygame_demo_app/face_detection.py
+++ b/samples/misc/graphical_interfaces/pygame_demo_app/face_detection.py
@@ -72,13 +72,12 @@ def draw_from_points(cv_image, points):
     Returns a cv_image."""
     cv_image = numpy.ascontiguousarray(cv_image, dtype=numpy.uint8)
     for f in points:
-        for (x, y, w, h) in f:
+        for x, y, w, h in f:
             cv2.rectangle(cv_image, (x, y), (x + w, y + h), 255)
     return cv_image
 
 
 if __name__ == "__main__":
-
     # Set game screen
     screen = pygame.display.set_mode(SCREEN)
 
@@ -90,7 +89,6 @@ def draw_from_points(cv_image, points):
     cam.start()
 
     while 1:  # Ze loop
-
         time.sleep(1 / 120)  # 60 frames per second
 
         image = cam.get_image()  # Get current webcam image
diff --git a/samples/misc/opencv_samples/autocapture/dlib_hog_examples/draw_regions.py b/samples/misc/opencv_samples/autocapture/dlib_hog_examples/draw_regions.py
index f72c6b0c..b8933967 100644
--- a/samples/misc/opencv_samples/autocapture/dlib_hog_examples/draw_regions.py
+++ b/samples/misc/opencv_samples/autocapture/dlib_hog_examples/draw_regions.py
@@ -92,7 +92,7 @@ def asijdas():
     for image in AsyncVideoStream():
         gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
 
-        for (i, rect) in enumerate(detector(gray, upsample)):
+        for i, rect in enumerate(detector(gray, upsample)):
            # determine the facial landmarks for the face region, then
            # convert the landmark (x, y)-coordinates to a NumPy array
            shape = shape_to_ndarray(predictor(gray, rect))
@@ -119,7 +119,7 @@ def asijdas():
 
             # loop over the subset of facial landmarks, drawing the
             # specific face part
-            for (x, y) in shape[i:j]:
+            for x, y in shape[i:j]:
                 cv2.circle(clone, (x, y), 1, (0, 0, 255), -1)
 
             if False:
diff --git a/samples/misc/opencv_samples/autocapture/dlib_hog_examples/landmark_demo.py b/samples/misc/opencv_samples/autocapture/dlib_hog_examples/landmark_demo.py
index abf358a1..67ddf585 100644
--- a/samples/misc/opencv_samples/autocapture/dlib_hog_examples/landmark_demo.py
+++ b/samples/misc/opencv_samples/autocapture/dlib_hog_examples/landmark_demo.py
@@ -25,13 +25,13 @@
 for image in AsyncVideoStream():
     gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
 
-    for (i, rect) in enumerate(detector(gray, upsample_num_times)):
+    for i, rect in enumerate(detector(gray, upsample_num_times)):
         # determine the facial landmarks for the face region, then
         # convert the facial landmark (x, y)-coordinates to a NumPy
         # array
         # loop over the (x, y)-coordinates for the facial landmarks
         # and draw them on the image
-        for (x, y) in shape_to_ndarray(predictor(gray, rect)):
+        for x, y in shape_to_ndarray(predictor(gray, rect)):
             cv2.circle(image, (x, y), 2, (0, 255, 0), -1)
 
     if show_image(
diff --git a/samples/misc/opencv_samples/color_quantisation.py b/samples/misc/opencv_samples/color_quantisation.py
index 1eed61e2..10b1cfd2 100644
--- a/samples/misc/opencv_samples/color_quantisation.py
+++ b/samples/misc/opencv_samples/color_quantisation.py
@@ -3,7 +3,6 @@
 from draugr.opencv_utilities import show_image
 
 if __name__ == "__main__":
-
     img = cv2.imread("home.jpg")
     K = 2
     if img is not None:
diff --git a/samples/misc/opencv_samples/opencv_checkerboard.py b/samples/misc/opencv_samples/opencv_checkerboard.py
index 680d3771..bf7bcc98 100644
--- a/samples/misc/opencv_samples/opencv_checkerboard.py
+++ b/samples/misc/opencv_samples/opencv_checkerboard.py
@@ -200,7 +200,6 @@ def load_and_draw():
         camera_mtx, dist_coef, _, _ = [X[i] for i in save_keys]
 
     for img in images:
-
         gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
         intrsc_found, intersections = cv2.findChessboardCorners(
             gray_img, intersections_shape, None
diff --git a/samples/regression/ae/mnist/mae.py b/samples/regression/ae/mnist/mae.py
index dff077cd..e9e0ee75 100644
--- a/samples/regression/ae/mnist/mae.py
+++ b/samples/regression/ae/mnist/mae.py
@@ -84,7 +84,6 @@ def training(
     for update_i in sess:
         for phase in [SplitEnum.training, SplitEnum.validation]:
             if phase == SplitEnum.training:
-
                 for param_group in optimiser.param_groups:
                     writer.scalar("lr", param_group["lr"], update_i)
 
@@ -96,7 +95,6 @@ def training(
                 optimiser.zero_grad()
 
             with torch.set_grad_enabled(phase == SplitEnum.training):
-
                 model_input = masker(rgb_imgs)
                 recon_pred, *_ = model(torch.clamp(model_input, 0.0, 1.0))
 
diff --git a/samples/regression/ae/mnist/unet_mnist.py b/samples/regression/ae/mnist/unet_mnist.py
index 49fcd298..67fedf58 100644
--- a/samples/regression/ae/mnist/unet_mnist.py
+++ b/samples/regression/ae/mnist/unet_mnist.py
@@ -88,7 +88,6 @@ def training(
     for update_i in sess:
         for phase in [SplitEnum.training, SplitEnum.validation]:
             if phase == SplitEnum.training:
-
                 for param_group in optimiser.param_groups:
                     writer.scalar("lr", param_group["lr"], update_i)
 
diff --git a/samples/regression/gan/dual_gan.py b/samples/regression/gan/dual_gan.py
index fb7c0469..037a3765 100644
--- a/samples/regression/gan/dual_gan.py
+++ b/samples/regression/gan/dual_gan.py
@@ -87,7 +87,6 @@ def main():
     with TensorBoardPytorchWriter(
         PROJECT_APP_PATH.user_log / str(time.time())
     ) as writer:
-
         for it in range(1000000):
             for _ in range(n_critics):
                 # Sample data
diff --git a/samples/regression/vae/beta/train_bvae.py b/samples/regression/vae/beta/train_bvae.py
index 3948f216..9b31c2bf 100644
--- a/samples/regression/vae/beta/train_bvae.py
+++ b/samples/regression/vae/beta/train_bvae.py
@@ -146,7 +146,6 @@ def stest_model(
 
 
 if __name__ == "__main__":
-
     torch.manual_seed(82375329)
 
     LOWEST_L = inf
diff --git a/samples/regression/vae/conditional/train_cvae.py b/samples/regression/vae/conditional/train_cvae.py
index 200fe56a..dd0e4c18 100644
--- a/samples/regression/vae/conditional/train_cvae.py
+++ b/samples/regression/vae/conditional/train_cvae.py
@@ -46,7 +46,6 @@ def main(config, model, tmsp_path, patience=100):
     tracker_epoch = defaultdict(lambda: defaultdict(dict))
 
     for iteration, (original, label) in progress_bar(enumerate(data_loader)):
-
         original, label = (
             original.to(global_torch_device()),
             label.to(global_torch_device()),
@@ -135,7 +134,6 @@ def main(config, model, tmsp_path, patience=100):
 
 
 if __name__ == "__main__":
-
     CONFIG = NOD()
     CONFIG.seed = 58329583
     CONFIG.epochs = 1000
diff --git a/samples/regression/vae/flow/sample_generator.py b/samples/regression/vae/flow/sample_generator.py
index f3c729cb..00b48696 100644
--- a/samples/regression/vae/flow/sample_generator.py
+++ b/samples/regression/vae/flow/sample_generator.py
@@ -38,7 +38,6 @@ def evaluate(generator, evaluation_data, device):
 
 
 if __name__ == "__main__":
-
     TRAIN_DIR = PROJECT_APP_PATH.user_data / "vanilla_vae" / "train"
 
     if not TRAIN_DIR.exists():
diff --git a/samples/regression/vae/flow/train_flow_vae.py b/samples/regression/vae/flow/train_flow_vae.py
index 20c79a26..1847c068 100644
--- a/samples/regression/vae/flow/train_flow_vae.py
+++ b/samples/regression/vae/flow/train_flow_vae.py
@@ -61,7 +61,6 @@ def evaluate(
 
 
 if __name__ == "__main__":
-
     TRAIN_DIR = PROJECT_APP_PATH.user_data / "vanilla_vae" / "train"
 
     if not TRAIN_DIR.exists():
diff --git a/samples/segmentation/misc/estimate_gmm_sklearn.py b/samples/segmentation/misc/estimate_gmm_sklearn.py
index 0a12709d..300add6a 100644
--- a/samples/segmentation/misc/estimate_gmm_sklearn.py
+++ b/samples/segmentation/misc/estimate_gmm_sklearn.py
@@ -13,11 +13,9 @@
 from neodroidvision.segmentation.gmm import visualise_2D_gmm, visualise_3d_gmm
 
 if __name__ == "__main__":
-
     N, D = 1000, 3
 
     if D == 2:
-
         means = numpy.array([[0.5, 0.0], [0, 0], [-0.5, -0.5], [-0.8, 0.3]])
         covs = numpy.array(
             [
@@ -28,7 +26,6 @@
             ]
         )
     elif D == 3:
-
         means = numpy.array(
             [[0.5, 0.0, 0.0], [0.0, 0.0, 0.0], [-0.5, -0.5, -0.5], [-0.8, 0.3, 0.4]]
         )
diff --git a/samples/segmentation/penn_fudan/seg_traced_export.py b/samples/segmentation/penn_fudan/seg_traced_export.py
index b6810d9c..28cb65cd 100644
--- a/samples/segmentation/penn_fudan/seg_traced_export.py
+++ b/samples/segmentation/penn_fudan/seg_traced_export.py
@@ -41,7 +41,6 @@ def export_detection_model(
 
     with TorchDeviceSession(device=global_torch_device("cpu"), model=model):
         with TorchEvalSession(model):
-
             seed_stack(SEED)
 
             # standard PyTorch mean-std input image normalization