From 1287c29e4340de0aac8d1e4d832d3f9ceaac55f0 Mon Sep 17 00:00:00 2001 From: B1ueber2y Date: Sat, 19 Oct 2024 15:22:33 +0200 Subject: [PATCH 1/9] switch to ruff. --- .github/workflows/format-ubuntu.yml | 5 +++-- ruff.toml | 21 +++++++++++++++++++++ scripts/format/{black.sh => python.sh} | 13 +++++++------ 3 files changed, 31 insertions(+), 8 deletions(-) create mode 100644 ruff.toml rename scripts/format/{black.sh => python.sh} (50%) diff --git a/.github/workflows/format-ubuntu.yml b/.github/workflows/format-ubuntu.yml index e3bc1879..1410019b 100644 --- a/.github/workflows/format-ubuntu.yml +++ b/.github/workflows/format-ubuntu.yml @@ -37,8 +37,9 @@ jobs: exit 0 fi set +x -euo pipefail - sudo apt-get update && sudo apt-get install -y clang-format-14 black + sudo apt-get update && sudo apt-get install -y clang-format-14 + python -m pip install ruff==0.6.7 ./scripts/format/clang_format.sh - ./scripts/format/black.sh + ./scripts/format/python.sh git diff --name-only git diff --exit-code || (echo "Code formatting failed" && exit 1) diff --git a/ruff.toml b/ruff.toml new file mode 100644 index 00000000..bb421f1d --- /dev/null +++ b/ruff.toml @@ -0,0 +1,21 @@ +line-length = 80 + +[lint] +select = [ + # pycodestyle + "E", + # Pyflakes + "F", + # pyupgrade + "UP", + # flake8-bugbear + "B", + # flake8-simplify + "SIM", + # isort + "I", +] +ignore = ["SIM117"] + +[lint.per-file-ignores] +"scripts/*.py" = ["E", "SIM", "UP", "B"] diff --git a/scripts/format/black.sh b/scripts/format/python.sh similarity index 50% rename from scripts/format/black.sh rename to scripts/format/python.sh index ea0566a5..7dd575c3 100755 --- a/scripts/format/black.sh +++ b/scripts/format/python.sh @@ -1,14 +1,14 @@ #!/usr/bin/env bash -# This script runs the black Python formatter on the whole repository. +# This script runs the ruff Python formatter on the whole repository. # Check version -version_string=$(black --version | sed -E 's/^.*(\d+\.\d+-.*).*$/\1/') -expected_version_string='21.12' +version_string=$(ruff --version | sed -E 's/^.*(\d+\.\d+-.*).*$/\1/') +expected_version_string='0.6.7' if [[ "$version_string" =~ "$expected_version_string" ]]; then - echo "black version '$version_string' matches '$expected_version_string'" + echo "ruff version '$version_string' matches '$expected_version_string'" else - echo "black version '$version_string' doesn't match '$expected_version_string'" + echo "ruff version '$version_string' doesn't match '$expected_version_string'" exit 1 fi @@ -22,4 +22,5 @@ num_files=$(echo $all_files | wc -w) echo "Formatting ${num_files} files" # shellcheck disable=SC2086 -black --line-length 80 ${all_files} +ruff format --config ${root_folder}/ruff.toml ${all_files} +ruff check --config ${root_folder}/ruff.toml ${all_files} From ae1895c1c325bdc9457a2b0e60d7153255465ccd Mon Sep 17 00:00:00 2001 From: B1ueber2y Date: Sat, 19 Oct 2024 15:23:39 +0200 Subject: [PATCH 2/9] formatting. 
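Reformat all Python sources with the ruff configuration added in the previous commit. To reproduce something close to this diff locally (a sketch, assuming the ruff 0.6.7 pin from the CI workflow; `--fix` is included because the import re-ordering below comes from the isort "I" rules in ruff.toml, which `ruff format` alone does not apply):

    python -m pip install ruff==0.6.7
    ruff format --config ruff.toml .
    ruff check --fix --config ruff.toml .

Equivalently, ./scripts/format/python.sh runs the same format and check steps against the repository.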
--- limap/__init__.py | 37 +++---- limap/base/__init__.py | 5 +- limap/base/align.py | 12 +- limap/base/unit_test.py | 4 +- limap/estimators/__init__.py | 1 + .../_pl_estimate_absolute_pose.py | 22 ++-- limap/features/__init__.py | 3 +- limap/features/extract_line_patches.py | 14 +-- limap/features/extractors.py | 14 +-- limap/features/models/base_model.py | 5 +- limap/features/models/s2dnet.py | 26 ++--- limap/features/models/vggnet.py | 2 +- limap/fitting/__init__.py | 1 + limap/fitting/fitting.py | 3 +- limap/line2d/DeepLSD/deeplsd.py | 2 + limap/line2d/GlueStick/extractor.py | 7 +- limap/line2d/GlueStick/matcher.py | 2 + limap/line2d/HAWPv3/hawp.py | 4 +- limap/line2d/L2D2/RAL_net_cov.py | 3 +- limap/line2d/L2D2/extractor.py | 7 +- limap/line2d/L2D2/matcher.py | 2 +- limap/line2d/LBD/extractor.py | 9 +- limap/line2d/LBD/matcher.py | 2 +- limap/line2d/LSD/lsd.py | 3 +- limap/line2d/LineTR/extractor.py | 10 +- limap/line2d/LineTR/line_attention.py | 3 +- limap/line2d/LineTR/line_process.py | 14 +-- limap/line2d/LineTR/line_transformer.py | 12 +- limap/line2d/LineTR/linetr_pipeline.py | 30 +++-- limap/line2d/LineTR/matcher.py | 4 +- limap/line2d/SOLD2/experiment.py | 15 ++- limap/line2d/SOLD2/misc/geometry_utils.py | 1 - limap/line2d/SOLD2/misc/train_utils.py | 1 + limap/line2d/SOLD2/misc/visualize_util.py | 7 +- limap/line2d/SOLD2/model/line_detection.py | 11 +- limap/line2d/SOLD2/model/line_detector.py | 9 +- limap/line2d/SOLD2/model/line_matcher.py | 13 ++- limap/line2d/SOLD2/model/line_matching.py | 4 +- limap/line2d/SOLD2/model/loss.py | 9 +- limap/line2d/SOLD2/model/metrics.py | 35 +++--- limap/line2d/SOLD2/model/model_util.py | 16 ++- limap/line2d/SOLD2/sold2.py | 10 +- limap/line2d/SOLD2/sold2_wrapper.py | 8 +- limap/line2d/SOLD2/train.py | 27 ++--- limap/line2d/TP_LSD/tp_lsd.py | 5 +- limap/line2d/__init__.py | 4 +- limap/line2d/base_detector.py | 13 +-- limap/line2d/base_matcher.py | 15 +-- limap/line2d/endpoints/extractor.py | 5 +- limap/line2d/endpoints/matcher.py | 3 +- limap/line2d/line_utils/merge_lines.py | 4 +- limap/merging/__init__.py | 1 + limap/merging/merging.py | 7 +- limap/optimize/__init__.py | 5 +- limap/optimize/extract_heatmaps_sold2.py | 4 +- .../optimize/extract_track_patches_s2dnet.py | 11 +- limap/optimize/functions.py | 10 +- limap/optimize/global_pl_association/solve.py | 2 +- .../hybrid_bundle_adjustment/solve.py | 3 +- limap/optimize/line_localization/__init__.py | 2 +- limap/optimize/line_localization/functions.py | 7 +- limap/optimize/line_localization/solve.py | 5 +- limap/optimize/line_refinement/__init__.py | 2 +- .../line_refinement/line_refinement.py | 21 ++-- limap/optimize/line_refinement/solve.py | 5 +- limap/point2d/__init__.py | 2 +- limap/point2d/superglue/superglue.py | 19 ++-- limap/point2d/superpoint/main.py | 12 +- limap/point2d/superpoint/superpoint.py | 1 + limap/pointsfm/__init__.py | 7 +- limap/pointsfm/bundler_reader.py | 18 +-- limap/pointsfm/colmap_reader.py | 10 +- limap/pointsfm/colmap_sfm.py | 20 ++-- limap/pointsfm/database.py | 16 ++- limap/pointsfm/functions.py | 4 +- limap/pointsfm/model_converter.py | 28 ++--- limap/pointsfm/read_write_model.py | 22 ++-- limap/pointsfm/visualsfm_reader.py | 13 ++- limap/runners/__init__.py | 2 +- limap/runners/functions.py | 23 ++-- limap/runners/functions_structures.py | 12 +- limap/runners/line_fitnmerge.py | 7 +- limap/runners/line_localization.py | 21 ++-- limap/runners/line_triangulation.py | 14 +-- limap/triangulation/__init__.py | 1 + limap/triangulation/triangulation.py | 2 - 
limap/undistortion/__init__.py | 1 + limap/undistortion/undistort.py | 5 +- limap/util/__init__.py | 4 +- limap/util/config.py | 10 +- limap/util/evaluation.py | 19 +--- limap/util/geometry.py | 10 +- limap/util/io.py | 104 ++++++++---------- limap/visualize/__init__.py | 6 +- limap/visualize/trackvis/__init__.py | 2 +- limap/visualize/trackvis/base.py | 23 +--- limap/visualize/trackvis/open3d.py | 5 +- limap/visualize/vis_bipartite.py | 26 ++--- limap/visualize/vis_lines.py | 9 +- limap/visualize/vis_matches.py | 2 +- limap/visualize/vis_utils.py | 34 ++---- limap/vplib/JLinkage/JLinkage.py | 4 +- limap/vplib/__init__.py | 1 + limap/vplib/base_vp_detector.py | 7 +- limap/vplib/progressivex/progressivex.py | 8 +- runners/7scenes/localization.py | 25 +++-- runners/7scenes/utils.py | 29 ++--- runners/__init__.py | 2 +- runners/bundler_triangulation.py | 8 +- runners/cambridge/localization.py | 28 +++-- runners/cambridge/utils.py | 37 ++++--- runners/colmap_triangulation.py | 8 +- runners/eth3d/ETH3D.py | 13 +-- runners/eth3d/fitnmerge.py | 6 +- runners/eth3d/loader.py | 6 +- runners/eth3d/triangulation.py | 6 +- runners/hypersim/Hypersim.py | 32 +++--- runners/hypersim/fitnmerge.py | 6 +- runners/hypersim/loader.py | 6 +- runners/hypersim/refine_sfm.py | 21 ++-- runners/hypersim/triangulation.py | 6 +- runners/inloc/localization.py | 22 ++-- runners/inloc/utils.py | 18 +-- runners/pointline_association.py | 33 +++--- runners/refinement.py | 10 +- runners/rome16k/Rome16K.py | 11 +- runners/rome16k/statistics.py | 10 +- runners/rome16k/triangulation.py | 10 +- runners/scannet/ScanNet.py | 17 +-- runners/scannet/fitnmerge.py | 8 +- runners/scannet/loader.py | 6 +- runners/scannet/triangulation.py | 8 +- runners/tests/line2d.py | 2 +- runners/tests/localization.py | 15 ++- runners/visualsfm_triangulation.py | 8 +- scripts/aachen_undistort.py | 7 +- scripts/convert_model.py | 3 +- scripts/eval_hypersim.py | 11 +- scripts/eval_tnt.py | 9 +- scripts/tnt_align.py | 4 +- scripts/tnt_colmap_runner.py | 2 +- 141 files changed, 741 insertions(+), 809 deletions(-) diff --git a/limap/__init__.py b/limap/__init__.py index ed8b9ee5..8ac38ec7 100644 --- a/limap/__init__.py +++ b/limap/__init__.py @@ -3,22 +3,21 @@ sys.path.append("build/limap/_limap") from _limap import * -from . import base -from . import point2d -from . import line2d -from . import vplib -from . import pointsfm -from . import undistortion - -from . import triangulation -from . import merging -from . import evaluation -from . import fitting -from . import util -from . import visualize -from . import structures - -from . import features -from . import optimize - -from . import runners +from . 
import ( + base, + evaluation, + features, + fitting, + line2d, + merging, + optimize, + point2d, + pointsfm, + runners, + structures, + triangulation, + undistortion, + util, + visualize, + vplib, +) diff --git a/limap/base/__init__.py b/limap/base/__init__.py index e78a44a9..58d5b958 100644 --- a/limap/base/__init__.py +++ b/limap/base/__init__.py @@ -1,6 +1,7 @@ from _limap._base import * -from .functions import * + from .align import * -from .unit_test import * from .depth_reader_base import * +from .functions import * from .p3d_reader_base import * +from .unit_test import * diff --git a/limap/base/align.py b/limap/base/align.py index c8c79ccf..ccfe747d 100644 --- a/limap/base/align.py +++ b/limap/base/align.py @@ -74,11 +74,13 @@ def align_imagecols_colmap( max_error=0.01, tmp_folder="tmp/model_convertion", ): - import os, shutil + import os + import subprocess + import numpy as np - from limap.pointsfm import convert_imagecols_to_colmap + import limap.util.io as limapio - import subprocess + from limap.pointsfm import convert_imagecols_to_colmap # assertion check assert imagecols_src.NumImages() == imagecols_dst.NumImages() @@ -102,7 +104,7 @@ def align_imagecols_colmap( for img_id in imagecols_src.get_img_ids(): imname = imagecols_src.image_name(img_id) pos = imagecols_dst.camview(img_id).pose.center() - f.write("{0} {1} {2} {3}\n".format(imname, pos[0], pos[1], pos[2])) + f.write(f"{imname} {pos[0]} {pos[1]} {pos[2]}\n") # call comlap model aligner transform_path = os.path.join(tmp_folder, "transform.txt") @@ -130,7 +132,7 @@ def align_imagecols_colmap( # read in transformation def read_trans(fname): - with open(fname, "r") as f: + with open(fname) as f: lines = f.readlines() mat = [] for idx in range(4): diff --git a/limap/base/unit_test.py b/limap/base/unit_test.py index 004f2f87..81235345 100644 --- a/limap/base/unit_test.py +++ b/limap/base/unit_test.py @@ -34,10 +34,10 @@ def report_error(imagecols_pred, imagecols): R_error = ( imagecols_pred.camimage(img_id).R() - imagecols.camimage(img_id).R() ) - R_error = np.sqrt(np.sum(R_error ** 2)) + R_error = np.sqrt(np.sum(R_error**2)) T_error = ( imagecols_pred.camimage(img_id).T() - imagecols.camimage(img_id).T() ) - T_error = np.sqrt(np.sum(T_error ** 2)) + T_error = np.sqrt(np.sum(T_error**2)) pose_errors.append(np.array([R_error, T_error])) print("pose_error: (R, T)", np.array(pose_errors).mean(0)) diff --git a/limap/estimators/__init__.py b/limap/estimators/__init__.py index 56e1cbb4..bd0abed8 100644 --- a/limap/estimators/__init__.py +++ b/limap/estimators/__init__.py @@ -1,2 +1,3 @@ from _limap._estimators import * + from .absolute_pose import * diff --git a/limap/estimators/absolute_pose/_pl_estimate_absolute_pose.py b/limap/estimators/absolute_pose/_pl_estimate_absolute_pose.py index b66e7ae7..272222d7 100644 --- a/limap/estimators/absolute_pose/_pl_estimate_absolute_pose.py +++ b/limap/estimators/absolute_pose/_pl_estimate_absolute_pose.py @@ -1,8 +1,9 @@ +import numpy as np from _limap import _ceresbase -import limap.optimize as _optimize -import limap.estimators as _estimators + import limap.base as _base -import numpy as np +import limap.estimators as _estimators +import limap.optimize as _optimize def _pl_estimate_absolute_pose( @@ -110,15 +111,12 @@ def _pl_estimate_absolute_pose( ransac_options.data_type_weights_ = np.array( [ransac_cfg["weight_point"], ransac_cfg["weight_line"]] ) - ransac_options.data_type_weights_ *= ( - np.array( - [ - ransac_options.squared_inlier_thresholds_[1], - 
ransac_options.squared_inlier_thresholds_[0], - ] - ) - / np.sum(ransac_options.squared_inlier_thresholds_) - ) + ransac_options.data_type_weights_ *= np.array( + [ + ransac_options.squared_inlier_thresholds_[1], + ransac_options.squared_inlier_thresholds_[0], + ] + ) / np.sum(ransac_options.squared_inlier_thresholds_) ransac_options.min_num_iterations_ = ransac_cfg["min_num_iterations"] ransac_options.final_least_squares_ = ransac_cfg["final_least_squares"] diff --git a/limap/features/__init__.py b/limap/features/__init__.py index f22c6467..5ff46e92 100644 --- a/limap/features/__init__.py +++ b/limap/features/__init__.py @@ -1,3 +1,4 @@ from _limap._features import * -from .extractors import * + from .extract_line_patches import * +from .extractors import * diff --git a/limap/features/extract_line_patches.py b/limap/features/extract_line_patches.py index 3787d100..53d21be5 100644 --- a/limap/features/extract_line_patches.py +++ b/limap/features/extract_line_patches.py @@ -1,5 +1,5 @@ -from _limap import _features import numpy as np +from _limap import _features def write_patch(fname, patch, dtype="float16"): @@ -17,7 +17,7 @@ def write_patch(fname, patch, dtype="float16"): def load_patch(fname, dtype="float16"): # return a PatchInfo_f object - patch_info_name = "PatchInfo_f{0}".format(dtype[-2:]) + patch_info_name = f"PatchInfo_f{dtype[-2:]}" with open(fname, "rb") as f: data = np.load(f, allow_pickle=True) patch = getattr(_features, patch_info_name)( @@ -28,7 +28,7 @@ def load_patch(fname, dtype="float16"): def get_extractor(cfg, channels): lpe_options = _features.LinePatchExtractorOptions(cfg) - patch_extractor_name = "LinePatchExtractor_f64_c{0}".format(channels) + patch_extractor_name = f"LinePatchExtractor_f64_c{channels}" extractor = getattr(_features, patch_extractor_name)(lpe_options) return extractor @@ -39,9 +39,7 @@ def extract_line_patch_oneimage(cfg, track, img_id, camview, feature): _features.PatchInfo_fx """ lpe_options = _features.LinePatchExtractorOptions(cfg) - patch_extractor_name = "LinePatchExtractor_f64_c{0}".format( - feature.shape[2] - ) + patch_extractor_name = f"LinePatchExtractor_f64_c{feature.shape[2]}" extractor = getattr(_features, patch_extractor_name)(lpe_options) patch = extractor.ExtractOneImage(track, img_id, camview, feature) return patch @@ -53,9 +51,7 @@ def extract_line_patches(cfg, track, p_camviews, p_features): list of _features.PatchInfo_fx """ lpe_options = _features.LinePatchExtractorOptions(cfg) - patch_extractor_name = "LinePatchExtractor_f64_c{0}".format( - p_features[0].shape[2] - ) + patch_extractor_name = f"LinePatchExtractor_f64_c{p_features[0].shape[2]}" extractor = getattr(_features, patch_extractor_name)(lpe_options) patches = extractor.Extract(track, p_camviews, p_features) return patches diff --git a/limap/features/extractors.py b/limap/features/extractors.py index 128fec7f..2323a0aa 100644 --- a/limap/features/extractors.py +++ b/limap/features/extractors.py @@ -1,17 +1,17 @@ # [NOTE] modified from the pixel-perfect-sfm project -import os, sys -import torch +import sys +import time + import numpy as np import PIL - -from .models.s2dnet import * -from .models.vggnet import VGGNet +import torch import torchvision.transforms.functional as tvf +from _limap import _features from torchvision import transforms -import time -from _limap import _features +from .models.s2dnet import * +from .models.vggnet import VGGNet RGB_mean = [0.485, 0.456, 0.406] RGB_std = [0.229, 0.224, 0.225] diff --git a/limap/features/models/base_model.py 
b/limap/features/models/base_model.py index f50469b9..c342d6c0 100644 --- a/limap/features/models/base_model.py +++ b/limap/features/models/base_model.py @@ -4,9 +4,10 @@ """ from abc import ABCMeta, abstractmethod +from copy import copy + from omegaconf import OmegaConf from torch import nn -from copy import copy class BaseModel(nn.Module, metaclass=ABCMeta): @@ -77,7 +78,7 @@ def freeze_bn(module): def forward(self, data): """Check the data and call the _forward method of the child model.""" for key in self.required_data_keys: - assert key in data, "Missing key {} in data".format(key) + assert key in data, f"Missing key {key} in data" return self._forward(data) @abstractmethod diff --git a/limap/features/models/s2dnet.py b/limap/features/models/s2dnet.py index 6bc26f95..3fd0cf0b 100644 --- a/limap/features/models/s2dnet.py +++ b/limap/features/models/s2dnet.py @@ -1,22 +1,14 @@ +import logging +import os +from pathlib import Path from typing import List -import numpy + +import numpy as np import torch import torch.nn as nn from torchvision import models -from pathlib import Path -import logging -from PIL import Image from .base_model import BaseModel -import os, sys -from torchvision import transforms -import numpy as np -import torch.nn.functional as F - -import argparse -import h5py - -from time import time type_dict = { "uint8_t": torch.cuda.ByteTensor, @@ -66,7 +58,7 @@ def print_gpu_memory(): a = torch.cuda.memory_allocated(0) f = r - a # free inside reserved - print(np.array([t, r, a, f]) / 2 ** 30) + print(np.array([t, r, a, f]) / 2**30) class AdapLayers(nn.Module): @@ -90,12 +82,12 @@ def __init__(self, hypercolumn_layers: List[str], output_dim: int = 128): nn.BatchNorm2d(output_dim), ) self.layers.append(layer) - self.add_module("adap_layer_{}".format(i), layer) + self.add_module(f"adap_layer_{i}", layer) def forward(self, features: List[torch.tensor]): """Apply adaptation layers.""" for i, _ in enumerate(features): - features[i] = getattr(self, "adap_layer_{}".format(i))(features[i]) + features[i] = getattr(self, f"adap_layer_{i}")(features[i]) return features @@ -130,7 +122,7 @@ def _init(self, conf): if isinstance(layer, torch.nn.MaxPool2d): current_scale += 1 if i in self.hypercolumn_indices: - self.scales.append(2 ** current_scale) + self.scales.append(2**current_scale) self.adaptation_layers = AdapLayers( conf.hypercolumn_layers, conf.output_dim diff --git a/limap/features/models/vggnet.py b/limap/features/models/vggnet.py index 35b58563..f4ab7134 100644 --- a/limap/features/models/vggnet.py +++ b/limap/features/models/vggnet.py @@ -31,7 +31,7 @@ def _init(self, conf=default_conf): if isinstance(layer, torch.nn.MaxPool2d): current_scale += 1 if i in self.hypercolumn_indices: - self.scales.append(2 ** current_scale) + self.scales.append(2**current_scale) def _forward(self, data): image = data # data['image'] diff --git a/limap/fitting/__init__.py b/limap/fitting/__init__.py index c17450d1..0e9fa0d6 100644 --- a/limap/fitting/__init__.py +++ b/limap/fitting/__init__.py @@ -1,2 +1,3 @@ from _limap._fitting import * + from .fitting import * diff --git a/limap/fitting/fitting.py b/limap/fitting/fitting.py index c13b72ee..95c38944 100644 --- a/limap/fitting/fitting.py +++ b/limap/fitting/fitting.py @@ -1,6 +1,5 @@ -from _limap import _base, _estimators, _fitting -import os import numpy as np +from _limap import _estimators, _fitting from bresenham import bresenham from hloc.localize_inloc import interpolate_scan diff --git a/limap/line2d/DeepLSD/deeplsd.py 
b/limap/line2d/DeepLSD/deeplsd.py index 71eaebae..7836e3d3 100644 --- a/limap/line2d/DeepLSD/deeplsd.py +++ b/limap/line2d/DeepLSD/deeplsd.py @@ -1,7 +1,9 @@ import os + import numpy as np import torch from deeplsd.models.deeplsd_inference import DeepLSD + from ..base_detector import BaseDetector, BaseDetectorOptions diff --git a/limap/line2d/GlueStick/extractor.py b/limap/line2d/GlueStick/extractor.py index 1240d5b1..375cb6f4 100644 --- a/limap/line2d/GlueStick/extractor.py +++ b/limap/line2d/GlueStick/extractor.py @@ -1,12 +1,13 @@ import os + import numpy as np -from sklearn.cluster import DBSCAN import torch -from omegaconf import OmegaConf from gluestick.models.wireframe import lines_to_wireframe +from omegaconf import OmegaConf import limap.util.io as limapio from limap.point2d.superpoint.superpoint import SuperPoint, sample_descriptors + from ..base_detector import BaseDetector, BaseDetectorOptions @@ -28,7 +29,7 @@ def get_module_name(self): return "wireframe" def get_descinfo_fname(self, descinfo_folder, img_id): - fname = os.path.join(descinfo_folder, "descinfo_{0}.npz".format(img_id)) + fname = os.path.join(descinfo_folder, f"descinfo_{img_id}.npz") return fname def save_descinfo(self, descinfo_folder, img_id, descinfo): diff --git a/limap/line2d/GlueStick/matcher.py b/limap/line2d/GlueStick/matcher.py index e0ba1f9b..c0ff5a62 100644 --- a/limap/line2d/GlueStick/matcher.py +++ b/limap/line2d/GlueStick/matcher.py @@ -1,7 +1,9 @@ import os + import numpy as np import torch from gluestick.models.gluestick import GlueStick + from ..base_matcher import BaseMatcher, BaseMatcherOptions diff --git a/limap/line2d/HAWPv3/hawp.py b/limap/line2d/HAWPv3/hawp.py index 77c03554..2e73aed9 100644 --- a/limap/line2d/HAWPv3/hawp.py +++ b/limap/line2d/HAWPv3/hawp.py @@ -1,13 +1,13 @@ import os -from ..base_detector import BaseDetector, BaseDetectorOptions import cv2 import numpy as np import torch - from hawp.fsl.config import cfg as model_config from hawp.ssl.models import MODELS +from ..base_detector import BaseDetector, BaseDetectorOptions + class HAWPv3Detector(BaseDetector): def __init__(self, options=BaseDetectorOptions()): diff --git a/limap/line2d/L2D2/RAL_net_cov.py b/limap/line2d/L2D2/RAL_net_cov.py index 36466aa8..a4d8fa9e 100644 --- a/limap/line2d/L2D2/RAL_net_cov.py +++ b/limap/line2d/L2D2/RAL_net_cov.py @@ -1,7 +1,6 @@ -from __future__ import division, print_function import torch -import torch.nn.init import torch.nn as nn +import torch.nn.init class L2Norm(nn.Module): diff --git a/limap/line2d/L2D2/extractor.py b/limap/line2d/L2D2/extractor.py index 7c3d5c27..78bceaa6 100644 --- a/limap/line2d/L2D2/extractor.py +++ b/limap/line2d/L2D2/extractor.py @@ -1,8 +1,11 @@ import os -import numpy as np + import cv2 +import numpy as np import torch + import limap.util.io as limapio + from ..base_detector import BaseDetector, BaseDetectorOptions @@ -44,7 +47,7 @@ def get_module_name(self): return "l2d2" def get_descinfo_fname(self, descinfo_folder, img_id): - fname = os.path.join(descinfo_folder, "descinfo_{0}.npz".format(img_id)) + fname = os.path.join(descinfo_folder, f"descinfo_{img_id}.npz") return fname def save_descinfo(self, descinfo_folder, img_id, descinfo): diff --git a/limap/line2d/L2D2/matcher.py b/limap/line2d/L2D2/matcher.py index 7a0f4ab0..a246f52e 100644 --- a/limap/line2d/L2D2/matcher.py +++ b/limap/line2d/L2D2/matcher.py @@ -1,5 +1,5 @@ -import os import numpy as np + from ..base_matcher import BaseMatcher, BaseMatcherOptions diff --git a/limap/line2d/LBD/extractor.py 
b/limap/line2d/LBD/extractor.py index 08bd28ee..ac044a96 100644 --- a/limap/line2d/LBD/extractor.py +++ b/limap/line2d/LBD/extractor.py @@ -1,9 +1,12 @@ import os -import numpy as np + import cv2 -import pytlsd +import numpy as np import pytlbd +import pytlsd + import limap.util.io as limapio + from ..base_detector import BaseDetector, BaseDetectorOptions @@ -60,7 +63,7 @@ def get_module_name(self): return "lbd" def get_descinfo_fname(self, descinfo_folder, img_id): - fname = os.path.join(descinfo_folder, "descinfo_{0}.npz".format(img_id)) + fname = os.path.join(descinfo_folder, f"descinfo_{img_id}.npz") return fname def save_descinfo(self, descinfo_folder, img_id, descinfo): diff --git a/limap/line2d/LBD/matcher.py b/limap/line2d/LBD/matcher.py index 9b359721..9671908e 100644 --- a/limap/line2d/LBD/matcher.py +++ b/limap/line2d/LBD/matcher.py @@ -1,6 +1,6 @@ -import os import numpy as np import pytlbd + from ..base_matcher import BaseMatcher, BaseMatcherOptions diff --git a/limap/line2d/LSD/lsd.py b/limap/line2d/LSD/lsd.py index 457666cb..9cbfe509 100644 --- a/limap/line2d/LSD/lsd.py +++ b/limap/line2d/LSD/lsd.py @@ -1,6 +1,5 @@ -import os import pytlsd -import numpy as np + from ..base_detector import BaseDetector, BaseDetectorOptions diff --git a/limap/line2d/LineTR/extractor.py b/limap/line2d/LineTR/extractor.py index 1a2d9a6e..60674796 100644 --- a/limap/line2d/LineTR/extractor.py +++ b/limap/line2d/LineTR/extractor.py @@ -1,12 +1,14 @@ import os -import numpy as np + import cv2 +import numpy as np import torch import limap.util.io as limapio -from limap.point2d.superpoint.superpoint import SuperPoint, sample_descriptors -from .line_transformer import LineTransformer +from limap.point2d.superpoint.superpoint import SuperPoint + from ..base_detector import BaseDetector, BaseDetectorOptions +from .line_transformer import LineTransformer class LineTRExtractor(BaseDetector): @@ -24,7 +26,7 @@ def get_module_name(self): return "linetr" def get_descinfo_fname(self, descinfo_folder, img_id): - fname = os.path.join(descinfo_folder, "descinfo_{0}.npz".format(img_id)) + fname = os.path.join(descinfo_folder, f"descinfo_{img_id}.npz") return fname def save_descinfo(self, descinfo_folder, img_id, descinfo): diff --git a/limap/line2d/LineTR/line_attention.py b/limap/line2d/LineTR/line_attention.py index 786a96ba..86732281 100755 --- a/limap/line2d/LineTR/line_attention.py +++ b/limap/line2d/LineTR/line_attention.py @@ -1,4 +1,3 @@ -import numpy as np import torch import torch.nn as nn import torch.nn.functional as F @@ -38,7 +37,7 @@ def __init__(self, n_heads: int, d_feature: int, dropout=0.1): self.w_vs = nn.Linear(d_feature, n_heads * dim, bias=True) self.fc = nn.Linear(n_heads * dim, d_feature, bias=True) - self.attention = ScaledDotProduct(scale=dim ** 0.5) + self.attention = ScaledDotProduct(scale=dim**0.5) self.dropout = nn.Dropout(dropout) self.layer_norm = nn.LayerNorm(d_feature, eps=1e-6) diff --git a/limap/line2d/LineTR/line_process.py b/limap/line2d/LineTR/line_process.py index 8298fbc6..3f0bc8af 100755 --- a/limap/line2d/LineTR/line_process.py +++ b/limap/line2d/LineTR/line_process.py @@ -1,7 +1,7 @@ +import math + import numpy as np -import cv2 import torch -import math def filter_by_length(lines, min_length, max_sublines): @@ -54,7 +54,7 @@ def point_on_line(line, dist_px): vec = ep - sp if vec[0] != 0: m = vec[1] / vec[0] - x = np.sqrt(dist_px ** 2 / (1 + m ** 2)) + x = np.sqrt(dist_px**2 / (1 + m**2)) y = m * x else: x = 0 @@ -94,10 +94,8 @@ def remove_borders( sp = 
np.floor(klines[:, 0]).astype(int) ep = np.floor(klines[:, 1]).astype(int) valid_mask_given = ( - ( - valid_mask_given[sp[:, 1], sp[:, 0]] - + valid_mask_given[ep[:, 1], ep[:, 0]] - ) + valid_mask_given[sp[:, 1], sp[:, 0]] + + valid_mask_given[ep[:, 1], ep[:, 0]] ).astype(bool) valid_mask = valid_mask & valid_mask_given @@ -275,7 +273,7 @@ def change_cv2_T_np(klines_cv): kline_ep = [sp_x, sp_y] # linelength = math.sqrt((kline_ep[0]-kline_sp[0])**2 +(kline_ep[1]-kline_sp[1])**2) - linelength = line.lineLength * (2 ** line.octave) + linelength = line.lineLength * (2**line.octave) klines_sp.append(kline_sp) klines_ep.append(kline_ep) diff --git a/limap/line2d/LineTR/line_transformer.py b/limap/line2d/LineTR/line_transformer.py index bb54125c..80bcbddf 100755 --- a/limap/line2d/LineTR/line_transformer.py +++ b/limap/line2d/LineTR/line_transformer.py @@ -1,10 +1,12 @@ from copy import deepcopy from pathlib import Path + import torch +from einops import repeat from torch import nn -from .line_attention import MultiHeadAttention, FeedForward + +from .line_attention import FeedForward, MultiHeadAttention from .line_process import * -from einops import rearrange, repeat def MLP(channels: list, do_bn=True): # channels [3, 32, 64, 128, 256, 256] @@ -186,7 +188,7 @@ def forward( def attention(query, key, value): dim = query.shape[1] scores = ( - torch.einsum("bdhn,bdhm->bhnm", query, key) / dim ** 0.5 + torch.einsum("bdhn,bdhm->bhnm", query, key) / dim**0.5 ) # [3, 64, 4, 512] -> [3, 4, 512, 512] prob = torch.nn.functional.softmax(scores, dim=-1) return torch.einsum("bhnm,bdhm->bdhn", prob, value), prob @@ -207,12 +209,12 @@ def __init__(self, num_heads: int, d_model: int): def forward(self, query, key, value): batch_dim = query.size(0) - query, key, value = [ + query, key, value = ( l(x).view( batch_dim, self.dim, self.num_heads, -1 ) # [3, 64, 4, 512] for l, x in zip(self.proj, (query, key, value)) - ] + ) x, prob = attention(query, key, value) return ( self.merge( diff --git a/limap/line2d/LineTR/linetr_pipeline.py b/limap/line2d/LineTR/linetr_pipeline.py index 678ece39..f7e4ebde 100755 --- a/limap/line2d/LineTR/linetr_pipeline.py +++ b/limap/line2d/LineTR/linetr_pipeline.py @@ -1,10 +1,8 @@ import numpy as np import torch +from dsfm.trainlib.models.superpoint import SuperPoint from pytlsd import lsd -from dsfm.trainlib.models.superpoint import SuperPoint -from .line_transformer import LineTransformer, get_dist_matrix -from .nn_matcher import nn_matcher, nn_matcher_distmat from .. 
import BaseModel from ..utils.gt_line_matches import ( UNMATCHED_FEATURE, @@ -15,6 +13,8 @@ gt_matches_from_homography, gt_matches_from_pose_depth, ) +from .line_transformer import LineTransformer, get_dist_matrix +from .nn_matcher import nn_matcher, nn_matcher_distmat class LineTrPipeline(BaseModel): @@ -168,7 +168,7 @@ def process_siamese(data, i): pred["keypoints0"], pred["keypoints1"], **data, - pos_th=self.conf.ground_truth.th_positive + pos_th=self.conf.ground_truth.th_positive, ) pred["gt_assignment"] = assignment pred["gt_matches0"], pred["gt_matches1"] = m0, m1 @@ -188,7 +188,7 @@ def process_siamese(data, i): pred["lines0"].reshape(b_size, -1, 2), pred["lines1"].reshape(b_size, -1, 2), **data, - pos_th=self.conf.ground_truth.th_positive + pos_th=self.conf.ground_truth.th_positive, ) pred["samples_gt_assignment"] = samples_assignment pred["samples_gt_matches0"] = samples_m0 @@ -219,7 +219,7 @@ def process_siamese(data, i): pred["keypoints1"], **data, pos_th=self.conf.ground_truth.th_positive, - neg_th=self.conf.ground_truth.th_negative + neg_th=self.conf.ground_truth.th_negative, ) pred["gt_assignment"] = assignment pred["gt_matches0"], pred["gt_matches1"] = m0, m1 @@ -244,10 +244,8 @@ def process_siamese(data, i): pred["lines1"].reshape(b_size, -1, 2), **data, pos_th=self.conf.ground_truth.th_positive, - neg_th=self.conf.ground_truth.th_negative - )[ - :3 - ] + neg_th=self.conf.ground_truth.th_negative, + )[:3] pred["samples_gt_assignment"] = samples_assignment pred["samples_gt_matches0"] = samples_m0 pred["samples_gt_matches1"] = samples_m1 @@ -308,13 +306,13 @@ def process_siamese(data, i): assert match_mat.shape[0] == 1 bool_match_mat = match_mat[0] > 0 pred["line_matches0"] = np.argmax(bool_match_mat, axis=1) - pred["line_matches0"][ - ~np.any(bool_match_mat, axis=1) - ] = UNMATCHED_FEATURE + pred["line_matches0"][~np.any(bool_match_mat, axis=1)] = ( + UNMATCHED_FEATURE + ) pred["line_matches1"] = np.argmax(bool_match_mat, axis=0) - pred["line_matches1"][ - ~np.any(bool_match_mat, axis=0) - ] = UNMATCHED_FEATURE + pred["line_matches1"][~np.any(bool_match_mat, axis=0)] = ( + UNMATCHED_FEATURE + ) pred["line_matches0"] = torch.from_numpy(pred["line_matches0"])[None] pred["line_matches1"] = torch.from_numpy(pred["line_matches1"])[None] lmatch_scores = torch.from_numpy( diff --git a/limap/line2d/LineTR/matcher.py b/limap/line2d/LineTR/matcher.py index 69511ec8..016d0b98 100644 --- a/limap/line2d/LineTR/matcher.py +++ b/limap/line2d/LineTR/matcher.py @@ -1,10 +1,8 @@ -import os import numpy as np -import torch from ..base_matcher import BaseMatcher, BaseMatcherOptions -from .line_transformer import LineTransformer from .line_process import get_dist_matrix +from .line_transformer import LineTransformer from .nn_matcher import nn_matcher_distmat diff --git a/limap/line2d/SOLD2/experiment.py b/limap/line2d/SOLD2/experiment.py index 7d6dc93d..ccc75f82 100644 --- a/limap/line2d/SOLD2/experiment.py +++ b/limap/line2d/SOLD2/experiment.py @@ -2,11 +2,10 @@ Main file to launch training and testing experiments. 
""" -import yaml import os -import argparse -import numpy as np + import torch +import yaml # Pytorch configurations torch.cuda.empty_cache() @@ -20,7 +19,7 @@ def load_config(config_path): raise ValueError("[Error] The provided config path is not valid.") # Load the configuration - with open(config_path, "r") as f: + with open(config_path) as f: config = yaml.safe_load(f) return config @@ -33,18 +32,18 @@ def update_config(path, model_cfg=None, dataset_cfg=None): dataset_cfg = {} if dataset_cfg is None else dataset_cfg # Load saved configs - with open(os.path.join(path, "model_cfg.yaml"), "r") as f: + with open(os.path.join(path, "model_cfg.yaml")) as f: model_cfg_saved = yaml.safe_load(f) model_cfg.update(model_cfg_saved) - with open(os.path.join(path, "dataset_cfg.yaml"), "r") as f: + with open(os.path.join(path, "dataset_cfg.yaml")) as f: dataset_cfg_saved = yaml.safe_load(f) dataset_cfg.update(dataset_cfg_saved) # Update the saved yaml file - if not model_cfg == model_cfg_saved: + if model_cfg != model_cfg_saved: with open(os.path.join(path, "model_cfg.yaml"), "w") as f: yaml.dump(model_cfg, f) - if not dataset_cfg == dataset_cfg_saved: + if dataset_cfg != dataset_cfg_saved: with open(os.path.join(path, "dataset_cfg.yaml"), "w") as f: yaml.dump(dataset_cfg, f) diff --git a/limap/line2d/SOLD2/misc/geometry_utils.py b/limap/line2d/SOLD2/misc/geometry_utils.py index 0e541837..d09605a5 100644 --- a/limap/line2d/SOLD2/misc/geometry_utils.py +++ b/limap/line2d/SOLD2/misc/geometry_utils.py @@ -1,7 +1,6 @@ import numpy as np import torch - ### Point-related utils diff --git a/limap/line2d/SOLD2/misc/train_utils.py b/limap/line2d/SOLD2/misc/train_utils.py index 62eb9767..6129759b 100644 --- a/limap/line2d/SOLD2/misc/train_utils.py +++ b/limap/line2d/SOLD2/misc/train_utils.py @@ -3,6 +3,7 @@ """ import os + import numpy as np import torch diff --git a/limap/line2d/SOLD2/misc/visualize_util.py b/limap/line2d/SOLD2/misc/visualize_util.py index 3be10f97..7a863186 100644 --- a/limap/line2d/SOLD2/misc/visualize_util.py +++ b/limap/line2d/SOLD2/misc/visualize_util.py @@ -1,10 +1,11 @@ -""" Organize some frequently used visualization functions. 
""" +"""Organize some frequently used visualization functions.""" + +import copy import cv2 -import numpy as np import matplotlib import matplotlib.pyplot as plt -import copy +import numpy as np import seaborn as sns diff --git a/limap/line2d/SOLD2/model/line_detection.py b/limap/line2d/SOLD2/model/line_detection.py index 90456c19..1c30796f 100644 --- a/limap/line2d/SOLD2/model/line_detection.py +++ b/limap/line2d/SOLD2/model/line_detection.py @@ -3,11 +3,12 @@ """ import math + import numpy as np import torch -class LineSegmentDetectionModule(object): +class LineSegmentDetectionModule: """Module extracting line segments from junctions and line heatmaps.""" def __init__( @@ -178,9 +179,7 @@ def detect(self, junctions, heatmap, device=torch.device("cpu")): dim=-1, ) ) - normalized_seg_length = segments_length / ( - ((H ** 2) + (W ** 2)) ** 0.5 - ) + normalized_seg_length = segments_length / (((H**2) + (W**2)) ** 0.5) # Perform local max search num_cand = cand_h.shape[0] @@ -189,7 +188,7 @@ def detect(self, junctions, heatmap, device=torch.device("cpu")): num_iter = math.ceil(num_cand / group_size) sampled_feat_lst = [] for iter_idx in range(num_iter): - if not iter_idx == num_iter - 1: + if iter_idx != num_iter - 1: cand_h_ = cand_h[ iter_idx * group_size : (iter_idx + 1) * group_size, :, @@ -552,7 +551,7 @@ def detect_local_max( """Detection by local maximum search.""" # Compute the distance threshold dist_thresh = ( - 0.5 * (2 ** 0.5) + self.lambda_radius * normalized_seg_length + 0.5 * (2**0.5) + self.lambda_radius * normalized_seg_length ) # Make it N x 64 dist_thresh = torch.repeat_interleave( diff --git a/limap/line2d/SOLD2/model/line_detector.py b/limap/line2d/SOLD2/model/line_detector.py index e3dfa092..e7cabd6e 100644 --- a/limap/line2d/SOLD2/model/line_detector.py +++ b/limap/line2d/SOLD2/model/line_detector.py @@ -3,14 +3,15 @@ """ import time + import numpy as np import torch from torch.nn.functional import softmax -from .model_util import get_model -from .loss import get_loss_and_weights -from .line_detection import LineSegmentDetectionModule from ..train import convert_junc_predictions +from .line_detection import LineSegmentDetectionModule +from .loss import get_loss_and_weights +from .model_util import get_model def line_map_to_segments(junctions, line_map): @@ -41,7 +42,7 @@ def line_map_to_segments(junctions, line_map): return output_segments -class LineDetector(object): +class LineDetector: def __init__( self, model_cfg, diff --git a/limap/line2d/SOLD2/model/line_matcher.py b/limap/line2d/SOLD2/model/line_matcher.py index 3c45b936..50073cac 100644 --- a/limap/line2d/SOLD2/model/line_matcher.py +++ b/limap/line2d/SOLD2/model/line_matcher.py @@ -3,22 +3,23 @@ """ import time + import cv2 import numpy as np import torch import torch.nn.functional as F from torch.nn.functional import softmax -from .model_util import get_model -from .loss import get_loss_and_weights -from .metrics import super_nms -from .line_detection import LineSegmentDetectionModule -from .line_matching import WunschLineMatcher from ..train import convert_junc_predictions +from .line_detection import LineSegmentDetectionModule from .line_detector import line_map_to_segments +from .line_matching import WunschLineMatcher +from .loss import get_loss_and_weights +from .metrics import super_nms +from .model_util import get_model -class LineMatcher(object): +class LineMatcher: """Full line matcher including line detection and matching with the Needleman-Wunsch algorithm.""" diff --git 
a/limap/line2d/SOLD2/model/line_matching.py b/limap/line2d/SOLD2/model/line_matching.py index 3ce46b3a..5f64e3d7 100644 --- a/limap/line2d/SOLD2/model/line_matching.py +++ b/limap/line2d/SOLD2/model/line_matching.py @@ -3,13 +3,13 @@ """ import numpy as np -import cv2 import torch import torch.nn.functional as F + from ..misc.geometry_utils import keypoints_to_grid -class WunschLineMatcher(object): +class WunschLineMatcher: """Class matching two sets of line segments with the Needleman-Wunsch algorithm.""" diff --git a/limap/line2d/SOLD2/model/loss.py b/limap/line2d/SOLD2/model/loss.py index 4c5e7525..9bac8fa0 100644 --- a/limap/line2d/SOLD2/model/loss.py +++ b/limap/line2d/SOLD2/model/loss.py @@ -6,10 +6,11 @@ import torch import torch.nn as nn import torch.nn.functional as F + from ..misc.geometry_utils import ( - keypoints_to_grid, - get_dist_mask, get_common_line_mask, + get_dist_mask, + keypoints_to_grid, ) @@ -17,7 +18,7 @@ def get_loss_and_weights(model_cfg, device=torch.device("cuda")): """Get loss functions and either static or dynamic weighting.""" # Get the global weighting policy w_policy = model_cfg.get("weighting_policy", "static") - if not w_policy in ["static", "dynamic"]: + if w_policy not in ["static", "dynamic"]: raise ValueError("[Error] Not supported weighting policy.") loss_func = {} @@ -154,7 +155,7 @@ def space_to_depth(input_tensor, grid_size): # (N, bs, bs, C, H//bs, W//bs) x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # (N, C*bs^2, H//bs, W//bs) - x = x.view(N, C * (grid_size ** 2), H // grid_size, W // grid_size) + x = x.view(N, C * (grid_size**2), H // grid_size, W // grid_size) return x diff --git a/limap/line2d/SOLD2/model/metrics.py b/limap/line2d/SOLD2/model/metrics.py index 49d9f915..aaa3c9f8 100644 --- a/limap/line2d/SOLD2/model/metrics.py +++ b/limap/line2d/SOLD2/model/metrics.py @@ -2,15 +2,14 @@ This file implements the evaluation metrics. 
""" +import numpy as np import torch import torch.nn.functional as F -import numpy as np -from torchvision.ops.boxes import batched_nms from ..misc.geometry_utils import keypoints_to_grid -class Metrics(object): +class Metrics: """Metric evaluation calculator.""" def __init__( @@ -128,22 +127,22 @@ def _check_metrics(self): """Check if all input metrics are valid.""" flag = True for metric in self.junc_metric_lst: - if not metric in self.supported_junc_metrics: + if metric not in self.supported_junc_metrics: flag = False break for metric in self.heatmap_metric_lst: - if not metric in self.supported_heatmap_metrics: + if metric not in self.supported_heatmap_metrics: flag = False break for metric in self.desc_metric_lst: - if not metric in self.supported_desc_metrics: + if metric not in self.supported_desc_metrics: flag = False break return flag -class AverageMeter(object): +class AverageMeter: def __init__( self, junc_metric_lst=None, @@ -255,7 +254,7 @@ def average(self): results = {} for met in self.metric_results.keys(): # Skip pr curve metrics - if not met in self.supported_pr_metrics: + if met not in self.supported_pr_metrics: results[met] = self.metric_results[met] / self.count # Only update precision and recall in pr metrics else: @@ -283,22 +282,22 @@ def _check_metrics(self): """Check if all input metrics are valid.""" flag = True for metric in self.junc_metric_lst: - if not metric in self.supported_junc_metrics: + if metric not in self.supported_junc_metrics: flag = False break for metric in self.heatmap_metric_lst: - if not metric in self.supported_heatmap_metrics: + if metric not in self.supported_heatmap_metrics: flag = False break for metric in self.desc_metric_lst: - if not metric in self.supported_desc_metrics: + if metric not in self.supported_desc_metrics: flag = False break return flag -class junction_precision(object): +class junction_precision: """Junction precision.""" def __init__(self, detection_thresh): @@ -321,7 +320,7 @@ def __call__(self, junc_pred, junc_gt, valid_mask): return float(precision) -class junction_recall(object): +class junction_recall: """Junction recall.""" def __init__(self, detection_thresh): @@ -342,7 +341,7 @@ def __call__(self, junc_pred, junc_gt, valid_mask): return float(recall) -class junction_pr(object): +class junction_pr: """Junction precision-recall info.""" def __init__(self, num_threshold=50): @@ -402,7 +401,7 @@ def __call__(self, junc_pred_raw, junc_gt, valid_mask): } -class heatmap_precision(object): +class heatmap_precision: """Heatmap precision.""" def __init__(self, prob_thresh): @@ -424,7 +423,7 @@ def __call__(self, heatmap_pred, heatmap_gt, valid_mask): return precision -class heatmap_recall(object): +class heatmap_recall: """Heatmap recall.""" def __init__(self, prob_thresh): @@ -446,7 +445,7 @@ def __call__(self, heatmap_pred, heatmap_gt, valid_mask): return recall -class matching_score(object): +class matching_score: """Descriptors matching score.""" def __init__(self, grid_size): @@ -576,7 +575,7 @@ def nms_fast(in_corners, H, W, dist_thresh): return np.zeros((3, 0)).astype(int), np.zeros(0).astype(int) if rcorners.shape[1] == 1: out = np.vstack((rcorners, in_corners[2])).reshape(3, 1) - return out, np.zeros((1)).astype(int) + return out, np.zeros(1).astype(int) # Initialize the grid. 
for i, rc in enumerate(rcorners.T): grid[rcorners[1, i], rcorners[0, i]] = 1 diff --git a/limap/line2d/SOLD2/model/model_util.py b/limap/line2d/SOLD2/model/model_util.py index dd3c0083..f7404678 100644 --- a/limap/line2d/SOLD2/model/model_util.py +++ b/limap/line2d/SOLD2/model/model_util.py @@ -1,11 +1,10 @@ -import torch import torch.nn as nn import torch.nn.init as init from .nets.backbone import HourglassBackbone, SuperpointBackbone -from .nets.junction_decoder import SuperpointDecoder -from .nets.heatmap_decoder import PixelShuffleDecoder from .nets.descriptor_decoder import SuperpointDescriptor +from .nets.heatmap_decoder import PixelShuffleDecoder +from .nets.junction_decoder import SuperpointDecoder def get_model(model_cfg=None, loss_weights=None, mode="train", printing=False): @@ -18,7 +17,7 @@ def get_model(model_cfg=None, loss_weights=None, mode="train", printing=False): if printing: print("\n\n\t--------Initializing model----------") supported_arch = ["simple"] - if not model_cfg["model_architecture"] in supported_arch: + if model_cfg["model_architecture"] not in supported_arch: raise ValueError( "[Error] The model architecture is not in supported arch!" ) @@ -108,7 +107,7 @@ def forward(self, input_images): def get_backbone(self): """Retrieve the backbone encoder network.""" - if not self.cfg["backbone"] in self.supported_backbone: + if self.cfg["backbone"] not in self.supported_backbone: raise ValueError("[Error] The backbone selection is not supported.") # lcnn backbone (stacked hourglass) @@ -129,7 +128,7 @@ def get_backbone(self): def get_junction_decoder(self): """Get the junction decoder.""" - if not self.cfg["junction_decoder"] in self.supported_junction_decoder: + if self.cfg["junction_decoder"] not in self.supported_junction_decoder: raise ValueError( "[Error] The junction decoder selection is not supported." ) @@ -146,7 +145,7 @@ def get_junction_decoder(self): def get_heatmap_decoder(self): """Get the heatmap decoder.""" - if not self.cfg["heatmap_decoder"] in self.supported_heatmap_decoder: + if self.cfg["heatmap_decoder"] not in self.supported_heatmap_decoder: raise ValueError( "[Error] The heatmap decoder selection is not supported." ) @@ -181,8 +180,7 @@ def get_heatmap_decoder(self): def get_descriptor_decoder(self): """Get the descriptor decoder.""" if ( - not self.cfg["descriptor_decoder"] - in self.supported_descriptor_decoder + self.cfg["descriptor_decoder"] not in self.supported_descriptor_decoder ): raise ValueError( "[Error] The descriptor decoder selection is not supported." 
diff --git a/limap/line2d/SOLD2/sold2.py b/limap/line2d/SOLD2/sold2.py index 2f5a83df..0d565f95 100644 --- a/limap/line2d/SOLD2/sold2.py +++ b/limap/line2d/SOLD2/sold2.py @@ -1,9 +1,11 @@ -import os import copy +import os + import limap.util.io as limapio -from .sold2_wrapper import SOLD2LineDetector + from ..base_detector import BaseDetector, BaseDetectorOptions from ..base_matcher import BaseMatcher, BaseMatcherOptions +from .sold2_wrapper import SOLD2LineDetector class SOLD2Detector(BaseDetector): @@ -15,7 +17,7 @@ def get_module_name(self): return "sold2" def get_descinfo_fname(self, descinfo_folder, img_id): - fname = os.path.join(descinfo_folder, "descinfo_{0}.npy".format(img_id)) + fname = os.path.join(descinfo_folder, f"descinfo_{img_id}.npy") return fname def save_descinfo(self, descinfo_folder, img_id, descinfo): @@ -59,7 +61,7 @@ def sample_descinfo_by_indexes(self, descinfo, indexes): return descinfo def get_heatmap_fname(self, folder, img_id): - return os.path.join(folder, "heatmap_{0}.npy".format(img_id)) + return os.path.join(folder, f"heatmap_{img_id}.npy") def extract_heatmap(self, camview): img = camview.read_image(set_gray=self.set_gray) diff --git a/limap/line2d/SOLD2/sold2_wrapper.py b/limap/line2d/SOLD2/sold2_wrapper.py index 16db8ef5..a46d38d2 100644 --- a/limap/line2d/SOLD2/sold2_wrapper.py +++ b/limap/line2d/SOLD2/sold2_wrapper.py @@ -1,10 +1,10 @@ -import os, sys -import numpy as np -from torch.nn.functional import softmax +import os +import subprocess + import cv2 +import numpy as np import torch from skimage.draw import line -import subprocess from .experiment import load_config from .model.line_matcher import LineMatcher diff --git a/limap/line2d/SOLD2/train.py b/limap/line2d/SOLD2/train.py index 206a700b..39ebb7b5 100644 --- a/limap/line2d/SOLD2/train.py +++ b/limap/line2d/SOLD2/train.py @@ -2,28 +2,23 @@ This file implements the training process and all the summaries """ -import os -import numpy as np import cv2 +import numpy as np import torch -from torch.nn.functional import pixel_shuffle, softmax -from torch.utils.data import DataLoader import torch.utils.data.dataloader as torch_loader - -# from tensorboardX import SummaryWriter - -# from dataset.dataset_util import get_dataset -# from model.model_util import get_model -# from model.loss import TotalLoss, get_loss_and_weights -from .model.metrics import AverageMeter, Metrics, super_nms +from torch.nn.functional import pixel_shuffle, softmax # from model.lr_scheduler import get_lr_scheduler from .misc.train_utils import ( convert_image, - get_latest_checkpoint, - remove_old_checkpoints, ) +# from tensorboardX import SummaryWriter +# from dataset.dataset_util import get_dataset +# from model.model_util import get_model +# from model.loss import TotalLoss, get_loss_and_weights +from .model.metrics import AverageMeter, super_nms + def customized_collate_fn(batch): """Customized collate_fn.""" @@ -56,7 +51,7 @@ def restore_weights(model, state_dict, strict=True): # Load mismatched keys manually model_dict = model.state_dict() for idx, key in enumerate(missing_keys): - dict_keys = [_ for _ in unexpected_keys if not "tracked" in _] + dict_keys = [_ for _ in unexpected_keys if "tracked" not in _] model_dict[key] = state_dict[dict_keys[idx]] model.load_state_dict(model_dict) @@ -373,7 +368,7 @@ def train_single_epoch( results = metric_func.metric_results average = average_meter.average() # Get gpu memory usage in GB - gpu_mem_usage = torch.cuda.max_memory_allocated() / (1024 ** 3) + gpu_mem_usage = 
torch.cuda.max_memory_allocated() / (1024**3) if compute_descriptors: print( "Epoch [%d / %d] Iter [%d / %d] loss=%.4f (%.4f), junc_loss=%.4f (%.4f), heatmap_loss=%.4f (%.4f), descriptor_loss=%.4f (%.4f), gpu_mem=%.4fGB" @@ -734,7 +729,7 @@ def record_train_summaries(writer, global_step, scalars, images): # GPU memory part # Get gpu memory usage in GB - gpu_mem_usage = torch.cuda.max_memory_allocated() / (1024 ** 3) + gpu_mem_usage = torch.cuda.max_memory_allocated() / (1024**3) writer.add_scalar("GPU/GPU_memory_usage", gpu_mem_usage, global_step) # Loss part diff --git a/limap/line2d/TP_LSD/tp_lsd.py b/limap/line2d/TP_LSD/tp_lsd.py index 69a64a4d..19a6b52f 100644 --- a/limap/line2d/TP_LSD/tp_lsd.py +++ b/limap/line2d/TP_LSD/tp_lsd.py @@ -1,12 +1,13 @@ import os -from ..base_detector import BaseDetector, BaseDetectorOptions import cv2 import numpy as np import torch +from tp_lsd.modeling.TP_Net import Res320 from tp_lsd.utils.reconstruct import TPS_line from tp_lsd.utils.utils import load_model -from tp_lsd.modeling.TP_Net import Res320 + +from ..base_detector import BaseDetector, BaseDetectorOptions class TPLSDDetector(BaseDetector): diff --git a/limap/line2d/__init__.py b/limap/line2d/__init__.py index bf9326b3..7c29d288 100644 --- a/limap/line2d/__init__.py +++ b/limap/line2d/__init__.py @@ -1,6 +1,6 @@ import os -from .register_detector import get_detector, get_extractor -from .register_matcher import get_matcher # line utilization functions from .line_utils import * +from .register_detector import get_detector, get_extractor +from .register_matcher import get_matcher diff --git a/limap/line2d/base_detector.py b/limap/line2d/base_detector.py index 197322f4..b2a2dedc 100644 --- a/limap/line2d/base_detector.py +++ b/limap/line2d/base_detector.py @@ -1,14 +1,13 @@ import os +from typing import NamedTuple + +import cv2 import numpy as np from tqdm import tqdm -import cv2 import limap.util.io as limapio import limap.visualize as limapvis -import collections -from typing import NamedTuple - class BaseDetectorOptions(NamedTuple): """ @@ -182,7 +181,7 @@ def visualize_segs(self, output_folder, imagecols, first_k=10): img = imagecols.read_image(img_id) segs = limapio.read_txt_segments(seg_folder, img_id) img = limapvis.draw_segments(img, segs, (0, 255, 0)) - fname = os.path.join(vis_folder, "img_{0}_det.png".format(img_id)) + fname = os.path.join(vis_folder, f"img_{img_id}_det.png") cv2.imwrite(fname, img) def detect_all_images(self, output_folder, imagecols, skip_exists=False): @@ -219,7 +218,7 @@ def detect_all_images(self, output_folder, imagecols, skip_exists=False): img = imagecols.read_image(img_id) img = limapvis.draw_segments(img, segs, (0, 255, 0)) fname = os.path.join( - vis_folder, "img_{0}_det.png".format(img_id) + vis_folder, f"img_{img_id}_det.png" ) cv2.imwrite(fname, img) all_2d_segs = limapio.read_all_segments_from_folder(seg_folder) @@ -308,7 +307,7 @@ def detect_and_extract_all_images( img = imagecols.read_image(img_id) img = limapvis.draw_segments(img, segs, (0, 255, 0)) fname = os.path.join( - vis_folder, "img_{0}_det.png".format(img_id) + vis_folder, f"img_{img_id}_det.png" ) cv2.imwrite(fname, img) all_2d_segs = limapio.read_all_segments_from_folder(seg_folder) diff --git a/limap/line2d/base_matcher.py b/limap/line2d/base_matcher.py index 2077ce1e..0306ba7d 100644 --- a/limap/line2d/base_matcher.py +++ b/limap/line2d/base_matcher.py @@ -1,11 +1,10 @@ import os -import numpy as np -from tqdm import tqdm +from typing import NamedTuple + import joblib -import limap.util.io 
as limapio +from tqdm import tqdm -import collections -from typing import NamedTuple +import limap.util.io as limapio class BaseMatcherOptions(NamedTuple): @@ -60,9 +59,7 @@ def get_matches_folder(self, output_folder): """ return os.path.join( output_folder, - "{0}_n{1}_top{2}".format( - self.get_module_name(), self.n_neighbors, self.topk - ), + f"{self.get_module_name()}_n{self.n_neighbors}_top{self.topk}", ) def read_descinfo(self, descinfo_folder, idx): @@ -76,7 +73,7 @@ def get_match_filename(self, matches_folder, idx): matches_folder (str): The output matching folder idx (int): image id """ - fname = os.path.join(matches_folder, "matches_{0}.npy".format(idx)) + fname = os.path.join(matches_folder, f"matches_{idx}.npy") return fname def save_match(self, matches_folder, idx, matches): diff --git a/limap/line2d/endpoints/extractor.py b/limap/line2d/endpoints/extractor.py index c3995bec..a39f390b 100644 --- a/limap/line2d/endpoints/extractor.py +++ b/limap/line2d/endpoints/extractor.py @@ -1,8 +1,11 @@ import os + import numpy as np import torch + import limap.util.io as limapio from limap.point2d.superpoint.superpoint import SuperPoint + from ..base_detector import BaseDetector, BaseDetectorOptions @@ -18,7 +21,7 @@ def get_module_name(self): return "superpoint_endpoints" def get_descinfo_fname(self, descinfo_folder, img_id): - fname = os.path.join(descinfo_folder, "descinfo_{0}.npz".format(img_id)) + fname = os.path.join(descinfo_folder, f"descinfo_{img_id}.npz") return fname def save_descinfo(self, descinfo_folder, img_id, descinfo): diff --git a/limap/line2d/endpoints/matcher.py b/limap/line2d/endpoints/matcher.py index 1472e1b5..6fb3b903 100644 --- a/limap/line2d/endpoints/matcher.py +++ b/limap/line2d/endpoints/matcher.py @@ -1,9 +1,8 @@ -import os import numpy as np import torch -import limap.util.io as limapio from limap.point2d.superglue.superglue import SuperGlue + from ..base_matcher import BaseMatcher, BaseMatcherOptions diff --git a/limap/line2d/line_utils/merge_lines.py b/limap/line2d/line_utils/merge_lines.py index d3cc93ed..2184ec78 100644 --- a/limap/line2d/line_utils/merge_lines.py +++ b/limap/line2d/line_utils/merge_lines.py @@ -104,8 +104,8 @@ def merge_line_cluster(lines): if b == 0: u = np.array([1, 0]) if a >= c else np.array([0, 1]) else: - m = (c - a + np.sqrt((a - c) ** 2 + 4 * b ** 2)) / (2 * b) - u = np.array([1, m]) / np.sqrt(1 + m ** 2) + m = (c - a + np.sqrt((a - c) ** 2 + 4 * b**2)) / (2 * b) + u = np.array([1, m]) / np.sqrt(1 + m**2) # Get the center of gravity of all endpoints cross = np.mean(points, axis=0) diff --git a/limap/merging/__init__.py b/limap/merging/__init__.py index 0eca21c1..4f2c352e 100644 --- a/limap/merging/__init__.py +++ b/limap/merging/__init__.py @@ -1,2 +1,3 @@ from _limap._merging import * + from .merging import * diff --git a/limap/merging/merging.py b/limap/merging/merging.py index 951af68c..cc627feb 100644 --- a/limap/merging/merging.py +++ b/limap/merging/merging.py @@ -1,6 +1,5 @@ -from _limap import _merging as _mrg from _limap import _base -import numpy as np +from _limap import _merging as _mrg def merging(linker, all_2d_segs, imagecols, seg3d_list, neighbors, var2d=5.0): @@ -36,9 +35,7 @@ def remerge(linker3d, linetracks, num_outliers=2): break num_tracks = num_tracks_new print( - "[LOG] tracks after iterative remerging: {0} / {1}".format( - len(new_linetracks), len(linetracks) - ) + f"[LOG] tracks after iterative remerging: {len(new_linetracks)} / {len(linetracks)}" ) return new_linetracks diff --git 
a/limap/optimize/__init__.py b/limap/optimize/__init__.py index 00c520cd..6e0be369 100644 --- a/limap/optimize/__init__.py +++ b/limap/optimize/__init__.py @@ -1,5 +1,6 @@ from _limap._optimize import * -from .line_refinement import * -from .hybrid_bundle_adjustment import * + from .global_pl_association import * +from .hybrid_bundle_adjustment import * from .line_localization import * +from .line_refinement import * diff --git a/limap/optimize/extract_heatmaps_sold2.py b/limap/optimize/extract_heatmaps_sold2.py index ffed16de..db77714e 100644 --- a/limap/optimize/extract_heatmaps_sold2.py +++ b/limap/optimize/extract_heatmaps_sold2.py @@ -1,8 +1,6 @@ -import os -import numpy as np -import limap.line2d import limap.base as _base +import limap.line2d import limap.util.io as limapio diff --git a/limap/optimize/extract_track_patches_s2dnet.py b/limap/optimize/extract_track_patches_s2dnet.py index 05ccf433..0888a410 100644 --- a/limap/optimize/extract_track_patches_s2dnet.py +++ b/limap/optimize/extract_track_patches_s2dnet.py @@ -1,10 +1,9 @@ import os +import time + import numpy as np from tqdm import tqdm -import time -import limap.line2d -import limap.base as _base import limap.features as _features import limap.util.io as limapio @@ -46,7 +45,7 @@ def extract_track_patches_s2dnet( line2d_range = extractor.GetLine2DRange(track, img_id, camview) line2d_collections[img_id].append([line2d_range, track_id]) limapio.check_makedirs( - os.path.join(output_dir, "track{0}".format(track_id)) + os.path.join(output_dir, f"track{track_id}") ) # extract line patches for each image @@ -64,8 +63,8 @@ def extract_track_patches_s2dnet( for patch, track_id in zip(patches, track_ids): fname = os.path.join( output_dir, - "track{0}".format(track_id), - "track{0}_img{1}.npy".format(track_id, img_id), + f"track{track_id}", + f"track{track_id}_img{img_id}.npy", ) if skip_exists and os.path.exists(fname): continue diff --git a/limap/optimize/functions.py b/limap/optimize/functions.py index e3502d6d..0f81e643 100644 --- a/limap/optimize/functions.py +++ b/limap/optimize/functions.py @@ -1,6 +1,8 @@ import os + import numpy as np from tqdm import tqdm + import limap.visualize as limapvis @@ -12,8 +14,8 @@ def visualize_heatmap_intersections( ht_intersections, max_image_dim=None, ): - import matplotlib.colors as colors import matplotlib.cm as cmx + import matplotlib.colors as colors cNorm = colors.Normalize(vmin=0, vmax=1) scalarMap = cmx.ScalarMappable(norm=cNorm, cmap="viridis") @@ -31,7 +33,7 @@ def visualize_heatmap_intersections( imname, max_image_dim=max_image_dim, set_gray=False ) img = limapvis.draw_points(img, intersections, (255, 0, 0), 2) - fname_out = prefix + "_img{0}.png".format(img_id) + fname_out = prefix + f"_img{img_id}.png" cv2.imwrite(fname_out, img) # visualize heatmap @@ -41,7 +43,7 @@ def visualize_heatmap_intersections( heatmap_img = limapvis.draw_points( heatmap_img, intersections, (255, 0, 0), 2 ) - fname_out_heatmap = prefix + "_heatmap{0}.png".format(img_id) + fname_out_heatmap = prefix + f"_heatmap{img_id}.png" cv2.imwrite(fname_out_heatmap, heatmap_img) @@ -69,7 +71,7 @@ def visualize_fconsis_intersections( img = limapvis.crop_to_patch(img, point, patch_size=100) imgs.append(img) bigimg = limapvis.make_bigimage(imgs, pad=20) - fname_out = prefix + "_sample{0}.png".format(sample_id) + fname_out = prefix + f"_sample{sample_id}.png" cv2.imwrite(fname_out, bigimg) diff --git a/limap/optimize/global_pl_association/solve.py b/limap/optimize/global_pl_association/solve.py index 
288dde43..f3c3c736 100644 --- a/limap/optimize/global_pl_association/solve.py +++ b/limap/optimize/global_pl_association/solve.py @@ -1,4 +1,4 @@ -from _limap import _base, _ceresbase, _optimize +from _limap import _optimize def solve_global_pl_association(cfg, imagecols, bpt3d, all_bpt2ds): diff --git a/limap/optimize/hybrid_bundle_adjustment/solve.py b/limap/optimize/hybrid_bundle_adjustment/solve.py index 708a1ba6..c6e072e2 100644 --- a/limap/optimize/hybrid_bundle_adjustment/solve.py +++ b/limap/optimize/hybrid_bundle_adjustment/solve.py @@ -1,5 +1,4 @@ -from _limap import _base, _ceresbase, _optimize -import numpy as np +from _limap import _ceresbase, _optimize def _init_bundle_adjustment_engine(cfg, imagecols, max_num_iterations=100): diff --git a/limap/optimize/line_localization/__init__.py b/limap/optimize/line_localization/__init__.py index dcbba1ae..049b1180 100644 --- a/limap/optimize/line_localization/__init__.py +++ b/limap/optimize/line_localization/__init__.py @@ -1,2 +1,2 @@ -from .solve import * from .functions import * +from .solve import * diff --git a/limap/optimize/line_localization/functions.py b/limap/optimize/line_localization/functions.py index 1eb48f77..93379c78 100644 --- a/limap/optimize/line_localization/functions.py +++ b/limap/optimize/line_localization/functions.py @@ -113,9 +113,10 @@ def reprojection_filter_matches_2to3( best_id = None for id in track_ids: l3d = linetracks[id].line - l2d_start, l2d_end = ref_camview.projection( - l3d.start - ), ref_camview.projection(l3d.end) + l2d_start, l2d_end = ( + ref_camview.projection(l3d.start), + ref_camview.projection(l3d.end), + ) l2d = _base.Line2d(l2d_start, l2d_end) dist = dist_func(ref_line, l2d) diff --git a/limap/optimize/line_localization/solve.py b/limap/optimize/line_localization/solve.py index dcf37335..9622d79d 100644 --- a/limap/optimize/line_localization/solve.py +++ b/limap/optimize/line_localization/solve.py @@ -1,8 +1,7 @@ -from heapq import merge -from _limap import _base, _ceresbase, _optimize -import numpy as np from collections import defaultdict +from _limap import _ceresbase, _optimize + def get_lineloc_cost_func(func_name): if func_name in [ diff --git a/limap/optimize/line_refinement/__init__.py b/limap/optimize/line_refinement/__init__.py index b3f95f38..73d6816d 100644 --- a/limap/optimize/line_refinement/__init__.py +++ b/limap/optimize/line_refinement/__init__.py @@ -1,2 +1,2 @@ -from .solve import * from .line_refinement import line_refinement +from .solve import * diff --git a/limap/optimize/line_refinement/line_refinement.py b/limap/optimize/line_refinement/line_refinement.py index b3a7cb2b..f9af0221 100644 --- a/limap/optimize/line_refinement/line_refinement.py +++ b/limap/optimize/line_refinement/line_refinement.py @@ -1,10 +1,11 @@ import os + import numpy as np from tqdm import tqdm import limap.base as _base -import limap.features as _features import limap.evaluation as _eval +import limap.features as _features import limap.optimize as _optim import limap.util.io as limapio import limap.visualize as limapvis @@ -56,15 +57,15 @@ def line_refinement( p_vpresults.append(vpresults[img_id]) if cfg["use_heatmap"]: heatmap = limapio.read_npy( - os.path.join(heatmap_dir, "heatmap_{0}.npy".format(img_id)) + os.path.join(heatmap_dir, f"heatmap_{img_id}.npy") ) p_heatmaps.append(heatmap) if cfg["use_feature"]: if patch_dir is not None: fname = os.path.join( patch_dir, - "track{0}".format(track_id), - "track{0}_img{1}.npy".format(track_id, img_id), + f"track{track_id}", + 
f"track{track_id}_img{img_id}.npy", ) patch = _features.load_patch(fname, dtype=cfg["dtype"]) p_patches.append(patch) @@ -104,14 +105,10 @@ def line_refinement( newratio = evaluator.ComputeInlierRatio(newtrack.line, 0.001) if newdist > dist and newratio < ratio: print( - "[DEBUG] t_id = {0}, original: dist = {1:.4f}, ratio = {2:.4f}".format( - t_id, dist * 1000, ratio - ) + f"[DEBUG] t_id = {t_id}, original: dist = {dist * 1000:.4f}, ratio = {ratio:.4f}" ) print( - "[DEBUG] t_id = {0}, optimized: dist = {1:.4f}, ratio = {2:.4f}".format( - t_id, newdist * 1000, newratio - ) + f"[DEBUG] t_id = {t_id}, optimized: dist = {newdist * 1000:.4f}, ratio = {newratio:.4f}" ) # output @@ -133,7 +130,7 @@ def report_track(track_id): tracks[track_id], max_image_dim=-1, cameras=cameras, - prefix="track.{0}".format(track_id), + prefix=f"track.{track_id}", ) def report_newtrack(track_id): @@ -142,7 +139,7 @@ def report_newtrack(track_id): opttracks[track_id], max_image_dim=-1, cameras=cameras, - prefix="newtrack.{0}".format(track_id), + prefix=f"newtrack.{track_id}", ) import pdb diff --git a/limap/optimize/line_refinement/solve.py b/limap/optimize/line_refinement/solve.py index e9fd8560..799bf7c4 100644 --- a/limap/optimize/line_refinement/solve.py +++ b/limap/optimize/line_refinement/solve.py @@ -1,5 +1,4 @@ -from _limap import _base, _ceresbase, _optimize -import numpy as np +from _limap import _ceresbase, _optimize def solve_line_refinement( @@ -27,7 +26,7 @@ def solve_line_refinement( channels = p_features[0].shape[2] else: channels = 128 - rf_engine_name = "RefinementEngine_f{0}_c{1}".format(dtype[-2:], channels) + rf_engine_name = f"RefinementEngine_f{dtype[-2:]}_c{channels}" # print("Refinement type: ", rf_engine_name) rf_engine = getattr(_optimize, rf_engine_name)(rf_config) diff --git a/limap/point2d/__init__.py b/limap/point2d/__init__.py index 5bfb5745..576c86e3 100644 --- a/limap/point2d/__init__.py +++ b/limap/point2d/__init__.py @@ -1,2 +1,2 @@ -from .superpoint import * from .superglue import * +from .superpoint import * diff --git a/limap/point2d/superglue/superglue.py b/limap/point2d/superglue/superglue.py index 27f33a8d..8610d1d8 100644 --- a/limap/point2d/superglue/superglue.py +++ b/limap/point2d/superglue/superglue.py @@ -91,7 +91,7 @@ def attention( query: torch.Tensor, key: torch.Tensor, value: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor]: dim = query.shape[1] - scores = torch.einsum("bdhn,bdhm->bhnm", query, key) / dim ** 0.5 + scores = torch.einsum("bdhn,bdhm->bhnm", query, key) / dim**0.5 prob = torch.nn.functional.softmax(scores, dim=-1) return torch.einsum("bhnm,bdhm->bdhn", prob, value), prob @@ -111,10 +111,10 @@ def forward( self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor ) -> torch.Tensor: batch_dim = query.size(0) - query, key, value = [ + query, key, value = ( l(x).view(batch_dim, self.dim, self.num_heads, -1) for l, x in zip(self.proj, (query, key, value)) - ] + ) x, _ = attention(query, key, value) return self.merge( x.contiguous().view(batch_dim, self.dim * self.num_heads, -1) @@ -236,10 +236,8 @@ def download_model(self, path): if not os.path.exists(os.path.dirname(path)): os.makedirs(os.path.dirname(path)) model_name = os.path.basename(path) - print("Downloading SuperGlue model {0}...".format(model_name)) - link = "https://github.com/magicleap/SuperGluePretrainedNetwork/blob/master/models/weights/{0}?raw=true".format( - model_name - ) + print(f"Downloading SuperGlue model {model_name}...") + link = 
f"https://github.com/magicleap/SuperGluePretrainedNetwork/blob/master/models/weights/{model_name}?raw=true" cmd = ["wget", link, "-O", path] subprocess.run(cmd, check=True) @@ -336,9 +334,10 @@ def log_optimal_transport( return Z def _get_matches(self, scores_mat): - max0, max1 = scores_mat[:, :-1, :-1].max(2), scores_mat[ - :, :-1, :-1 - ].max(1) + max0, max1 = ( + scores_mat[:, :-1, :-1].max(2), + scores_mat[:, :-1, :-1].max(1), + ) m0, m1 = max0.indices, max1.indices mutual0 = arange_like(m0, 1)[None] == m1.gather(1, m0) mutual1 = arange_like(m1, 1)[None] == m0.gather(1, m1) diff --git a/limap/point2d/superpoint/main.py b/limap/point2d/superpoint/main.py index f02c42e3..206203cc 100644 --- a/limap/point2d/superpoint/main.py +++ b/limap/point2d/superpoint/main.py @@ -1,16 +1,18 @@ -import torch -import numpy as np +import pprint +from pathlib import Path +from typing import Dict, List, Optional, Union + import h5py +import numpy as np +import torch from tqdm import tqdm -from pathlib import Path -from typing import Dict, List, Union, Optional -import pprint string_classes = str import collections.abc as collections from hloc import extract_features from hloc.utils.io import list_h5_names + from .superpoint import SuperPoint diff --git a/limap/point2d/superpoint/superpoint.py b/limap/point2d/superpoint/superpoint.py index 4ec97477..ef2a153b 100644 --- a/limap/point2d/superpoint/superpoint.py +++ b/limap/point2d/superpoint/superpoint.py @@ -42,6 +42,7 @@ import os from pathlib import Path + import torch from torch import nn diff --git a/limap/pointsfm/__init__.py b/limap/pointsfm/__init__.py index 22cfbe33..adc54efc 100644 --- a/limap/pointsfm/__init__.py +++ b/limap/pointsfm/__init__.py @@ -1,9 +1,10 @@ from _limap._pointsfm import * -from .colmap_sfm import * -from .functions import * + from .colmap_reader import ( - check_exists_colmap_model, PyReadCOLMAP, ReadPointTracks, + check_exists_colmap_model, ) +from .colmap_sfm import * +from .functions import * from .model_converter import * diff --git a/limap/pointsfm/bundler_reader.py b/limap/pointsfm/bundler_reader.py index 3533f752..99d1f55c 100644 --- a/limap/pointsfm/bundler_reader.py +++ b/limap/pointsfm/bundler_reader.py @@ -1,8 +1,8 @@ -from _limap import _base, _pointsfm - import os -import numpy as np + import imagesize +import numpy as np +from _limap import _base, _pointsfm from tqdm import tqdm @@ -11,8 +11,8 @@ def ReadModelBundler(bundler_path, list_path, model_path): # read imname_list ################################ list_path = os.path.join(bundler_path, list_path) - print("Loading bundler list file {0}...".format(list_path)) - with open(list_path, "r") as f: + print(f"Loading bundler list file {list_path}...") + with open(list_path) as f: lines = f.readlines() image_names = [line.strip("\n").split(" ")[0] for line in lines] imname_list = [ @@ -23,8 +23,8 @@ def ReadModelBundler(bundler_path, list_path, model_path): # read sfm model ################################ model_path = os.path.join(bundler_path, model_path) - print("Loading bundler model file {0}...".format(model_path)) - with open(model_path, "r") as f: + print(f"Loading bundler model file {model_path}...") + with open(model_path) as f: lines = f.readlines() counter = 1 # start from the second line line = lines[counter].strip("\n").split(" ") @@ -42,7 +42,7 @@ def ReadModelBundler(bundler_path, list_path, model_path): counter += 1 imname = imname_list[img_id] if not os.path.exists(imname): - raise ValueError("Error! 
Image not found: {0}".format(imname)) + raise ValueError(f"Error! Image not found: {imname}") width, height = imagesize.get(imname) img_hw = [height, width] K = np.zeros((3, 3)) @@ -64,7 +64,7 @@ def ReadModelBundler(bundler_path, list_path, model_path): counter += 1 R[1, :] = -R[1, :] # for bundler format R[2, :] = -R[2, :] # for bundler format - T = np.zeros((3)) + T = np.zeros(3) line = lines[counter].strip("\n").split(" ") T[0], T[1], T[2] = float(line[0]), float(line[1]), float(line[2]) T[1:] = -T[1:] # for bundler format diff --git a/limap/pointsfm/colmap_reader.py b/limap/pointsfm/colmap_reader.py index 1158f3f2..1f8ad727 100644 --- a/limap/pointsfm/colmap_reader.py +++ b/limap/pointsfm/colmap_reader.py @@ -1,4 +1,6 @@ -import os, sys +import os +import sys + from _limap import _base sys.path.append(os.path.dirname(os.path.abspath(__file__))) @@ -37,9 +39,9 @@ def ReadInfos(colmap_path, model_path="sparse", image_path="images"): colmap_images = read_images_text(fname_images) else: raise ValueError( - "Error! The model file does not exist at {0}".format(model_path) + f"Error! The model file does not exist at {model_path}" ) - print("Reconstruction loaded. (n_images = {0})".format(len(colmap_images))) + print(f"Reconstruction loaded. (n_images = {len(colmap_images)})") # read cameras cameras = {} @@ -89,7 +91,7 @@ def PyReadCOLMAP(colmap_path, model_path=None): colmap_points = read_points3D_text(fname_points) else: raise ValueError( - "Error! The model file does not exist at {0}".format(model_path) + f"Error! The model file does not exist at {model_path}" ) reconstruction = {} reconstruction["cameras"] = colmap_cameras diff --git a/limap/pointsfm/colmap_sfm.py b/limap/pointsfm/colmap_sfm.py index 5c73ec37..2772b8c6 100644 --- a/limap/pointsfm/colmap_sfm.py +++ b/limap/pointsfm/colmap_sfm.py @@ -1,15 +1,15 @@ -import os, sys -import shutil -import numpy as np -import cv2 import copy +import os +import shutil import subprocess +import sys from pathlib import Path -from tqdm import tqdm + +import cv2 sys.path.append(os.path.dirname(os.path.abspath(__file__))) -import read_write_model as colmap_utils import database +import read_write_model as colmap_utils from model_converter import convert_imagecols_to_colmap @@ -60,7 +60,7 @@ def write_pairs_from_neighbors(output_path, image_path, neighbors, image_ids): if id2 in m_pairs[id1]: continue m_pairs[id1].append(id2) - f.write("{0} {1}\n".format(name1, name2)) + f.write(f"{name1} {name2}\n") def run_hloc_matches( @@ -175,7 +175,7 @@ def run_colmap_sfm( for idx, img_id in enumerate(imagecols.get_img_ids()): img = imagecols.read_image(img_id) fname_to_save = os.path.join( - image_path, "image{0:08d}.png".format(img_id) + image_path, f"image{img_id:08d}.png" ) cv2.imwrite(fname_to_save, img) if keypoints is not None: @@ -252,13 +252,13 @@ def run_colmap_sfm_with_known_poses( for idx, img_id in enumerate(imagecols.get_img_ids()): img = imagecols.read_image(img_id) fname_to_save = os.path.join( - image_path, "image{0:08d}.png".format(img_id) + image_path, f"image{img_id:08d}.png" ) cv2.imwrite(fname_to_save, img) if keypoints is not None: keypoints_in_order.append(keypoints[img_id]) imagecols_tmp.change_image_name( - img_id, "image{0:08d}.png".format(img_id) + img_id, f"image{img_id:08d}.png" ) # feature extraction and matching diff --git a/limap/pointsfm/database.py b/limap/pointsfm/database.py index c7f49914..ec7743e4 100644 --- a/limap/pointsfm/database.py +++ b/limap/pointsfm/database.py @@ -31,14 +31,14 @@ # This script is based on an 
original implementation by True Price. -import sys import sqlite3 -import numpy as np +import sys +import numpy as np IS_PYTHON3 = sys.version_info[0] >= 3 -MAX_IMAGE_ID = 2 ** 31 - 1 +MAX_IMAGE_ID = 2**31 - 1 CREATE_CAMERAS_TABLE = """CREATE TABLE IF NOT EXISTS cameras ( camera_id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, @@ -55,7 +55,7 @@ data BLOB, FOREIGN KEY(image_id) REFERENCES images(image_id) ON DELETE CASCADE)""" -CREATE_IMAGES_TABLE = """CREATE TABLE IF NOT EXISTS images ( +CREATE_IMAGES_TABLE = f"""CREATE TABLE IF NOT EXISTS images ( image_id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, name TEXT NOT NULL UNIQUE, camera_id INTEGER NOT NULL, @@ -66,11 +66,9 @@ prior_tx REAL, prior_ty REAL, prior_tz REAL, - CONSTRAINT image_id_check CHECK(image_id >= 0 and image_id < {}), + CONSTRAINT image_id_check CHECK(image_id >= 0 and image_id < {MAX_IMAGE_ID}), FOREIGN KEY(camera_id) REFERENCES cameras(camera_id)) -""".format( - MAX_IMAGE_ID -) +""" CREATE_TWO_VIEW_GEOMETRIES_TABLE = """ CREATE TABLE IF NOT EXISTS two_view_geometries ( @@ -293,8 +291,8 @@ def add_two_view_geometry( def example_usage(): - import os import argparse + import os parser = argparse.ArgumentParser() parser.add_argument("--database_path", default="database.db") diff --git a/limap/pointsfm/functions.py b/limap/pointsfm/functions.py index 255546d1..0fcbcd79 100644 --- a/limap/pointsfm/functions.py +++ b/limap/pointsfm/functions.py @@ -1,4 +1,4 @@ -from _limap import _base, _pointsfm +from _limap import _pointsfm def filter_by_cam_id(cam_id, prev_imagecols, prev_neighbors): @@ -40,7 +40,7 @@ def ComputeNeighbors( def compute_metainfos(cfg, model, n_neighbors=20): # get neighbors print( - "Computing visual neighbors... (n_neighbors = {0})".format(n_neighbors) + f"Computing visual neighbors... 
(n_neighbors = {n_neighbors})" ) neighbors = ComputeNeighbors( model, diff --git a/limap/pointsfm/model_converter.py b/limap/pointsfm/model_converter.py index 7f5f4499..def9f23a 100644 --- a/limap/pointsfm/model_converter.py +++ b/limap/pointsfm/model_converter.py @@ -1,9 +1,11 @@ -import os, sys +import os +import sys + from limap.util.geometry import rotation_from_quaternion sys.path.append(os.path.dirname(os.path.abspath(__file__))) -from colmap_reader import PyReadCOLMAP import read_write_model as colmap_utils +from colmap_reader import PyReadCOLMAP def convert_colmap_to_visualsfm(colmap_model_path, output_nvm_file): @@ -17,7 +19,7 @@ def convert_colmap_to_visualsfm(colmap_model_path, output_nvm_file): f.write("NVM_V3\n\n") # write images - f.write("{0}\n".format(len(colmap_images))) + f.write(f"{len(colmap_images)}\n") map_image_id = dict() counter = 0 for img_id, colmap_image in colmap_images.items(): @@ -44,33 +46,33 @@ def convert_colmap_to_visualsfm(colmap_model_path, output_nvm_file): k1 = cam.params[3] else: raise ValueError("Camera model not supported in VisualSfM.") - f.write("{0}\t".format(img_name)) - f.write(" {0}".format(focal)) + f.write(f"{img_name}\t") + f.write(f" {focal}") qvec, tvec = colmap_image.qvec, colmap_image.tvec R = rotation_from_quaternion(qvec) center = -R.transpose() @ tvec f.write( - " {0} {1} {2} {3}".format(qvec[0], qvec[1], qvec[2], qvec[3]) + f" {qvec[0]} {qvec[1]} {qvec[2]} {qvec[3]}" ) - f.write(" {0} {1} {2}".format(center[0], center[1], center[2])) - f.write(" {0} 0\n".format(k1)) + f.write(f" {center[0]} {center[1]} {center[2]}") + f.write(f" {k1} 0\n") f.write("\n") # write points - f.write("{0}\n".format(len(colmap_points))) + f.write(f"{len(colmap_points)}\n") for pid, point in colmap_points.items(): xyz = point.xyz - f.write("{0} {1} {2}".format(xyz[0], xyz[1], xyz[2])) + f.write(f"{xyz[0]} {xyz[1]} {xyz[2]}") f.write(" 128 128 128") # dummy color n_supports = len(point.image_ids) - f.write(" {0}".format(n_supports)) + f.write(f" {n_supports}") for idx in range(n_supports): img_id = point.image_ids[idx] xy_id = point.point2D_idxs[idx] img_index = map_image_id[img_id] - f.write(" {0} {1}".format(img_index, xy_id)) + f.write(f" {img_index} {xy_id}") xy = colmap_images[img_id].xys[xy_id] - f.write(" {0} {1}".format(xy[0], xy[1])) + f.write(f" {xy[0]} {xy[1]}") f.write("\n") diff --git a/limap/pointsfm/read_write_model.py b/limap/pointsfm/read_write_model.py index 1c42bb6b..11c754dd 100755 --- a/limap/pointsfm/read_write_model.py +++ b/limap/pointsfm/read_write_model.py @@ -29,12 +29,12 @@ # # Author: Johannes L. 
Schoenberger (jsch-at-demuc-dot-de) -import os +import argparse import collections -import numpy as np +import os import struct -import argparse +import numpy as np CameraModel = collections.namedtuple( "CameraModel", ["model_id", "model_name", "num_params"] @@ -111,7 +111,7 @@ def read_cameras_text(path): void Reconstruction::ReadCamerasText(const std::string& path) """ cameras = {} - with open(path, "r") as fid: + with open(path) as fid: while True: line = fid.readline() if not line: @@ -178,7 +178,7 @@ def write_cameras_text(cameras, path): HEADER = ( "# Camera list with one line of data per camera:\n" + "# CAMERA_ID, MODEL, WIDTH, HEIGHT, PARAMS[]\n" - + "# Number of cameras: {}\n".format(len(cameras)) + + f"# Number of cameras: {len(cameras)}\n" ) with open(path, "w") as fid: fid.write(HEADER) @@ -212,7 +212,7 @@ def read_images_text(path): void Reconstruction::WriteImagesText(const std::string& path) """ images = {} - with open(path, "r") as fid: + with open(path) as fid: while True: line = fid.readline() if not line: @@ -310,9 +310,7 @@ def write_images_text(images, path): "# Image list with two lines of data per image:\n" + "# IMAGE_ID, QW, QX, QY, QZ, TX, TY, TZ, CAMERA_ID, NAME\n" + "# POINTS2D[] as (X, Y, POINT3D_ID)\n" - + "# Number of images: {}, mean observations per image: {}\n".format( - len(images), mean_observations - ) + + f"# Number of images: {len(images)}, mean observations per image: {mean_observations}\n" ) with open(path, "w") as fid: @@ -362,7 +360,7 @@ def read_points3D_text(path): void Reconstruction::WritePoints3DText(const std::string& path) """ points3D = {} - with open(path, "r") as fid: + with open(path) as fid: while True: line = fid.readline() if not line: @@ -440,9 +438,7 @@ def write_points3D_text(points3D, path): HEADER = ( "# 3D point list with one line of data per point:\n" + "# POINT3D_ID, X, Y, Z, R, G, B, ERROR, TRACK[] as (IMAGE_ID, POINT2D_IDX)\n" - + "# Number of points: {}, mean track length: {}\n".format( - len(points3D), mean_track_length - ) + + f"# Number of points: {len(points3D)}, mean track length: {mean_track_length}\n" ) with open(path, "w") as fid: diff --git a/limap/pointsfm/visualsfm_reader.py b/limap/pointsfm/visualsfm_reader.py index 8896752d..3e9ff22a 100644 --- a/limap/pointsfm/visualsfm_reader.py +++ b/limap/pointsfm/visualsfm_reader.py @@ -1,9 +1,10 @@ -from _limap import _base, _pointsfm - import os -import numpy as np + import imagesize +import numpy as np +from _limap import _base, _pointsfm from tqdm import tqdm + from limap.util.geometry import rotation_from_quaternion @@ -11,9 +12,9 @@ def ReadModelVisualSfM(vsfm_path, nvm_file="reconstruction.nvm"): input_file = os.path.join(vsfm_path, nvm_file) if not os.path.exists(input_file): raise ValueError( - "Error! Input file {0} does not exist.".format(input_file) + f"Error! Input file {input_file} does not exist." ) - with open(input_file, "r") as f: + with open(input_file) as f: txt_lines = f.readlines() # read camviews @@ -36,7 +37,7 @@ def ReadModelVisualSfM(vsfm_path, nvm_file="reconstruction.nvm"): # add camera if not os.path.exists(imname): - raise ValueError("Error! Image not found: {0}".format(imname)) + raise ValueError(f"Error! 
Image not found: {imname}") width, height = imagesize.get(imname) img_hw = [height, width] cx = img_hw[1] / 2.0 diff --git a/limap/runners/__init__.py b/limap/runners/__init__.py index f0aa0cce..c5af2529 100644 --- a/limap/runners/__init__.py +++ b/limap/runners/__init__.py @@ -1,5 +1,5 @@ from .functions import * from .functions_structures import * from .line_fitnmerge import * -from .line_triangulation import * from .line_localization import * +from .line_triangulation import * diff --git a/limap/runners/functions.py b/limap/runners/functions.py index da5ecf4c..c43a156b 100644 --- a/limap/runners/functions.py +++ b/limap/runners/functions.py @@ -1,7 +1,8 @@ import os -import numpy as np import warnings + from tqdm import tqdm + import limap.util.io as limapio @@ -49,9 +50,7 @@ def undistort_images( unload_ids = imagecols.get_img_ids() if skip_exists: print( - "[LOG] Loading undistorted images (n_images = {0})...".format( - imagecols.NumImages() - ) + f"[LOG] Loading undistorted images (n_images = {imagecols.NumImages()})..." ) fname_in = os.path.join(output_dir, fname) if os.path.isfile(fname_in): @@ -71,14 +70,14 @@ def undistort_images( if n_jobs == -1: n_jobs = os.cpu_count() print( - "[LOG] Start undistorting images (n_images = {0})...".format( - len(unload_ids) - ) + f"[LOG] Start undistorting images (n_images = {len(unload_ids)})..." ) - import limap.undistortion as _undist - import cv2, imagesize + import cv2 + import imagesize import joblib + import limap.undistortion as _undist + # limapio.delete_folder(output_dir) limapio.check_makedirs(output_dir) @@ -87,7 +86,7 @@ def process(imagecols, img_id): cam_id = imagecols.camimage(img_id).cam_id cam = imagecols.cam(cam_id) imname_in = imagecols.camimage(img_id).image_name() - imname_out = os.path.join(output_dir, "image{0:08d}.png".format(img_id)) + imname_out = os.path.join(output_dir, f"image{img_id:08d}.png") # save image if resizing is needed width, height = imagesize.get(imname_in) if height != cam.h() or width != cam.w(): @@ -109,7 +108,7 @@ def process(imagecols, img_id): imagecols_undistorted = _base.ImageCollection(imagecols) cam_dict = {} for idx, img_id in enumerate(unload_ids): - imname_out = os.path.join(output_dir, "image{0:08d}.png".format(img_id)) + imname_out = os.path.join(output_dir, f"image{img_id:08d}.png") cam_undistorted = outputs[idx] cam_id = cam_undistorted.cam_id() if cam_id not in cam_dict: @@ -117,7 +116,7 @@ def process(imagecols, img_id): imagecols_undistorted.change_camera(cam_id, cam_undistorted) imagecols_undistorted.change_image_name(img_id, imname_out) for idx, img_id in enumerate(loaded_ids): - imname_out = os.path.join(output_dir, "image{0:08d}.png".format(img_id)) + imname_out = os.path.join(output_dir, f"image{img_id:08d}.png") cam_id = loaded_imagecols.camimage(img_id).cam_id cam_undistorted = loaded_imagecols.cam(cam_id) if cam_id not in cam_dict: diff --git a/limap/runners/functions_structures.py b/limap/runners/functions_structures.py index 919a1efa..4b7f725d 100644 --- a/limap/runners/functions_structures.py +++ b/limap/runners/functions_structures.py @@ -1,14 +1,16 @@ import os + import numpy as np from tqdm import tqdm -import limap.structures as _structures + import limap.pointsfm as _psfm +import limap.structures as _structures def compute_2d_feature_points_sp(imagecols, output_path="tmp/featurepoints"): - import limap.pointsfm as _psfm - import cv2 from pathlib import Path + + import cv2 import h5py if not os.path.exists(output_path): @@ -21,7 +23,7 @@ def 
compute_2d_feature_points_sp(imagecols, output_path="tmp/featurepoints"): for img_id in imagecols.get_img_ids(): img = imagecols.read_image(img_id) fname_to_save = os.path.join( - image_path, "image{0:08d}.png".format(img_id) + image_path, f"image{img_id:08d}.png" ) cv2.imwrite(fname_to_save, img) @@ -39,7 +41,7 @@ def compute_2d_feature_points_sp(imagecols, output_path="tmp/featurepoints"): f = h5py.File(feature_path, "r") all_keypoints = {} for img_id in imagecols.get_img_ids(): - fname = "image{0:08d}.png".format(img_id) + fname = f"image{img_id:08d}.png" keypoints = np.array(f[fname]["keypoints"]) all_keypoints[img_id] = keypoints return all_keypoints diff --git a/limap/runners/line_fitnmerge.py b/limap/runners/line_fitnmerge.py index 6cb1b5a2..caf5ef95 100644 --- a/limap/runners/line_fitnmerge.py +++ b/limap/runners/line_fitnmerge.py @@ -1,7 +1,8 @@ import os + +import joblib import numpy as np from tqdm import tqdm -import joblib import limap.base as _base import limap.fitting as _fit @@ -128,7 +129,7 @@ def line_fitnmerge(cfg, imagecols, depths, neighbors=None, ranges=None): """ # assertion check assert imagecols.IsUndistorted() == True - print("[LOG] Number of images: {0}".format(imagecols.NumImages())) + print(f"[LOG] Number of images: {imagecols.NumImages()}") cfg = _runners.setup(cfg) detector_name = cfg["line2d"]["detector"]["method"] if cfg["fitting"]["var2d"] == -1: @@ -299,7 +300,7 @@ def line_fitting_with_3Dpoints( """ # assertion check assert imagecols.IsUndistorted() == True - print("[LOG] Number of images: {0}".format(imagecols.NumImages())) + print(f"[LOG] Number of images: {imagecols.NumImages()}") cfg = _runners.setup(cfg) detector_name = cfg["line2d"]["detector"]["method"] if cfg["fitting"]["var2d"] == -1: diff --git a/limap/runners/line_localization.py b/limap/runners/line_localization.py index 8f30e564..2f77f731 100644 --- a/limap/runners/line_localization.py +++ b/limap/runners/line_localization.py @@ -1,16 +1,15 @@ import os +from collections import defaultdict + import numpy as np -import pycolmap -import pickle +from hloc.utils.io import get_keypoints, get_matches +from tqdm import tqdm + +import limap.base as _base import limap.estimators as _estimators +import limap.line2d import limap.runners as _runners -import limap.base as _base import limap.util.io as limapio -import limap.line2d - -from tqdm import tqdm -from collections import defaultdict -from hloc.utils.io import get_keypoints, get_matches from limap.optimize.line_localization.functions import * @@ -199,7 +198,7 @@ def line_localization( basedir = os.path.join( "line_matchings", cfg["line2d"]["detector"]["method"], - "feats_{0}".format(matcher_name), + f"feats_{matcher_name}", ) matcher = limap.line2d.get_matcher( ma_cfg, @@ -230,7 +229,7 @@ def line_localization( if cfg["localization"]["skip_exists"]: limapio.check_makedirs(str(pose_dir)) if os.path.exists(os.path.join(pose_dir, f"{qid}.txt")): - with open(os.path.join(pose_dir, f"{qid}.txt"), "r") as f: + with open(os.path.join(pose_dir, f"{qid}.txt")) as f: data = f.read().rstrip().split("\n")[0].split() q, t = np.split(np.array(data[1:], float), [4]) final_poses[qid] = _base.CameraPose(q, t) @@ -248,7 +247,7 @@ def line_localization( if cfg["localization"]["2d_matcher"] != "epipolar": # Read from the pre-computed matches all_line_pairs_2to2 = limapio.read_npy( - os.path.join(se_matches_dir, "matches_{0}.npy".format(qid)) + os.path.join(se_matches_dir, f"matches_{qid}.npy") ).item() all_line_pairs_2to3 = defaultdict(list) diff --git 
a/limap/runners/line_triangulation.py b/limap/runners/line_triangulation.py index 6f3a95d2..4be4d386 100644 --- a/limap/runners/line_triangulation.py +++ b/limap/runners/line_triangulation.py @@ -1,16 +1,16 @@ import os -import numpy as np + from tqdm import tqdm import limap.base as _base import limap.merging as _mrg -import limap.triangulation as _tri -import limap.vplib as _vplib -import limap.pointsfm as _psfm import limap.optimize as _optim +import limap.pointsfm as _psfm import limap.runners as _runners +import limap.triangulation as _tri import limap.util.io as limapio import limap.visualize as limapvis +import limap.vplib as _vplib def line_triangulation(cfg, imagecols, neighbors=None, ranges=None): @@ -25,7 +25,7 @@ def line_triangulation(cfg, imagecols, neighbors=None, ranges=None): Returns: list[:class:`limap.base.LineTrack`]: list of output 3D line tracks """ - print("[LOG] Number of images: {0}".format(imagecols.NumImages())) + print(f"[LOG] Number of images: {imagecols.NumImages()}") cfg = _runners.setup(cfg) detector_name = cfg["line2d"]["detector"]["method"] if cfg["triangulation"]["var2d"] == -1: @@ -154,7 +154,7 @@ def line_triangulation(cfg, imagecols, neighbors=None, ranges=None): ) else: matches = limapio.read_npy( - os.path.join(matches_dir, "matches_{0}.npy".format(img_id)) + os.path.join(matches_dir, f"matches_{img_id}.npy") ).item() Triangulator.TriangulateImage(img_id, matches) linetracks = Triangulator.ComputeLineTracks() @@ -248,7 +248,7 @@ def report_track(track_id): limapvis.visualize_line_track( imagecols, validtracks[track_id], - prefix="track.{0}".format(track_id), + prefix=f"track.{track_id}", ) import pdb diff --git a/limap/triangulation/__init__.py b/limap/triangulation/__init__.py index efaa909d..f32c4e53 100644 --- a/limap/triangulation/__init__.py +++ b/limap/triangulation/__init__.py @@ -1,2 +1,3 @@ from _limap._triangulation import * + from .triangulation import * diff --git a/limap/triangulation/triangulation.py b/limap/triangulation/triangulation.py index 72ddaf4e..baa02264 100644 --- a/limap/triangulation/triangulation.py +++ b/limap/triangulation/triangulation.py @@ -1,6 +1,4 @@ from _limap import _triangulation as _tri -from _limap import _base -import numpy as np def get_normal_direction(l, view): diff --git a/limap/undistortion/__init__.py b/limap/undistortion/__init__.py index fac0ada9..fe97c3dc 100644 --- a/limap/undistortion/__init__.py +++ b/limap/undistortion/__init__.py @@ -1,2 +1,3 @@ from _limap._undistortion import * + from .undistort import * diff --git a/limap/undistortion/undistort.py b/limap/undistortion/undistort.py index 670f6eb6..34e3cdaf 100644 --- a/limap/undistortion/undistort.py +++ b/limap/undistortion/undistort.py @@ -1,7 +1,6 @@ -from _limap import _base, _undistortion + import cv2 -import numpy as np -import copy +from _limap import _base, _undistortion def UndistortImageCamera(camera, imname_in, imname_out): diff --git a/limap/util/__init__.py b/limap/util/__init__.py index 6a908d10..e81aab61 100644 --- a/limap/util/__init__.py +++ b/limap/util/__init__.py @@ -1,3 +1,3 @@ -from .io import * -from .geometry import * from .config import * +from .geometry import * +from .io import * diff --git a/limap/util/config.py b/limap/util/config.py index bfe31289..7b8da04c 100644 --- a/limap/util/config.py +++ b/limap/util/config.py @@ -1,7 +1,7 @@ -import yaml -import numpy as np import copy +import yaml + def update_recursive(dict1, dictinfo): for k, v in dictinfo.items(): @@ -20,7 +20,7 @@ def update_recursive_deepcopy(dict1, 
dictinfo): def load_config(config_file, default_path=None): - with open(config_file, "r") as f: + with open(config_file) as f: cfg_loaded = yaml.load(f, Loader=yaml.Loader) base_config_file = cfg_loaded.get("base_config_file") @@ -56,9 +56,7 @@ def get_val_from_keys(cfg, keys): argtype = type(val) if argtype == bool: # test if it is a store action - if idx == len(unknown) - 1: - v = True - elif unknown[idx + 1].startswith("--"): + if idx == len(unknown) - 1 or unknown[idx + 1].startswith("--"): v = True else: v = unknown[idx + 1].lower() == "true" diff --git a/limap/util/evaluation.py b/limap/util/evaluation.py index a8d32e52..b49c81d0 100644 --- a/limap/util/evaluation.py +++ b/limap/util/evaluation.py @@ -1,5 +1,6 @@ -import numpy as np import cv2 +import numpy as np + import limap.base as _base @@ -27,14 +28,10 @@ def eval_imagecols( ): if enable_logging: print( - "[LOG EVAL] imagecols.NumImages() = {0}".format( - imagecols.NumImages() - ) + f"[LOG EVAL] imagecols.NumImages() = {imagecols.NumImages()}" ) print( - "[LOG EVAL] imagecols_gt.NumImages() = {0}".format( - imagecols_gt.NumImages() - ) + f"[LOG EVAL] imagecols_gt.NumImages() = {imagecols_gt.NumImages()}" ) _, imagecols_aligned = _base.align_imagecols( imagecols, @@ -65,14 +62,10 @@ def eval_imagecols_relpose( assert len(shared_img_ids) == imagecols.NumImages() if enable_logging: print( - "[LOG EVAL] imagecols.NumImages() = {0}".format( - imagecols.NumImages() - ) + f"[LOG EVAL] imagecols.NumImages() = {imagecols.NumImages()}" ) print( - "[LOG EVAL] imagecols_gt.NumImages() = {0}".format( - imagecols_gt.NumImages() - ) + f"[LOG EVAL] imagecols_gt.NumImages() = {imagecols_gt.NumImages()}" ) if fill_uninitialized: img_ids = imagecols_gt.get_img_ids() diff --git a/limap/util/geometry.py b/limap/util/geometry.py index 2620050e..931870d8 100644 --- a/limap/util/geometry.py +++ b/limap/util/geometry.py @@ -41,20 +41,18 @@ def rotation_from_quaternion(quad): norm = np.linalg.norm(quad) if norm < 1e-10: raise ValueError( - "Error! the quaternion is not robust. quad.norm() = {0}".format( - norm - ) + f"Error! the quaternion is not robust. quad.norm() = {norm}" ) quad = quad / norm qr, qi, qj, qk = quad[0], quad[1], quad[2], quad[3] rot_mat = np.zeros((3, 3)) - rot_mat[0, 0] = 1 - 2 * (qj ** 2 + qk ** 2) + rot_mat[0, 0] = 1 - 2 * (qj**2 + qk**2) rot_mat[0, 1] = 2 * (qi * qj - qk * qr) rot_mat[0, 2] = 2 * (qi * qk + qj * qr) rot_mat[1, 0] = 2 * (qi * qj + qk * qr) - rot_mat[1, 1] = 1 - 2 * (qi ** 2 + qk ** 2) + rot_mat[1, 1] = 1 - 2 * (qi**2 + qk**2) rot_mat[1, 2] = 2 * (qj * qk - qi * qr) rot_mat[2, 0] = 2 * (qi * qk - qj * qr) rot_mat[2, 1] = 2 * (qj * qk + qi * qr) - rot_mat[2, 2] = 1 - 2 * (qi ** 2 + qj ** 2) + rot_mat[2, 2] = 1 - 2 * (qi**2 + qj**2) return rot_mat diff --git a/limap/util/io.py b/limap/util/io.py index 1e1c5c1e..04ae330c 100644 --- a/limap/util/io.py +++ b/limap/util/io.py @@ -1,7 +1,9 @@ import os -import numpy as np import shutil + +import numpy as np from tqdm import tqdm + import limap.base as _base @@ -12,9 +14,7 @@ def check_directory(fname): not os.path.exists(os.path.dirname(fname)) ): raise ValueError( - "Error! Base directory {0} does not exist!".format( - os.path.dirname(fname) - ) + f"Error! Base directory {os.path.dirname(fname)} does not exist!" ) @@ -22,7 +22,7 @@ def check_path(fname): if fname is None: raise ValueError("Error! Input filepath is None!") if not os.path.exists(fname): - raise ValueError("Error! File {0} does not exist!".format(fname)) + raise ValueError(f"Error! 
File {fname} does not exist!") def delete_folder(folder): @@ -72,14 +72,14 @@ def save_ply(fname, points): def read_ply(fname): - from plyfile import PlyData, PlyElement + from plyfile import PlyData plydata = PlyData.read(fname) x = np.asarray(plydata.elements[0].data["x"]) y = np.asarray(plydata.elements[0].data["y"]) z = np.asarray(plydata.elements[0].data["z"]) points = np.stack([x, y, z], axis=1) - print("number of points: {0}".format(points.shape[0])) + print(f"number of points: {points.shape[0]}") return points @@ -89,14 +89,14 @@ def save_txt_metainfos(fname, neighbors, ranges): """ check_directory(fname) with open(fname, "w") as f: - f.write("number of images, {0}\n".format(len(neighbors))) - f.write("x-range, {0}, {1}\n".format(ranges[0][0], ranges[1][0])) - f.write("y-range, {0}, {1}\n".format(ranges[0][1], ranges[1][1])) - f.write("z-range, {0}, {1}\n".format(ranges[0][2], ranges[1][2])) + f.write(f"number of images, {len(neighbors)}\n") + f.write(f"x-range, {ranges[0][0]}, {ranges[1][0]}\n") + f.write(f"y-range, {ranges[0][1]}, {ranges[1][1]}\n") + f.write(f"z-range, {ranges[0][2]}, {ranges[1][2]}\n") for img_id, neighbor in neighbors.items(): - str_ = "image {0}".format(img_id) + str_ = f"image {img_id}" for ng_idx in neighbor: - str_ += ", {0}".format(ng_idx) + str_ += f", {ng_idx}" f.write(str_ + "\n") @@ -105,7 +105,7 @@ def read_txt_metainfos(fname): Read in .txt for neighbors and ranges """ check_path(fname) - with open(fname, "r") as f: + with open(fname) as f: txt_lines = f.readlines() counter = 0 n_images = int(txt_lines[counter].strip().split(",")[1]) @@ -133,14 +133,14 @@ def read_txt_metainfos(fname): def save_txt_imname_list(fname, imname_list): check_directory(fname) with open(fname, "w") as f: - f.write("number of images, {0}\n".format(len(imname_list))) + f.write(f"number of images, {len(imname_list)}\n") for imname in imname_list: f.write(imname + "\n") def read_txt_imname_list(fname): check_path(fname) - with open(fname, "r") as f: + with open(fname) as f: txt_lines = f.readlines() counter = 0 n_images = int(txt_lines[counter].strip().split(",")[1]) @@ -156,14 +156,14 @@ def read_txt_imname_list(fname): def save_txt_imname_dict(fname, imname_dict): check_directory(fname) with open(fname, "w") as f: - f.write("number of images, {0}\n".format(len(imname_dict))) + f.write(f"number of images, {len(imname_dict)}\n") for img_id, imname in imname_dict.items(): - f.write("{0}, {1}\n".format(img_id, imname)) + f.write(f"{img_id}, {imname}\n") def read_txt_imname_dict(fname): check_path(fname) - with open(fname, "r") as f: + with open(fname) as f: txt_lines = f.readlines() counter = 0 n_images = int(txt_lines[counter].strip().split(",")[1]) @@ -193,13 +193,13 @@ def save_obj(fname, lines): n_lines = int(len(vertices) / 2) with open(fname, "w") as f: for v in vertices: - f.write("v {0} {1} {2}\n".format(v[0], v[1], v[2])) + f.write(f"v {v[0]} {v[1]} {v[2]}\n") for idx in range(n_lines): - f.write("l {0} {1}\n".format(2 * idx + 1, 2 * idx + 2)) + f.write(f"l {2 * idx + 1} {2 * idx + 2}\n") def load_obj(fname): - with open(fname, "r") as f: + with open(fname) as f: flines = f.readlines() counter = 0 vertices = [] @@ -243,22 +243,18 @@ def save_l3dpp(folder, imagecols, all_2d_segs): image_id = index_list.index(idx) else: raise NotImplementedError - fname = "segments_L3D++_{0}_{1}x{2}_3000.txt".format( - image_id, width, height - ) + fname = f"segments_L3D++_{image_id}_{width}x{height}_3000.txt" fname = os.path.join(folder, fname) segs = all_2d_segs[idx] n_segments = 
segs.shape[0] with open(fname, "w") as f: - f.write("{0}\n".format(n_segments)) + f.write(f"{n_segments}\n") for line_id in range(n_segments): line = segs[line_id] f.write( - "{0} {1} {2} {3}\n".format( - line[0], line[1], line[2], line[3] - ) + f"{line[0]} {line[1]} {line[2]} {line[3]}\n" ) - print("Writing for L3DPP: {0}".format(fname)) + print(f"Writing for L3DPP: {fname}") def save_txt_linetracks(fname, linetracks, n_visible_views=4): @@ -273,31 +269,23 @@ def save_txt_linetracks(fname, linetracks, n_visible_views=4): print("Writing all linetracks to a single file...") n_tracks = len(linetracks) with open(fname, "w") as f: - f.write("{0}\n".format(n_tracks)) + f.write(f"{n_tracks}\n") for track_id in tqdm(range(n_tracks)): track = linetracks[track_id] f.write( - "{0} {1} {2}\n".format( - track_id, track.count_lines(), track.count_images() - ) + f"{track_id} {track.count_lines()} {track.count_images()}\n" ) f.write( - "{0:.10f} {1:.10f} {2:.10f}\n".format( - track.line.start[0], - track.line.start[1], - track.line.start[2], - ) + f"{track.line.start[0]:.10f} {track.line.start[1]:.10f} {track.line.start[2]:.10f}\n" ) f.write( - "{0:.10f} {1:.10f} {2:.10f}\n".format( - track.line.end[0], track.line.end[1], track.line.end[2] - ) + f"{track.line.end[0]:.10f} {track.line.end[1]:.10f} {track.line.end[2]:.10f}\n" ) for idx in range(track.count_lines()): - f.write("{0} ".format(track.image_id_list[idx])) + f.write(f"{track.image_id_list[idx]} ") f.write("\n") for idx in range(track.count_lines()): - f.write("{0} ".format(track.line_id_list[idx])) + f.write(f"{track.line_id_list[idx]} ") f.write("\n") @@ -305,10 +293,10 @@ def save_folder_linetracks(folder, linetracks): if os.path.exists(folder): shutil.rmtree(folder) os.makedirs(folder) - print("Writing linetracks to {0}...".format(folder)) + print(f"Writing linetracks to {folder}...") n_tracks = len(linetracks) for track_id in tqdm(range(n_tracks)): - fname = os.path.join(folder, "track_{0}.txt".format(track_id)) + fname = os.path.join(folder, f"track_{track_id}.txt") linetracks[track_id].Write(fname) @@ -319,10 +307,10 @@ def read_folder_linetracks(folder): for fname in flist: if fname[-4:] == ".txt" and fname[:5] == "track": n_tracks += 1 - print("Read linetracks from {0}...".format(folder)) + print(f"Read linetracks from {folder}...") linetracks = [] for track_id in range(n_tracks): - fname = os.path.join(folder, "track_{0}.txt".format(track_id)) + fname = os.path.join(folder, f"track_{track_id}.txt") track = _base.LineTrack() track.Read(fname) linetracks.append(track) @@ -357,7 +345,7 @@ def read_folder_linetracks_with_info(folder): def read_txt_Line3Dpp(fname): linetracks = [] - with open(fname, "r") as f: + with open(fname) as f: txt_lines = f.readlines() line_counts = [] line_track_id_list = [] @@ -410,7 +398,7 @@ def read_lines_from_input(input_file): """ if not os.path.exists(input_file): raise ValueError( - "Error! Input file/directory {0} not found.".format(input_file) + f"Error! Input file/directory {input_file} not found." ) # linetracks folder @@ -439,33 +427,31 @@ def read_lines_from_input(input_file): # exception raise ValueError( - "Error! File {0} not supported. should be txt, obj, or folder to the linetracks.".format( - input_dir - ) + f"Error! File {input_dir} not supported. should be txt, obj, or folder to the linetracks." 
) def exists_txt_segments(folder, img_id): - fname = os.path.join(folder, "segments_{0}.txt".format(img_id)) + fname = os.path.join(folder, f"segments_{img_id}.txt") return os.path.exists(fname) def save_txt_segments(folder, img_id, segs): - fname = os.path.join(folder, "segments_{0}.txt".format(img_id)) + fname = os.path.join(folder, f"segments_{img_id}.txt") n_segments = segs.shape[0] with open(fname, "w") as f: - f.write("{0}\n".format(n_segments)) + f.write(f"{n_segments}\n") for line_id in range(n_segments): line = segs[line_id] f.write( - "{0} {1} {2} {3}\n".format(line[0], line[1], line[2], line[3]) + f"{line[0]} {line[1]} {line[2]} {line[3]}\n" ) def read_txt_segments(folder, img_id): check_path(folder) - fname = os.path.join(folder, "segments_{0}.txt".format(img_id)) - with open(fname, "r") as f: + fname = os.path.join(folder, f"segments_{img_id}.txt") + with open(fname) as f: txt_lines = f.readlines() n_segments = int(txt_lines[0].strip()) assert n_segments + 1 == len(txt_lines) diff --git a/limap/visualize/__init__.py b/limap/visualize/__init__.py index e7b07c7e..19d7ddd3 100644 --- a/limap/visualize/__init__.py +++ b/limap/visualize/__init__.py @@ -1,5 +1,5 @@ -from .vis_utils import * +from .trackvis import * +from .vis_bipartite import * from .vis_lines import * from .vis_matches import * -from .vis_bipartite import * -from .trackvis import * +from .vis_utils import * diff --git a/limap/visualize/trackvis/__init__.py b/limap/visualize/trackvis/__init__.py index 9f670fa1..dbb611a2 100644 --- a/limap/visualize/trackvis/__init__.py +++ b/limap/visualize/trackvis/__init__.py @@ -1,2 +1,2 @@ -from .pyvista import PyVistaTrackVisualizer from .open3d import Open3DTrackVisualizer +from .pyvista import PyVistaTrackVisualizer diff --git a/limap/visualize/trackvis/base.py b/limap/visualize/trackvis/base.py index d4388527..e22a38df 100644 --- a/limap/visualize/trackvis/base.py +++ b/limap/visualize/trackvis/base.py @@ -1,7 +1,7 @@ from ..vis_utils import * -class BaseTrackVisualizer(object): +class BaseTrackVisualizer: def __init__(self, tracks): self.tracks = tracks self.counts = [track.count_images() for track in tracks] @@ -22,15 +22,7 @@ def report(self): def report_stats(self): counts = np.array(self.counts) print( - "[Track Report] (N2, N4, N6, N8, N10, N20, N50) = ({0}, {1}, {2}, {3}, {4}, {5}, {6})".format( - counts[counts >= 2].shape[0], - counts[counts >= 4].shape[0], - counts[counts >= 6].shape[0], - counts[counts >= 8].shape[0], - counts[counts >= 10].shape[0], - counts[counts >= 20].shape[0], - counts[counts >= 50].shape[0], - ) + f"[Track Report] (N2, N4, N6, N8, N10, N20, N50) = ({counts[counts >= 2].shape[0]}, {counts[counts >= 4].shape[0]}, {counts[counts >= 6].shape[0]}, {counts[counts >= 8].shape[0]}, {counts[counts >= 10].shape[0]}, {counts[counts >= 20].shape[0]}, {counts[counts >= 50].shape[0]})" ) def report_avg_supports(self, n_visible_views=4): @@ -39,17 +31,10 @@ def report_avg_supports(self, n_visible_views=4): arr = counts[counts >= n_visible_views] arr_lines = counts_lines[counts >= n_visible_views] print( - "average supporting images (>= {0}): {1} / {2} = {3:.2f}".format( - n_visible_views, arr.sum(), arr.shape[0], arr.mean() - ) + f"average supporting images (>= {n_visible_views}): {arr.sum()} / {arr.shape[0]} = {arr.mean():.2f}" ) print( - "average supporting lines (>= {0}): {1} / {2} = {3:.2f}".format( - n_visible_views, - arr_lines.sum(), - arr_lines.shape[0], - arr_lines.mean(), - ) + f"average supporting lines (>= {n_visible_views}): {arr_lines.sum()} 
/ {arr_lines.shape[0]} = {arr_lines.mean():.2f}"
         )
 
     def get_counts_np(self):
diff --git a/limap/visualize/trackvis/open3d.py b/limap/visualize/trackvis/open3d.py
index f6b414e7..a81372d3 100644
--- a/limap/visualize/trackvis/open3d.py
+++ b/limap/visualize/trackvis/open3d.py
@@ -1,7 +1,8 @@
 import open3d as o3d
-from .base import BaseTrackVisualizer
+
+from ..vis_lines import open3d_get_cameras, open3d_get_line_set
 from ..vis_utils import compute_robust_range_lines
-from ..vis_lines import open3d_get_line_set, open3d_get_cameras
+from .base import BaseTrackVisualizer
 
 
 class Open3DTrackVisualizer(BaseTrackVisualizer):
diff --git a/limap/visualize/vis_bipartite.py b/limap/visualize/vis_bipartite.py
index e0e3db59..d0246275 100644
--- a/limap/visualize/vis_bipartite.py
+++ b/limap/visualize/vis_bipartite.py
@@ -1,23 +1,19 @@
-import os
 import copy
-import cv2
+
+import _limap._base as _base
 import numpy as np
-import matplotlib
-import matplotlib.pyplot as plt
 
+from .vis_lines import (
+    open3d_add_cameras,
+    open3d_add_line_set,
+    open3d_add_points,
+)
 from .vis_utils import (
-    draw_segments,
     draw_points,
-    test_point_inside_ranges,
+    draw_segments,
     test_line_inside_ranges,
+    test_point_inside_ranges,
 )
-from .vis_lines import (
-    open3d_add_points,
-    open3d_add_line_set,
-    open3d_add_cameras,
-)
-
-import _limap._base as _base
 
 
 def draw_bipartite2d(image, bpt2d):
@@ -152,7 +148,7 @@ def open3d_draw_bipartite3d_pointline(
             np_triangles = np.array([[0, 1, 2], [0, 2, 3]]).astype(np.int32)
             mesh.vertices = o3d.utility.Vector3dVector(np_vertices)
             mesh.triangles = o3d.utility.Vector3iVector(np_triangles)
-            w.add_geometry("plane_{0}".format(plane_id), mesh)
+            w.add_geometry(f"plane_{plane_id}", mesh)
 
     # optionally draw cameras
     if imagecols is not None:
@@ -204,7 +200,7 @@ def open3d_draw_bipartite3d_vpline(bpt3d, ranges=None):
             vp_line_sets[vp_id],
             color=vp_id_to_color[vp_id],
             width=2,
-            name="lineset_vp_{0}".format(vp_id),
+            name=f"lineset_vp_{vp_id}",
         )
     # w = open3d_add_line_set(w, nonvp_line_set, color=(0.0, 0.0, 0.0), width=2, name="lineset_nonvp")
     w.reset_camera_to_default()
diff --git a/limap/visualize/vis_lines.py b/limap/visualize/vis_lines.py
index bc450b84..bf93d9e8 100644
--- a/limap/visualize/vis_lines.py
+++ b/limap/visualize/vis_lines.py
@@ -1,5 +1,6 @@
 import numpy as np
-from .vis_utils import test_point_inside_ranges, test_line_inside_ranges
+
+from .vis_utils import test_line_inside_ranges, test_point_inside_ranges
 
 
 def pyvista_vis_3d_lines(
@@ -116,9 +117,10 @@ def open3d_get_cameras(
     scale_cam_geometry=1.0,
     scale=1.0,
 ):
-    import open3d as o3d
     import copy
 
+    import open3d as o3d
+
     cameras = o3d.geometry.LineSet()
 
     camera_lines = {}
@@ -154,9 +156,10 @@ def open3d_add_cameras(
     scale_cam_geometry=1.0,
     scale=1.0,
 ):
-    import open3d as o3d
     import copy
 
+    import open3d as o3d
+
     camera_lines = {}
     for cam_id in imagecols.get_cam_ids():
         cam = imagecols.cam(cam_id)
diff --git a/limap/visualize/vis_matches.py b/limap/visualize/vis_matches.py
index e2a7f08e..357b2b99 100644
--- a/limap/visualize/vis_matches.py
+++ b/limap/visualize/vis_matches.py
@@ -1,8 +1,8 @@
 # Author: Rémi Pautrat
 
-import numpy as np
 import matplotlib
 import matplotlib.pyplot as plt
+import numpy as np
 import seaborn as sns
 
 
diff --git a/limap/visualize/vis_utils.py b/limap/visualize/vis_utils.py
index 955a596d..d2a790b8 100644
--- a/limap/visualize/vis_utils.py
+++ b/limap/visualize/vis_utils.py
@@ -1,10 +1,10 @@
+import copy
 import os
+
 import cv2
-import numpy as np
-import matplotlib
 import matplotlib.pyplot as plt
+import 
numpy as np import seaborn as sns -import copy def random_color(): @@ -369,15 +369,11 @@ def report_dist_reprojection(line3d, line2d, camview, prefix=None): sensitivity = line3d.sensitivity(camview) if prefix is None: print( - "angle = {0:.4f}, perp = {1:.4f}, overlap = {2:.4f}, sensi = {3:.4f}".format( - angle, perp_dist, overlap, sensitivity - ) + f"angle = {angle:.4f}, perp = {perp_dist:.4f}, overlap = {overlap:.4f}, sensi = {sensitivity:.4f}" ) else: print( - "{4}: angle = {0:.4f}, perp = {1:.4f}, overlap = {2:.4f}, sensi = {3:.4f}".format( - angle, perp_dist, overlap, sensitivity, prefix - ) + f"{prefix}: angle = {angle:.4f}, perp = {perp_dist:.4f}, overlap = {overlap:.4f}, sensi = {sensitivity:.4f}" ) @@ -393,9 +389,7 @@ def visualize_line_track( imagecols, linetrack, prefix="linetrack", report=False ): print( - "[VISUALIZE]: line length: {0}, num_supporting_lines: {1}".format( - linetrack.line.length(), len(linetrack.image_id_list) - ) + f"[VISUALIZE]: line length: {linetrack.line.length()}, num_supporting_lines: {len(linetrack.image_id_list)}" ) for idx, (img_id, line2d) in enumerate( zip(linetrack.image_id_list, linetrack.line2d_list) @@ -407,9 +401,7 @@ def visualize_line_track( linetrack.line, line2d, imagecols.camview(img_id), - prefix="Reprojecting to line {0} (img {1}, line {2})".format( - idx, img_id, linetrack.line_id_list[idx] - ), + prefix=f"Reprojecting to line {idx} (img {img_id}, line {linetrack.line_id_list[idx]})", ) line2d_proj = linetrack.line.projection(imagecols.camview(img_id)) img = draw_segments( @@ -420,11 +412,7 @@ def visualize_line_track( ) fname = os.path.join( "tmp", - "{0}.{1}.{2}.png".format( - prefix, - idx, - os.path.basename(imagecols.camimage(img_id).image_name())[:-4], - ), + f"{prefix}.{idx}.{os.path.basename(imagecols.camimage(img_id).image_name())[:-4]}.png", ) cv2.imwrite(fname, img) @@ -432,7 +420,6 @@ def visualize_line_track( def vis_vpresult( img, lines, vpres, vp_id=-1, show_original=False, endpoints=False ): - import seaborn as sns import cv2 n_vps = vpres.count_vps() @@ -442,10 +429,7 @@ def vis_vpresult( colors = [[255, 0, 0]] for line_id, line in enumerate(lines): c = [255, 255, 255] # default color: white - if not vpres.HasVP(line_id): - if not show_original: - continue - elif vp_id >= 0 and vpres.labels[line_id] != vp_id: + if not vpres.HasVP(line_id) or vp_id >= 0 and vpres.labels[line_id] != vp_id: if not show_original: continue else: diff --git a/limap/vplib/JLinkage/JLinkage.py b/limap/vplib/JLinkage/JLinkage.py index f40808e7..d512d838 100644 --- a/limap/vplib/JLinkage/JLinkage.py +++ b/limap/vplib/JLinkage/JLinkage.py @@ -1,7 +1,7 @@ -from ..base_vp_detector import BaseVPDetector, BaseVPDetectorOptions - from _limap import _vplib +from ..base_vp_detector import BaseVPDetector, BaseVPDetectorOptions + class JLinkage(BaseVPDetector): def __init__(self, cfg_jlinkage, options=BaseVPDetectorOptions()): diff --git a/limap/vplib/__init__.py b/limap/vplib/__init__.py index f1ffc6af..314a8ea1 100644 --- a/limap/vplib/__init__.py +++ b/limap/vplib/__init__.py @@ -1,2 +1,3 @@ from _limap._vplib import * + from .register_vp_detector import get_vp_detector diff --git a/limap/vplib/base_vp_detector.py b/limap/vplib/base_vp_detector.py index 779a377b..7eb3e513 100644 --- a/limap/vplib/base_vp_detector.py +++ b/limap/vplib/base_vp_detector.py @@ -1,10 +1,8 @@ -import numpy as np +from typing import NamedTuple + import joblib from tqdm import tqdm -import collections -from typing import NamedTuple - class BaseVPDetectorOptions(NamedTuple): """ 
@@ -75,6 +73,7 @@ def visualize( self, fname, img, lines, vpresult, show_original=False, endpoints=False ): import cv2 + import limap.visualize as limapvis img = limapvis.vis_vpresult( diff --git a/limap/vplib/progressivex/progressivex.py b/limap/vplib/progressivex/progressivex.py index 4fa4f0b2..2573e09b 100644 --- a/limap/vplib/progressivex/progressivex.py +++ b/limap/vplib/progressivex/progressivex.py @@ -1,10 +1,10 @@ -from ..base_vp_detector import BaseVPDetector, BaseVPDetectorOptions +from collections import namedtuple -from _limap import _vplib -import pyprogressivex import numpy as np +import pyprogressivex +from _limap import _vplib -from collections import namedtuple +from ..base_vp_detector import BaseVPDetector, BaseVPDetectorOptions ProgressiveXOptions = namedtuple( "ProgressiveXOptions", diff --git a/runners/7scenes/localization.py b/runners/7scenes/localization.py index fa36fa9f..83122281 100644 --- a/runners/7scenes/localization.py +++ b/runners/7scenes/localization.py @@ -1,29 +1,30 @@ -import os, sys -import numpy as np +import os +import sys sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) sys.path.append( os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) ) -import limap.base as _base -import limap.util.io as limapio -import limap.util.config as cfgutils -import limap.runners as _runners import argparse import logging import pickle -import pycolmap -import limap.pointsfm.read_write_model as colmap_utils from pathlib import Path +import pycolmap +from hloc.utils.parsers import parse_retrieval from utils import ( DepthReader, - read_scene_7scenes, + evaluate, get_result_filenames, + image_path_to_rendered_depth_path, + read_scene_7scenes, run_hloc_7scenes, ) -from utils import image_path_to_rendered_depth_path, evaluate -from hloc.utils.parsers import parse_retrieval + +import limap.pointsfm.read_write_model as colmap_utils +import limap.runners as _runners +import limap.util.config as cfgutils +import limap.util.io as limapio formatter = logging.Formatter( fmt="[%(asctime)s %(name)s %(levelname)s] %(message)s", @@ -107,7 +108,7 @@ def parse_config(): # Output path for LIMAP results (tmp) if cfg["output_dir"] is None: - cfg["output_dir"] = "tmp/7scenes/{}".format(args.scene) + cfg["output_dir"] = f"tmp/7scenes/{args.scene}" # Output folder for LIMAP linetracks (in tmp) if cfg["output_folder"] is None: cfg["output_folder"] = "finaltracks" diff --git a/runners/7scenes/utils.py b/runners/7scenes/utils.py index 1da23c18..f048e1e4 100644 --- a/runners/7scenes/utils.py +++ b/runners/7scenes/utils.py @@ -1,16 +1,12 @@ -import numpy as np +import logging import os +from pathlib import Path + +import numpy as np import PIL +import PIL.Image import pycolmap import torch -from pathlib import Path -from tqdm import tqdm -import logging -import PIL.Image -import limap.base as _base -import limap.pointsfm as _psfm -import limap.util.io as limapio - from hloc import ( extract_features, localize_sfm, @@ -18,12 +14,17 @@ pairs_from_covisibility, triangulation, ) -from hloc.utils.read_write_model import read_model, write_model from hloc.pipelines.Cambridge.utils import ( create_query_list_with_intrinsics, evaluate, ) from hloc.utils.parsers import * +from hloc.utils.read_write_model import read_model, write_model +from tqdm import tqdm + +import limap.base as _base +import limap.pointsfm as _psfm +import limap.util.io as limapio ############################################################################### # The following utils 
functions are taken/modified from hloc.pipelines.7scenes @@ -39,7 +40,7 @@ def create_reference_sfm(full_model, ref_model, blacklist=None, ext=".bin"): cameras, images, points3D = read_model(full_model, ext) if blacklist is not None: - with open(blacklist, "r") as f: + with open(blacklist) as f: blacklist = f.read().rstrip().split("\n") train_ids = [] @@ -282,7 +283,7 @@ def get_train_test_ids_from_sfm(full_model, blacklist=None, ext=".bin"): cameras, images, points3D = read_model(full_model, ext) if blacklist is not None: - with open(blacklist, "r") as f: + with open(blacklist) as f: blacklist = f.read().rstrip().split("\n") train_ids, test_ids = [], [] @@ -390,11 +391,11 @@ def run_hloc_7scenes( evaluate(gt_dir, results_file, test_list) else: if logger: - logger.info(f"Point-only localization skipped.") + logger.info("Point-only localization skipped.") # Read coarse poses poses = {} - with open(results_file, "r") as f: + with open(results_file) as f: lines = [] for data in f.read().rstrip().split("\n"): data = data.split() diff --git a/runners/__init__.py b/runners/__init__.py index 31f88d48..333808e7 100644 --- a/runners/__init__.py +++ b/runners/__init__.py @@ -1,3 +1,3 @@ -from .colmap_triangulation import read_scene_colmap from .bundler_triangulation import read_scene_bundler +from .colmap_triangulation import read_scene_colmap from .visualsfm_triangulation import read_scene_visualsfm diff --git a/runners/bundler_triangulation.py b/runners/bundler_triangulation.py index 350a0533..b980d708 100644 --- a/runners/bundler_triangulation.py +++ b/runners/bundler_triangulation.py @@ -1,12 +1,14 @@ -import os, sys +import os +import sys + import numpy as np sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) import limap.base as _base import limap.pointsfm as _psfm -import limap.util.io as limapio -import limap.util.config as cfgutils import limap.runners +import limap.util.config as cfgutils +import limap.util.io as limapio def read_scene_bundler( diff --git a/runners/cambridge/localization.py b/runners/cambridge/localization.py index 8c594d76..1f521c1e 100644 --- a/runners/cambridge/localization.py +++ b/runners/cambridge/localization.py @@ -1,31 +1,29 @@ -import os, sys -import numpy as np +import os +import sys sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) sys.path.append( os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) ) -import limap.base as _base -import limap.pointsfm as _psfm -import limap.util.io as limapio -import limap.util.config as cfgutils -import limap.runners as _runners import argparse import logging -import pycolmap import pickle from pathlib import Path from hloc.utils.parsers import parse_retrieval from utils import ( - read_scene_visualsfm, - get_scene_info, - get_result_filenames, eval, - undistort_and_resize, + get_result_filenames, + get_scene_info, + read_scene_visualsfm, run_hloc_cambridge, + undistort_and_resize, ) +import limap.runners as _runners +import limap.util.config as cfgutils +import limap.util.io as limapio + formatter = logging.Formatter( fmt="[%(asctime)s %(name)s %(levelname)s] %(message)s", datefmt="%Y/%m/%d %H:%M:%S", @@ -109,7 +107,7 @@ def parse_config(): cfg["n_neighbors_loc"] = args.num_loc # Output path for LIMAP results (tmp) if cfg["output_dir"] is None: - cfg["output_dir"] = "tmp/cambridge/{}".format(scene_id) + cfg["output_dir"] = f"tmp/cambridge/{scene_id}" # Output folder for LIMAP linetracks (in tmp) if cfg["output_folder"] is None: cfg["output_folder"] 
= "finaltracks" @@ -158,7 +156,7 @@ def main(): ) img_name_to_id = { - "image{0:08d}.png".format(id): id for id in (train_ids + query_ids) + f"image{id:08d}.png": id for id in (train_ids + query_ids) } ########################################################## @@ -207,7 +205,7 @@ def main(): id_to_origin_name[img_name_to_id[n]] for n in _retrieval[name] ] hloc_name_dict = { - id: "image{0:08d}.png".format(id) for id in (train_ids + query_ids) + id: f"image{id:08d}.png" for id in (train_ids + query_ids) } # Update coarse poses for epipolar methods diff --git a/runners/cambridge/utils.py b/runners/cambridge/utils.py index f927a111..c5f01b16 100644 --- a/runners/cambridge/utils.py +++ b/runners/cambridge/utils.py @@ -1,25 +1,27 @@ -import os, sys +import os +import sys + import numpy as np sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) sys.path.append( os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) ) -import imagesize -import pycolmap -from tqdm import tqdm from pathlib import Path -import limap.base as _base -import limap.pointsfm as _psfm -import limap.util.io as limapio +import imagesize +import pycolmap from hloc import ( extract_features, localize_sfm, match_features, pairs_from_retrieval, ) -from hloc.utils.parsers import parse_retrieval +from tqdm import tqdm + +import limap.base as _base +import limap.pointsfm as _psfm +import limap.util.io as limapio def read_scene_visualsfm( @@ -82,9 +84,10 @@ def get_scene_info(vsfm_path, imagecols, query_images): def undistort_and_resize(cfg, imagecols, logger=None): - import limap.runners as _runners import cv2 + import limap.runners as _runners + # undistort images logger.info("Performing undistortion...") if not imagecols.IsUndistorted(): @@ -190,7 +193,7 @@ def eval(filename, poses_gt, query_ids, id_to_name, logger): errors_t = [] errors_R = [] pose_results = {} - with open(filename, "r") as f: + with open(filename) as f: for data in f.read().rstrip().split("\n"): data = data.split() name = data[0] @@ -259,10 +262,10 @@ def run_hloc_cambridge( query_list = results_dir / "query_list_with_intrinsics.txt" loc_pairs = results_dir / f"pairs-query-netvlad{num_loc}.txt" image_list = [ - "image{0:08d}.png".format(img_id) for img_id in (train_ids + query_ids) + f"image{img_id:08d}.png" for img_id in (train_ids + query_ids) ] img_name_to_id = { - "image{0:08d}.png".format(id): id for id in (train_ids + query_ids) + f"image{id:08d}.png": id for id in (train_ids + query_ids) } imagecols_train = imagecols.subset_by_image_ids(train_ids) @@ -286,8 +289,8 @@ def run_hloc_cambridge( global_descriptors, loc_pairs, num_loc, - db_list=["image{0:08d}.png".format(img_id) for img_id in train_ids], - query_list=["image{0:08d}.png".format(img_id) for img_id in query_ids], + db_list=[f"image{img_id:08d}.png" for img_id in train_ids], + query_list=[f"image{img_id:08d}.png" for img_id in query_ids], ) # feature extraction @@ -336,7 +339,7 @@ def run_hloc_cambridge( ) # Read coarse poses - with open(results_file, "r") as f: + with open(results_file) as f: lines = [] for data in f.read().rstrip().split("\n"): data = data.split() @@ -361,11 +364,11 @@ def run_hloc_cambridge( logger.info(f"Coarse pose saved at {results_file}") else: if logger: - logger.info(f"Point-only localization skipped.") + logger.info("Point-only localization skipped.") # Read coarse poses poses = {} - with open(results_file, "r") as f: + with open(results_file) as f: lines = [] for data in f.read().rstrip().split("\n"): data = data.split() 
diff --git a/runners/colmap_triangulation.py b/runners/colmap_triangulation.py index 81be5dd2..9b3aba1b 100644 --- a/runners/colmap_triangulation.py +++ b/runners/colmap_triangulation.py @@ -1,12 +1,14 @@ -import os, sys +import os +import sys + import numpy as np sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) import limap.base as _base import limap.pointsfm as _psfm -import limap.util.io as limapio -import limap.util.config as cfgutils import limap.runners +import limap.util.config as cfgutils +import limap.util.io as limapio def read_scene_colmap( diff --git a/runners/eth3d/ETH3D.py b/runners/eth3d/ETH3D.py index 7ebdba3f..68a5a5ea 100644 --- a/runners/eth3d/ETH3D.py +++ b/runners/eth3d/ETH3D.py @@ -1,4 +1,5 @@ -import os, sys +import os + import cv2 import numpy as np @@ -70,11 +71,9 @@ def set_scene_id(self, reso_type, scene_id, cam_id=-1): self.sparse_folder = "rig_calibration_undistorted" else: raise NotImplementedError - if not (scene_id in self.scenes[reso_type]): + if scene_id not in self.scenes[reso_type]: raise ValueError( - "Scene {0} does not exist in ETH3D {1} data.".format( - scene_id, reso_type - ) + f"Scene {scene_id} does not exist in ETH3D {reso_type} data." ) self.scene_dir = os.path.join(self.data_dir, self.reso_type, scene_id) self.cam_id = cam_id @@ -89,11 +88,11 @@ def get_depth_fname(self, imname, use_inpainted=True): imname = os.path.basename(imname) if use_inpainted: fname_depth = os.path.join( - self.scene_dir, "inpainted_depth", "{0}.png".format(imname) + self.scene_dir, "inpainted_depth", f"{imname}.png" ) else: fname_depth = os.path.join( - self.scene_dir, "ground_truth_depth", "{0}.png".format(imname) + self.scene_dir, "ground_truth_depth", f"{imname}.png" ) return fname_depth diff --git a/runners/eth3d/fitnmerge.py b/runners/eth3d/fitnmerge.py index 0ea3ae97..a2822a48 100644 --- a/runners/eth3d/fitnmerge.py +++ b/runners/eth3d/fitnmerge.py @@ -1,5 +1,5 @@ -import os, sys -import numpy as np +import os +import sys sys.path.append(os.path.dirname(os.path.abspath(__file__))) from ETH3D import ETH3D @@ -8,8 +8,8 @@ sys.path.append( os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) ) -import limap.util.config as cfgutils import limap.runners +import limap.util.config as cfgutils def run_scene_eth3d(cfg, dataset, reso_type, scene_id, cam_id=0): diff --git a/runners/eth3d/loader.py b/runners/eth3d/loader.py index 423e4de4..5f1bc04e 100644 --- a/runners/eth3d/loader.py +++ b/runners/eth3d/loader.py @@ -1,6 +1,8 @@ -import os, sys -import numpy as np +import os +import sys + import cv2 +import numpy as np sys.path.append( os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) diff --git a/runners/eth3d/triangulation.py b/runners/eth3d/triangulation.py index 6ab1d23d..3839a2b3 100644 --- a/runners/eth3d/triangulation.py +++ b/runners/eth3d/triangulation.py @@ -1,5 +1,5 @@ -import os, sys -import numpy as np +import os +import sys sys.path.append(os.path.dirname(os.path.abspath(__file__))) from ETH3D import ETH3D @@ -8,8 +8,8 @@ sys.path.append( os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) ) -import limap.util.config as cfgutils import limap.runners +import limap.util.config as cfgutils def run_scene_eth3d(cfg, dataset, reso_type, scene_id, cam_id=0): diff --git a/runners/hypersim/Hypersim.py b/runners/hypersim/Hypersim.py index ed4c9bfb..69bc44dd 100644 --- a/runners/hypersim/Hypersim.py +++ b/runners/hypersim/Hypersim.py @@ -1,6 +1,7 @@ -import os, sys 
-import h5py +import os + import cv2 +import h5py import numpy as np import pyvista as pv @@ -58,7 +59,7 @@ def raydepth2depth(raydepth, K, img_hw): K_inv = np.linalg.inv(K) h, w = raydepth.shape[0], raydepth.shape[1] grids = np.meshgrid(np.arange(w), np.arange(h)) - coords_homo = [grids[0].reshape(-1), grids[1].reshape(-1), np.ones((h * w))] + coords_homo = [grids[0].reshape(-1), grids[1].reshape(-1), np.ones(h * w)] coords_homo = np.stack(coords_homo) coeffs = np.linalg.norm(K_inv @ coords_homo, axis=0) coeffs = coeffs.reshape(h, w) @@ -95,8 +96,9 @@ def set_max_dim(cls, max_dim): @classmethod def set_resize_ratio(cls, ratio): - cls.h, cls.w = int(round(cls.default_h * ratio)), int( - round(cls.default_w * ratio) + cls.h, cls.w = ( + int(round(cls.default_h * ratio)), + int(round(cls.default_w * ratio)), ) cls.K[0, :] = cls.default_K[0, :] * cls.w / cls.default_w cls.K[1, :] = cls.default_K[1, :] * cls.h / cls.default_h @@ -108,7 +110,7 @@ def read_mpau(self, scene_dir): import csv param_dict = {} - with open(fname_metascene, "r") as f: + with open(fname_metascene) as f: reader = csv.DictReader(f) for row in reader: param_dict[row["parameter_name"]] = row["parameter_value"] @@ -117,7 +119,7 @@ def read_mpau(self, scene_dir): return float(param_dict[key]) else: raise ValueError( - "Key {0} not exists in {1}".format(key, fname_metascene) + f"Key {key} not exists in {fname_metascene}" ) def set_scene_id(self, scene_id): @@ -130,8 +132,8 @@ def filter_index_list(self, index_list, cam_id=0): image_fname = os.path.join( self.scene_dir, "images", - "scene_cam_{0:02d}_final_preview".format(cam_id), - "frame.{0:04d}.color.jpg".format(image_id), + f"scene_cam_{cam_id:02d}_final_preview", + f"frame.{image_id:04d}.color.jpg", ) if os.path.exists(image_fname): new_index_list.append(image_id) @@ -148,7 +150,7 @@ def load_cameras(self, cam_id=0, scene_id=None): positions_fname = os.path.join( scene_dir, "_detail", - "cam_{0:02d}".format(cam_id), + f"cam_{cam_id:02d}", "camera_keyframe_positions.hdf5", ) with h5py.File(positions_fname, "r") as f: @@ -157,7 +159,7 @@ def load_cameras(self, cam_id=0, scene_id=None): orientations_fname = os.path.join( scene_dir, "_detail", - "cam_{0:02d}".format(cam_id), + f"cam_{cam_id:02d}", "camera_keyframe_orientations.hdf5", ) with h5py.File(orientations_fname, "r") as f: @@ -179,8 +181,8 @@ def load_imname(self, image_id, cam_id=0, scene_id=None): image_fname = os.path.join( scene_dir, "images", - "scene_cam_{0:02d}_final_preview".format(cam_id), - "frame.{0:04d}.color.jpg".format(image_id), + f"scene_cam_{cam_id:02d}_final_preview", + f"frame.{image_id:04d}.color.jpg", ) return image_fname @@ -200,8 +202,8 @@ def load_raydepth_fname(self, image_id, cam_id=0, scene_id=None): raydepth_fname = os.path.join( scene_dir, "images", - "scene_cam_{0:02d}_geometry_hdf5".format(cam_id), - "frame.{0:04d}.depth_meters.hdf5".format(image_id), + f"scene_cam_{cam_id:02d}_geometry_hdf5", + f"frame.{image_id:04d}.depth_meters.hdf5", ) return raydepth_fname diff --git a/runners/hypersim/fitnmerge.py b/runners/hypersim/fitnmerge.py index 75156192..959d64c6 100644 --- a/runners/hypersim/fitnmerge.py +++ b/runners/hypersim/fitnmerge.py @@ -1,5 +1,5 @@ -import os, sys -import numpy as np +import os +import sys sys.path.append(os.path.dirname(os.path.abspath(__file__))) from Hypersim import Hypersim @@ -8,8 +8,8 @@ sys.path.append( os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) ) -import limap.util.config as cfgutils import limap.runners +import 
limap.util.config as cfgutils def run_scene_hypersim(cfg, dataset, scene_id, cam_id=0): diff --git a/runners/hypersim/loader.py b/runners/hypersim/loader.py index 51392111..a1efac4c 100644 --- a/runners/hypersim/loader.py +++ b/runners/hypersim/loader.py @@ -1,8 +1,10 @@ -import os, sys +import os +import sys + import numpy as np sys.path.append(os.path.dirname(os.path.abspath(__file__))) -from Hypersim import read_raydepth, raydepth2depth +from Hypersim import raydepth2depth, read_raydepth sys.path.append( os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) diff --git a/runners/hypersim/refine_sfm.py b/runners/hypersim/refine_sfm.py index 5b02a90e..8c8706a4 100644 --- a/runners/hypersim/refine_sfm.py +++ b/runners/hypersim/refine_sfm.py @@ -1,4 +1,6 @@ -import os, sys +import os +import sys + import numpy as np sys.path.append(os.path.dirname(os.path.abspath(__file__))) @@ -8,16 +10,13 @@ sys.path.append( os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) ) -import limap.base as _base +import limap.optimize +import limap.pointsfm +import limap.runners import limap.util.config as cfgutils import limap.util.evaluation as limapeval import limap.util.io as limapio -import limap.runners -import limap.optimize -import limap.pointsfm - from runners.colmap_triangulation import run_colmap_triangulation -from runners.pointline_association import pointline_association def run_scene_hypersim(cfg, dataset, scene_id, cam_id=0): @@ -71,15 +70,11 @@ def run_scene_hypersim(cfg, dataset, scene_id, cam_id=0): imagecols, imagecols_gt ) print( - "original: trans: {0:.4f}, rot: {1:.4f}".format( - np.median(trans_errs_orig), np.median(rot_errs_orig) - ) + f"original: trans: {np.median(trans_errs_orig):.4f}, rot: {np.median(rot_errs_orig):.4f}" ) trans_errs, rot_errs = limapeval.eval_imagecols(new_imagecols, imagecols_gt) print( - "optimized: trans: {0:.4f}, rot: {1:.4f}".format( - np.median(trans_errs), np.median(rot_errs) - ) + f"optimized: trans: {np.median(trans_errs):.4f}, rot: {np.median(rot_errs):.4f}" ) diff --git a/runners/hypersim/triangulation.py b/runners/hypersim/triangulation.py index acdabab6..38890475 100644 --- a/runners/hypersim/triangulation.py +++ b/runners/hypersim/triangulation.py @@ -1,5 +1,5 @@ -import os, sys -import numpy as np +import os +import sys sys.path.append(os.path.dirname(os.path.abspath(__file__))) from Hypersim import Hypersim @@ -8,8 +8,8 @@ sys.path.append( os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) ) -import limap.util.config as cfgutils import limap.runners +import limap.util.config as cfgutils def run_scene_hypersim(cfg, dataset, scene_id, cam_id=0): diff --git a/runners/inloc/localization.py b/runners/inloc/localization.py index cfc3e2de..a206b1b5 100644 --- a/runners/inloc/localization.py +++ b/runners/inloc/localization.py @@ -1,25 +1,27 @@ -import os, sys +import os +import sys sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) sys.path.append( os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) ) -import limap.util.io as limapio -import limap.util.config as cfgutils -import limap.runners as _runners import argparse import logging import pickle -from tqdm import tqdm from pathlib import Path + from utils import ( InLocP3DReader, - read_dataset_inloc, get_result_filenames, - run_hloc_inloc, parse_retrieval, + read_dataset_inloc, + run_hloc_inloc, ) +import limap.runners as _runners +import limap.util.config as cfgutils +import 
limap.util.io as limapio + formatter = logging.Formatter( fmt="[%(asctime)s %(name)s %(levelname)s] %(message)s", datefmt="%Y/%m/%d %H:%M:%S", @@ -98,9 +100,7 @@ def parse_config(): # Output folder for LIMAP linetracks (in tmp) if cfg["output_folder"] is None: cfg["output_folder"] = "finaltracks" - cfg[ - "inloc_dataset" - ] = ( + cfg["inloc_dataset"] = ( args.dataset ) # For reading camera poses for estimating 3D lines fron depth return cfg, args @@ -114,7 +114,7 @@ def main(): outputs = Path(cfg["output_dir"]) / "localization" outputs.mkdir(exist_ok=True, parents=True) - logger.info(f"Working on InLoc.") + logger.info("Working on InLoc.") pairs = Path("third-party/Hierarchical-Localization/pairs/inloc/") loc_pairs = pairs / "pairs-query-netvlad{}{}.txt".format( args.num_loc, "-temporal" if args.use_temporal else "" diff --git a/runners/inloc/utils.py b/runners/inloc/utils.py index 4bf447ec..223b8d62 100644 --- a/runners/inloc/utils.py +++ b/runners/inloc/utils.py @@ -1,16 +1,16 @@ -import os, sys -import numpy as np +import os +from pathlib import Path import imagesize -from tqdm import tqdm -from pathlib import Path +import numpy as np +from hloc import extract_features, localize_inloc, match_features +from hloc.utils.parsers import parse_retrieval from scipy.io import loadmat +from tqdm import tqdm + import limap.base as _base import limap.util.io as limapio -from hloc import extract_features, match_features, localize_inloc -from hloc.utils.parsers import parse_retrieval - class InLocP3DReader(_base.BaseP3DReader): def __init__(self, filename): @@ -191,11 +191,11 @@ def run_hloc_inloc( if logger: logger.info(f"Coarse pose saved at {results_file}") else: - logger.info(f"Point-only localization skipped.") + logger.info("Point-only localization skipped.") # Read coarse poses and inliers poses = {} - with open(results_file, "r") as f: + with open(results_file) as f: lines = [] for data in f.read().rstrip().split("\n"): data = data.split() diff --git a/runners/pointline_association.py b/runners/pointline_association.py index 0f75e9cf..17052719 100644 --- a/runners/pointline_association.py +++ b/runners/pointline_association.py @@ -1,19 +1,20 @@ -import os, sys +import os +import sys sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) -import numpy as np import math -import limap.util.io as limapio -import limap.util.config as cfgutils -import limap.visualize as limapvis +import numpy as np + import limap.base as _base -import limap.ceresbase as _ceresbase -import limap.pointsfm as _psfm -import limap.vplib as _vplib import limap.optimize as _optim -import limap.structures as _structures +import limap.pointsfm as _psfm import limap.runners +import limap.structures as _structures +import limap.util.config as cfgutils +import limap.util.io as limapio +import limap.visualize as limapvis +import limap.vplib as _vplib def report_vp(vpresults, vptracks, print_pairs=False): @@ -26,24 +27,18 @@ def report_vp(vpresults, vptracks, print_pairs=False): n_pairs_parallel += 1 if print_pairs: print( - "[LOG] Parallel pair detected: {0} / {1}, angle = {2:.2f}".format( - i, j, angle - ) + f"[LOG] Parallel pair detected: {i} / {j}, angle = {angle:.2f}" ) if angle >= 87.0: n_pairs_orthogonal += 1 if print_pairs: print( - "[LOG] Orthogonal pair detected: {0} / {1}, angle = {2:.2f}".format( - i, j, angle - ) + f"[LOG] Orthogonal pair detected: {i} / {j}, angle = {angle:.2f}" ) - print("[LOG] number of VP tracks: {0}".format(len(vptracks))) + print(f"[LOG] number of VP tracks: 
{len(vptracks)}") print("[LOG]", [track.length() for track in vptracks]) print( - "[LOG] parallel pairs: {0}, orthogonal pairs: {1}".format( - n_pairs_parallel, n_pairs_orthogonal - ) + f"[LOG] parallel pairs: {n_pairs_parallel}, orthogonal pairs: {n_pairs_orthogonal}" ) diff --git a/runners/refinement.py b/runners/refinement.py index c927c299..90a510f6 100644 --- a/runners/refinement.py +++ b/runners/refinement.py @@ -1,14 +1,14 @@ -import os, sys +import os +import sys sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) import numpy as np -import limap.util.io as limapio -import limap.util.config as cfgutils - import limap.base as _base -import limap.vplib as _vplib import limap.optimize as _optim +import limap.util.config as cfgutils +import limap.util.io as limapio +import limap.vplib as _vplib def one_by_one_refinement(cfg): diff --git a/runners/rome16k/Rome16K.py b/runners/rome16k/Rome16K.py index 6d38d2cc..528b14ab 100644 --- a/runners/rome16k/Rome16K.py +++ b/runners/rome16k/Rome16K.py @@ -1,4 +1,5 @@ -import os, sys +import os + import numpy as np @@ -7,18 +8,18 @@ def __init__(self, list_file, component_folder): self.image_list = self.load_image_list(list_file) self.components = {} self.component_names = [] - self.component_ids = (np.ones((len(self.image_list))) * -1).tolist() + self.component_ids = (np.ones(len(self.image_list)) * -1).tolist() self.load_components(component_folder) def load_image_list(self, list_file): - print("Loading bundler list file {0}...".format(list_file)) - with open(list_file, "r") as f: + print(f"Loading bundler list file {list_file}...") + with open(list_file) as f: lines = f.readlines() image_names = [line.strip("\n").split(" ")[0] for line in lines] return image_names def load_component_file(self, component_file): - with open(component_file, "r") as f: + with open(component_file) as f: lines = f.readlines() imname_list = [] for line in lines: diff --git a/runners/rome16k/statistics.py b/runners/rome16k/statistics.py index fef91756..d759a12c 100644 --- a/runners/rome16k/statistics.py +++ b/runners/rome16k/statistics.py @@ -1,4 +1,6 @@ -import os, sys +import os +import sys + import numpy as np sys.path.append(os.path.dirname(os.path.abspath(__file__))) @@ -7,13 +9,7 @@ sys.path.append( os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) ) -import limap.base as _base -import limap.pointsfm as _psfm -import limap.util.io as limapio import limap.util.config as cfgutils -import limap.runners - -from runners.bundler_triangulation import read_scene_bundler def report_rome16k_statistics(cfg, bundler_path, list_path, model_path): diff --git a/runners/rome16k/triangulation.py b/runners/rome16k/triangulation.py index 2d43d8ee..caed521f 100644 --- a/runners/rome16k/triangulation.py +++ b/runners/rome16k/triangulation.py @@ -1,5 +1,5 @@ -import os, sys -import numpy as np +import os +import sys sys.path.append(os.path.dirname(os.path.abspath(__file__))) from Rome16K import Rome16K @@ -7,12 +7,8 @@ sys.path.append( os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) ) -import limap.base as _base -import limap.pointsfm as _psfm -import limap.util.io as limapio -import limap.util.config as cfgutils import limap.runners - +import limap.util.config as cfgutils from runners.bundler_triangulation import read_scene_bundler diff --git a/runners/scannet/ScanNet.py b/runners/scannet/ScanNet.py index 9c18ae68..e6639da0 100644 --- a/runners/scannet/ScanNet.py +++ b/runners/scannet/ScanNet.py @@ -1,7 
+1,8 @@ -import os, sys +import copy +import os + import cv2 import numpy as np -import copy class ScanNet: @@ -40,7 +41,7 @@ def set_stride(self, stride): self.loadinfos() def read_intrinsics(self, fname, mode="color"): - with open(fname, "r") as f: + with open(fname) as f: lines = f.readlines() img_hw = [-1, -1] K = np.zeros((3, 3)) @@ -67,7 +68,7 @@ def read_intrinsics(self, fname, mode="color"): return K, img_hw def read_pose(self, pose_txt): - with open(pose_txt, "r") as f: + with open(pose_txt) as f: lines = f.readlines() mat = [] for line in lines: @@ -89,7 +90,7 @@ def loadinfos(self): # load intrinsic fname_meta = os.path.join( - self.scene_dir, "{0}.txt".format(self.scene_id) + self.scene_dir, f"{self.scene_id}.txt" ) K_orig, img_hw_orig = self.read_intrinsics(fname_meta) h_orig, w_orig = img_hw_orig[0], img_hw_orig[1] @@ -116,12 +117,12 @@ def loadinfos(self): self.imname_list, self.Rs, self.Ts = [], [], [] for index in index_list: imname = os.path.join( - self.scene_dir, "color", "{0}.jpg".format(index) + self.scene_dir, "color", f"{index}.jpg" ) self.imname_list.append(imname) pose_txt = os.path.join( - self.scene_dir, "pose", "{0}.txt".format(index) + self.scene_dir, "pose", f"{index}.txt" ) R, T = self.read_pose(pose_txt) self.Rs.append(R) @@ -130,7 +131,7 @@ def loadinfos(self): def get_depth_fname(self, imname): depth_folder = os.path.join(self.scene_dir, "depth") img_id = int(os.path.basename(imname)[:-4]) - depth_fname = os.path.join(depth_folder, "{0}.png".format(img_id)) + depth_fname = os.path.join(depth_folder, f"{img_id}.png") return depth_fname def get_depth(self, imname): diff --git a/runners/scannet/fitnmerge.py b/runners/scannet/fitnmerge.py index 832979c8..50759d33 100644 --- a/runners/scannet/fitnmerge.py +++ b/runners/scannet/fitnmerge.py @@ -1,15 +1,15 @@ -import os, sys -import numpy as np +import os +import sys sys.path.append(os.path.dirname(os.path.abspath(__file__))) -from ScanNet import ScanNet from loader import read_scene_scannet +from ScanNet import ScanNet sys.path.append( os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) ) -import limap.util.config as cfgutils import limap.runners +import limap.util.config as cfgutils def run_scene_scannet(cfg, dataset, scene_id): diff --git a/runners/scannet/loader.py b/runners/scannet/loader.py index 0ea1d691..a51213e4 100644 --- a/runners/scannet/loader.py +++ b/runners/scannet/loader.py @@ -1,6 +1,8 @@ -import os, sys -import numpy as np +import os +import sys + import cv2 +import numpy as np sys.path.append( os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) diff --git a/runners/scannet/triangulation.py b/runners/scannet/triangulation.py index 49337978..10e9b2a4 100644 --- a/runners/scannet/triangulation.py +++ b/runners/scannet/triangulation.py @@ -1,15 +1,15 @@ -import os, sys -import numpy as np +import os +import sys sys.path.append(os.path.dirname(os.path.abspath(__file__))) -from ScanNet import ScanNet from loader import read_scene_scannet +from ScanNet import ScanNet sys.path.append( os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) ) -import limap.util.config as cfgutils import limap.runners +import limap.util.config as cfgutils def run_scene_scannet(cfg, dataset, scene_id): diff --git a/runners/tests/line2d.py b/runners/tests/line2d.py index 2bbe0648..57b5659f 100644 --- a/runners/tests/line2d.py +++ b/runners/tests/line2d.py @@ -1,6 +1,6 @@ -import limap.util.config import limap.base import limap.line2d +import 
limap.util.config import limap.visualize # detect and describe lines diff --git a/runners/tests/localization.py b/runners/tests/localization.py index 1dfa14c0..7db098bd 100644 --- a/runners/tests/localization.py +++ b/runners/tests/localization.py @@ -1,4 +1,6 @@ -import os, sys +import os +import sys + import numpy as np sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) @@ -6,15 +8,16 @@ os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) ) -import cv2 -import limap -import limap.base as _base -import limap.estimators as _estimators -import logging import argparse +import logging from pathlib import Path + +import cv2 from hloc.utils.read_write_model import * +import limap.base as _base +import limap.estimators as _estimators + formatter = logging.Formatter( fmt="[%(asctime)s %(name)s %(levelname)s] %(message)s", datefmt="%Y/%m/%d %H:%M:%S", diff --git a/runners/visualsfm_triangulation.py b/runners/visualsfm_triangulation.py index 41541497..58cdf699 100644 --- a/runners/visualsfm_triangulation.py +++ b/runners/visualsfm_triangulation.py @@ -1,12 +1,14 @@ -import os, sys +import os +import sys + import numpy as np sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) import limap.base as _base import limap.pointsfm as _psfm -import limap.util.io as limapio -import limap.util.config as cfgutils import limap.runners +import limap.util.config as cfgutils +import limap.util.io as limapio def read_scene_visualsfm( diff --git a/scripts/aachen_undistort.py b/scripts/aachen_undistort.py index 8f8b5c8f..26046ba0 100644 --- a/scripts/aachen_undistort.py +++ b/scripts/aachen_undistort.py @@ -1,13 +1,14 @@ -import os, sys +import os +import sys sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) -import numpy as np + import cv2 +import numpy as np from tqdm import tqdm import limap.base as _base import limap.undistortion as _undist -import pdb data_dir = os.path.expanduser("~/data/Localization/Aachen-1.1") img_orig_dir = os.path.join(data_dir, "images_upright") diff --git a/scripts/convert_model.py b/scripts/convert_model.py index f8817f0b..92b4eef1 100644 --- a/scripts/convert_model.py +++ b/scripts/convert_model.py @@ -1,4 +1,5 @@ -import os, sys +import os +import sys sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) import limap.base as _base diff --git a/scripts/eval_hypersim.py b/scripts/eval_hypersim.py index 0a63e3da..00e76333 100644 --- a/scripts/eval_hypersim.py +++ b/scripts/eval_hypersim.py @@ -1,19 +1,17 @@ -import os, sys +import os +import sys sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +import matplotlib.pyplot as plt import numpy as np from tqdm import tqdm -import limap.base as _base import limap.evaluation as _eval import limap.util.config as cfgutils import limap.util.io as limapio import limap.visualize as limapvis from runners.hypersim.Hypersim import Hypersim -import matplotlib as mpl -import matplotlib.pyplot as plt - # hypersim MPAU = 0.02539999969303608 @@ -29,7 +27,6 @@ def visualize_error_to_GT(evaluator, lines, threshold): outlier_lines = evaluator.ComputeOutlierSegs(lines, threshold) # visualize - import limap.visualize as limapvis import open3d as o3d vis = o3d.visualization.Visualizer() @@ -73,7 +70,7 @@ def report_error_to_GT(evaluator, lines, vis_err_th=None): def read_ply(fname): - from plyfile import PlyData, PlyElement + from plyfile import PlyData plydata = PlyData.read(fname) x = 
np.asarray(plydata.elements[0].data["x"]) diff --git a/scripts/eval_tnt.py b/scripts/eval_tnt.py index 60781323..e1672bd1 100644 --- a/scripts/eval_tnt.py +++ b/scripts/eval_tnt.py @@ -1,6 +1,8 @@ -import os, sys +import os +import sys sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +import matplotlib.pyplot as plt import numpy as np from tqdm import tqdm @@ -10,9 +12,6 @@ import limap.util.io as limapio import limap.visualize as limapvis -import matplotlib as mpl -import matplotlib.pyplot as plt - def plot_curve(fname, thresholds, data): plt.plot(thresholds, data) @@ -63,7 +62,7 @@ def report_pc_recall_for_GT(evaluator, lines): def read_ply(fname): - from plyfile import PlyData, PlyElement + from plyfile import PlyData plydata = PlyData.read(fname) x = np.asarray(plydata.elements[0].data["x"]) diff --git a/scripts/tnt_align.py b/scripts/tnt_align.py index e64d643f..410be42e 100644 --- a/scripts/tnt_align.py +++ b/scripts/tnt_align.py @@ -1,6 +1,6 @@ -import os, sys +import os + import numpy as np -import pdb MAX_ERROR = 0.01 colmap_output_path = os.path.expanduser("~/data/TanksTemples/colmap/training") diff --git a/scripts/tnt_colmap_runner.py b/scripts/tnt_colmap_runner.py index e1639491..6be725f5 100644 --- a/scripts/tnt_colmap_runner.py +++ b/scripts/tnt_colmap_runner.py @@ -1,4 +1,4 @@ -import os, sys +import os import time path = "training" From 7c77204c3af694ffdf1059ace7a44e0e38b7281a Mon Sep 17 00:00:00 2001 From: B1ueber2y Date: Sat, 19 Oct 2024 15:27:56 +0200 Subject: [PATCH 3/9] formatting. --- setup.py | 17 +++++++---------- visualize_3d_lines.py | 5 ++--- 2 files changed, 9 insertions(+), 13 deletions(-) diff --git a/setup.py b/setup.py index 55f6da4e..db7ac534 100644 --- a/setup.py +++ b/setup.py @@ -1,9 +1,8 @@ -# -*- coding: utf-8 -*- import os -import sys import subprocess +import sys -from setuptools import setup, Extension +from setuptools import Extension, setup from setuptools.command.build_ext import build_ext # Cores used for building the project @@ -47,12 +46,10 @@ def build_extension(self, ext): # EXAMPLE_VERSION_INFO shows you how to pass a value into the C++ code # from Python. cmake_args = [ - "-DCMAKE_LIBRARY_OUTPUT_DIRECTORY={}".format(extdir), - "-DPYTHON_EXECUTABLE={}".format(sys.executable), - "-DEXAMPLE_VERSION_INFO={}".format(self.distribution.get_version()), - "-DCMAKE_BUILD_TYPE={}".format( - cfg - ), # not used on MSVC, but no harm + f"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY={extdir}", + f"-DPYTHON_EXECUTABLE={sys.executable}", + f"-DEXAMPLE_VERSION_INFO={self.distribution.get_version()}", + f"-DCMAKE_BUILD_TYPE={cfg}", # not used on MSVC, but no harm ] build_args = [] @@ -63,7 +60,7 @@ def build_extension(self, ext): ["cmake", ext.sourcedir] + cmake_args, cwd=self.build_temp ) subprocess.check_call( - ["cmake", "--build", ".", "--parallel {0}".format(N_CORES)] + ["cmake", "--build", ".", f"--parallel {N_CORES}"] + build_args, cwd=self.build_temp, ) diff --git a/visualize_3d_lines.py b/visualize_3d_lines.py index 8e9bdf03..151b7d76 100644 --- a/visualize_3d_lines.py +++ b/visualize_3d_lines.py @@ -1,5 +1,4 @@ -import os, sys -import numpy as np +import os import limap.base as _base import limap.util.io as limapio @@ -113,7 +112,7 @@ def main(args): not args.imagecols.endswith(".npy") ): raise ValueError( - "Error! Input file {0} is not valid".format(args.imagecols) + f"Error! 
Input file {args.imagecols} is not valid" ) imagecols = _base.ImageCollection( limapio.read_npy(args.imagecols).item() From da1ff49aca607a3eb152d29c545bc8813c08caae Mon Sep 17 00:00:00 2001 From: B1ueber2y Date: Sat, 19 Oct 2024 15:31:45 +0200 Subject: [PATCH 4/9] update. --- ruff.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ruff.toml b/ruff.toml index bb421f1d..22d0c2d3 100644 --- a/ruff.toml +++ b/ruff.toml @@ -18,4 +18,4 @@ select = [ ignore = ["SIM117"] [lint.per-file-ignores] -"scripts/*.py" = ["E", "SIM", "UP", "B"] +"scripts/*.py" = ["E"] From f01c23fcae0f2cd993951af4d65e48c06a97a93f Mon Sep 17 00:00:00 2001 From: B1ueber2y Date: Sat, 19 Oct 2024 15:33:18 +0200 Subject: [PATCH 5/9] formatting. --- limap/line2d/SOLD2/model/model_util.py | 3 ++- limap/line2d/base_detector.py | 8 ++----- limap/optimize/extract_heatmaps_sold2.py | 1 - .../optimize/extract_track_patches_s2dnet.py | 4 +--- limap/pointsfm/colmap_sfm.py | 12 +++------- limap/pointsfm/functions.py | 4 +--- limap/pointsfm/model_converter.py | 4 +--- limap/pointsfm/visualsfm_reader.py | 4 +--- limap/runners/functions.py | 4 +--- limap/runners/functions_structures.py | 4 +--- limap/undistortion/undistort.py | 1 - limap/util/evaluation.py | 8 ++----- limap/util/io.py | 12 +++------- limap/visualize/vis_utils.py | 6 ++++- runners/hypersim/Hypersim.py | 4 +--- runners/scannet/ScanNet.py | 12 +++------- scripts/aachen_undistort.py | 8 +++---- scripts/eval_hypersim.py | 14 ++++------- scripts/eval_tnt.py | 24 +++++++------------ scripts/tnt_align.py | 12 +++++----- scripts/tnt_colmap_runner.py | 10 +++----- setup.py | 3 +-- visualize_3d_lines.py | 4 +--- 23 files changed, 55 insertions(+), 111 deletions(-) diff --git a/limap/line2d/SOLD2/model/model_util.py b/limap/line2d/SOLD2/model/model_util.py index f7404678..4a44c55f 100644 --- a/limap/line2d/SOLD2/model/model_util.py +++ b/limap/line2d/SOLD2/model/model_util.py @@ -180,7 +180,8 @@ def get_heatmap_decoder(self): def get_descriptor_decoder(self): """Get the descriptor decoder.""" if ( - self.cfg["descriptor_decoder"] not in self.supported_descriptor_decoder + self.cfg["descriptor_decoder"] + not in self.supported_descriptor_decoder ): raise ValueError( "[Error] The descriptor decoder selection is not supported." 
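The model_util.py hunk just above is a pure re-wrap: an over-long membership test is split across lines inside its existing parentheses so that each physical line fits the configured line length, leaving the expression itself untouched. A self-contained sketch of the same pattern, with stand-in values for the config dictionary and the supported-decoder list:

    supported_descriptor_decoder = ["superpoint_decoder"]  # stand-in list
    cfg = {"descriptor_decoder": "superpoint_decoder"}     # stand-in config

    # Long conditions are broken inside the parentheses that are already there;
    # Python joins such lines implicitly, so no backslash continuation is needed.
    if (
        cfg["descriptor_decoder"]
        not in supported_descriptor_decoder
    ):
        raise ValueError(
            "[Error] The descriptor decoder selection is not supported."
        )
    print("descriptor decoder accepted:", cfg["descriptor_decoder"])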
diff --git a/limap/line2d/base_detector.py b/limap/line2d/base_detector.py index b2a2dedc..945fda47 100644 --- a/limap/line2d/base_detector.py +++ b/limap/line2d/base_detector.py @@ -217,9 +217,7 @@ def detect_all_images(self, output_folder, imagecols, skip_exists=False): if self.visualize: img = imagecols.read_image(img_id) img = limapvis.draw_segments(img, segs, (0, 255, 0)) - fname = os.path.join( - vis_folder, f"img_{img_id}_det.png" - ) + fname = os.path.join(vis_folder, f"img_{img_id}_det.png") cv2.imwrite(fname, img) all_2d_segs = limapio.read_all_segments_from_folder(seg_folder) all_2d_segs = {id: all_2d_segs[id] for id in imagecols.get_img_ids()} @@ -306,9 +304,7 @@ def detect_and_extract_all_images( if self.visualize: img = imagecols.read_image(img_id) img = limapvis.draw_segments(img, segs, (0, 255, 0)) - fname = os.path.join( - vis_folder, f"img_{img_id}_det.png" - ) + fname = os.path.join(vis_folder, f"img_{img_id}_det.png") cv2.imwrite(fname, img) all_2d_segs = limapio.read_all_segments_from_folder(seg_folder) all_2d_segs = {id: all_2d_segs[id] for id in imagecols.get_img_ids()} diff --git a/limap/optimize/extract_heatmaps_sold2.py b/limap/optimize/extract_heatmaps_sold2.py index db77714e..7681c3e4 100644 --- a/limap/optimize/extract_heatmaps_sold2.py +++ b/limap/optimize/extract_heatmaps_sold2.py @@ -1,4 +1,3 @@ - import limap.base as _base import limap.line2d import limap.util.io as limapio diff --git a/limap/optimize/extract_track_patches_s2dnet.py b/limap/optimize/extract_track_patches_s2dnet.py index 0888a410..d5327f5f 100644 --- a/limap/optimize/extract_track_patches_s2dnet.py +++ b/limap/optimize/extract_track_patches_s2dnet.py @@ -44,9 +44,7 @@ def extract_track_patches_s2dnet( camview = imagecols.camview(img_id) line2d_range = extractor.GetLine2DRange(track, img_id, camview) line2d_collections[img_id].append([line2d_range, track_id]) - limapio.check_makedirs( - os.path.join(output_dir, f"track{track_id}") - ) + limapio.check_makedirs(os.path.join(output_dir, f"track{track_id}")) # extract line patches for each image for img_id in tqdm(imagecols.get_img_ids()): diff --git a/limap/pointsfm/colmap_sfm.py b/limap/pointsfm/colmap_sfm.py index 2772b8c6..ec7941cd 100644 --- a/limap/pointsfm/colmap_sfm.py +++ b/limap/pointsfm/colmap_sfm.py @@ -174,9 +174,7 @@ def run_colmap_sfm( keypoints_in_order = [] for idx, img_id in enumerate(imagecols.get_img_ids()): img = imagecols.read_image(img_id) - fname_to_save = os.path.join( - image_path, f"image{img_id:08d}.png" - ) + fname_to_save = os.path.join(image_path, f"image{img_id:08d}.png") cv2.imwrite(fname_to_save, img) if keypoints is not None: keypoints_in_order.append(keypoints[img_id]) @@ -251,15 +249,11 @@ def run_colmap_sfm_with_known_poses( imagecols_tmp = copy.deepcopy(imagecols) for idx, img_id in enumerate(imagecols.get_img_ids()): img = imagecols.read_image(img_id) - fname_to_save = os.path.join( - image_path, f"image{img_id:08d}.png" - ) + fname_to_save = os.path.join(image_path, f"image{img_id:08d}.png") cv2.imwrite(fname_to_save, img) if keypoints is not None: keypoints_in_order.append(keypoints[img_id]) - imagecols_tmp.change_image_name( - img_id, f"image{img_id:08d}.png" - ) + imagecols_tmp.change_image_name(img_id, f"image{img_id:08d}.png") # feature extraction and matching run_hloc_matches( diff --git a/limap/pointsfm/functions.py b/limap/pointsfm/functions.py index 0fcbcd79..1a8404a0 100644 --- a/limap/pointsfm/functions.py +++ b/limap/pointsfm/functions.py @@ -39,9 +39,7 @@ def ComputeNeighbors( def 
compute_metainfos(cfg, model, n_neighbors=20): # get neighbors - print( - f"Computing visual neighbors... (n_neighbors = {n_neighbors})" - ) + print(f"Computing visual neighbors... (n_neighbors = {n_neighbors})") neighbors = ComputeNeighbors( model, n_neighbors, diff --git a/limap/pointsfm/model_converter.py b/limap/pointsfm/model_converter.py index def9f23a..cbed2cbc 100644 --- a/limap/pointsfm/model_converter.py +++ b/limap/pointsfm/model_converter.py @@ -51,9 +51,7 @@ def convert_colmap_to_visualsfm(colmap_model_path, output_nvm_file): qvec, tvec = colmap_image.qvec, colmap_image.tvec R = rotation_from_quaternion(qvec) center = -R.transpose() @ tvec - f.write( - f" {qvec[0]} {qvec[1]} {qvec[2]} {qvec[3]}" - ) + f.write(f" {qvec[0]} {qvec[1]} {qvec[2]} {qvec[3]}") f.write(f" {center[0]} {center[1]} {center[2]}") f.write(f" {k1} 0\n") f.write("\n") diff --git a/limap/pointsfm/visualsfm_reader.py b/limap/pointsfm/visualsfm_reader.py index 3e9ff22a..fcf2ef76 100644 --- a/limap/pointsfm/visualsfm_reader.py +++ b/limap/pointsfm/visualsfm_reader.py @@ -11,9 +11,7 @@ def ReadModelVisualSfM(vsfm_path, nvm_file="reconstruction.nvm"): input_file = os.path.join(vsfm_path, nvm_file) if not os.path.exists(input_file): - raise ValueError( - f"Error! Input file {input_file} does not exist." - ) + raise ValueError(f"Error! Input file {input_file} does not exist.") with open(input_file) as f: txt_lines = f.readlines() diff --git a/limap/runners/functions.py b/limap/runners/functions.py index c43a156b..41c47d52 100644 --- a/limap/runners/functions.py +++ b/limap/runners/functions.py @@ -69,9 +69,7 @@ def undistort_images( # start undistortion if n_jobs == -1: n_jobs = os.cpu_count() - print( - f"[LOG] Start undistorting images (n_images = {len(unload_ids)})..." - ) + print(f"[LOG] Start undistorting images (n_images = {len(unload_ids)})...") import cv2 import imagesize import joblib diff --git a/limap/runners/functions_structures.py b/limap/runners/functions_structures.py index 4b7f725d..0abfc01c 100644 --- a/limap/runners/functions_structures.py +++ b/limap/runners/functions_structures.py @@ -22,9 +22,7 @@ def compute_2d_feature_points_sp(imagecols, output_path="tmp/featurepoints"): ### copy images to tmp folder for img_id in imagecols.get_img_ids(): img = imagecols.read_image(img_id) - fname_to_save = os.path.join( - image_path, f"image{img_id:08d}.png" - ) + fname_to_save = os.path.join(image_path, f"image{img_id:08d}.png") cv2.imwrite(fname_to_save, img) # run superpoint diff --git a/limap/undistortion/undistort.py b/limap/undistortion/undistort.py index 34e3cdaf..2073f6b6 100644 --- a/limap/undistortion/undistort.py +++ b/limap/undistortion/undistort.py @@ -1,4 +1,3 @@ - import cv2 from _limap import _base, _undistortion diff --git a/limap/util/evaluation.py b/limap/util/evaluation.py index b49c81d0..d50f9244 100644 --- a/limap/util/evaluation.py +++ b/limap/util/evaluation.py @@ -27,9 +27,7 @@ def eval_imagecols( imagecols, imagecols_gt, max_error=0.01, enable_logging=True ): if enable_logging: - print( - f"[LOG EVAL] imagecols.NumImages() = {imagecols.NumImages()}" - ) + print(f"[LOG EVAL] imagecols.NumImages() = {imagecols.NumImages()}") print( f"[LOG EVAL] imagecols_gt.NumImages() = {imagecols_gt.NumImages()}" ) @@ -61,9 +59,7 @@ def eval_imagecols_relpose( ) assert len(shared_img_ids) == imagecols.NumImages() if enable_logging: - print( - f"[LOG EVAL] imagecols.NumImages() = {imagecols.NumImages()}" - ) + print(f"[LOG EVAL] imagecols.NumImages() = {imagecols.NumImages()}") print( f"[LOG EVAL] 
imagecols_gt.NumImages() = {imagecols_gt.NumImages()}" ) diff --git a/limap/util/io.py b/limap/util/io.py index 04ae330c..7d6816a3 100644 --- a/limap/util/io.py +++ b/limap/util/io.py @@ -251,9 +251,7 @@ def save_l3dpp(folder, imagecols, all_2d_segs): f.write(f"{n_segments}\n") for line_id in range(n_segments): line = segs[line_id] - f.write( - f"{line[0]} {line[1]} {line[2]} {line[3]}\n" - ) + f.write(f"{line[0]} {line[1]} {line[2]} {line[3]}\n") print(f"Writing for L3DPP: {fname}") @@ -397,9 +395,7 @@ def read_lines_from_input(input_file): General reader for lines """ if not os.path.exists(input_file): - raise ValueError( - f"Error! Input file/directory {input_file} not found." - ) + raise ValueError(f"Error! Input file/directory {input_file} not found.") # linetracks folder if not os.path.isfile(input_file): @@ -443,9 +439,7 @@ def save_txt_segments(folder, img_id, segs): f.write(f"{n_segments}\n") for line_id in range(n_segments): line = segs[line_id] - f.write( - f"{line[0]} {line[1]} {line[2]} {line[3]}\n" - ) + f.write(f"{line[0]} {line[1]} {line[2]} {line[3]}\n") def read_txt_segments(folder, img_id): diff --git a/limap/visualize/vis_utils.py b/limap/visualize/vis_utils.py index d2a790b8..6ba20d61 100644 --- a/limap/visualize/vis_utils.py +++ b/limap/visualize/vis_utils.py @@ -429,7 +429,11 @@ def vis_vpresult( colors = [[255, 0, 0]] for line_id, line in enumerate(lines): c = [255, 255, 255] # default color: white - if not vpres.HasVP(line_id) or vp_id >= 0 and vpres.labels[line_id] != vp_id: + if ( + not vpres.HasVP(line_id) + or vp_id >= 0 + and vpres.labels[line_id] != vp_id + ): if not show_original: continue else: diff --git a/runners/hypersim/Hypersim.py b/runners/hypersim/Hypersim.py index 69bc44dd..7f2a3c72 100644 --- a/runners/hypersim/Hypersim.py +++ b/runners/hypersim/Hypersim.py @@ -118,9 +118,7 @@ def read_mpau(self, scene_dir): if key in param_dict: return float(param_dict[key]) else: - raise ValueError( - f"Key {key} not exists in {fname_metascene}" - ) + raise ValueError(f"Key {key} not exists in {fname_metascene}") def set_scene_id(self, scene_id): self.scene_dir = os.path.join(self.data_dir, scene_id) diff --git a/runners/scannet/ScanNet.py b/runners/scannet/ScanNet.py index e6639da0..3de53f26 100644 --- a/runners/scannet/ScanNet.py +++ b/runners/scannet/ScanNet.py @@ -89,9 +89,7 @@ def loadinfos(self): index_list = np.arange(0, n_images, self.stride).tolist() # load intrinsic - fname_meta = os.path.join( - self.scene_dir, f"{self.scene_id}.txt" - ) + fname_meta = os.path.join(self.scene_dir, f"{self.scene_id}.txt") K_orig, img_hw_orig = self.read_intrinsics(fname_meta) h_orig, w_orig = img_hw_orig[0], img_hw_orig[1] # reshape w.r.t max_image_dim @@ -116,14 +114,10 @@ def loadinfos(self): # get imname_list and cameras self.imname_list, self.Rs, self.Ts = [], [], [] for index in index_list: - imname = os.path.join( - self.scene_dir, "color", f"{index}.jpg" - ) + imname = os.path.join(self.scene_dir, "color", f"{index}.jpg") self.imname_list.append(imname) - pose_txt = os.path.join( - self.scene_dir, "pose", f"{index}.txt" - ) + pose_txt = os.path.join(self.scene_dir, "pose", f"{index}.txt") R, T = self.read_pose(pose_txt) self.Rs.append(R) self.Ts.append(T) diff --git a/scripts/aachen_undistort.py b/scripts/aachen_undistort.py index 26046ba0..42eca6fa 100644 --- a/scripts/aachen_undistort.py +++ b/scripts/aachen_undistort.py @@ -20,7 +20,7 @@ def load_list_file(fname): - with open(fname, "r") as f: + with open(fname) as f: lines = f.readlines() imname_list, cameras 
= [], [] for line in lines: @@ -35,7 +35,7 @@ def load_list_file(fname): k1 = float(k[7]) K = np.array([[f, 0, cx], [0, f, cy], [0, 0, 1.0]]) camera = _base.Camera( - K, np.eye(3), np.zeros((3)), np.array([k1, 0, 0, 0, 0]) + K, np.eye(3), np.zeros(3), np.array([k1, 0, 0, 0, 0]) ) imname_list.append(imname) cameras.append(camera) @@ -61,9 +61,7 @@ def process(image_list, cameras): fx = camera_undistorted.K[0, 0] cx, cy = camera_undistorted.K[0, 2], camera_undistorted.K[1, 2] f.write( - "{0} SIMPLE_PINHOLE {1} {2} {3} {4} {5}\n".format( - imname_undist, w, h, fx, cx, cy - ) + f"{imname_undist} SIMPLE_PINHOLE {w} {h} {fx} {cx} {cy}\n" ) diff --git a/scripts/eval_hypersim.py b/scripts/eval_hypersim.py index 00e76333..231e7fdb 100644 --- a/scripts/eval_hypersim.py +++ b/scripts/eval_hypersim.py @@ -62,9 +62,7 @@ def report_error_to_GT(evaluator, lines, vis_err_th=None): print("R: recall, P: precision") for idx, threshold in enumerate(thresholds): print( - "R / P at {0}mm: {1:.2f} / {2:.2f}".format( - int(threshold * 1000), list_recall[idx], list_precision[idx] - ) + f"R / P at {int(threshold * 1000)}mm: {list_recall[idx]:.2f} / {list_precision[idx]:.2f}" ) return evaluator @@ -77,7 +75,7 @@ def read_ply(fname): y = np.asarray(plydata.elements[0].data["y"]) z = np.asarray(plydata.elements[0].data["z"]) points = np.stack([x, y, z], axis=1) - print("number of points: {0}".format(points.shape[0])) + print(f"number of points: {points.shape[0]}") return points @@ -150,14 +148,14 @@ def eval_hypersim( [line.as_array() for line in inlier_lines] ) limapio.save_obj( - "tmp/inliers_th_{0:.4f}.obj".format(threshold), inlier_lines_np + f"tmp/inliers_th_{threshold:.4f}.obj", inlier_lines_np ) outlier_lines = evaluator.ComputeOutlierSegs(lines, threshold) outlier_lines_np = np.array( [line.as_array() for line in outlier_lines] ) limap.save_obj( - "tmp/outliers_th_{0:.4f}.obj".format(threshold), + f"tmp/outliers_th_{threshold:.4f}.obj", outlier_lines_np, ) @@ -286,9 +284,7 @@ def main(): [track.count_lines() for track in linetracks] ) print( - "supporting images / lines: ({0:.2f} / {1:.2f})".format( - sup_image_counts.mean(), sup_line_counts.mean() - ) + f"supporting images / lines: ({sup_image_counts.mean():.2f} / {sup_line_counts.mean():.2f})" ) diff --git a/scripts/eval_tnt.py b/scripts/eval_tnt.py index e1672bd1..d970f014 100644 --- a/scripts/eval_tnt.py +++ b/scripts/eval_tnt.py @@ -33,9 +33,7 @@ def report_error_to_GT(evaluator, lines): list_precision.append(precision) for idx, threshold in enumerate(thresholds): print( - "R / P at {0}mm: {1:.2f} / {2:.2f}".format( - int(threshold * 1000), list_recall[idx], list_precision[idx] - ) + f"R / P at {int(threshold * 1000)}mm: {list_recall[idx]:.2f} / {list_precision[idx]:.2f}" ) return evaluator @@ -54,9 +52,7 @@ def report_pc_recall_for_GT(evaluator, lines): num_inliers = (point_dists < threshold).sum() point_recall = 100 * num_inliers / n_points print( - "{0:.0f}mm, inliers = {1}, point recall = {2:.2f}".format( - int(threshold * 1000), num_inliers, point_recall - ) + f"{int(threshold * 1000):.0f}mm, inliers = {num_inliers}, point recall = {point_recall:.2f}" ) return evaluator @@ -69,7 +65,7 @@ def read_ply(fname): y = np.asarray(plydata.elements[0].data["y"]) z = np.asarray(plydata.elements[0].data["z"]) points = np.stack([x, y, z], axis=1) - print("number of points: {0}".format(points.shape[0])) + print(f"number of points: {points.shape[0]}") return points @@ -119,7 +115,7 @@ def eval_tnt(cfg, lines, ref_lines=None): for line in lines if 
limapvis.test_line_inside_ranges(line, ranges) ] - print("Filtering by range: {0} / {1}".format(len(lines), n_lines)) + print(f"Filtering by range: {len(lines)} / {n_lines}") evaluator = report_error_to_point_cloud( points, lines, kdtree_dir=cfg["kdtree_dir"] ) @@ -133,14 +129,14 @@ def eval_tnt(cfg, lines, ref_lines=None): [line.as_array() for line in inlier_lines] ) limapio.save_obj( - "tmp/inliers_th_{0:.4f}.obj".format(threshold), inlier_lines_np + f"tmp/inliers_th_{threshold:.4f}.obj", inlier_lines_np ) outlier_lines = evaluator.ComputeOutlierSegs(lines, threshold) outlier_lines_np = np.array( [line.as_array() for line in outlier_lines] ) limapio.save_obj( - "tmp/outliers_th_{0:.4f}.obj".format(threshold), + f"tmp/outliers_th_{threshold:.4f}.obj", outlier_lines_np, ) @@ -222,7 +218,7 @@ def parse_config(): def transform_lines(fname, lines): - with open(fname, "r") as f: + with open(fname) as f: flines = f.readlines() mat = [] for fline in flines: @@ -267,7 +263,7 @@ def main(): return ref_lines = None if cfg["reference_dir"] is not None: - ref_lines = read_lines_from_input( + ref_lines = limapio.read_lines_from_input( cfg["reference_dir"], n_visible_views=4 ) eval_tnt(cfg, lines, ref_lines=ref_lines) @@ -281,9 +277,7 @@ def main(): [track.count_lines() for track in linetracks] ) print( - "supporting images / lines: ({0:.2f} / {1:.2f})".format( - sup_image_counts.mean(), sup_line_counts.mean() - ) + f"supporting images / lines: ({sup_image_counts.mean():.2f} / {sup_line_counts.mean():.2f})" ) diff --git a/scripts/tnt_align.py b/scripts/tnt_align.py index 410be42e..52316818 100644 --- a/scripts/tnt_align.py +++ b/scripts/tnt_align.py @@ -13,14 +13,14 @@ def get_imname_list(scene_id): n_images = len(flist) imname_list = [] for idx in range(n_images): - fname = "{0:06d}.jpg".format(idx + 1) + fname = f"{idx + 1:06d}.jpg" # fname = os.path.join(image_path, fname) imname_list.append(fname) return imname_list def read_positions(log_file): - with open(log_file, "r") as f: + with open(log_file) as f: lines = f.readlines() n_images = int(len(lines) / 5) positions = [] @@ -39,7 +39,7 @@ def read_positions(log_file): def read_trans(fname): - with open(fname, "r") as f: + with open(fname) as f: lines = f.readlines() mat = [] for idx in range(4): @@ -53,7 +53,7 @@ def read_trans(fname): def write_geoinfo_txt(fname, imname_list, positions): with open(fname, "w") as f: for imname, pos in zip(imname_list, positions): - f.write("{0} {1} {2} {3}\n".format(imname, pos[0], pos[1], pos[2])) + f.write(f"{imname} {pos[0]} {pos[1]} {pos[2]}\n") def main(): @@ -62,11 +62,11 @@ def main(): # get geo txt imname_list = get_imname_list(scene_id) log_file = os.path.join( - input_meta_path, scene_id, "{0}_COLMAP_SfM.log".format(scene_id) + input_meta_path, scene_id, f"{scene_id}_COLMAP_SfM.log" ) positions = read_positions(log_file) trans_file = os.path.join( - input_meta_path, scene_id, "{0}_trans.txt".format(scene_id) + input_meta_path, scene_id, f"{scene_id}_trans.txt" ) trans_mat = read_trans(trans_file) new_positions = [ diff --git a/scripts/tnt_colmap_runner.py b/scripts/tnt_colmap_runner.py index 6be725f5..347e24f2 100644 --- a/scripts/tnt_colmap_runner.py +++ b/scripts/tnt_colmap_runner.py @@ -22,18 +22,14 @@ database_path = os.path.join(output_folder, "database.db") cmd = ( - "colmap feature_extractor --database_path {0} --image_path {1}".format( - database_path, input_folder - ) + f"colmap feature_extractor --database_path {database_path} --image_path {input_folder}" ) print(cmd) os.system(cmd) - cmd = 
"colmap exhaustive_matcher --database_path {0}".format(database_path) + cmd = f"colmap exhaustive_matcher --database_path {database_path}" print(cmd) os.system(cmd) - cmd = "colmap mapper --database_path {0} --image_path {1} --output_path {2}".format( - database_path, input_folder, sparse_folder - ) + cmd = f"colmap mapper --database_path {database_path} --image_path {input_folder} --output_path {sparse_folder}" print(cmd) os.system(cmd) cmd = "colmap image_undistorter --image_path {0} --input_path {1} --output_path {2} --output_type COLMAP".format( diff --git a/setup.py b/setup.py index db7ac534..3258576b 100644 --- a/setup.py +++ b/setup.py @@ -60,8 +60,7 @@ def build_extension(self, ext): ["cmake", ext.sourcedir] + cmake_args, cwd=self.build_temp ) subprocess.check_call( - ["cmake", "--build", ".", f"--parallel {N_CORES}"] - + build_args, + ["cmake", "--build", ".", f"--parallel {N_CORES}"] + build_args, cwd=self.build_temp, ) diff --git a/visualize_3d_lines.py b/visualize_3d_lines.py index 151b7d76..14f73bfe 100644 --- a/visualize_3d_lines.py +++ b/visualize_3d_lines.py @@ -111,9 +111,7 @@ def main(args): if (not os.path.exists(args.imagecols)) or ( not args.imagecols.endswith(".npy") ): - raise ValueError( - f"Error! Input file {args.imagecols} is not valid" - ) + raise ValueError(f"Error! Input file {args.imagecols} is not valid") imagecols = _base.ImageCollection( limapio.read_npy(args.imagecols).item() ) From 3b27f5ca9fe8b979c90c3b455abc8a9015338c30 Mon Sep 17 00:00:00 2001 From: B1ueber2y Date: Sat, 19 Oct 2024 15:33:50 +0200 Subject: [PATCH 6/9] formatting. --- scripts/aachen_undistort.py | 4 +--- scripts/tnt_colmap_runner.py | 4 +--- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/scripts/aachen_undistort.py b/scripts/aachen_undistort.py index 42eca6fa..775bb8ab 100644 --- a/scripts/aachen_undistort.py +++ b/scripts/aachen_undistort.py @@ -60,9 +60,7 @@ def process(image_list, cameras): assert camera_undistorted.K[0, 0] == camera_undistorted.K[1, 1] fx = camera_undistorted.K[0, 0] cx, cy = camera_undistorted.K[0, 2], camera_undistorted.K[1, 2] - f.write( - f"{imname_undist} SIMPLE_PINHOLE {w} {h} {fx} {cx} {cy}\n" - ) + f.write(f"{imname_undist} SIMPLE_PINHOLE {w} {h} {fx} {cx} {cy}\n") if __name__ == "__main__": diff --git a/scripts/tnt_colmap_runner.py b/scripts/tnt_colmap_runner.py index 347e24f2..8e8efaa9 100644 --- a/scripts/tnt_colmap_runner.py +++ b/scripts/tnt_colmap_runner.py @@ -21,9 +21,7 @@ os.makedirs(dense_folder) database_path = os.path.join(output_folder, "database.db") - cmd = ( - f"colmap feature_extractor --database_path {database_path} --image_path {input_folder}" - ) + cmd = f"colmap feature_extractor --database_path {database_path} --image_path {input_folder}" print(cmd) os.system(cmd) cmd = f"colmap exhaustive_matcher --database_path {database_path}" From 586c5c0434ccac7f09f9b84577670f2da2c9826b Mon Sep 17 00:00:00 2001 From: B1ueber2y Date: Sat, 19 Oct 2024 15:40:43 +0200 Subject: [PATCH 7/9] formatting. 
--- ruff.toml | 2 +- runners/rome16k/triangulation.py | 2 +- runners/visualsfm_triangulation.py | 2 +- scripts/eval_hypersim.py | 3 +-- scripts/eval_tnt.py | 3 +-- 5 files changed, 5 insertions(+), 7 deletions(-) diff --git a/ruff.toml b/ruff.toml index 22d0c2d3..3117e41b 100644 --- a/ruff.toml +++ b/ruff.toml @@ -15,7 +15,7 @@ select = [ # isort "I", ] -ignore = ["SIM117"] +ignore = ["SIM117", "F401"] [lint.per-file-ignores] "scripts/*.py" = ["E"] diff --git a/runners/rome16k/triangulation.py b/runners/rome16k/triangulation.py index caed521f..68b8c500 100644 --- a/runners/rome16k/triangulation.py +++ b/runners/rome16k/triangulation.py @@ -109,7 +109,7 @@ def parse_config(): cfg["list_path"] = args.list_path cfg["model_path"] = args.model_path cfg["info_path"] = args.info_path - if ("max_image_dim" not in cfg.keys()) or args.max_image_dim is not None: + if ("max_image_dim" not in cfg) or args.max_image_dim is not None: cfg["max_image_dim"] = args.max_image_dim # components cfg["component_folder"] = args.component_folder diff --git a/runners/visualsfm_triangulation.py b/runners/visualsfm_triangulation.py index 58cdf699..447487d5 100644 --- a/runners/visualsfm_triangulation.py +++ b/runners/visualsfm_triangulation.py @@ -105,7 +105,7 @@ def parse_config(): cfg["vsfm_path"] = args.vsfm_path cfg["nvm_file"] = args.nvm_file cfg["info_path"] = args.info_path - if ("max_image_dim" not in cfg.keys()) or args.max_image_dim is not None: + if ("max_image_dim" not in cfg) or args.max_image_dim is not None: cfg["max_image_dim"] = args.max_image_dim return cfg diff --git a/scripts/eval_hypersim.py b/scripts/eval_hypersim.py index 231e7fdb..43bb53b2 100644 --- a/scripts/eval_hypersim.py +++ b/scripts/eval_hypersim.py @@ -155,8 +155,7 @@ def eval_hypersim( [line.as_array() for line in outlier_lines] ) limap.save_obj( - f"tmp/outliers_th_{threshold:.4f}.obj", - outlier_lines_np, + f"tmp/outliers_th_{threshold:.4f}.obj", outlier_lines_np ) diff --git a/scripts/eval_tnt.py b/scripts/eval_tnt.py index d970f014..e32aed9f 100644 --- a/scripts/eval_tnt.py +++ b/scripts/eval_tnt.py @@ -136,8 +136,7 @@ def eval_tnt(cfg, lines, ref_lines=None): [line.as_array() for line in outlier_lines] ) limapio.save_obj( - f"tmp/outliers_th_{threshold:.4f}.obj", - outlier_lines_np, + f"tmp/outliers_th_{threshold:.4f}.obj", outlier_lines_np ) From cb62132a462414c77906c4eebb0ec1cef98dd9bb Mon Sep 17 00:00:00 2001 From: B1ueber2y Date: Sat, 19 Oct 2024 16:20:18 +0200 Subject: [PATCH 8/9] formatting. 
--- limap/line2d/GlueStick/extractor.py | 2 +- limap/line2d/GlueStick/matcher.py | 2 +- limap/line2d/L2D2/extractor.py | 2 +- limap/line2d/L2D2/matcher.py | 2 +- limap/line2d/LBD/extractor.py | 2 +- limap/line2d/LBD/matcher.py | 2 +- limap/line2d/LSD/lsd.py | 2 +- limap/line2d/LineTR/extractor.py | 2 +- limap/line2d/LineTR/linetr_pipeline.py | 6 ++-- limap/line2d/LineTR/matcher.py | 2 +- limap/line2d/SOLD2/model/loss.py | 10 +++---- limap/line2d/endpoints/extractor.py | 2 +- limap/line2d/endpoints/matcher.py | 4 +-- limap/runners/line_localization.py | 15 ++++++---- limap/runners/line_triangulation.py | 2 +- limap/util/config.py | 4 +-- limap/util/io.py | 7 ++--- limap/visualize/trackvis/base.py | 4 ++- limap/visualize/trackvis/open3d.py | 4 +-- limap/visualize/trackvis/pyvista.py | 3 +- limap/visualize/vis_bipartite.py | 23 +++++++-------- limap/visualize/vis_lines.py | 36 ++++++++++++------------ limap/visualize/vis_utils.py | 8 ++---- limap/vplib/JLinkage/JLinkage.py | 2 +- limap/vplib/progressivex/progressivex.py | 7 ++--- ruff.toml | 2 +- runners/7scenes/localization.py | 3 +- runners/7scenes/utils.py | 5 ++-- runners/bundler_triangulation.py | 2 +- runners/cambridge/localization.py | 4 ++- runners/cambridge/utils.py | 2 +- runners/colmap_triangulation.py | 2 +- runners/eth3d/loader.py | 2 +- runners/hypersim/Hypersim.py | 2 +- runners/hypersim/loader.py | 2 +- runners/inloc/utils.py | 5 ++-- runners/rome16k/Rome16K.py | 4 +-- runners/scannet/ScanNet.py | 4 +-- runners/scannet/loader.py | 2 +- scripts/aachen_undistort.py | 1 - scripts/eval_hypersim.py | 3 +- scripts/eval_tnt.py | 3 +- 42 files changed, 102 insertions(+), 101 deletions(-) diff --git a/limap/line2d/GlueStick/extractor.py b/limap/line2d/GlueStick/extractor.py index 375cb6f4..112af1d6 100644 --- a/limap/line2d/GlueStick/extractor.py +++ b/limap/line2d/GlueStick/extractor.py @@ -13,7 +13,7 @@ class WireframeExtractor(BaseDetector): def __init__(self, options=BaseDetectorOptions(), device=None): - super(WireframeExtractor, self).__init__(options) + super().__init__(options) self.device = "cuda" if device is None else device self.sp = ( SuperPoint({"weight_path": self.weight_path}).eval().to(self.device) diff --git a/limap/line2d/GlueStick/matcher.py b/limap/line2d/GlueStick/matcher.py index c0ff5a62..0ba132d8 100644 --- a/limap/line2d/GlueStick/matcher.py +++ b/limap/line2d/GlueStick/matcher.py @@ -9,7 +9,7 @@ class GlueStickMatcher(BaseMatcher): def __init__(self, extractor, options=BaseMatcherOptions(), device=None): - super(GlueStickMatcher, self).__init__(extractor, options) + super().__init__(extractor, options) self.device = "cuda" if device is None else device self.gs = GlueStick({}).eval().to(self.device) if self.weight_path is None: diff --git a/limap/line2d/L2D2/extractor.py b/limap/line2d/L2D2/extractor.py index 78bceaa6..d0e3f0ca 100644 --- a/limap/line2d/L2D2/extractor.py +++ b/limap/line2d/L2D2/extractor.py @@ -11,7 +11,7 @@ class L2D2Extractor(BaseDetector): def __init__(self, options=BaseDetectorOptions(), device=None): - super(L2D2Extractor, self).__init__(options) + super().__init__(options) self.mini_batch = 20 self.device = "cuda" if device is None else device if self.weight_path is None: diff --git a/limap/line2d/L2D2/matcher.py b/limap/line2d/L2D2/matcher.py index a246f52e..ae304eb9 100644 --- a/limap/line2d/L2D2/matcher.py +++ b/limap/line2d/L2D2/matcher.py @@ -5,7 +5,7 @@ class L2D2Matcher(BaseMatcher): def __init__(self, extractor, options=BaseMatcherOptions()): - super(L2D2Matcher, 
self).__init__(extractor, options) + super().__init__(extractor, options) def get_module_name(self): return "l2d2" diff --git a/limap/line2d/LBD/extractor.py b/limap/line2d/LBD/extractor.py index ac044a96..46456f90 100644 --- a/limap/line2d/LBD/extractor.py +++ b/limap/line2d/LBD/extractor.py @@ -57,7 +57,7 @@ def to_multiscale_lines(lines): class LBDExtractor(BaseDetector): def __init__(self, options=BaseDetectorOptions()): - super(LBDExtractor, self).__init__(options) + super().__init__(options) def get_module_name(self): return "lbd" diff --git a/limap/line2d/LBD/matcher.py b/limap/line2d/LBD/matcher.py index 9671908e..ffdd5b81 100644 --- a/limap/line2d/LBD/matcher.py +++ b/limap/line2d/LBD/matcher.py @@ -6,7 +6,7 @@ class LBDMatcher(BaseMatcher): def __init__(self, extractor, options=BaseMatcherOptions()): - super(LBDMatcher, self).__init__(extractor, options) + super().__init__(extractor, options) def get_module_name(self): return "lbd" diff --git a/limap/line2d/LSD/lsd.py b/limap/line2d/LSD/lsd.py index 9cbfe509..286f08a7 100644 --- a/limap/line2d/LSD/lsd.py +++ b/limap/line2d/LSD/lsd.py @@ -5,7 +5,7 @@ class LSDDetector(BaseDetector): def __init__(self, options=BaseDetectorOptions()): - super(LSDDetector, self).__init__(options) + super().__init__(options) def get_module_name(self): return "lsd" diff --git a/limap/line2d/LineTR/extractor.py b/limap/line2d/LineTR/extractor.py index 60674796..f3f495c8 100644 --- a/limap/line2d/LineTR/extractor.py +++ b/limap/line2d/LineTR/extractor.py @@ -13,7 +13,7 @@ class LineTRExtractor(BaseDetector): def __init__(self, options=BaseDetectorOptions(), device=None): - super(LineTRExtractor, self).__init__(options) + super().__init__(options) self.device = "cuda" if device is None else device self.sp = SuperPoint({}).eval().to(self.device) self.linetr = ( diff --git a/limap/line2d/LineTR/linetr_pipeline.py b/limap/line2d/LineTR/linetr_pipeline.py index 2d7219cc..f7e4ebde 100755 --- a/limap/line2d/LineTR/linetr_pipeline.py +++ b/limap/line2d/LineTR/linetr_pipeline.py @@ -318,9 +318,9 @@ def process_siamese(data, i): lmatch_scores = torch.from_numpy( distance_matrix[(0,) + np.where(match_mat[0] > 0)] ) - pred["line_match_scores0"] = pred["line_match_scores1"] = ( - -lmatch_scores[None] - ) + pred["line_match_scores0"] = pred[ + "line_match_scores1" + ] = -lmatch_scores[None] return pred def loss(self, pred, data): diff --git a/limap/line2d/LineTR/matcher.py b/limap/line2d/LineTR/matcher.py index 016d0b98..a0a3e8c5 100644 --- a/limap/line2d/LineTR/matcher.py +++ b/limap/line2d/LineTR/matcher.py @@ -10,7 +10,7 @@ class LineTRMatcher(BaseMatcher): def __init__( self, extractor, options=BaseMatcherOptions(), topk=0, device=None ): - super(LineTRMatcher, self).__init__(extractor, options) + super().__init__(extractor, options) self.device = "cuda" if device is None else device self.linetr = ( LineTransformer({"weight_path": self.weight_path}) diff --git a/limap/line2d/SOLD2/model/loss.py b/limap/line2d/SOLD2/model/loss.py index 9bac8fa0..1564dd83 100644 --- a/limap/line2d/SOLD2/model/loss.py +++ b/limap/line2d/SOLD2/model/loss.py @@ -254,7 +254,7 @@ class JunctionDetectionLoss(nn.Module): """Junction detection loss.""" def __init__(self, grid_size, keep_border): - super(JunctionDetectionLoss, self).__init__() + super().__init__() self.grid_size = grid_size self.keep_border = keep_border @@ -268,7 +268,7 @@ class HeatmapLoss(nn.Module): """Heatmap prediction loss.""" def __init__(self, class_weight): - super(HeatmapLoss, self).__init__() + super().__init__() 
self.class_weight = class_weight def forward(self, prediction, target, valid_mask=None): @@ -279,7 +279,7 @@ class RegularizationLoss(nn.Module): """Module for regularization loss.""" def __init__(self): - super(RegularizationLoss, self).__init__() + super().__init__() self.name = "regularization_loss" self.loss_init = torch.zeros([]) @@ -367,7 +367,7 @@ class TripletDescriptorLoss(nn.Module): """Triplet descriptor loss.""" def __init__(self, grid_size, dist_threshold, margin): - super(TripletDescriptorLoss, self).__init__() + super().__init__() self.grid_size = grid_size self.init_dist_threshold = 64 self.dist_threshold = dist_threshold @@ -405,7 +405,7 @@ class TotalLoss(nn.Module): and regularization losses.""" def __init__(self, loss_funcs, loss_weights, weighting_policy): - super(TotalLoss, self).__init__() + super().__init__() # Whether we need to compute the descriptor loss self.compute_descriptors = "descriptor_loss" in loss_funcs.keys() diff --git a/limap/line2d/endpoints/extractor.py b/limap/line2d/endpoints/extractor.py index a39f390b..39d1b709 100644 --- a/limap/line2d/endpoints/extractor.py +++ b/limap/line2d/endpoints/extractor.py @@ -11,7 +11,7 @@ class SuperPointEndpointsExtractor(BaseDetector): def __init__(self, options=BaseDetectorOptions(), device=None): - super(SuperPointEndpointsExtractor, self).__init__(options) + super().__init__(options) self.device = "cuda" if device is None else device self.sp = ( SuperPoint({"weight_path": self.weight_path}).eval().to(self.device) diff --git a/limap/line2d/endpoints/matcher.py b/limap/line2d/endpoints/matcher.py index 6fb3b903..ab041168 100644 --- a/limap/line2d/endpoints/matcher.py +++ b/limap/line2d/endpoints/matcher.py @@ -8,7 +8,7 @@ class NNEndpointsMatcher(BaseMatcher): def __init__(self, extractor, options=BaseMatcherOptions(), device=None): - super(NNEndpointsMatcher, self).__init__(extractor, options) + super().__init__(extractor, options) assert self.extractor.get_module_name() == "superpoint_endpoints" self.device = "cuda" if device is None else device self.sg = ( @@ -116,7 +116,7 @@ def __init__( weights="outdoor", device=None, ): - super(SuperGlueEndpointsMatcher, self).__init__(extractor, options) + super().__init__(extractor, options) assert self.extractor.get_module_name() == "superpoint_endpoints" self.device = "cuda" if device is None else device self.sg = SuperGlue({"weights": weights}).eval().to(self.device) diff --git a/limap/runners/line_localization.py b/limap/runners/line_localization.py index 2f77f731..d74ef56a 100644 --- a/limap/runners/line_localization.py +++ b/limap/runners/line_localization.py @@ -10,7 +10,13 @@ import limap.line2d import limap.runners as _runners import limap.util.io as limapio -from limap.optimize.line_localization.functions import * +from limap.optimize.line_localization.functions import ( + filter_line_2to2_epipolarIoU, + get_reprojection_dist_func, + match_line_2to2_epipolarIoU, + match_line_2to3, + reprojection_filter_matches_2to3, +) def get_hloc_keypoints( @@ -37,10 +43,9 @@ def get_hloc_keypoints( for i, tgt_id in enumerate(target_img_ids): image = ref_sfm.images[tgt_id] - if image.num_points3D() == 0: - if logger: - logger.debug(f"No 3D points found for {image.name}.") - continue + if image.num_points3D() == 0 and logger: + logger.debug(f"No 3D points found for {image.name}.") + continue points3D_ids = np.array( [p.point3D_id if p.has_point3D() else -1 for p in image.points2D] ) diff --git a/limap/runners/line_triangulation.py b/limap/runners/line_triangulation.py index 
4be4d386..2e766a46 100644 --- a/limap/runners/line_triangulation.py +++ b/limap/runners/line_triangulation.py @@ -39,7 +39,7 @@ def line_triangulation(cfg, imagecols, neighbors=None, ranges=None): n_jobs=cfg["n_jobs"], ) # resize cameras - assert imagecols.IsUndistorted() == True + assert imagecols.IsUndistorted() if cfg["max_image_dim"] != -1 and cfg["max_image_dim"] is not None: imagecols.set_max_image_dim(cfg["max_image_dim"]) limapio.save_txt_imname_dict( diff --git a/limap/util/config.py b/limap/util/config.py index 7b8da04c..321924b8 100644 --- a/limap/util/config.py +++ b/limap/util/config.py @@ -54,7 +54,7 @@ def get_val_from_keys(cfg, keys): keys = arg.replace("--", "").split(".") val = get_val_from_keys(cfg, keys) argtype = type(val) - if argtype == bool: + if argtype is bool: # test if it is a store action if idx == len(unknown) - 1 or unknown[idx + 1].startswith("--"): v = True @@ -63,7 +63,7 @@ def get_val_from_keys(cfg, keys): else: v = unknown[idx + 1] if val is not None: - if argtype == list: + if argtype is list: if v.startswith("["): v = eval(v) else: diff --git a/limap/util/io.py b/limap/util/io.py index 7d6816a3..bbede8b7 100644 --- a/limap/util/io.py +++ b/limap/util/io.py @@ -179,8 +179,8 @@ def read_txt_imname_dict(fname): def save_obj(fname, lines): # save obj for CloudCompare visualization - if type(lines) == list: - if type(lines[0]) == np.ndarray: + if isinstance(lines, list): + if isinstance(lines[0], np.ndarray): lines = np.array(lines) else: lines = np.array([line.as_array() for line in lines]) @@ -220,7 +220,6 @@ def save_l3dpp(folder, imagecols, all_2d_segs): if os.path.exists(folder): shutil.rmtree(folder) os.makedirs(folder) - n_images = len(all_2d_segs) assert imagecols.NumImages() == len(all_2d_segs) image_names = imagecols.get_image_name_list() @@ -423,7 +422,7 @@ def read_lines_from_input(input_file): # exception raise ValueError( - f"Error! File {input_dir} not supported. should be txt, obj, or folder to the linetracks." + f"Error! File {input_file} not supported. should be txt, obj, or folder to the linetracks." 
) diff --git a/limap/visualize/trackvis/base.py b/limap/visualize/trackvis/base.py index e22a38df..97902207 100644 --- a/limap/visualize/trackvis/base.py +++ b/limap/visualize/trackvis/base.py @@ -1,4 +1,6 @@ -from ..vis_utils import * +import numpy as np + +from ..vis_utils import test_line_inside_ranges class BaseTrackVisualizer: diff --git a/limap/visualize/trackvis/open3d.py b/limap/visualize/trackvis/open3d.py index a81372d3..22fd7419 100644 --- a/limap/visualize/trackvis/open3d.py +++ b/limap/visualize/trackvis/open3d.py @@ -7,14 +7,14 @@ class Open3DTrackVisualizer(BaseTrackVisualizer): def __init__(self, tracks): - super(Open3DTrackVisualizer, self).__init__(tracks) + super().__init__(tracks) def reset(self): app = o3d.visualization.gui.Application.instance app.initialize() return app - def vis_all_lines(self, n_visible_views=4, width=2, scale=1.0): + def vis_all_lines(self, n_visible_views=4, width=2, ranges=None, scale=1.0): lines = self.get_lines_n_visible_views(n_visible_views) vis = o3d.visualization.Visualizer() vis.create_window(height=1080, width=1920) diff --git a/limap/visualize/trackvis/pyvista.py b/limap/visualize/trackvis/pyvista.py index 873b4340..e43d8eb3 100644 --- a/limap/visualize/trackvis/pyvista.py +++ b/limap/visualize/trackvis/pyvista.py @@ -3,7 +3,7 @@ class PyVistaTrackVisualizer(BaseTrackVisualizer): def __init__(self, tracks): - super(PyVistaTrackVisualizer, self).__init__(tracks) + super().__init__(tracks) self.reset() def reset(self, img_hw=(600, 800)): @@ -21,6 +21,7 @@ def reset(self, img_hw=(600, 800)): def vis_all_lines(self, n_visible_views=4, width=2, scale=1.0): lines = self.get_lines_n_visible_views(n_visible_views) + color = "#ff0000" for line in lines: self.plotter.add_lines(line.as_array() * scale, color, width=width) self.plotter.show() diff --git a/limap/visualize/vis_bipartite.py b/limap/visualize/vis_bipartite.py index d0246275..bdb181df 100644 --- a/limap/visualize/vis_bipartite.py +++ b/limap/visualize/vis_bipartite.py @@ -54,12 +54,11 @@ def open3d_draw_bipartite3d_pointline( for idx, ptrack in bpt3d.get_dict_points().items(): p = ptrack.p deg = bpt3d.pdegree(idx) - if ranges is not None: - if not test_point_inside_ranges(p, ranges): - continue + if (ranges is not None) and (not test_point_inside_ranges(p, ranges)): + continue points.append(p) degrees.append(deg) - points_deg0 = [p for p, deg in zip(points, degrees) if deg == 0] + # points_deg0 = [p for p, deg in zip(points, degrees) if deg == 0] points_deg1 = [p for p, deg in zip(points, degrees) if deg == 1] points_deg2 = [p for p, deg in zip(points, degrees) if deg == 2] points_deg3p = [p for p, deg in zip(points, degrees) if deg >= 3] @@ -75,9 +74,10 @@ def open3d_draw_bipartite3d_pointline( if bpt3d.pdegree(p_id) == 0: continue p = ptrack.p - if ranges is not None: - if not test_point_inside_ranges(p, ranges): - continue + if (ranges is not None) and ( + not test_point_inside_ranges(p, ranges) + ): + continue for line_id in bpt3d.neighbor_lines(p_id): line = bpt3d.line(line_id).line p_proj = line.point_projection(p) @@ -142,7 +142,7 @@ def open3d_draw_bipartite3d_pointline( if planes is not None: for plane_id, plane in enumerate(planes): - plane_c = (0.5, 0.4, 0.6) + # plane_c = (0.5, 0.4, 0.6) mesh = o3d.geometry.TriangleMesh() np_vertices = np.array(plane) np_triangles = np.array([[0, 1, 2], [0, 2, 3]]).astype(np.int32) @@ -174,9 +174,10 @@ def open3d_draw_bipartite3d_vpline(bpt3d, ranges=None): vp_line_sets = {vp_id: [] for vp_id in vp_ids} nonvp_line_set = [] for line_id, 
ltrack in bpt3d.get_dict_lines().items(): - if ranges is not None: - if not test_line_inside_ranges(ltrack.line, ranges): - continue + if (ranges is not None) and ( + not test_line_inside_ranges(ltrack.line, ranges) + ): + continue labels = bpt3d.neighbor_points(line_id) if len(labels) == 0: nonvp_line_set.append(ltrack.line) diff --git a/limap/visualize/vis_lines.py b/limap/visualize/vis_lines.py index bf93d9e8..2919cb7c 100644 --- a/limap/visualize/vis_lines.py +++ b/limap/visualize/vis_lines.py @@ -17,9 +17,8 @@ def pyvista_vis_3d_lines( plotter = pv.Plotter(window_size=[img_hw[1], img_hw[0]]) for line in lines: - if ranges is not None: - if not test_line_inside_ranges(line, ranges): - continue + if (ranges is not None) and (not test_line_inside_ranges(line, ranges)): + continue plotter.add_lines(line.as_array() * scale, "#ff0000", width=width) plotter.show() @@ -39,9 +38,10 @@ def open3d_add_points( o3d_points, o3d_colors = [], [] for idx in range(np.array(points).shape[0]): - if ranges is not None: - if not test_point_inside_ranges(points[idx], ranges): - continue + if (ranges is not None) and ( + not test_point_inside_ranges(points[idx], ranges) + ): + continue o3d_points.append(points[idx] * scale) o3d_colors.append(color) pcd = o3d.geometry.PointCloud() @@ -62,9 +62,8 @@ def open3d_get_line_set( o3d_points, o3d_lines, o3d_colors = [], [], [] counter = 0 for line in lines: - if ranges is not None: - if not test_line_inside_ranges(line, ranges): - continue + if (ranges is not None) and (not test_line_inside_ranges(line, ranges)): + continue o3d_points.append(line.start * scale) o3d_points.append(line.end * scale) o3d_lines.append([2 * counter, 2 * counter + 1]) @@ -91,9 +90,8 @@ def open3d_add_line_set( o3d_points, o3d_lines, o3d_colors = [], [], [] counter = 0 for line in lines: - if ranges is not None: - if not test_line_inside_ranges(line, ranges): - continue + if (ranges is not None) and (not test_line_inside_ranges(line, ranges)): + continue o3d_points.append(line.start * scale) o3d_points.append(line.end * scale) o3d_lines.append([2 * counter, 2 * counter + 1]) @@ -135,9 +133,10 @@ def open3d_get_cameras( ) for img_id in imagecols.get_img_ids(): camimage = imagecols.camimage(img_id) - if ranges is not None: - if not test_point_inside_ranges(camimage.pose.center(), ranges): - continue + if (ranges is not None) and ( + not test_point_inside_ranges(camimage.pose.center(), ranges) + ): + continue T = np.eye(4) T[:3, :3] = camimage.R() T[:3, 3] = camimage.T() * scale @@ -172,9 +171,10 @@ def open3d_add_cameras( ) for img_id in imagecols.get_img_ids(): camimage = imagecols.camimage(img_id) - if ranges is not None: - if not test_point_inside_ranges(camimage.pose.center(), ranges): - continue + if (ranges is not None) and ( + not test_point_inside_ranges(camimage.pose.center(), ranges) + ): + continue T = np.eye(4) T[:3, :3] = camimage.R() T[:3, 3] = camimage.T() * scale diff --git a/limap/visualize/vis_utils.py b/limap/visualize/vis_utils.py index 6ba20d61..13e2927a 100644 --- a/limap/visualize/vis_utils.py +++ b/limap/visualize/vis_utils.py @@ -300,17 +300,13 @@ def make_bigimage(imgs, pad=20): def test_point_inside_ranges(point, ranges): point = np.array(point) - if ~np.all(point > ranges[0]) or ~np.all(point < ranges[1]): - return False - return True + return np.all(point > ranges[0]) and np.all(point < ranges[1]) def test_line_inside_ranges(line, ranges): if not test_point_inside_ranges(line.start, ranges): return False - if not test_point_inside_ranges(line.end, ranges): - return 
False - return True + return test_point_inside_ranges(line.end, ranges) def compute_robust_range(arr, range_robust=[0.05, 0.95], k_stretch=2.0): diff --git a/limap/vplib/JLinkage/JLinkage.py b/limap/vplib/JLinkage/JLinkage.py index d512d838..acdc362a 100644 --- a/limap/vplib/JLinkage/JLinkage.py +++ b/limap/vplib/JLinkage/JLinkage.py @@ -5,7 +5,7 @@ class JLinkage(BaseVPDetector): def __init__(self, cfg_jlinkage, options=BaseVPDetectorOptions()): - super(JLinkage, self).__init__(options) + super().__init__(options) self.detector = _vplib.JLinkage(cfg_jlinkage) def get_module_name(self): diff --git a/limap/vplib/progressivex/progressivex.py b/limap/vplib/progressivex/progressivex.py index 2573e09b..f7a23298 100644 --- a/limap/vplib/progressivex/progressivex.py +++ b/limap/vplib/progressivex/progressivex.py @@ -15,7 +15,7 @@ class ProgressiveX(BaseVPDetector): def __init__(self, cfg, options=BaseVPDetectorOptions()): - super(ProgressiveX, self).__init__(options) + super().__init__(options) self.options = ProgressiveXOptions() for fld in self.options._fields: if fld in cfg: @@ -30,10 +30,7 @@ def detect_vp(self, lines, camview=None): # Initialize labels = (np.ones(len(lines)) * -1).astype(int) - flags = [ - True if line.length() >= self.options.min_length else False - for line in lines - ] + flags = [line.length() >= self.options.min_length for line in lines] # Progressive-X inference lines = [ diff --git a/ruff.toml b/ruff.toml index 3117e41b..fdd86d74 100644 --- a/ruff.toml +++ b/ruff.toml @@ -15,7 +15,7 @@ select = [ # isort "I", ] -ignore = ["SIM117", "F401"] +ignore = ["SIM117", "E501", "F401", "F403", "UP030", "B007", "E402", "E741", "B006", "B008"] [lint.per-file-ignores] "scripts/*.py" = ["E"] diff --git a/runners/7scenes/localization.py b/runners/7scenes/localization.py index 83122281..465b1cf0 100644 --- a/runners/7scenes/localization.py +++ b/runners/7scenes/localization.py @@ -249,7 +249,8 @@ def main(): "inliers": inliers, } - final_poses = _runners.line_localization( + # can return final_poses + _runners.line_localization( cfg, imagecols_train, imagecols_query, diff --git a/runners/7scenes/utils.py b/runners/7scenes/utils.py index f048e1e4..df0e6dbb 100644 --- a/runners/7scenes/utils.py +++ b/runners/7scenes/utils.py @@ -188,7 +188,7 @@ def correct_sfm_with_gt_depth(sfm_path, depth_folder_path, output_path): class DepthReader(_base.BaseDepthReader): def __init__(self, filename, depth_folder): - super(DepthReader, self).__init__(filename) + super().__init__(filename) self.depth_folder = depth_folder def read(self, filename): @@ -237,7 +237,7 @@ def read_scene_7scenes(cfg, root_path, model_path, image_path, n_neighbors=20): def get_result_filenames(cfg, use_dense_depth=False): ransac_cfg = cfg["ransac"] ransac_postfix = "" - if ransac_cfg["method"] != None: + if ransac_cfg["method"] is not None: if ransac_cfg["method"] in ["ransac", "hybrid"]: ransac_postfix = "_{}".format(ransac_cfg["method"]) elif ransac_cfg["method"] == "solver": @@ -396,7 +396,6 @@ def run_hloc_7scenes( # Read coarse poses poses = {} with open(results_file) as f: - lines = [] for data in f.read().rstrip().split("\n"): data = data.split() name = data[0] diff --git a/runners/bundler_triangulation.py b/runners/bundler_triangulation.py index b980d708..37a4c1c1 100644 --- a/runners/bundler_triangulation.py +++ b/runners/bundler_triangulation.py @@ -118,7 +118,7 @@ def parse_config(): cfg["list_path"] = args.list_path cfg["model_path"] = args.model_path cfg["info_path"] = args.info_path - if ("max_image_dim" not in 
cfg.keys()) or args.max_image_dim is not None: + if ("max_image_dim" not in cfg) or args.max_image_dim is not None: cfg["max_image_dim"] = args.max_image_dim return cfg diff --git a/runners/cambridge/localization.py b/runners/cambridge/localization.py index 1f521c1e..633e0786 100644 --- a/runners/cambridge/localization.py +++ b/runners/cambridge/localization.py @@ -107,6 +107,7 @@ def parse_config(): cfg["n_neighbors_loc"] = args.num_loc # Output path for LIMAP results (tmp) if cfg["output_dir"] is None: + scene_id = os.path.basename(cfg["vsfm_path"]) cfg["output_dir"] = f"tmp/cambridge/{scene_id}" # Output folder for LIMAP linetracks (in tmp) if cfg["output_folder"] is None: @@ -231,7 +232,8 @@ def main(): "inliers": inliers, } - final_poses = _runners.line_localization( + # can return final_poses + _runners.line_localization( cfg, imagecols_train, imagecols_query, diff --git a/runners/cambridge/utils.py b/runners/cambridge/utils.py index c5f01b16..d2d10dea 100644 --- a/runners/cambridge/utils.py +++ b/runners/cambridge/utils.py @@ -153,7 +153,7 @@ def create_query_list(imagecols, out): def get_result_filenames(cfg, args): ransac_cfg = cfg["ransac"] ransac_postfix = "" - if ransac_cfg["method"] != None: + if ransac_cfg["method"] is not None: if ransac_cfg["method"] in ["ransac", "hybrid"]: ransac_postfix = "_{}".format(ransac_cfg["method"]) elif ransac_cfg["method"] == "solver": diff --git a/runners/colmap_triangulation.py b/runners/colmap_triangulation.py index 9b3aba1b..1c623478 100644 --- a/runners/colmap_triangulation.py +++ b/runners/colmap_triangulation.py @@ -125,7 +125,7 @@ def parse_config(): cfg["info_path"] = args.info_path if cfg["colmap_path"] is None and cfg["info_path"] is None: raise ValueError("Error! colmap_path unspecified.") - if ("max_image_dim" not in cfg.keys()) or args.max_image_dim is not None: + if ("max_image_dim" not in cfg) or args.max_image_dim is not None: cfg["max_image_dim"] = args.max_image_dim return cfg diff --git a/runners/eth3d/loader.py b/runners/eth3d/loader.py index 5f1bc04e..bdb55cf7 100644 --- a/runners/eth3d/loader.py +++ b/runners/eth3d/loader.py @@ -13,7 +13,7 @@ class ETH3DDepthReader(_base.BaseDepthReader): def __init__(self, filename): - super(ETH3DDepthReader, self).__init__(filename) + super().__init__(filename) def read(self, filename): ref_depth = cv2.imread(filename, cv2.IMREAD_ANYDEPTH) diff --git a/runners/hypersim/Hypersim.py b/runners/hypersim/Hypersim.py index 7f2a3c72..eaac49d7 100644 --- a/runners/hypersim/Hypersim.py +++ b/runners/hypersim/Hypersim.py @@ -252,7 +252,7 @@ def set_camera_properties(cls, cam, Rt): def get_camera_frustrum(cls, Rt): h, w = cls.h, cls.w view_camera = pv.Camera() - set_camera_properties(view_camera, Rt) + Hypersim.set_camera_properties(view_camera, Rt) view_camera.clipping_range = (1e-5, 0.5) frustum = view_camera.view_frustum(w / h) return frustum diff --git a/runners/hypersim/loader.py b/runners/hypersim/loader.py index a1efac4c..9cb3b400 100644 --- a/runners/hypersim/loader.py +++ b/runners/hypersim/loader.py @@ -14,7 +14,7 @@ class HypersimDepthReader(_base.BaseDepthReader): def __init__(self, filename, K, img_hw): - super(HypersimDepthReader, self).__init__(filename) + super().__init__(filename) self.K = K self.img_hw = img_hw diff --git a/runners/inloc/utils.py b/runners/inloc/utils.py index 223b8d62..59a87303 100644 --- a/runners/inloc/utils.py +++ b/runners/inloc/utils.py @@ -14,7 +14,7 @@ class InLocP3DReader(_base.BaseP3DReader): def __init__(self, filename): - super(InLocP3DReader, 
self).__init__(filename) + super().__init__(filename) def read(self, filename): scan = loadmat(str(filename) + ".mat")["XYZcut"] @@ -117,7 +117,7 @@ def read_dataset_inloc( def get_result_filenames(cfg, use_temporal=True): ransac_cfg = cfg["ransac"] ransac_postfix = "" - if ransac_cfg["method"] != None: + if ransac_cfg["method"] is not None: if ransac_cfg["method"] in ["ransac", "hybrid"]: ransac_postfix = "_{}".format(ransac_cfg["method"]) elif ransac_cfg["method"] == "solver": @@ -196,7 +196,6 @@ def run_hloc_inloc( # Read coarse poses and inliers poses = {} with open(results_file) as f: - lines = [] for data in f.read().rstrip().split("\n"): data = data.split() name = data[0] diff --git a/runners/rome16k/Rome16K.py b/runners/rome16k/Rome16K.py index 528b14ab..8a625a33 100644 --- a/runners/rome16k/Rome16K.py +++ b/runners/rome16k/Rome16K.py @@ -64,13 +64,13 @@ def count_components(self): return len(self.component_names) def count_images_in_component(self, c_id): - if type(c_id) == str: + if isinstance(c_id, str): return len(self.components[c_id]) else: return self.count_images_in_component(self.component_names[c_id]) def get_images_in_component(self, c_id): - if type(c_id) == str: + if isinstance(c_id, str): images = self.components[c_id] images = [self.get_fullname(imname) for imname in images] imname_list = [] diff --git a/runners/scannet/ScanNet.py b/runners/scannet/ScanNet.py index 3de53f26..d39ab413 100644 --- a/runners/scannet/ScanNet.py +++ b/runners/scannet/ScanNet.py @@ -83,8 +83,8 @@ def read_pose(self, pose_txt): def loadinfos(self): img_folder = os.path.join(self.scene_dir, "color") - pose_folder = os.path.join(self.scene_dir, "pose") - depth_folder = os.path.join(self.scene_dir, "depth") + # pose_folder = os.path.join(self.scene_dir, "pose") + # depth_folder = os.path.join(self.scene_dir, "depth") n_images = len(os.listdir(img_folder)) index_list = np.arange(0, n_images, self.stride).tolist() diff --git a/runners/scannet/loader.py b/runners/scannet/loader.py index a51213e4..deafb89f 100644 --- a/runners/scannet/loader.py +++ b/runners/scannet/loader.py @@ -12,7 +12,7 @@ class ScanNetDepthReader(_base.BaseDepthReader): def __init__(self, filename): - super(ScanNetDepthReader, self).__init__(filename) + super().__init__(filename) def read(self, filename): ref_depth = cv2.imread(filename, cv2.IMREAD_UNCHANGED) diff --git a/scripts/aachen_undistort.py b/scripts/aachen_undistort.py index 775bb8ab..7746af57 100644 --- a/scripts/aachen_undistort.py +++ b/scripts/aachen_undistort.py @@ -29,7 +29,6 @@ def load_list_file(fname): imname = k[0] # Aachen only uses simple radial model assert k[1] == "SIMPLE_RADIAL" - h, w = int(k[2]), int(k[3]) f = float(k[4]) cx, cy = float(k[5]), float(k[6]) k1 = float(k[7]) diff --git a/scripts/eval_hypersim.py b/scripts/eval_hypersim.py index 43bb53b2..e505b830 100644 --- a/scripts/eval_hypersim.py +++ b/scripts/eval_hypersim.py @@ -48,7 +48,6 @@ def report_error_to_GT(evaluator, lines, vis_err_th=None): if vis_err_th is not None: visualize_error_to_GT(evaluator, lines, vis_err_th) lengths = np.array([line.length() for line in lines]) - sum_length = lengths.sum() thresholds = [0.001, 0.005, 0.01] list_recall, list_precision = [], [] for threshold in thresholds: @@ -154,7 +153,7 @@ def eval_hypersim( outlier_lines_np = np.array( [line.as_array() for line in outlier_lines] ) - limap.save_obj( + limapio.save_obj( f"tmp/outliers_th_{threshold:.4f}.obj", outlier_lines_np ) diff --git a/scripts/eval_tnt.py b/scripts/eval_tnt.py index e32aed9f..00d7cd31 
100644 --- a/scripts/eval_tnt.py +++ b/scripts/eval_tnt.py @@ -20,7 +20,6 @@ def plot_curve(fname, thresholds, data): def report_error_to_GT(evaluator, lines): lengths = np.array([line.length() for line in lines]) - sum_length = lengths.sum() thresholds = np.array([0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1.0]) list_recall, list_precision = [], [] for threshold in thresholds: @@ -82,6 +81,8 @@ def write_ply(fname, points): def report_error_to_mesh(mesh_fname, lines): + # Hypersim + MPAU = 0.02539999969303608 evaluator = _eval.MeshEvaluator(mesh_fname, MPAU) return report_error_to_GT(evaluator, lines) From 9bae926183f6f197e7d0b75889f801eb1370d077 Mon Sep 17 00:00:00 2001 From: B1ueber2y Date: Sat, 19 Oct 2024 16:51:06 +0200 Subject: [PATCH 9/9] fix formatting. --- limap/base/align.py | 12 +--- limap/features/extractors.py | 8 +-- limap/features/models/vggnet.py | 4 ++ limap/fitting/fitting.py | 2 +- limap/line2d/DeepLSD/deeplsd.py | 2 +- limap/line2d/HAWPv3/hawp.py | 2 +- limap/line2d/L2D2/RAL_net_cov.py | 4 +- limap/line2d/LineTR/linetr_pipeline.py | 8 +-- limap/line2d/LineTR/nn_matcher.py | 1 - limap/line2d/SOLD2/model/loss.py | 2 +- limap/line2d/SOLD2/model/lr_scheduler.py | 4 +- limap/line2d/SOLD2/model/metrics.py | 8 +-- limap/line2d/SOLD2/model/model_util.py | 2 +- limap/line2d/SOLD2/model/nets/backbone.py | 4 +- .../SOLD2/model/nets/descriptor_decoder.py | 2 +- .../SOLD2/model/nets/heatmap_decoder.py | 2 +- .../SOLD2/model/nets/junction_decoder.py | 2 +- .../line2d/SOLD2/model/nets/lcnn_hourglass.py | 8 +-- limap/line2d/SOLD2/sold2.py | 4 +- limap/line2d/SOLD2/train.py | 16 ++--- limap/line2d/TP_LSD/tp_lsd.py | 2 +- limap/line2d/base_detector.py | 2 +- limap/optimize/functions.py | 71 +------------------ .../hybrid_bundle_adjustment/solve.py | 5 +- limap/optimize/line_localization/functions.py | 3 +- .../line_refinement/line_refinement.py | 19 ----- limap/point2d/superpoint/main.py | 17 ----- limap/pointsfm/bundler_reader.py | 2 +- limap/pointsfm/colmap_reader.py | 16 +++-- limap/pointsfm/database.py | 18 ++--- limap/pointsfm/visualsfm_reader.py | 2 +- limap/runners/functions.py | 4 +- limap/runners/line_fitnmerge.py | 5 +- limap/runners/line_localization.py | 2 +- ruff.toml | 12 +++- scripts/convert_model.py | 2 +- 36 files changed, 89 insertions(+), 190 deletions(-) diff --git a/limap/base/align.py b/limap/base/align.py index ccfe747d..072fabaf 100644 --- a/limap/base/align.py +++ b/limap/base/align.py @@ -14,7 +14,7 @@ def umeyama_alignment(x, y, with_scale=True): :return: r, t, c - rotation matrix, translation vector and scale factor """ if x.shape != y.shape: - assert False, "x.shape not equal to y.shape" + raise AssertionError("x.shape not equal to y.shape") # m = dimension, n = nr. 
of data points m, n = x.shape @@ -54,10 +54,7 @@ def umeyama_alignment(x, y, with_scale=True): def align_imagecols_umeyama(imagecols_src, imagecols_dst): # assertion check assert imagecols_src.NumImages() == imagecols_dst.NumImages() - assert ( - np.all(imagecols_src.get_img_ids() == imagecols_dst.get_img_ids()) - == True - ) + assert np.all(imagecols_src.get_img_ids() == imagecols_dst.get_img_ids()) # fit transformation xyz_src = np.array(imagecols_src.get_locations()).transpose() @@ -84,10 +81,7 @@ def align_imagecols_colmap( # assertion check assert imagecols_src.NumImages() == imagecols_dst.NumImages() - assert ( - np.all(imagecols_src.get_img_ids() == imagecols_dst.get_img_ids()) - == True - ) + assert np.all(imagecols_src.get_img_ids() == imagecols_dst.get_img_ids()) limapio.check_makedirs(tmp_folder) src_folder = os.path.join(tmp_folder, "source") diff --git a/limap/features/extractors.py b/limap/features/extractors.py index 2323a0aa..05e94468 100644 --- a/limap/features/extractors.py +++ b/limap/features/extractors.py @@ -10,7 +10,7 @@ from _limap import _features from torchvision import transforms -from .models.s2dnet import * +from .models.s2dnet import S2DNet from .models.vggnet import VGGNet RGB_mean = [0.485, 0.456, 0.406] @@ -44,7 +44,7 @@ class Extractor(torch.nn.Module): """ def __init__(self, device: str): - super(Extractor, self).__init__() + super().__init__() self.device = device self.num_levels = 0 self.model = None @@ -87,7 +87,7 @@ def __init__(self, device: str, *args, **kwargs): self.num_levels = 1 self.channels = [128] # ,128] #[128,128] self.l2_normalize = True - if "output_channels" in kwargs.keys(): + if "output_channels" in kwargs: self.channels = [min(kwargs["output_channels"], self.channels[0])] def extract_featuremaps(self, image_batch: torch.Tensor) -> list: @@ -148,7 +148,7 @@ def __init__(self, device: str, *args, **kwargs): self.num_levels = 1 # 2 self.channels = [64] # [128,128] self.l2_normalize = True - if "output_channels" in kwargs.keys(): + if "output_channels" in kwargs: self.channels = [min(kwargs["output_channels"], self.channels[0])] def extract_featuremaps(self, image_batch: torch.Tensor) -> list: diff --git a/limap/features/models/vggnet.py b/limap/features/models/vggnet.py index f4ab7134..fee891cc 100644 --- a/limap/features/models/vggnet.py +++ b/limap/features/models/vggnet.py @@ -1,3 +1,7 @@ +import torch +import torch.nn as nn +import torchvision.models as models + from limap.features.models.s2dnet import * diff --git a/limap/fitting/fitting.py b/limap/fitting/fitting.py index 95c38944..9dbb13d5 100644 --- a/limap/fitting/fitting.py +++ b/limap/fitting/fitting.py @@ -63,7 +63,7 @@ def estimate_seg3d_from_points3d( var2d=5.0, ): h, w = camview.h(), camview.w() - K, R, T = camview.K(), camview.R(), camview.T() + R, T = camview.R(), camview.T() # get points and depths seg1_pts = np.linspace( diff --git a/limap/line2d/DeepLSD/deeplsd.py b/limap/line2d/DeepLSD/deeplsd.py index 7836e3d3..fffa5857 100644 --- a/limap/line2d/DeepLSD/deeplsd.py +++ b/limap/line2d/DeepLSD/deeplsd.py @@ -9,7 +9,7 @@ class DeepLSDDetector(BaseDetector): def __init__(self, options=BaseDetectorOptions()): - super(DeepLSDDetector, self).__init__(options) + super().__init__(options) conf = { "detect_lines": True, diff --git a/limap/line2d/HAWPv3/hawp.py b/limap/line2d/HAWPv3/hawp.py index 2e73aed9..c27dc421 100644 --- a/limap/line2d/HAWPv3/hawp.py +++ b/limap/line2d/HAWPv3/hawp.py @@ -11,7 +11,7 @@ class HAWPv3Detector(BaseDetector): def __init__(self, 
options=BaseDetectorOptions()): - super(HAWPv3Detector, self).__init__(options) + super().__init__(options) # Load the HAWPv3 model if self.weight_path is None: ckpt = os.path.join( diff --git a/limap/line2d/L2D2/RAL_net_cov.py b/limap/line2d/L2D2/RAL_net_cov.py index a4d8fa9e..cf215372 100644 --- a/limap/line2d/L2D2/RAL_net_cov.py +++ b/limap/line2d/L2D2/RAL_net_cov.py @@ -5,7 +5,7 @@ class L2Norm(nn.Module): def __init__(self): - super(L2Norm, self).__init__() + super().__init__() self.eps = 1e-10 def forward(self, x): @@ -16,7 +16,7 @@ def forward(self, x): class L2Net(nn.Module): def __init__(self): - super(L2Net, self).__init__() + super().__init__() self.features = nn.Sequential( nn.Conv2d(1, 32, kernel_size=3, padding=1, bias=False), nn.BatchNorm2d(32, affine=False), diff --git a/limap/line2d/LineTR/linetr_pipeline.py b/limap/line2d/LineTR/linetr_pipeline.py index f7e4ebde..3d5e8864 100755 --- a/limap/line2d/LineTR/linetr_pipeline.py +++ b/limap/line2d/LineTR/linetr_pipeline.py @@ -133,9 +133,7 @@ def process_siamese(data, i): klines_cv = self.detect_lsd_lines(data["image0"])[0] - valid_lines0 = ( - data["valid_lines0"] if "valid_lines0" in data.keys() else None - ) + valid_lines0 = data.get("valid_lines0", None) klines0 = self.linetransformer.preprocess( klines_cv, image_shape, pred0, valid_lines0 ) @@ -153,9 +151,7 @@ def process_siamese(data, i): ) klines_cv = self.detect_lsd_lines(data["image1"])[0] - valid_lines1 = ( - data["valid_lines1"] if "valid_lines1" in data.keys() else None - ) + valid_lines1 = data.get("valid_lines1", None) klines1 = self.linetransformer.preprocess( klines_cv, image_shape, pred1, valid_lines1 ) diff --git a/limap/line2d/LineTR/nn_matcher.py b/limap/line2d/LineTR/nn_matcher.py index 9ac6be27..837644e4 100755 --- a/limap/line2d/LineTR/nn_matcher.py +++ b/limap/line2d/LineTR/nn_matcher.py @@ -35,7 +35,6 @@ def nn_matcher_distmat(dist_mat, nn_thresh, is_mutual_NN=True): def nn_matcher(desc0, desc1, nn_thresh=0.8, is_mutual_NN=True): """Nearest Neighbor Matching using two descriptors""" d, num0 = desc0.shape - num1 = desc1.shape[1] desc0_, desc1_ = desc0.T, desc1.T dmat = desc0_ @ desc1_.T diff --git a/limap/line2d/SOLD2/model/loss.py b/limap/line2d/SOLD2/model/loss.py index 1564dd83..e1857b9d 100644 --- a/limap/line2d/SOLD2/model/loss.py +++ b/limap/line2d/SOLD2/model/loss.py @@ -407,7 +407,7 @@ class TotalLoss(nn.Module): def __init__(self, loss_funcs, loss_weights, weighting_policy): super().__init__() # Whether we need to compute the descriptor loss - self.compute_descriptors = "descriptor_loss" in loss_funcs.keys() + self.compute_descriptors = "descriptor_loss" in loss_funcs self.loss_funcs = loss_funcs self.loss_weights = loss_weights diff --git a/limap/line2d/SOLD2/model/lr_scheduler.py b/limap/line2d/SOLD2/model/lr_scheduler.py index 8f694841..3393cf59 100644 --- a/limap/line2d/SOLD2/model/lr_scheduler.py +++ b/limap/line2d/SOLD2/model/lr_scheduler.py @@ -8,10 +8,10 @@ def get_lr_scheduler(lr_decay, lr_decay_cfg, optimizer): """Get the learning rate scheduler according to the config.""" # If no lr_decay is specified => return None - if (lr_decay == False) or (lr_decay_cfg is None): + if (not lr_decay) or (lr_decay_cfg is None): schduler = None # Exponential decay - elif (lr_decay == True) and (lr_decay_cfg["policy"] == "exp"): + elif lr_decay and (lr_decay_cfg["policy"] == "exp"): schduler = torch.optim.lr_scheduler.ExponentialLR( optimizer, gamma=lr_decay_cfg["gamma"] ) diff --git a/limap/line2d/SOLD2/model/metrics.py 
b/limap/line2d/SOLD2/model/metrics.py
index aaa3c9f8..8b697289 100644
--- a/limap/line2d/SOLD2/model/metrics.py
+++ b/limap/line2d/SOLD2/model/metrics.py
@@ -74,7 +74,7 @@ def __init__(
 
         # Initialize the results
         self.metric_results = {}
-        for key in self.metric_table.keys():
+        for key in self.metric_table:
             self.metric_results[key] = 0.0
 
     def evaluate(
@@ -237,13 +237,13 @@ def update(self, metrics, loss_dict=None, num_samples=1):
         )
 
         # Update all the losses
-        for loss in loss_dict.keys():
+        for loss in loss_dict:
             self.metric_results[loss] += num_samples * loss_dict[loss]
 
         # Update all pr counts
         for pr_met in self.supported_pr_metrics:
             # Update all tp, tn, fp, fn, precision, and recall.
-            for key in metrics.metric_results[pr_met].keys():
+            for key in metrics.metric_results[pr_met]:
                 # Update each interval
                 for idx in range(len(self.metric_results[pr_met][key])):
                     self.metric_results[pr_met][key][idx] += (
@@ -252,7 +252,7 @@ def update(self, metrics, loss_dict=None, num_samples=1):
 
     def average(self):
         results = {}
-        for met in self.metric_results.keys():
+        for met in self.metric_results:
             # Skip pr curve metrics
             if met not in self.supported_pr_metrics:
                 results[met] = self.metric_results[met] / self.count
diff --git a/limap/line2d/SOLD2/model/model_util.py b/limap/line2d/SOLD2/model/model_util.py
index 4a44c55f..5910756a 100644
--- a/limap/line2d/SOLD2/model/model_util.py
+++ b/limap/line2d/SOLD2/model/model_util.py
@@ -60,7 +60,7 @@ class SOLD2Net(nn.Module):
     """Full network for SOLD²."""
 
     def __init__(self, model_cfg):
-        super(SOLD2Net, self).__init__()
+        super().__init__()
         self.name = model_cfg["model_name"]
         self.cfg = model_cfg
diff --git a/limap/line2d/SOLD2/model/nets/backbone.py b/limap/line2d/SOLD2/model/nets/backbone.py
index 5d7c7d83..3d488505 100644
--- a/limap/line2d/SOLD2/model/nets/backbone.py
+++ b/limap/line2d/SOLD2/model/nets/backbone.py
@@ -15,7 +15,7 @@ def __init__(
         num_blocks=1,
         num_classes=5,
     ):
-        super(HourglassBackbone, self).__init__()
+        super().__init__()
         self.head = MultitaskHead
         self.net = hg(
             **{
@@ -36,7 +36,7 @@ class SuperpointBackbone(nn.Module):
     """SuperPoint backbone."""
 
     def __init__(self):
-        super(SuperpointBackbone, self).__init__()
+        super().__init__()
         self.relu = torch.nn.ReLU(inplace=True)
         self.pool = torch.nn.MaxPool2d(kernel_size=2, stride=2)
         c1, c2, c3, c4 = 64, 64, 128, 128
diff --git a/limap/line2d/SOLD2/model/nets/descriptor_decoder.py b/limap/line2d/SOLD2/model/nets/descriptor_decoder.py
index 714b0678..0a8a9105 100644
--- a/limap/line2d/SOLD2/model/nets/descriptor_decoder.py
+++ b/limap/line2d/SOLD2/model/nets/descriptor_decoder.py
@@ -6,7 +6,7 @@ class SuperpointDescriptor(nn.Module):
     """Descriptor decoder based on the SuperPoint arcihtecture."""
 
     def __init__(self, input_feat_dim=128):
-        super(SuperpointDescriptor, self).__init__()
+        super().__init__()
         self.relu = torch.nn.ReLU(inplace=True)
         self.convPa = torch.nn.Conv2d(
             input_feat_dim, 256, kernel_size=3, stride=1, padding=1
diff --git a/limap/line2d/SOLD2/model/nets/heatmap_decoder.py b/limap/line2d/SOLD2/model/nets/heatmap_decoder.py
index f8f910d3..806cd432 100644
--- a/limap/line2d/SOLD2/model/nets/heatmap_decoder.py
+++ b/limap/line2d/SOLD2/model/nets/heatmap_decoder.py
@@ -5,7 +5,7 @@ class PixelShuffleDecoder(nn.Module):
     """Pixel shuffle decoder."""
 
     def __init__(self, input_feat_dim=128, num_upsample=2, output_channel=2):
-        super(PixelShuffleDecoder, self).__init__()
+        super().__init__()
 
         # Get channel parameters
         self.channel_conf = self.get_channel_conf(num_upsample)
diff --git 
a/limap/line2d/SOLD2/model/nets/junction_decoder.py b/limap/line2d/SOLD2/model/nets/junction_decoder.py index f348d56e..73f2b596 100644 --- a/limap/line2d/SOLD2/model/nets/junction_decoder.py +++ b/limap/line2d/SOLD2/model/nets/junction_decoder.py @@ -6,7 +6,7 @@ class SuperpointDecoder(nn.Module): """Junction decoder based on the SuperPoint architecture.""" def __init__(self, input_feat_dim=128, backbone_name="lcnn"): - super(SuperpointDecoder, self).__init__() + super().__init__() self.relu = torch.nn.ReLU(inplace=True) # Perform strided convolution when using lcnn backbone. if backbone_name == "lcnn": diff --git a/limap/line2d/SOLD2/model/nets/lcnn_hourglass.py b/limap/line2d/SOLD2/model/nets/lcnn_hourglass.py index b370dd37..b4259064 100644 --- a/limap/line2d/SOLD2/model/nets/lcnn_hourglass.py +++ b/limap/line2d/SOLD2/model/nets/lcnn_hourglass.py @@ -11,7 +11,7 @@ class MultitaskHead(nn.Module): def __init__(self, input_channels, num_class): - super(MultitaskHead, self).__init__() + super().__init__() m = int(input_channels / 4) head_size = [[2], [1], [2]] @@ -35,7 +35,7 @@ class Bottleneck2D(nn.Module): expansion = 2 def __init__(self, inplanes, planes, stride=1, downsample=None): - super(Bottleneck2D, self).__init__() + super().__init__() self.bn1 = nn.BatchNorm2d(inplanes) self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1) @@ -74,7 +74,7 @@ def forward(self, x): class Hourglass(nn.Module): def __init__(self, block, num_blocks, planes, depth): - super(Hourglass, self).__init__() + super().__init__() self.depth = depth self.block = block self.hg = self._make_hour_glass(block, num_blocks, planes, depth) @@ -128,7 +128,7 @@ def __init__( num_classes, input_channels, ): - super(HourglassNet, self).__init__() + super().__init__() self.inplanes = 64 self.num_feats = 128 diff --git a/limap/line2d/SOLD2/sold2.py b/limap/line2d/SOLD2/sold2.py index 0d565f95..0ab77195 100644 --- a/limap/line2d/SOLD2/sold2.py +++ b/limap/line2d/SOLD2/sold2.py @@ -10,7 +10,7 @@ class SOLD2Detector(BaseDetector): def __init__(self, options=BaseDetectorOptions()): - super(SOLD2Detector, self).__init__(options) + super().__init__(options) self.detector = SOLD2LineDetector(weight_path=self.weight_path) def get_module_name(self): @@ -94,7 +94,7 @@ def extract_heatmaps_all_images(self, folder, imagecols, skip_exists=False): class SOLD2Matcher(BaseMatcher): def __init__(self, extractor, options=BaseMatcherOptions()): - super(SOLD2Matcher, self).__init__(extractor, options) + super().__init__(extractor, options) assert self.extractor.get_module_name() == "sold2" self.detector = SOLD2LineDetector(weight_path=self.weight_path) diff --git a/limap/line2d/SOLD2/train.py b/limap/line2d/SOLD2/train.py index 39ebb7b5..42f20821 100644 --- a/limap/line2d/SOLD2/train.py +++ b/limap/line2d/SOLD2/train.py @@ -741,12 +741,12 @@ def record_train_summaries(writer, global_step, scalars, images): "Train_loss/total_loss", scalars["total_loss"], global_step ) # Add regularization loss - if "reg_loss" in scalars.keys(): + if "reg_loss" in scalars: writer.add_scalar( "Train_loss/reg_loss", scalars["reg_loss"], global_step ) # Add descriptor loss - if "descriptor_loss" in scalars.keys(): + if "descriptor_loss" in scalars: key = "descriptor_loss" writer.add_scalar("Train_loss/%s" % (key), scalars[key], global_step) writer.add_scalar( @@ -754,7 +754,7 @@ def record_train_summaries(writer, global_step, scalars, images): ) # Record weighting - for key in scalars.keys(): + for key in scalars: if "w_" in key: writer.add_scalar( 
"Train_weight/%s" % (key), scalars[key], global_step @@ -771,7 +771,7 @@ def record_train_summaries(writer, global_step, scalars, images): "Train_loss_average/total_loss", average["total_loss"], global_step ) # Add smoothed descriptor loss - if "descriptor_loss" in average.keys(): + if "descriptor_loss" in average: writer.add_scalar( "Train_loss_average/descriptor_loss", average["descriptor_loss"], @@ -802,7 +802,7 @@ def record_train_summaries(writer, global_step, scalars, images): "Train_metrics/heatmap_recall", results["heatmap_recall"], global_step ) # Add descriptor metric - if "matching_score" in results.keys(): + if "matching_score" in results: writer.add_scalar( "Train_metrics/matching_score", results["matching_score"], @@ -839,7 +839,7 @@ def record_train_summaries(writer, global_step, scalars, images): global_step, ) # Add smoothed descriptor metric - if "matching_score" in average.keys(): + if "matching_score" in average: writer.add_scalar( "Train_metrics_average/matching_score", average["matching_score"], @@ -913,7 +913,7 @@ def record_test_summaries(writer, epoch, scalars): writer.add_scalar("Val_loss/heatmap_loss", average["heatmap_loss"], epoch) writer.add_scalar("Val_loss/total_loss", average["total_loss"], epoch) # Add descriptor loss - if "descriptor_loss" in average.keys(): + if "descriptor_loss" in average: key = "descriptor_loss" writer.add_scalar("Val_loss/%s" % (key), average[key], epoch) @@ -935,7 +935,7 @@ def record_test_summaries(writer, epoch, scalars): "Val_metrics/heatmap_recall", average["heatmap_recall"], epoch ) # Add descriptor metric - if "matching_score" in average.keys(): + if "matching_score" in average: writer.add_scalar( "Val_metrics/matching_score", average["matching_score"], epoch ) diff --git a/limap/line2d/TP_LSD/tp_lsd.py b/limap/line2d/TP_LSD/tp_lsd.py index 19a6b52f..f9a67065 100644 --- a/limap/line2d/TP_LSD/tp_lsd.py +++ b/limap/line2d/TP_LSD/tp_lsd.py @@ -12,7 +12,7 @@ class TPLSDDetector(BaseDetector): def __init__(self, options=BaseDetectorOptions()): - super(TPLSDDetector, self).__init__(options) + super().__init__(options) # Load the TP-LSD model head = {"center": 1, "dis": 4, "line": 1} if self.weight_path is None: diff --git a/limap/line2d/base_detector.py b/limap/line2d/base_detector.py index 945fda47..de7feded 100644 --- a/limap/line2d/base_detector.py +++ b/limap/line2d/base_detector.py @@ -266,7 +266,7 @@ def detect_and_extract_all_images( all_segs (dict[int -> :class:`np.array`]): The line detection for each image indexed by the image id. Each segment is with shape (N, 5). Each row corresponds to x1, y1, x2, y2 and score. descinfo_folder (str): Path to the extracted descriptors. 
""" - assert self.do_merge_lines == False + assert self.do_merge_lines seg_folder = self.get_segments_folder(output_folder) descinfo_folder = self.get_descinfo_folder(output_folder) if not skip_exists: diff --git a/limap/optimize/functions.py b/limap/optimize/functions.py index 0f81e643..70dec61f 100644 --- a/limap/optimize/functions.py +++ b/limap/optimize/functions.py @@ -1,80 +1,13 @@ import os +import cv2 import numpy as np from tqdm import tqdm +import limap.base as _base import limap.visualize as limapvis -def visualize_heatmap_intersections( - prefix, - imname_list, - image_ids, - p_heatmaps, - ht_intersections, - max_image_dim=None, -): - import matplotlib.cm as cmx - import matplotlib.colors as colors - - cNorm = colors.Normalize(vmin=0, vmax=1) - scalarMap = cmx.ScalarMappable(norm=cNorm, cmap="viridis") - - path = os.path.dirname(prefix) - if not os.path.exists(path): - os.makedirs(path) - for img_id, heatmap, intersections in zip( - image_ids, p_heatmaps, ht_intersections - ): - imname = imname_list[img_id] - - # visualize image - img = utils.read_image( - imname, max_image_dim=max_image_dim, set_gray=False - ) - img = limapvis.draw_points(img, intersections, (255, 0, 0), 2) - fname_out = prefix + f"_img{img_id}.png" - cv2.imwrite(fname_out, img) - - # visualize heatmap - heatmap_img = (scalarMap.to_rgba(heatmap)[:, :, :3] * 255).astype( - np.uint8 - ) - heatmap_img = limapvis.draw_points( - heatmap_img, intersections, (255, 0, 0), 2 - ) - fname_out_heatmap = prefix + f"_heatmap{img_id}.png" - cv2.imwrite(fname_out_heatmap, heatmap_img) - - -def visualize_fconsis_intersections( - prefix, - imname_list, - image_ids, - fc_intersections, - max_image_dim=None, - n_samples_vis=-1, -): - if n_samples_vis != -1: - fc_intersections = fc_intersections[:n_samples_vis] - path = os.path.dirname(prefix) - if not os.path.exists(path): - os.makedirs(path) - for sample_id, intersections in enumerate(tqdm(fc_intersections)): - imgs = [] - for data in intersections: - img_id, point = image_ids[data[0]], data[1] - img = utils.read_image( - imname_list[img_id], max_image_dim=max_image_dim, set_gray=False - ) - limapvis.draw_points(img, [point], (0, 0, 255), 1) - img = limapvis.crop_to_patch(img, point, patch_size=100) - imgs.append(img) - bigimg = limapvis.make_bigimage(imgs, pad=20) - fname_out = prefix + f"_sample{sample_id}.png" - cv2.imwrite(fname_out, bigimg) - - def unit_test_add_noise_to_track(track): # for unit test tmptrack = _base.LineTrack(track) diff --git a/limap/optimize/hybrid_bundle_adjustment/solve.py b/limap/optimize/hybrid_bundle_adjustment/solve.py index c6e072e2..c79aef88 100644 --- a/limap/optimize/hybrid_bundle_adjustment/solve.py +++ b/limap/optimize/hybrid_bundle_adjustment/solve.py @@ -2,10 +2,7 @@ def _init_bundle_adjustment_engine(cfg, imagecols, max_num_iterations=100): - if type(cfg) == dict: - ba_config = _optimize.HybridBAConfig(cfg) - else: - ba_config = cfg + ba_config = _optimize.HybridBAConfig(cfg) if isinstance(cfg, dict) else cfg ba_config.solver_options.logging_type = _ceresbase.LoggingType.SILENT ba_config.solver_options.max_num_iterations = max_num_iterations ba_engine = _optimize.HybridBAEngine(ba_config) diff --git a/limap/optimize/line_localization/functions.py b/limap/optimize/line_localization/functions.py index 93379c78..d87d492f 100644 --- a/limap/optimize/line_localization/functions.py +++ b/limap/optimize/line_localization/functions.py @@ -106,7 +106,8 @@ def reprojection_filter_matches_2to3( matches = [] for ref_line_id in all_pairs_2to3: ref_line 
= ref_lines[ref_line_id] - mp_ref, dir_ref = ref_line.midpoint(), ref_line.direction() + # mp_ref = ref_line.midpoint() + dir_ref = ref_line.direction() track_ids = np.unique(all_pairs_2to3[ref_line_id]) min_loss = np.inf diff --git a/limap/optimize/line_refinement/line_refinement.py b/limap/optimize/line_refinement/line_refinement.py index f9af0221..413a5697 100644 --- a/limap/optimize/line_refinement/line_refinement.py +++ b/limap/optimize/line_refinement/line_refinement.py @@ -123,25 +123,6 @@ def line_refinement( # debug if cfg["visualize"]: - - def report_track(track_id): - limapvis.visualize_line_track( - imname_list, - tracks[track_id], - max_image_dim=-1, - cameras=cameras, - prefix=f"track.{track_id}", - ) - - def report_newtrack(track_id): - limapvis.visualize_line_track( - imname_list, - opttracks[track_id], - max_image_dim=-1, - cameras=cameras, - prefix=f"newtrack.{track_id}", - ) - import pdb pdb.set_trace() diff --git a/limap/point2d/superpoint/main.py b/limap/point2d/superpoint/main.py index 206203cc..47fe72b5 100644 --- a/limap/point2d/superpoint/main.py +++ b/limap/point2d/superpoint/main.py @@ -122,20 +122,3 @@ def run_superpoint( print("[SuperPoint] Finished exporting features.") return feature_path - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--image_dir", type=Path, required=True) - parser.add_argument("--export_dir", type=Path, required=True) - parser.add_argument( - "--conf", - type=str, - default="superpoint_aachen", - choices=list(confs.keys()), - ) - parser.add_argument("--as_half", action="store_true") - parser.add_argument("--image_list", type=Path) - parser.add_argument("--feature_path", type=Path) - args = parser.parse_args() - main(confs[args.conf], args.image_dir, args.export_dir, args.as_half) diff --git a/limap/pointsfm/bundler_reader.py b/limap/pointsfm/bundler_reader.py index 99d1f55c..8e402b7f 100644 --- a/limap/pointsfm/bundler_reader.py +++ b/limap/pointsfm/bundler_reader.py @@ -45,7 +45,7 @@ def ReadModelBundler(bundler_path, list_path, model_path): raise ValueError(f"Error! 
Image not found: {imname}") width, height = imagesize.get(imname) img_hw = [height, width] - K = np.zeros((3, 3)) + # K = np.zeros((3, 3)) cx = img_hw[1] / 2.0 cy = img_hw[0] / 2.0 params = [f, cx, cy, k1, k2] diff --git a/limap/pointsfm/colmap_reader.py b/limap/pointsfm/colmap_reader.py index 1f8ad727..7bbb0d7b 100644 --- a/limap/pointsfm/colmap_reader.py +++ b/limap/pointsfm/colmap_reader.py @@ -4,7 +4,14 @@ from _limap import _base sys.path.append(os.path.dirname(os.path.abspath(__file__))) -from read_write_model import * +from read_write_model import ( + read_cameras_binary, + read_cameras_text, + read_images_binary, + read_images_text, + read_points3D_binary, + read_points3D_text, +) def check_exists_colmap_model(model_path): @@ -14,13 +21,11 @@ def check_exists_colmap_model(model_path): and os.path.exists(os.path.join(model_path, "points3D.bin")) ): return True - if ( + return ( os.path.exists(os.path.join(model_path, "cameras.txt")) and os.path.exists(os.path.join(model_path, "images.txt")) and os.path.exists(os.path.join(model_path, "points3D.txt")) - ): - return True - return False + ) def ReadInfos(colmap_path, model_path="sparse", image_path="images"): @@ -54,7 +59,6 @@ def ReadInfos(colmap_path, model_path="sparse", image_path="images"): ) # read images - n_images = len(colmap_images) camimages = {} for img_id, colmap_image in colmap_images.items(): imname = colmap_image.name diff --git a/limap/pointsfm/database.py b/limap/pointsfm/database.py index ec7743e4..d078f51d 100644 --- a/limap/pointsfm/database.py +++ b/limap/pointsfm/database.py @@ -147,7 +147,7 @@ def connect(database_path): return sqlite3.connect(database_path, factory=COLMAPDatabase) def __init__(self, *args, **kwargs): - super(COLMAPDatabase, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.create_tables = lambda: self.executescript(CREATE_ALL) self.create_cameras_table = lambda: self.executescript( @@ -398,14 +398,14 @@ def example_usage(): # Read and check matches. 
- pair_ids = [ - image_ids_to_pair_id(*pair) - for pair in ( - (image_id1, image_id2), - (image_id2, image_id3), - (image_id3, image_id4), - ) - ] + # pair_ids = [ + # image_ids_to_pair_id(*pair) + # for pair in ( + # (image_id1, image_id2), + # (image_id2, image_id3), + # (image_id3, image_id4), + # ) + # ] matches = dict( (pair_id_to_image_ids(pair_id), blob_to_array(data, np.uint32, (-1, 2))) diff --git a/limap/pointsfm/visualsfm_reader.py b/limap/pointsfm/visualsfm_reader.py index fcf2ef76..706d5c28 100644 --- a/limap/pointsfm/visualsfm_reader.py +++ b/limap/pointsfm/visualsfm_reader.py @@ -73,7 +73,7 @@ def ReadModelVisualSfM(vsfm_path, nvm_file="reconstruction.nvm"): line = txt_lines[counter].strip().split() counter += 1 point = np.array([float(line[k]) for k in range(3)]) - color = np.array([int(line[k]) for k in np.arange(3, 6).tolist()]) + # color = np.array([int(line[k]) for k in np.arange(3, 6).tolist()]) n_views = int(line[6]) track = [] subcounter = 7 diff --git a/limap/runners/functions.py b/limap/runners/functions.py index 41c47d52..09bafb74 100644 --- a/limap/runners/functions.py +++ b/limap/runners/functions.py @@ -189,7 +189,7 @@ def compute_2d_segs(cfg, imagecols, compute_descinfo=True): all_2d_segs (dict[int -> :class:`np.array`], each with shape (N, 4) or (N, 5)): all the line detections for each image descinfo_folder (str): folder to store the descriptors """ - weight_path = None if "weight_path" not in cfg else cfg["weight_path"] + weight_path = cfg.get("weight_path", None) if "extractor" in cfg["line2d"]: print( "[LOG] Start 2D line detection and description (detector = {0}, extractor = {1}, n_images = {2})...".format( @@ -280,7 +280,7 @@ def compute_matches(cfg, descinfo_folder, image_ids, neighbors): Returns: matches_folder (str): path to store the computed matches """ - weight_path = None if "weight_path" not in cfg else cfg["weight_path"] + weight_path = cfg.get("weight_path", None) print( "[LOG] Start matching 2D lines... 
(extractor = {0}, matcher = {1}, n_images = {2}, n_neighbors = {3})".format( cfg["line2d"]["extractor"]["method"], diff --git a/limap/runners/line_fitnmerge.py b/limap/runners/line_fitnmerge.py index caf5ef95..b600186b 100644 --- a/limap/runners/line_fitnmerge.py +++ b/limap/runners/line_fitnmerge.py @@ -25,7 +25,6 @@ def fit_3d_segs(all_2d_segs, imagecols, depths, fitting_config): Returns: output (dict[int -> list[(:class:`np.array`, :class:`np.array`)]]): for each image, output a list of :class:`np.array` pair, representing two endpoints """ - n_images = len(all_2d_segs) seg3d_list = [] def process(all_2d_segs, imagecols, depths, fitting_config, img_id): @@ -128,7 +127,7 @@ def line_fitnmerge(cfg, imagecols, depths, neighbors=None, ranges=None): list[:class:`limap.base.LineTrack`]: list of output 3D line tracks """ # assertion check - assert imagecols.IsUndistorted() == True + assert imagecols.IsUndistorted() print(f"[LOG] Number of images: {imagecols.NumImages()}") cfg = _runners.setup(cfg) detector_name = cfg["line2d"]["detector"]["method"] @@ -299,7 +298,7 @@ def line_fitting_with_3Dpoints( list[:class:`limap.base.LineTrack`]: list of output 3D line tracks """ # assertion check - assert imagecols.IsUndistorted() == True + assert imagecols.IsUndistorted() print(f"[LOG] Number of images: {imagecols.NumImages()}") cfg = _runners.setup(cfg) detector_name = cfg["line2d"]["detector"]["method"] diff --git a/limap/runners/line_localization.py b/limap/runners/line_localization.py index d74ef56a..cab3dbf1 100644 --- a/limap/runners/line_localization.py +++ b/limap/runners/line_localization.py @@ -170,7 +170,7 @@ def line_localization( # Do matches for query images and retrieved neighbors for superglue endpoints matcher if cfg["localization"]["2d_matcher"] != "epipolar": - weight_path = None if "weight_path" not in cfg else cfg["weight_path"] + weight_path = cfg.get("weight_path", None) if cfg["localization"]["2d_matcher"] == "superglue_endpoints": extractor_name = "superpoint_endpoints" matcher_name = "superglue_endpoints" diff --git a/ruff.toml b/ruff.toml index fdd86d74..9f29fb90 100644 --- a/ruff.toml +++ b/ruff.toml @@ -15,7 +15,15 @@ select = [ # isort "I", ] -ignore = ["SIM117", "E501", "F401", "F403", "UP030", "B007", "E402", "E741", "B006", "B008"] +ignore = ["SIM113", "SIM117", "E501", "F401", "F403", + "UP030", "B007", "E402", "E741", "B006", + "B008", "B028", "B904", "UP031", "E722"] [lint.per-file-ignores] -"scripts/*.py" = ["E"] +"limap/line2d/SOLD2/model/*.py" = ["SIM"] +"limap/line2d/SOLD2/misc/*.py" = ["SIM"] +"limap/line2d/LineTR/*.py" = ["F", "SIM"] +"limap/line2d/L2D2/RAL_net_cov.py" = ["SIM"] +"limap/features/models/s2dnet.py" = ["F", "UP"] +"limap/features/models/vggnet.py" = ["F"] +"limap/estimators/absolute_pose/_pl_estimate_absolute_pose.py" = ["F"] diff --git a/scripts/convert_model.py b/scripts/convert_model.py index 92b4eef1..e7a6684f 100644 --- a/scripts/convert_model.py +++ b/scripts/convert_model.py @@ -23,7 +23,7 @@ if args.type == "imagecols2colmap": imagecols = limapio.read_npy(args.input_path).item() - if type(imagecols) == dict: + if isinstance(imagecols, dict): imagecols = _base.ImageCollection(imagecols) _psfm.convert_imagecols_to_colmap(imagecols, args.output_path) elif args.type == "colmap2vsfm":