add CI for formatting (#84)
* update.

* update version.

* update clang version.

* update.

* update.

* update.

* update.

* align with 21.12 for black.

* update.
B1ueber2y authored Jul 28, 2024
1 parent 82fc2f4 commit e8da676
Showing 23 changed files with 166 additions and 97 deletions.
44 changes: 44 additions & 0 deletions .github/workflows/format-ubuntu.yml
@@ -0,0 +1,44 @@
name: Check code formatting (Ubuntu)

on:
  push:
    branches:
      - main
  pull_request:
    types: [ assigned, opened, synchronize, reopened ]
  release:
    types: [ published, edited ]

jobs:
  build:
    name: ${{ matrix.config.os }} formatting
    runs-on: ${{ matrix.config.os }}
    strategy:
      matrix:
        config: [
          {
            os: ubuntu-22.04,
            checkCodeFormat: true,
          },
        ]

    env:
      COMPILER_CACHE_VERSION: 1
      COMPILER_CACHE_DIR: ${{ github.workspace }}/compiler-cache
      CCACHE_DIR: ${{ github.workspace }}/compiler-cache/ccache
      CCACHE_BASEDIR: ${{ github.workspace }}
      CTCACHE_DIR: ${{ github.workspace }}/compiler-cache/ctcache

    steps:
      - uses: actions/checkout@v4
      - name: Check code format
        run: |
          if [ "${{ matrix.config.checkCodeFormat }}" != "true" ]; then
            exit 0
          fi
          set +x -euo pipefail
          sudo apt-get update && sudo apt-get install -y clang-format-14 black
          ./scripts/format/clang_format.sh
          ./scripts/format/black.sh
          git diff --name-only
          git diff --exit-code || (echo "Code formatting failed" && exit 1)
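For reference, the two scripts/format helpers invoked above are run exactly as in CI. A minimal local approximation of this job, written as a Python sketch (the contents of the shell scripts themselves are not part of this diff), would be:

import subprocess
import sys

def run(cmd):
    # Echo and run a command, returning its exit code.
    print("+", " ".join(cmd))
    return subprocess.run(cmd).returncode

failed = 0
failed |= run(["./scripts/format/clang_format.sh"])  # C++ sources via clang-format-14
failed |= run(["./scripts/format/black.sh"])         # Python sources via black
failed |= run(["git", "diff", "--exit-code"])        # fail if the formatters changed anything
sys.exit(1 if failed else 0)

Run from the repository root; like the workflow, it treats any resulting diff as a formatting failure.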
4 changes: 2 additions & 2 deletions limap/base/unit_test.py
@@ -34,10 +34,10 @@ def report_error(imagecols_pred, imagecols):
R_error = (
imagecols_pred.camimage(img_id).R() - imagecols.camimage(img_id).R()
)
R_error = np.sqrt(np.sum(R_error**2))
R_error = np.sqrt(np.sum(R_error ** 2))
T_error = (
imagecols_pred.camimage(img_id).T() - imagecols.camimage(img_id).T()
)
T_error = np.sqrt(np.sum(T_error**2))
T_error = np.sqrt(np.sum(T_error ** 2))
pose_errors.append(np.array([R_error, T_error]))
print("pose_error: (R, T)", np.array(pose_errors).mean(0))
6 changes: 3 additions & 3 deletions limap/ceresbase/interpolation.h
@@ -87,9 +87,9 @@ template <typename dtype> struct InterpolationQuery {
const Eigen::Vector2d &scale_,
const std::array<int, 3> &shape_, const dtype *data_ptr_)
: scale(scale_), shape(shape_), corner(corner_), data_ptr(data_ptr_) {}
Eigen::Vector2d scale; // Could be a const-reference? --> With little change
// to pybind this can be done
Eigen::Vector2i corner; // Could be a const-reference?
Eigen::Vector2d scale; // Could be a const-reference? --> With little change
// to pybind this can be done
Eigen::Vector2i corner; // Could be a const-reference?
std::array<int, 3> shape; // Could be a const-reference?
const dtype *data_ptr;

15 changes: 9 additions & 6 deletions limap/estimators/absolute_pose/_pl_estimate_absolute_pose.py
@@ -110,12 +110,15 @@ def _pl_estimate_absolute_pose(
ransac_options.data_type_weights_ = np.array(
[ransac_cfg["weight_point"], ransac_cfg["weight_line"]]
)
ransac_options.data_type_weights_ *= np.array(
[
ransac_options.squared_inlier_thresholds_[1],
ransac_options.squared_inlier_thresholds_[0],
]
) / np.sum(ransac_options.squared_inlier_thresholds_)
ransac_options.data_type_weights_ *= (
np.array(
[
ransac_options.squared_inlier_thresholds_[1],
ransac_options.squared_inlier_thresholds_[0],
]
)
/ np.sum(ransac_options.squared_inlier_thresholds_)
)
ransac_options.min_num_iterations_ = ransac_cfg["min_num_iterations"]
ransac_options.final_least_squares_ = ransac_cfg["final_least_squares"]
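The rewrapped expression above scales each data type's weight by the other type's squared inlier threshold, normalized by the sum of both thresholds; only the line wrapping changes. A sketch with hypothetical numbers:

import numpy as np

squared_inlier_thresholds = np.array([4.0, 9.0])  # [point, line], hypothetical values
data_type_weights = np.array([1.0, 1.0])          # [weight_point, weight_line], hypothetical
# The point weight is scaled by the line threshold and vice versa.
data_type_weights = data_type_weights * np.array(
    [squared_inlier_thresholds[1], squared_inlier_thresholds[0]]
) / np.sum(squared_inlier_thresholds)
print(data_type_weights)  # [9/13, 4/13] ~= [0.692, 0.308]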

4 changes: 2 additions & 2 deletions limap/features/models/s2dnet.py
@@ -66,7 +66,7 @@ def print_gpu_memory():
a = torch.cuda.memory_allocated(0)
f = r - a # free inside reserved

print(np.array([t, r, a, f]) / 2**30)
print(np.array([t, r, a, f]) / 2 ** 30)


class AdapLayers(nn.Module):
@@ -130,7 +130,7 @@ def _init(self, conf):
if isinstance(layer, torch.nn.MaxPool2d):
current_scale += 1
if i in self.hypercolumn_indices:
self.scales.append(2**current_scale)
self.scales.append(2 ** current_scale)

self.adaptation_layers = AdapLayers(
conf.hypercolumn_layers, conf.output_dim
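In this file (and in vggnet.py below), current_scale counts the MaxPool2d layers seen so far, so 2**current_scale is the downsampling factor of the feature map recorded at each hypercolumn layer. A standalone sketch with illustrative layer indices:

import torch.nn as nn

layers = [nn.Conv2d(3, 8, 3), nn.MaxPool2d(2), nn.Conv2d(8, 8, 3), nn.MaxPool2d(2)]
hypercolumn_indices = {0, 2, 3}  # hypothetical selection
scales, current_scale = [], 0
for i, layer in enumerate(layers):
    if isinstance(layer, nn.MaxPool2d):
        current_scale += 1
    if i in hypercolumn_indices:
        scales.append(2 ** current_scale)
print(scales)  # [1, 2, 4]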
2 changes: 1 addition & 1 deletion limap/features/models/vggnet.py
@@ -31,7 +31,7 @@ def _init(self, conf=default_conf):
if isinstance(layer, torch.nn.MaxPool2d):
current_scale += 1
if i in self.hypercolumn_indices:
self.scales.append(2**current_scale)
self.scales.append(2 ** current_scale)

def _forward(self, data):
image = data # data['image']
2 changes: 1 addition & 1 deletion limap/line2d/LineTR/line_attention.py
@@ -38,7 +38,7 @@ def __init__(self, n_heads: int, d_feature: int, dropout=0.1):
self.w_vs = nn.Linear(d_feature, n_heads * dim, bias=True)
self.fc = nn.Linear(n_heads * dim, d_feature, bias=True)

self.attention = ScaledDotProduct(scale=dim**0.5)
self.attention = ScaledDotProduct(scale=dim ** 0.5)

self.dropout = nn.Dropout(dropout)
self.layer_norm = nn.LayerNorm(d_feature, eps=1e-6)
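The dim ** 0.5 term is the usual scaled dot-product attention temperature: raw query-key scores are divided by the square root of the per-head feature dimension before the softmax. A minimal sketch with illustrative shapes:

import torch

dim = 64
q = torch.randn(2, 5, dim)  # (batch, queries, dim)
k = torch.randn(2, 7, dim)  # (batch, keys, dim)
v = torch.randn(2, 7, dim)
scores = q @ k.transpose(-2, -1) / dim ** 0.5       # (2, 5, 7)
prob = torch.nn.functional.softmax(scores, dim=-1)
out = prob @ v                                       # (2, 5, dim)

The same scaling appears again in line_transformer.py and superglue.py further down.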
4 changes: 2 additions & 2 deletions limap/line2d/LineTR/line_process.py
@@ -54,7 +54,7 @@ def point_on_line(line, dist_px):
vec = ep - sp
if vec[0] != 0:
m = vec[1] / vec[0]
x = np.sqrt(dist_px**2 / (1 + m**2))
x = np.sqrt(dist_px ** 2 / (1 + m ** 2))
y = m * x
else:
x = 0
@@ -275,7 +275,7 @@ def change_cv2_T_np(klines_cv):
kline_ep = [sp_x, sp_y]

# linelength = math.sqrt((kline_ep[0]-kline_sp[0])**2 +(kline_ep[1]-kline_sp[1])**2)
linelength = line.lineLength * (2**line.octave)
linelength = line.lineLength * (2 ** line.octave)

klines_sp.append(kline_sp)
klines_ep.append(kline_ep)
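In point_on_line, a step of dist_px pixels along a segment of slope m satisfies x**2 + y**2 == dist_px**2, which gives x = sqrt(dist_px**2 / (1 + m**2)) and y = m * x; lineLength * (2 ** line.octave) appears to rescale an OpenCV keyline length from its pyramid octave back to full resolution. A quick check of the first identity, with illustrative values:

import numpy as np

dist_px, m = 5.0, 2.0
x = np.sqrt(dist_px ** 2 / (1 + m ** 2))
y = m * x
assert np.isclose(np.hypot(x, y), dist_px)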
2 changes: 1 addition & 1 deletion limap/line2d/LineTR/line_transformer.py
@@ -186,7 +186,7 @@ def forward(
def attention(query, key, value):
dim = query.shape[1]
scores = (
torch.einsum("bdhn,bdhm->bhnm", query, key) / dim**0.5
torch.einsum("bdhn,bdhm->bhnm", query, key) / dim ** 0.5
) # [3, 64, 4, 512] -> [3, 4, 512, 512]
prob = torch.nn.functional.softmax(scores, dim=-1)
return torch.einsum("bhnm,bdhm->bdhn", prob, value), prob
105 changes: 57 additions & 48 deletions limap/line2d/LineTR/linetr_pipeline.py
@@ -90,7 +90,6 @@ def detect_lsd_lines(self, x, max_n_lines=None):
return torch.stack(lines).to(x)

def _forward(self, data):

def process_siamese(data, i):
data_i = {k[:-1]: v for k, v in data.items() if k[-1] == i}
if self.conf.extractor.name:
@@ -181,30 +180,34 @@ def process_siamese(data, i):
):
# Several points are sampled per line, and we supervise them independently
b_size = len(pred["lines0"])
samples_assignment, samples_m0, samples_m1 = (
gt_matches_from_homography(
pred["lines0"].reshape(b_size, -1, 2),
pred["lines1"].reshape(b_size, -1, 2),
**data,
pos_th=self.conf.ground_truth.th_positive
)
(
samples_assignment,
samples_m0,
samples_m1,
) = gt_matches_from_homography(
pred["lines0"].reshape(b_size, -1, 2),
pred["lines1"].reshape(b_size, -1, 2),
**data,
pos_th=self.conf.ground_truth.th_positive
)
pred["samples_gt_assignment"] = samples_assignment
pred["samples_gt_matches0"] = samples_m0
pred["samples_gt_matches1"] = samples_m1

# Compute the GT line association
line_assignment, line_m0, line_m1 = (
gt_line_matches_from_homography(
pred["lines0"],
pred["lines1"],
pred["valid_lines0"],
pred["valid_lines1"],
data,
self.conf.ground_truth.n_line_sampled_pts,
self.conf.ground_truth.line_perp_dist_th,
self.conf.ground_truth.overlap_th,
)
(
line_assignment,
line_m0,
line_m1,
) = gt_line_matches_from_homography(
pred["lines0"],
pred["lines1"],
pred["valid_lines0"],
pred["valid_lines1"],
data,
self.conf.ground_truth.n_line_sampled_pts,
self.conf.ground_truth.line_perp_dist_th,
self.conf.ground_truth.overlap_th,
)
pred["line_gt_matches0"] = line_m0
pred["line_gt_matches1"] = line_m1
@@ -232,31 +235,37 @@ def process_siamese(data, i):
):
# Several points are sampled per line, and we supervise them independently
b_size = len(pred["lines0"])
samples_assignment, samples_m0, samples_m1 = (
gt_matches_from_pose_depth(
pred["lines0"].reshape(b_size, -1, 2),
pred["lines1"].reshape(b_size, -1, 2),
**data,
pos_th=self.conf.ground_truth.th_positive,
neg_th=self.conf.ground_truth.th_negative
)[:3]
)
(
samples_assignment,
samples_m0,
samples_m1,
) = gt_matches_from_pose_depth(
pred["lines0"].reshape(b_size, -1, 2),
pred["lines1"].reshape(b_size, -1, 2),
**data,
pos_th=self.conf.ground_truth.th_positive,
neg_th=self.conf.ground_truth.th_negative
)[
:3
]
pred["samples_gt_assignment"] = samples_assignment
pred["samples_gt_matches0"] = samples_m0
pred["samples_gt_matches1"] = samples_m1

# Compute the GT line association
line_assignment, line_m0, line_m1 = (
gt_line_matches_from_pose_depth(
pred["lines0"],
pred["lines1"],
pred["valid_lines0"],
pred["valid_lines1"],
data,
self.conf.ground_truth.n_line_sampled_pts,
self.conf.ground_truth.line_perp_dist_th,
self.conf.ground_truth.overlap_th,
)
(
line_assignment,
line_m0,
line_m1,
) = gt_line_matches_from_pose_depth(
pred["lines0"],
pred["lines1"],
pred["valid_lines0"],
pred["valid_lines1"],
data,
self.conf.ground_truth.n_line_sampled_pts,
self.conf.ground_truth.line_perp_dist_th,
self.conf.ground_truth.overlap_th,
)
pred["line_gt_matches0"] = line_m0
pred["line_gt_matches1"] = line_m1
@@ -299,21 +308,21 @@ def process_siamese(data, i):
assert match_mat.shape[0] == 1
bool_match_mat = match_mat[0] > 0
pred["line_matches0"] = np.argmax(bool_match_mat, axis=1)
pred["line_matches0"][~np.any(bool_match_mat, axis=1)] = (
UNMATCHED_FEATURE
)
pred["line_matches0"][
~np.any(bool_match_mat, axis=1)
] = UNMATCHED_FEATURE
pred["line_matches1"] = np.argmax(bool_match_mat, axis=0)
pred["line_matches1"][~np.any(bool_match_mat, axis=0)] = (
UNMATCHED_FEATURE
)
pred["line_matches1"][
~np.any(bool_match_mat, axis=0)
] = UNMATCHED_FEATURE
pred["line_matches0"] = torch.from_numpy(pred["line_matches0"])[None]
pred["line_matches1"] = torch.from_numpy(pred["line_matches1"])[None]
lmatch_scores = torch.from_numpy(
distance_matrix[(0,) + np.where(match_mat[0] > 0)]
)
pred["line_match_scores0"] = pred["line_match_scores1"] = (
-lmatch_scores[None]
)
pred["line_match_scores0"] = pred[
"line_match_scores1"
] = -lmatch_scores[None]
return pred

def loss(self, pred, data):
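The last hunk above turns a soft line-match matrix into per-line indices: each line takes the argmax over the other image's lines, and rows or columns with no positive entry are marked UNMATCHED_FEATURE. A standalone sketch (the constant's value here is illustrative, not taken from the pipeline):

import numpy as np

UNMATCHED_FEATURE = -1  # illustrative placeholder

match_mat = np.array([
    [0.0, 0.8, 0.0],
    [0.0, 0.0, 0.0],
])
bool_match_mat = match_mat > 0
matches0 = np.argmax(bool_match_mat, axis=1)
matches0[~np.any(bool_match_mat, axis=1)] = UNMATCHED_FEATURE
matches1 = np.argmax(bool_match_mat, axis=0)
matches1[~np.any(bool_match_mat, axis=0)] = UNMATCHED_FEATURE
print(matches0)  # [ 1 -1]
print(matches1)  # [-1  0 -1]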
6 changes: 4 additions & 2 deletions limap/line2d/SOLD2/model/line_detection.py
@@ -178,7 +178,9 @@ def detect(self, junctions, heatmap, device=torch.device("cpu")):
dim=-1,
)
)
normalized_seg_length = segments_length / (((H**2) + (W**2)) ** 0.5)
normalized_seg_length = segments_length / (
((H ** 2) + (W ** 2)) ** 0.5
)

# Perform local max search
num_cand = cand_h.shape[0]
@@ -550,7 +552,7 @@ def detect_local_max(
"""Detection by local maximum search."""
# Compute the distance threshold
dist_thresh = (
0.5 * (2**0.5) + self.lambda_radius * normalized_seg_length
0.5 * (2 ** 0.5) + self.lambda_radius * normalized_seg_length
)
# Make it N x 64
dist_thresh = torch.repeat_interleave(
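normalized_seg_length divides the candidate segment length by the image diagonal, and the local-max search radius grows linearly with it. Worked numbers with an illustrative image size and a hypothetical lambda_radius:

H, W = 480, 640
segment_length = 200.0
normalized_seg_length = segment_length / ((H ** 2 + W ** 2) ** 0.5)  # 200 / 800 = 0.25
lambda_radius = 2.0  # hypothetical value
dist_thresh = 0.5 * (2 ** 0.5) + lambda_radius * normalized_seg_length
print(round(dist_thresh, 3))  # 1.207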
2 changes: 1 addition & 1 deletion limap/line2d/SOLD2/model/loss.py
@@ -154,7 +154,7 @@ def space_to_depth(input_tensor, grid_size):
# (N, bs, bs, C, H//bs, W//bs)
x = x.permute(0, 3, 5, 1, 2, 4).contiguous()
# (N, C*bs^2, H//bs, W//bs)
x = x.view(N, C * (grid_size**2), H // grid_size, W // grid_size)
x = x.view(N, C * (grid_size ** 2), H // grid_size, W // grid_size)
return x
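space_to_depth folds each grid_size x grid_size spatial block into channels, turning an (N, C, H, W) tensor into (N, C * grid_size**2, H // grid_size, W // grid_size). A quick shape check (the initial view is inferred from the permute comment above):

import torch

N, C, H, W = 1, 3, 8, 8
grid_size = 4
x = torch.arange(N * C * H * W, dtype=torch.float32).view(N, C, H, W)
x = x.view(N, C, H // grid_size, grid_size, W // grid_size, grid_size)
x = x.permute(0, 3, 5, 1, 2, 4).contiguous()
x = x.view(N, C * grid_size ** 2, H // grid_size, W // grid_size)
print(x.shape)  # torch.Size([1, 48, 2, 2])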


4 changes: 2 additions & 2 deletions limap/line2d/SOLD2/train.py
@@ -373,7 +373,7 @@ def train_single_epoch(
results = metric_func.metric_results
average = average_meter.average()
# Get gpu memory usage in GB
gpu_mem_usage = torch.cuda.max_memory_allocated() / (1024**3)
gpu_mem_usage = torch.cuda.max_memory_allocated() / (1024 ** 3)
if compute_descriptors:
print(
"Epoch [%d / %d] Iter [%d / %d] loss=%.4f (%.4f), junc_loss=%.4f (%.4f), heatmap_loss=%.4f (%.4f), descriptor_loss=%.4f (%.4f), gpu_mem=%.4fGB"
@@ -734,7 +734,7 @@ def record_train_summaries(writer, global_step, scalars, images):

# GPU memory part
# Get gpu memory usage in GB
gpu_mem_usage = torch.cuda.max_memory_allocated() / (1024**3)
gpu_mem_usage = torch.cuda.max_memory_allocated() / (1024 ** 3)
writer.add_scalar("GPU/GPU_memory_usage", gpu_mem_usage, global_step)

# Loss part
4 changes: 2 additions & 2 deletions limap/line2d/line_utils/merge_lines.py
@@ -104,8 +104,8 @@ def merge_line_cluster(lines):
if b == 0:
u = np.array([1, 0]) if a >= c else np.array([0, 1])
else:
m = (c - a + np.sqrt((a - c) ** 2 + 4 * b**2)) / (2 * b)
u = np.array([1, m]) / np.sqrt(1 + m**2)
m = (c - a + np.sqrt((a - c) ** 2 + 4 * b ** 2)) / (2 * b)
u = np.array([1, m]) / np.sqrt(1 + m ** 2)

# Get the center of gravity of all endpoints
cross = np.mean(points, axis=0)
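The closed form in this hunk is the leading eigenvector of the symmetric 2x2 matrix [[a, b], [b, c]]: u = [1, m] / sqrt(1 + m**2) with m = (c - a + sqrt((a - c)**2 + 4*b**2)) / (2*b). A quick numeric check against numpy, with illustrative a, b, c:

import numpy as np

a, b, c = 2.0, 1.0, 1.0
m = (c - a + np.sqrt((a - c) ** 2 + 4 * b ** 2)) / (2 * b)
u = np.array([1, m]) / np.sqrt(1 + m ** 2)
_, eigvecs = np.linalg.eigh(np.array([[a, b], [b, c]]))
v = eigvecs[:, -1]  # eigenvector of the largest eigenvalue
assert np.isclose(abs(u @ v), 1.0)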
2 changes: 1 addition & 1 deletion limap/point2d/superglue/superglue.py
@@ -91,7 +91,7 @@ def attention(
query: torch.Tensor, key: torch.Tensor, value: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
dim = query.shape[1]
scores = torch.einsum("bdhn,bdhm->bhnm", query, key) / dim**0.5
scores = torch.einsum("bdhn,bdhm->bhnm", query, key) / dim ** 0.5
prob = torch.nn.functional.softmax(scores, dim=-1)
return torch.einsum("bhnm,bdhm->bdhn", prob, value), prob

3 changes: 1 addition & 2 deletions limap/pointsfm/database.py
@@ -38,7 +38,7 @@

IS_PYTHON3 = sys.version_info[0] >= 3

MAX_IMAGE_ID = 2**31 - 1
MAX_IMAGE_ID = 2 ** 31 - 1

CREATE_CAMERAS_TABLE = """CREATE TABLE IF NOT EXISTS cameras (
camera_id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
@@ -144,7 +144,6 @@ def blob_to_array(blob, dtype, shape=(-1,)):


class COLMAPDatabase(sqlite3.Connection):

@staticmethod
def connect(database_path):
return sqlite3.connect(database_path, factory=COLMAPDatabase)
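MAX_IMAGE_ID = 2 ** 31 - 1 follows COLMAP's convention of packing two image ids into a single integer pair id; the spacing change here is again only black style. For context, a sketch of the packing as it appears in COLMAP's database.py (these helpers are not part of this diff):

MAX_IMAGE_ID = 2 ** 31 - 1

def image_ids_to_pair_id(image_id1, image_id2):
    # Order-independent key for an image pair.
    if image_id1 > image_id2:
        image_id1, image_id2 = image_id2, image_id1
    return image_id1 * MAX_IMAGE_ID + image_id2

def pair_id_to_image_ids(pair_id):
    image_id2 = pair_id % MAX_IMAGE_ID
    image_id1 = (pair_id - image_id2) // MAX_IMAGE_ID
    return image_id1, image_id2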