-
Notifications
You must be signed in to change notification settings - Fork 337
/
feature_keynet_affnet_hardnet.py
114 lines (98 loc) · 4.67 KB
/
feature_keynet_affnet_hardnet.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
"""
* This file is part of PYSLAM
*
* Copyright (C) 2016-present Luigi Freda <luigi dot freda at gmail dot com>
*
* PYSLAM is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* PYSLAM is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with PYSLAM. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import cv2
import numpy as np
from utils_sys import Printer,getchar
import kornia as K
import kornia.feature as KF
import numpy as np
import torch
from kornia_moons.feature import *
from kornia_moons.viz import *
import matplotlib.pyplot as plt
# module-level verbosity flag (camelCase 'k' prefix follows the pySLAM constant convention)
kVerbose = True
# interface for pySLAM
class KeyNetAffNetHardNetFeature2D:
    """KeyNet keypoint detector + AffNet affine-shape estimator + HardNet descriptor (kornia).

    Wraps kornia's `KF.KeyNetAffNetHardNet` behind a cv2.Feature2D-like interface
    (detect / compute / detectAndCompute) so it can be plugged into pySLAM.
    Input images are expected as numpy uint8 arrays (HxW grayscale or HxWxC color).
    """
    def __init__(self, num_features=2000, device=K.utils.get_cuda_or_mps_device_if_available()):
        """
        Args:
            num_features: maximum number of keypoints to detect.
            device: torch device to run on (default: CUDA/MPS if available, else CPU).
        """
        print('Using KeyNetAffNetHardNetFeature2D')
        self.device = device
        self.num_features = num_features
        # second arg True => upright features (no orientation estimation)
        self.feature = KF.KeyNetAffNetHardNet(num_features, True).eval().to(self.device)

    def setMaxFeatures(self, num_features): # use the cv2 method name for extractors (see https://docs.opencv.org/4.x/db/d95/classcv_1_1ORB.html#aca471cb82c03b14d3e824e4dcccf90b7)
        """Set the maximum number of features; best-effort update of the inner detector."""
        self.num_features = num_features
        try:
            self.feature.detector.num_features = num_features
        except AttributeError:
            # kornia internals changed or detector not exposed; keep going with the old limit
            Printer.red('[KeyNetAffNetHardNetFeature2D] Error setting num_features')

    def _to_tensor(self, img):
        """Convert a numpy uint8 image (HxW or HxWxC) to a normalized 1x1xHxW float tensor."""
        if img.ndim > 2:
            img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        return K.image_to_tensor(img, False).to(self.device).float() / 255.

    def convert_to_keypoints_array(self, lafs):
        """Return an Nx2 numpy array with the centers of the given LAFs (shape 1xNx2x3)."""
        # squeeze only the batch dim so a single keypoint still yields shape (1, 2)
        mkpts = KF.get_laf_center(lafs).squeeze(0).detach().cpu().numpy()
        return mkpts

    def convert_to_keypoints(self, lafs, size=1):
        """Convert LAF centers to a list of cv2.KeyPoint (subpixel coordinates preserved)."""
        mkpts = self.convert_to_keypoints_array(lafs)
        # convert matrix [Nx2] of pts into list of keypoints
        kps = [ cv2.KeyPoint(float(p[0]), float(p[1]), size=size, response=1) for p in mkpts ]
        return kps

    def draw_matches(self, img1, img2, lafs1, lafs2, idxs, inliers):
        """Debug visualization of LAF matches between two images via kornia_moons."""
        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)
        draw_LAF_matches(lafs1.cpu(), lafs2.cpu(), idxs.cpu(), K.tensor_to_image(img1.cpu()), K.tensor_to_image(img2.cpu()), inliers,
                        draw_dict={'inlier_color': (0.2, 1, 0.2), 'tentative_color': (1, 1, 0.2, 0.3), 'feature_color': None, 'vertical': False},
                        ax=ax)

    # extract keypoints
    def detect(self, img, mask=None): #mask is fake: it is not considered by the c++ implementation
        """Detect keypoints only. Accepts a numpy image (converted like detectAndCompute) or a ready tensor."""
        with torch.inference_mode():
            # numpy images get the same grayscale+normalize preprocessing as detectAndCompute;
            # tensors are assumed already prepared by the caller
            timg = self._to_tensor(img) if isinstance(img, np.ndarray) else img
            lafs, resps, descs = self.feature(timg)
            kps = self.convert_to_keypoints(lafs)
        return kps

    def compute(self, img, kps, mask=None):
        """Fallback: recompute everything (descriptors cannot be computed for externally-given kps)."""
        Printer.orange('WARNING: you are supposed to call detectAndCompute() for KeyNetAffNetHardNetFeature2D instead of compute()')
        Printer.orange('WARNING: KeyNetAffNetHardNetFeature2D is recomputing both kps and des on input frame', img.shape)
        return self.detectAndCompute(img)

    # compute both keypoints and descriptors
    def detectAndCompute(self, img, mask=None): #mask is fake: it is not considered by the c++ implementation
        """Detect keypoints and compute descriptors.

        Args:
            img: numpy uint8 image, HxW or HxWxC (color is converted RGB->gray).
            mask: ignored (kept for cv2.Feature2D interface compatibility).
        Returns:
            (kps, des): list of cv2.KeyPoint and an NxD numpy descriptor array.
        """
        with torch.inference_mode():
            timg = self._to_tensor(img)
            lafs, resps, tdes = self.feature(timg)
            kps = self.convert_to_keypoints(lafs)
            # drop the batch dim: 1xNxD -> NxD
            des = np.squeeze(tdes.cpu().numpy(), axis=0)
            #print(f'des shape: {des.shape}, des type: {des.dtype}')
        return kps, des

    # compute both keypoints and descriptors
    def detectAndComputeWithTensors(self, img, mask=None): #mask is fake: it is not considered by the c++ implementation
        """Like detectAndCompute but also returns the raw torch tensors.

        Returns:
            (kps, lafs, tresps, tdes): cv2 keypoints plus LAFs, responses and
            descriptors as torch tensors (the 't' prefix marks tensor outputs).
        """
        with torch.inference_mode():
            timg = self._to_tensor(img)
            lafs, tresps, tdes = self.feature(timg)
            kps = self.convert_to_keypoints(lafs)
        return kps, lafs, tresps, tdes