testing matchFeatures from artoolkitx
kalwalt committed May 31, 2024
1 parent 6dad763 commit 20623d0
Showing 8 changed files with 153 additions and 9 deletions.
4 changes: 4 additions & 0 deletions WebARKit/WebARKitManager.cpp
@@ -101,6 +101,10 @@ cv::Mat WebARKitManager::getPoseMatrix() {
    return m_tracker->getPoseMatrix();
}

float* WebARKitManager::getPoseMatrix2() {
    return m_tracker->getPoseMatrix2();
}

cv::Mat WebARKitManager::getGLViewMatrix() {
    return m_tracker->getGLViewMatrix();
}
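For context, getPoseMatrix2() returns the tracked pose as a raw pointer to 12 floats: the row-major 3x4 [R|t] matrix stored in WebARKitPatternTrackingInfo::transMat (see the pattern changes below). A consumption sketch (the manager variable is hypothetical):

    float* p = manager.getPoseMatrix2();
    // Row-major 3x4 layout: p[3], p[7], p[11] are the translation components.
    float r00 = p[0], tx = p[3], ty = p[7], tz = p[11];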
30 changes: 25 additions & 5 deletions WebARKit/WebARKitPattern.cpp
@@ -7,18 +7,32 @@ WebARKitPatternTrackingInfo::WebARKitPatternTrackingInfo() {
    m_scale = 1.0f;
}

void WebARKitPatternTrackingInfo::cameraPoseFromPoints(cv::Mat& pose, const std::vector<cv::Point3f>& objPts,
                                                       const std::vector<cv::Point2f>& imgPts, const cv::Matx33f& caMatrix,
                                                       const cv::Mat& distCoeffs) {
    cv::Mat rvec = cv::Mat::zeros(3, 1, CV_64FC1); // output rotation vector
    cv::Mat tvec = cv::Mat::zeros(3, 1, CV_64FC1); // output translation vector

    cv::solvePnPRansac(objPts, imgPts, caMatrix, distCoeffs, rvec, tvec);

    // Assemble the 3x4 [R|t] pose matrix from the rotation and translation vectors.
    cv::Mat rMat;
    cv::Rodrigues(rvec, rMat);
    cv::hconcat(rMat, tvec, pose);
}
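A minimal usage sketch of cameraPoseFromPoints (the correspondences and intrinsics below are hypothetical placeholders; cv::solvePnPRansac needs at least four 3D-2D correspondences):

    WebARKitPatternTrackingInfo trackingInfo;
    std::vector<cv::Point3f> objPts = {{0, 0, 0}, {1, 0, 0}, {1, 1, 0}, {0, 1, 0}};     // planar target corners
    std::vector<cv::Point2f> imgPts = {{320, 240}, {420, 242}, {418, 338}, {322, 336}}; // matched pixel locations
    cv::Matx33f K(800, 0, 320, 0, 800, 240, 0, 0, 1);    // assumed pinhole intrinsics
    cv::Mat distCoeffs = cv::Mat::zeros(4, 1, CV_64FC1); // assume no lens distortion
    cv::Mat pose; // receives the 3x4 [R|t] matrix (CV_64FC1, since rvec/tvec are double)
    trackingInfo.cameraPoseFromPoints(pose, objPts, imgPts, K, distCoeffs);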

void WebARKitPatternTrackingInfo::computePose(std::vector<cv::Point3f>& treeDPoints,
                                              std::vector<cv::Point2f>& imgPoints, const cv::Matx33f& caMatrix,
                                              const cv::Mat& distCoeffs) {
    // cv::Mat rvec = cv::Mat::zeros(3, 1, CV_64FC1); // output rotation vector
    // cv::Mat tvec = cv::Mat::zeros(3, 1, CV_64FC1); // output translation vector
    cv::Mat rvec, tvec;

    cv::solvePnPRansac(treeDPoints, imgPoints, caMatrix, distCoeffs, rvec, tvec);

    cv::Mat rMat;
    cv::Rodrigues(rvec, rMat);
    // cv::hconcat(rMat, tvec, pose3d);

    for (unsigned int row = 0; row < 3; ++row) {
        for (unsigned int col = 0; col < 3; ++col) {
@@ -31,10 +45,16 @@ void WebARKitPatternTrackingInfo::computePose(std::vector<cv::Point3f>& treeDPoints,
    invertPose();
}

void WebARKitPatternTrackingInfo::getTrackablePose(cv::Mat& pose) {
    // float transMat[3][4];
    cv::Mat poseOut;
    pose.convertTo(poseOut, CV_32FC1);
    // std::cout << "poseOut: " << poseOut << std::endl;
    memcpy(transMat, poseOut.ptr<float>(0), 3 * 4 * sizeof(float));
}

void WebARKitPatternTrackingInfo::computeGLviewMatrix() { cv::transpose(pose3d, glViewMatrix); }
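Since pose.convertTo(poseOut, CV_32FC1) yields a fresh, continuous, row-major 3x4 float matrix, the memcpy in getTrackablePose is equivalent to this element-wise copy (a sketch for clarity):

    for (int r = 0; r < 3; ++r)
        for (int c = 0; c < 4; ++c)
            transMat[r][c] = poseOut.at<float>(r, c); // same row-major order the memcpy assumes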

void WebARKitPatternTrackingInfo::invertPose() {

/*cv::Mat invertPose(3, 4, CV_64FC1);
@@ -6,6 +6,7 @@ extern const int DEFAULT_MAX_FEATURES = 8000;
extern const int TEBLID_MAX_FEATURES = 10000;
extern const int N = 10;
extern const int MIN_NUM_MATCHES = 8;
extern const int minRequiredDetectedFeatures = 50; ///< Minimum number of detected features required to consider a target matched.
extern const int markerTemplateWidth = 15; ///< Width in pixels of image patches used in template matching.
extern const int maxLevel = 3; ///< Maximum number of levels in optical flow image pyramid.
extern const cv::Size winSize(31, 31);
117 changes: 113 additions & 4 deletions WebARKit/WebARKitTrackers/WebARKitOpticalTracking/WebARKitTracker.cpp
@@ -139,6 +139,8 @@ class WebARKitTracker::WebARKitTrackerImpl {

    cv::Mat getPoseMatrix() { return _patternTrackingInfo.pose3d; }

    float* getPoseMatrix2() { return (float*)_patternTrackingInfo.transMat; }

    cv::Mat getGLViewMatrix() { return _patternTrackingInfo.glViewMatrix; }

    std::array<double, 16> getCameraProjectionMatrix() { return m_cameraProjectionMatrix; }
@@ -346,7 +348,8 @@ class WebARKitTracker::WebARKitTrackerImpl {
            // return false;
        };
        if (!_isDetected) {
            MatchFeatures(frameKeyPts, frameDescr);
            /*std::vector<cv::Point2f> refPts;
            getMatches(frameDescr, frameKeyPts, refPts, framePts);
            numMatches = framePts.size();
@@ -366,7 +369,7 @@ class WebARKitTracker::WebARKitTrackerImpl {
                _trackSelection.SetHomography(homographyInfo.homography);
                perspectiveTransform(_bBox, _bBoxTransformed, homographyInfo.homography);
            }
            }*/
        }
        int i = 0;
        if (_isDetected) {
@@ -396,7 +399,12 @@ class WebARKitTracker::WebARKitTrackerImpl {

        if (_isDetected || _isTracking) {
            // std::vector<cv::Point2f> warpedCorners = _trackSelection.GetTrackedFeaturesWarped();
            cv::Mat _pose;
            std::vector<cv::Point2f> imgPoints = _trackSelection.GetTrackedFeaturesWarped();
            std::vector<cv::Point3f> objPoints = _trackSelection.GetTrackedFeatures3d();
            _patternTrackingInfo.cameraPoseFromPoints(_pose, objPoints, imgPoints, m_camMatrix, m_distortionCoeff);
            // _patternTrackingInfo.computePose(_pattern.points3d, warpedCorners, m_camMatrix, m_distortionCoeff);
            _patternTrackingInfo.getTrackablePose(_pose);
        }

        WEBARKIT_LOG("Marker detected : %s\n", _isDetected ? "true" : "false");
@@ -435,12 +443,105 @@ class WebARKitTracker::WebARKitTrackerImpl {
        output[16] = warped[3].y;
    }

    void fill_output2(cv::Mat& H) {
        output[0] = H.at<double>(0, 0);
        output[1] = H.at<double>(0, 1);
        output[2] = H.at<double>(0, 2);
        output[3] = H.at<double>(1, 0);
        output[4] = H.at<double>(1, 1);
        output[5] = H.at<double>(1, 2);
        output[6] = H.at<double>(2, 0);
        output[7] = H.at<double>(2, 1);
        output[8] = H.at<double>(2, 2);

        output[9] = _bBoxTransformed[0].x;
        output[10] = _bBoxTransformed[0].y;
        output[11] = _bBoxTransformed[1].x;
        output[12] = _bBoxTransformed[1].y;
        output[13] = _bBoxTransformed[2].x;
        output[14] = _bBoxTransformed[2].y;
        output[15] = _bBoxTransformed[3].x;
        output[16] = _bBoxTransformed[3].y;
    }
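For reference, the 17-slot layout that fill_output2 writes (read directly from the code above, and matching the vector size set by clear_output below):

    // output[0..8]  : the 3x3 homography H, row-major
    // output[9..16] : the four transformed bounding-box corners as (x, y) pairs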

    void clear_output() { output = std::vector<double>(17, 0.0); }

    void buildImagePyramid(cv::Mat& frame) { cv::buildOpticalFlowPyramid(frame, _pyramid, winSize, maxLevel); }

    void swapImagePyramid() { _pyramid.swap(_prevPyramid); }
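The paired pyramids exist so pyramidal Lucas-Kanade tracking can run between the previous and current frames. A sketch of how they would feed cv::calcOpticalFlowPyrLK, which accepts precomputed pyramids (prevPts is a hypothetical, non-empty set of features from the previous frame):

    std::vector<cv::Point2f> prevPts, nextPts;
    std::vector<uchar> status; // 1 where a feature was tracked successfully
    std::vector<float> err;
    cv::calcOpticalFlowPyrLK(_prevPyramid, _pyramid, prevPts, nextPts, status, err, winSize, maxLevel);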

    void MatchFeatures(const std::vector<cv::KeyPoint>& newFrameFeatures, cv::Mat newFrameDescriptors) {
        int maxMatches = 0;
        int bestMatchIndex = -1;
        std::vector<cv::KeyPoint> finalMatched1, finalMatched2;
        // for (int i = 0; i < _trackables.size(); i++) {
        if (!_isDetected) {
            std::vector<std::vector<cv::DMatch>> matches = getMatches(newFrameDescriptors);
            numMatches = matches.size();
            WEBARKIT_LOG("Num Matches: %d\n", numMatches);

            if (matches.size() > minRequiredDetectedFeatures) {
                std::vector<cv::KeyPoint> matched1, matched2;
                std::vector<uchar> status;
                int totalGoodMatches = 0;
                for (unsigned int j = 0; j < matches.size(); j++) {
                    // Ratio Test for outlier removal, removes ambiguous matches.
                    if (matches[j][0].distance < _nn_match_ratio * matches[j][1].distance) {
                        matched1.push_back(newFrameFeatures[matches[j][0].queryIdx]);
                        matched2.push_back(refKeyPts[matches[j][0].trainIdx]);
                        status.push_back(1);
                        totalGoodMatches++;
                    } else {
                        status.push_back(0);
                    }
                }
                // Measure goodness of match by most number of matching features.
                // This allows for maximum of a single marker to match each time.
                // TODO: Would a better metric be percentage of marker features matching?
                if (totalGoodMatches > maxMatches) {
                    finalMatched1 = matched1;
                    finalMatched2 = matched2;
                    maxMatches = totalGoodMatches;
                    // bestMatchIndex = i;
                }
            }
        }
        // } end for cycle

        if (maxMatches > 0) {
            /*for (int i = 0; i < finalMatched1.size(); i++) {
                finalMatched1[i].pt.x *= _featureDetectScaleFactor[0];
                finalMatched1[i].pt.y *= _featureDetectScaleFactor[1];
            }*/

            homography::WebARKitHomographyInfo homoInfo = getHomographyInliers(Points(finalMatched2), Points(finalMatched1));
            if (homoInfo.validHomography) {
                // std::cout << "New marker detected" << std::endl;
                //_trackables[bestMatchIndex]._isDetected = true;
                _isDetected = true;
                // Since we've just detected the marker, make sure next invocation of
                // GetInitialFeatures() for this marker makes a new selection.
                //_trackables[bestMatchIndex]._trackSelection.ResetSelection();
                _trackSelection.ResetSelection();
                //_trackables[bestMatchIndex]._trackSelection.SetHomography(homoInfo.homography);
                _trackSelection.SetHomography(homoInfo.homography);

                // Use the homography to form the initial estimate of the bounding box.
                // This will be refined by the optical flow pass.
                // perspectiveTransform(_trackables[bestMatchIndex]._bBox, _trackables[bestMatchIndex]._bBoxTransformed, homoInfo.homography);
                perspectiveTransform(_bBox, _bBoxTransformed, homoInfo.homography);
                /*if (_trackVizActive) {
                    for (int i = 0; i < 4; i++) {
                        _trackViz.bounds[i][0] = _trackables[bestMatchIndex]._bBoxTransformed[i].x;
                        _trackViz.bounds[i][1] = _trackables[bestMatchIndex]._bBoxTransformed[i].y;
                    }
                }*/
                //_currentlyTrackedMarkers++;
            }
        }
    }
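The detection gate above is Lowe's ratio test: a best match is kept only when it is clearly better than the runner-up. A self-contained sketch of the same criterion (the 0.7f default is an assumption, a typical value in the artoolkitx code this function is adapted from):

    #include <opencv2/core.hpp>

    std::vector<cv::DMatch> ratioTest(const std::vector<std::vector<cv::DMatch>>& knn,
                                      float nn_match_ratio = 0.7f) {
        std::vector<cv::DMatch> good;
        for (const auto& m : knn) {
            // Keep the best match only when it clearly beats the second-best.
            if (m.size() >= 2 && m[0].distance < nn_match_ratio * m[1].distance)
                good.push_back(m[0]);
        }
        return good;
    }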

    bool runOpticalFlow(int trackableId, const std::vector<cv::Point2f>& trackablePoints,
                        const std::vector<cv::Point2f>& trackablePointsWarped) {
        std::vector<cv::Point2f> flowResultPoints, trackablePointsWarpedResult;
@@ -493,8 +594,8 @@ class WebARKitTracker::WebARKitTrackerImpl {
            m_H = homoInfo.homography;
            this->_valid = homoInfo.validHomography;
            // Update the bounding box.
            perspectiveTransform(_bBox, _bBoxTransformed, homoInfo.homography);
            fill_output2(m_H);
            /*if (_trackVizActive) {
                for (int i = 0; i < 4; i++) {
                    _trackViz.bounds[i][0] = _trackables[trackableId]._bBoxTransformed[i].x;
@@ -526,6 +627,12 @@ class WebARKitTracker::WebARKitTrackerImpl {
        }
    }

    std::vector<std::vector<cv::DMatch>> getMatches(const cv::Mat& frameDescr) {
        std::vector<std::vector<cv::DMatch>> knnMatches;
        _matcher->knnMatch(frameDescr, refDescr, knnMatches, 2);
        return knnMatches;
    }
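getMatches assumes _matcher was created for the descriptor type in use; a plausible setup sketch (assumption: binary descriptors such as ORB/TEBLID, hence Hamming distance), with k = 2 neighbours per query descriptor as the ratio test requires:

    cv::Ptr<cv::DescriptorMatcher> matcher = cv::BFMatcher::create(cv::NORM_HAMMING);
    std::vector<std::vector<cv::DMatch>> knnMatches;
    matcher->knnMatch(frameDescr, refDescr, knnMatches, 2); // two nearest reference matches per feature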

    cv::Mat createTrackerFeatureMask(cv::Mat& frame) {
        cv::Mat featureMask;
        if (featureMask.empty()) {
@@ -666,6 +773,8 @@ std::vector<double> WebARKitTracker::getOutputData() { return _trackerImpl->getOutputData(); }

cv::Mat WebARKitTracker::getPoseMatrix() { return _trackerImpl->getPoseMatrix(); }

float* WebARKitTracker::getPoseMatrix2() { return _trackerImpl->getPoseMatrix2(); }

cv::Mat WebARKitTracker::getGLViewMatrix() { return _trackerImpl->getGLViewMatrix(); }

std::array<double, 16> WebARKitTracker::getCameraProjectionMatrix() {
@@ -14,6 +14,7 @@ extern const int DEFAULT_MAX_FEATURES;
extern const int TEBLID_MAX_FEATURES;
extern const int N;
extern const int MIN_NUM_MATCHES;
extern const int minRequiredDetectedFeatures; ///< Minimum number of detected features required to consider a target matched.
extern const int markerTemplateWidth; ///< Width in pixels of image patches used in template matching.
extern const int maxLevel; ///< Maximum number of levels in optical flow image pyramid.
extern const cv::Size winSize;
@@ -32,6 +32,8 @@ class WebARKitTracker {
    std::vector<double> getOutputData();

    cv::Mat getPoseMatrix();

    float* getPoseMatrix2();

    cv::Mat getGLViewMatrix();

2 changes: 2 additions & 0 deletions WebARKit/include/WebARKitManager.h
@@ -104,6 +104,8 @@ class WebARKitManager {

    cv::Mat getPoseMatrix();

    float* getPoseMatrix2();

    cv::Mat getGLViewMatrix();

    std::array<double, 16> getTransformationMatrix();
5 changes: 5 additions & 0 deletions WebARKit/include/WebARKitPattern.h
@@ -25,18 +25,23 @@ class WebARKitPatternTrackingInfo {
    cv::Mat homography;
    std::vector<cv::Point2f> points2d;
    cv::Mat pose3d;
    float transMat[3][4];
    cv::Mat glViewMatrix;

    void setScale(const float scale) { m_scale = scale; }

    float getScale() { return m_scale; }

    void cameraPoseFromPoints(cv::Mat& pose, const std::vector<cv::Point3f>& objPts, const std::vector<cv::Point2f>& imgPts,
                              const cv::Matx33f& caMatrix, const cv::Mat& distCoeffs);

    /**
     * Compute pattern pose using the PnP algorithm.
     */
    void computePose(std::vector<cv::Point3f>& treeDPoints, std::vector<cv::Point2f>& imgPoints, const cv::Matx33f& caMatrix,
                     const cv::Mat& distCoeffs);

    void getTrackablePose(cv::Mat& pose);

    void computeGLviewMatrix();

  private:
