// gtest_trifocaltensor.cpp
// Unit tests for the vision_utils trifocal tensor pipeline.
// Original author: Angel Santamaria-Navarro
#include "utils_gtest.h"

// standard library
#include <algorithm>

// vision utils includes
#include "../vision_utils.h"
#include "../detectors.h"
#include "../descriptors.h"
#include "../matchers.h"
#include "../common_class/trifocaltensor.h"
cv::Mat image1, image2, image3;
std::string vu_root = _VU_ROOT_DIR;
std::string filename = vu_root + "/src/test/data/img1.jpg";
std::string filename2 = vu_root + "/src/test/data/img2.jpg";
std::string filename3 = vu_root + "/src/test/data/img3.jpg";
/**
 * Slide a 50x50 ROI across the image at three fixed vertical bands, detect
 * keypoints inside each ROI and record the best one whenever it matches a
 * reference point (within 2 px) from _points_to_check.
 *
 * @param _image            input image to scan.
 * @param _det_ptr          detector; must actually be a DetectorORB.
 * @param _det_name         detector name (currently unused, kept for interface compatibility).
 * @param _points_to_check  reference points the detections are matched against.
 * @return map from reference-point index to the detected point; empty if the
 *         detector is not an ORB detector. Returns early (partial map) on the
 *         first detection that matches no reference point.
 */
std::map<int, cv::Point2f> TestDetectorROI(const cv::Mat& _image, vision_utils::DetectorBasePtr _det_ptr, const std::string& _det_name, const PointVector& _points_to_check)
{
  std::map<int, cv::Point2f> points_found;

  // FIX: the original used static_pointer_cast, which never yields nullptr
  // for a non-null input, so the type check below could never fail. Use
  // dynamic_pointer_cast to actually verify the detector is a DetectorORB.
  if (std::dynamic_pointer_cast<vision_utils::DetectorORB>(_det_ptr) == nullptr)
    return points_found;

  const int img_width = _image.cols; // FIX: signed, avoids signed/unsigned comparison below
  std::vector<cv::KeyPoint> target_keypoints;
  const int roi_width  = 50;
  const int roi_height = 50; // typo fixed: was roi_heigth

  Eigen::VectorXi roi_center_y(3);
  roi_center_y << 102, 250, 476;

  for (int ii = 0; ii < roi_center_y.size(); ii++)
  {
    // FIX: center vertically using the ROI *height* (the original used
    // roi_width; both are 50, so behavior is unchanged, but this is correct).
    int roi_y = roi_center_y(ii) - roi_height / 2;
    for (int roi_x = 0; roi_x < img_width; roi_x += 5)
    {
      cv::Rect roi(roi_x, roi_y, roi_width, roi_height);
      cv::Rect roi_inflated = roi;

      // Detect features in ROI
      target_keypoints = _det_ptr->detect(_image, roi_inflated);

      // Keep only the single best keypoint in the ROI
      if (!target_keypoints.empty())
      {
        vision_utils::retainBest(target_keypoints, 1);
        cv::Point2f pnt = target_keypoints[0].pt;
        int j = vision_utils::existsInPVec(pnt, _points_to_check, 2.0);
        if (j >= 0)
        {
          // Keypoint corresponds to reference point j: record it.
          points_found[j] = pnt;
          std::cout << points_found[j] << std::endl;
        }
        else
          return points_found; // abort on first unexpected detection (original behavior)
      }
#ifdef _VU_DEBUG
      cv::Mat image_graphics = _image.clone();
      cv::drawKeypoints(image_graphics,target_keypoints,image_graphics);
      cv::rectangle(image_graphics, roi, cv::Scalar(255.0, 0.0, 255.0), 1, 8, 0);
      cv::rectangle(image_graphics, roi_inflated, cv::Scalar(255.0, 255.0, 0.0), 1, 8, 0);
      cv::imshow("test",image_graphics);
      cv::waitKey(1);
#endif
    }
  }
  return points_found;
}
// Sanity check: all three grayscale test images must load from disk.
TEST(TrifocalTensor, LoadImageFromFile)
{
  const std::string* names[3] = {&filename, &filename2, &filename3};
  cv::Mat* images[3] = {&image1, &image2, &image3};
  for (int k = 0; k < 3; ++k)
  {
    *images[k] = cv::imread(*names[k], CV_LOAD_IMAGE_GRAYSCALE);
    ASSERT_TRUE(images[k]->data) << "Failed to load image " << *names[k] << std::endl;
  }
}
/**
 * End-to-end feature pipeline on the three test images: detect ORB keypoints,
 * compute ORB descriptors, brute-force match image pairs 1-2 and 2-3, drop
 * weak matches (descriptor distance > 200), refine each pair with a homography
 * RANSAC, and finally keep only keypoints tracked through all three images.
 */
TEST(TrifocalTensor, DetectorDescriptorMatcher)
{
  image1 = cv::imread(filename, CV_LOAD_IMAGE_GRAYSCALE);
  ASSERT_TRUE(image1.data)<< "failed to load image " << filename << std::endl;
  image2 = cv::imread(filename2, CV_LOAD_IMAGE_GRAYSCALE);
  ASSERT_TRUE(image2.data)<< "failed to load image " << filename2 << std::endl;
  image3 = cv::imread(filename3, CV_LOAD_IMAGE_GRAYSCALE);
  ASSERT_TRUE(image3.data)<< "failed to load image " << filename3 << std::endl;

  // Define detector
  vision_utils::DetectorParamsORBPtr params = std::make_shared<vision_utils::DetectorParamsORB>();
  std::string det_name = "ORB";
  vision_utils::DetectorBasePtr det_b_ptr = vision_utils::setupDetector(det_name, det_name + " detector", params);
  vision_utils::DetectorORBPtr det_ptr = std::static_pointer_cast<vision_utils::DetectorORB>(det_b_ptr);
  ASSERT_TRUE(det_ptr!=NULL);

  // Define descriptor
  vision_utils::DescriptorParamsORBPtr des_params = std::make_shared<vision_utils::DescriptorParamsORB>();
  std::string des_name = "ORB";
  vision_utils::DescriptorBasePtr des_b_ptr = vision_utils::setupDescriptor(des_name, des_name + " detector", des_params);
  vision_utils::DescriptorORBPtr des_ptr = std::static_pointer_cast<vision_utils::DescriptorORB>(des_b_ptr);
  ASSERT_TRUE(des_ptr!=NULL);

  // Keypoints and descriptors per image; each keypoint must get one descriptor row.
  // FIX: ASSERT_EQ with an explicit cast instead of ASSERT_TRUE(size()==rows),
  // avoiding the signed/unsigned comparison and reporting both values on failure.
  KeyPointVector kpts1 = det_ptr->detect(image1);
  cv::Mat desc1 = des_ptr->getDescriptor(image1,kpts1);
  ASSERT_EQ(kpts1.size(), (size_t)desc1.rows);

  KeyPointVector kpts2 = det_ptr->detect(image2);
  cv::Mat desc2 = des_ptr->getDescriptor(image2,kpts2);
  ASSERT_EQ(kpts2.size(), (size_t)desc2.rows);

  KeyPointVector kpts3 = det_ptr->detect(image3);
  cv::Mat desc3 = des_ptr->getDescriptor(image3,kpts3);
  ASSERT_EQ(kpts3.size(), (size_t)desc3.rows);

  // Define matcher (brute force, plain MATCH type)
  std::string matcher_name = "BRUTEFORCE";
  vision_utils::MatcherParamsBasePtr params_ptr = std::make_shared<vision_utils::MatcherParamsBase>(matcher_name);
  vision_utils::MatcherBasePtr mat_ptr;
  params_ptr->match_type = vision_utils::MATCH;
  mat_ptr = vision_utils::setupMatcher(matcher_name, matcher_name, params_ptr);
  params_ptr = mat_ptr->getParams();
  ASSERT_EQ(params_ptr->match_type,1);

  // match 1-2
  DMatchVector matches12;
  mat_ptr->match(desc1,desc2,matches12);
  ASSERT_EQ(matches12.size(),193);

  // Filter out weak matches (descriptor distance > 200).
  // FIX: the original erased matches12.begin()+ii-1 while iterating, which
  // removes the element *before* the one tested, skips entries, and is UB
  // when ii == 0. Use the erase-remove idiom instead. No match in this data
  // exceeds 200 (the size assertion below is unchanged by the filter).
  matches12.erase(std::remove_if(matches12.begin(), matches12.end(),
                                 [](const cv::DMatch& m) { return m.distance > 200; }),
                  matches12.end());
  ASSERT_EQ(matches12.size(),193);

  // RANSAC homography 1-2 to reject outlier matches
  PointVector trainMatches;
  PointVector queryMatches;
  for (size_t ii = 0; ii < matches12.size(); ++ii)
  {
    trainMatches.push_back( kpts2[matches12[ii].trainIdx].pt);
    queryMatches.push_back( kpts1[matches12[ii].queryIdx].pt);
  }
  std::vector<uchar> status;
  cv::Mat h = cv::findHomography(trainMatches,queryMatches,CV_RANSAC,1.0, status);
  DMatchVector best_matches12;
  KeyPointVector kpts_matched12_img1;
  KeyPointVector kpts_matched12_img2;
  for(size_t ii = 0; ii < queryMatches.size(); ++ii)
  {
    if(status.at(ii) != 0) // RANSAC inlier
    {
      best_matches12.push_back(matches12[ii]);
      kpts_matched12_img1.push_back(cv::KeyPoint(queryMatches.at(ii),CV_32F));
      kpts_matched12_img2.push_back(cv::KeyPoint(trainMatches.at(ii),CV_32F));
    }
  }

  // match 2-3
  DMatchVector matches23;
  mat_ptr->match(desc2,desc3,matches23);
  ASSERT_EQ(matches23.size(),190);

  // Filter weak matches (same FIX as for matches12 above).
  matches23.erase(std::remove_if(matches23.begin(), matches23.end(),
                                 [](const cv::DMatch& m) { return m.distance > 200; }),
                  matches23.end());
  ASSERT_EQ(matches23.size(),190);

  // RANSAC homography 2-3
  trainMatches.clear();
  queryMatches.clear();
  for (size_t ii = 0; ii < matches23.size(); ++ii)
  {
    trainMatches.push_back( kpts3[matches23[ii].trainIdx].pt);
    queryMatches.push_back( kpts2[matches23[ii].queryIdx].pt);
  }
  status.clear();
  h = cv::findHomography(trainMatches,queryMatches,CV_RANSAC,1.0, status);
  DMatchVector best_matches23;
  KeyPointVector kpts_matched23_img2;
  KeyPointVector kpts_matched23_img3;
  for(size_t ii = 0; ii < queryMatches.size(); ++ii)
  {
    if(status.at(ii) != 0)
    {
      // FIX: was matches12[ii] — copy-paste bug; the parallel code in
      // ComputeTensorReal correctly uses matches23.
      best_matches23.push_back(matches23[ii]);
      kpts_matched23_img2.push_back(cv::KeyPoint(queryMatches.at(ii),CV_32F));
      kpts_matched23_img3.push_back(cv::KeyPoint(trainMatches.at(ii),CV_32F));
    }
  }
#ifdef _VU_DEBUG
  cv::Mat img_graphics = mat_ptr->drawMatches(image1, image2, image3,kpts_matched12_img1, kpts_matched12_img2, kpts_matched23_img2, kpts_matched23_img3);
  cv::imshow("test",img_graphics);
  cv::waitKey(5000);
#endif
  // Keep only the matches that exist across all 3 images
  KeyPointVector kpts_good_in1;
  KeyPointVector kpts_good_in2;
  KeyPointVector kpts_good_in3;
  vision_utils::getCommonMatches(best_matches12, best_matches23, kpts_matched12_img1, kpts_matched12_img2, kpts_matched23_img2, kpts_matched23_img3, kpts_good_in1, kpts_good_in2, kpts_good_in3);
  // ASSERT_EQ(kpts_good_in1.size(), 22);
#ifdef _VU_DEBUG
  cv::Mat img_graphics2 = mat_ptr->drawMatches(image1, image2, image3, kpts_good_in1, kpts_good_in2, kpts_good_in3);
  cv::imshow("test1",img_graphics2);
  cv::waitKey(5000);
#endif
}
// Compute the trifocal tensor from 9 exact (noise-free) synthetic point
// correspondences across three views, then verify that every point
// reprojects within the configured error bound.
TEST(TrifocalTensor, ComputeTensorSynthetic)
{
// Homogeneous pixel coordinates (x, y, 1) of the same 9 scene points as
// observed in each of the three views (row i of each list is the same point).
Eigen::MatrixXd list1(9,3);
Eigen::MatrixXd list2(9,3);
Eigen::MatrixXd list3(9,3);
list1.row(0) << 425.2371, 370.9823, 1.0;
list1.row(1) << 411.0910, 360.9023, 1.0;
list1.row(2) << 393.7641, 345.2446, 1.0;
list1.row(3) << 375.7297, 330.8695, 1.0;
list1.row(4) << 357.0081, 319.4906, 1.0;
list1.row(5) << 332.6585, 294.8769, 1.0;
list1.row(6) << 310.0062, 288.2585, 1.0;
list1.row(7) << 280.7571, 268.7615, 1.0;
list1.row(8) << 246.3107, 244.4621, 1.0;
list2.row(0) << 491.4733, 397.7847, 1.0;
list2.row(1) << 468.9708, 383.5625, 1.0;
list2.row(2) << 443.2443, 362.9503, 1.0;
list2.row(3) << 416.8286, 344.3626, 1.0;
list2.row(4) << 389.3720, 329.6092, 1.0;
list2.row(5) << 357.7888, 300.7258, 1.0;
list2.row(6) << 325.3252, 292.0294, 1.0;
list2.row(7) << 288.2056, 270.1776, 1.0;
list2.row(8) << 247.2391, 244.5418, 1.0;
list3.row(0) << 580.1551, 446.9013, 1.0;
list3.row(1) << 542.1363, 423.6365, 1.0;
list3.row(2) << 501.8491, 393.2496, 1.0;
list3.row(3) << 462.1800, 366.7074, 1.0;
list3.row(4) << 422.2158, 345.7112, 1.0;
list3.row(5) << 381.3637, 310.0847, 1.0;
list3.row(6) << 337.6389, 297.6254, 1.0;
list3.row(7) << 293.1407, 272.2456, 1.0;
list3.row(8) << 247.6911, 244.6778, 1.0;
// Tensor parameters
int max_iterations = 100;
double max_error_reprojection = 1.0; // max reprojection error for a point to count as correct
double percentage_of_correct_reprojected_points = 1.0; // noise-free data: require all points correct
vision_utils::TrifocalTensor tensor(max_iterations,max_error_reprojection,percentage_of_correct_reprojected_points);
bool tensor_ok = tensor.computeTensor(list1, list2, list3);
ASSERT_TRUE(tensor_ok);
// getErrorTrifocal returns the fraction of points whose reprojection error
// is within tolerance; with exact correspondences it must reach 100%.
double percentage_correct = tensor.getErrorTrifocal(list1, list2, list3);
ASSERT_TRUE(percentage_correct>=percentage_of_correct_reprojected_points);
}
// Compute the trifocal tensor from the same 9 synthetic correspondences,
// but with +/-1 px uniform noise added to the points used for estimation.
// The tensor is then evaluated against the original noise-free points.
TEST(TrifocalTensor, ComputeTensorSyntheticWNoise)
{
// Homogeneous pixel coordinates (x, y, 1) of the same 9 scene points as
// observed in each of the three views (row i of each list is the same point).
Eigen::MatrixXd list1(9,3);
Eigen::MatrixXd list2(9,3);
Eigen::MatrixXd list3(9,3);
list1.row(0) << 425.2371, 370.9823, 1.0;
list1.row(1) << 411.0910, 360.9023, 1.0;
list1.row(2) << 393.7641, 345.2446, 1.0;
list1.row(3) << 375.7297, 330.8695, 1.0;
list1.row(4) << 357.0081, 319.4906, 1.0;
list1.row(5) << 332.6585, 294.8769, 1.0;
list1.row(6) << 310.0062, 288.2585, 1.0;
list1.row(7) << 280.7571, 268.7615, 1.0;
list1.row(8) << 246.3107, 244.4621, 1.0;
list2.row(0) << 491.4733, 397.7847, 1.0;
list2.row(1) << 468.9708, 383.5625, 1.0;
list2.row(2) << 443.2443, 362.9503, 1.0;
list2.row(3) << 416.8286, 344.3626, 1.0;
list2.row(4) << 389.3720, 329.6092, 1.0;
list2.row(5) << 357.7888, 300.7258, 1.0;
list2.row(6) << 325.3252, 292.0294, 1.0;
list2.row(7) << 288.2056, 270.1776, 1.0;
list2.row(8) << 247.2391, 244.5418, 1.0;
list3.row(0) << 580.1551, 446.9013, 1.0;
list3.row(1) << 542.1363, 423.6365, 1.0;
list3.row(2) << 501.8491, 393.2496, 1.0;
list3.row(3) << 462.1800, 366.7074, 1.0;
list3.row(4) << 422.2158, 345.7112, 1.0;
list3.row(5) << 381.3637, 310.0847, 1.0;
list3.row(6) << 337.6389, 297.6254, 1.0;
list3.row(7) << 293.1407, 272.2456, 1.0;
list3.row(8) << 247.6911, 244.6778, 1.0;
// Seed the C RNG — presumably Eigen's Random() draws from rand(); this makes
// the noise different on every run (TODO confirm: a fixed seed would make the
// test deterministic).
std::srand((unsigned int) time(0));
// Uniform noise in [-1, 1] for the (x, y) columns only (w stays 1).
Eigen::MatrixXd Noise1 = Eigen::MatrixXd::Random(9,2);
Eigen::MatrixXd Noise2 = Eigen::MatrixXd::Random(9,2);
Eigen::MatrixXd Noise3 = Eigen::MatrixXd::Random(9,2);
// Evaluation (without noise): normalized copies of the clean points, taken
// BEFORE noise is added below.
Eigen::MatrixXd l1_eval(list1), l2_eval(list2), l3_eval(list3);
vision_utils::normalizePointsIsotrop(list1,l1_eval);
vision_utils::normalizePointsIsotrop(list2,l2_eval);
vision_utils::normalizePointsIsotrop(list3,l3_eval);
// Tensor computation (with noise): perturb the x,y columns, then normalize.
list1.block(0,0,9,2) = list1.block(0,0,9,2) + Noise1;
list2.block(0,0,9,2) = list2.block(0,0,9,2) + Noise2;
list3.block(0,0,9,2) = list3.block(0,0,9,2) + Noise3;
Eigen::MatrixXd l1_noise(list1), l2_noise(list2), l3_noise(list3);
vision_utils::normalizePointsIsotrop(list1,l1_noise);
vision_utils::normalizePointsIsotrop(list2,l2_noise);
vision_utils::normalizePointsIsotrop(list3,l3_noise);
// Tensor parameters
int max_iterations = 100;
double max_error_reprojection = 1.0; // max reprojection error (in normalized coordinates here)
double percentage_of_correct_reprojected_points = 1; // require all evaluation points correct
vision_utils::TrifocalTensor tensor(max_iterations,max_error_reprojection,percentage_of_correct_reprojected_points);
// Estimate from the noisy points, evaluate against the clean ones.
bool tensor_ok = tensor.computeTensor(l1_noise, l2_noise, l3_noise);
ASSERT_TRUE(tensor_ok);
double percentage_correct = tensor.getErrorTrifocal(l1_eval, l2_eval, l3_eval);
ASSERT_TRUE(percentage_correct>=percentage_of_correct_reprojected_points);
}
/**
 * Compute the trifocal tensor from real image correspondences: run the
 * detector/descriptor/matcher pipeline on the three test images, keep the
 * keypoints tracked across all three views, estimate the tensor, and check
 * that more than 90% of the points transfer from views 1-2 into view 3
 * within the reprojection tolerance.
 */
TEST(TrifocalTensor, ComputeTensorReal)
{
  image1 = cv::imread(filename, CV_LOAD_IMAGE_GRAYSCALE);
  ASSERT_TRUE(image1.data)<< "failed to load image " << filename << std::endl;
  image2 = cv::imread(filename2, CV_LOAD_IMAGE_GRAYSCALE);
  ASSERT_TRUE(image2.data)<< "failed to load image " << filename2 << std::endl;
  image3 = cv::imread(filename3, CV_LOAD_IMAGE_GRAYSCALE);
  ASSERT_TRUE(image3.data)<< "failed to load image " << filename3 << std::endl;

  // Define detector
  vision_utils::DetectorParamsORBPtr params = std::make_shared<vision_utils::DetectorParamsORB>();
  std::string det_name = "ORB";
  vision_utils::DetectorBasePtr det_b_ptr = vision_utils::setupDetector(det_name, det_name + " detector", params);
  vision_utils::DetectorORBPtr det_ptr = std::static_pointer_cast<vision_utils::DetectorORB>(det_b_ptr);
  ASSERT_TRUE(det_ptr!=NULL);

  // Define descriptor
  vision_utils::DescriptorParamsORBPtr des_params = std::make_shared<vision_utils::DescriptorParamsORB>();
  std::string des_name = "ORB";
  vision_utils::DescriptorBasePtr des_b_ptr = vision_utils::setupDescriptor(des_name, des_name + " detector", des_params);
  vision_utils::DescriptorORBPtr des_ptr = std::static_pointer_cast<vision_utils::DescriptorORB>(des_b_ptr);
  ASSERT_TRUE(des_ptr!=NULL);

  // Keypoints and descriptors per image; each keypoint must get one descriptor row.
  // FIX: ASSERT_EQ with an explicit cast instead of ASSERT_TRUE(size()==rows),
  // avoiding the signed/unsigned comparison and reporting both values on failure.
  KeyPointVector kpts1 = det_ptr->detect(image1);
  cv::Mat desc1 = des_ptr->getDescriptor(image1,kpts1);
  ASSERT_EQ(kpts1.size(), (size_t)desc1.rows);

  KeyPointVector kpts2 = det_ptr->detect(image2);
  cv::Mat desc2 = des_ptr->getDescriptor(image2,kpts2);
  ASSERT_EQ(kpts2.size(), (size_t)desc2.rows);

  KeyPointVector kpts3 = det_ptr->detect(image3);
  cv::Mat desc3 = des_ptr->getDescriptor(image3,kpts3);
  ASSERT_EQ(kpts3.size(), (size_t)desc3.rows);

  // Define matcher (brute force, plain MATCH type)
  std::string matcher_name = "BRUTEFORCE";
  vision_utils::MatcherParamsBasePtr params_ptr = std::make_shared<vision_utils::MatcherParamsBase>(matcher_name);
  vision_utils::MatcherBasePtr mat_ptr;
  params_ptr->match_type = vision_utils::MATCH;
  mat_ptr = vision_utils::setupMatcher(matcher_name, matcher_name, params_ptr);
  params_ptr = mat_ptr->getParams();
  ASSERT_EQ(params_ptr->match_type,1);

  // match 1-2
  DMatchVector matches12;
  mat_ptr->match(desc1,desc2,matches12);
  ASSERT_EQ(matches12.size(),193);

  // Filter out weak matches (descriptor distance > 200).
  // FIX: the original erased matches12.begin()+ii-1 while iterating — it
  // removes the element *before* the one tested, skips entries, and is UB
  // when ii == 0. Use the erase-remove idiom. No match in this data exceeds
  // 200 (see DetectorDescriptorMatcher), so downstream counts are unaffected.
  matches12.erase(std::remove_if(matches12.begin(), matches12.end(),
                                 [](const cv::DMatch& m) { return m.distance > 200; }),
                  matches12.end());

  // RANSAC homography 1-2 to reject outlier matches
  PointVector trainMatches;
  PointVector queryMatches;
  for (size_t ii = 0; ii < matches12.size(); ++ii)
  {
    trainMatches.push_back( kpts2[matches12[ii].trainIdx].pt);
    queryMatches.push_back( kpts1[matches12[ii].queryIdx].pt);
  }
  std::vector<uchar> status;
  cv::Mat h = cv::findHomography(trainMatches,queryMatches,CV_RANSAC,1.0, status);
  DMatchVector best_matches12;
  KeyPointVector kpts_matched12_img1;
  KeyPointVector kpts_matched12_img2;
  for(size_t ii = 0; ii < queryMatches.size(); ++ii)
  {
    if(status.at(ii) != 0) // RANSAC inlier
    {
      best_matches12.push_back(matches12[ii]);
      kpts_matched12_img1.push_back(cv::KeyPoint(queryMatches.at(ii),CV_32F));
      kpts_matched12_img2.push_back(cv::KeyPoint(trainMatches.at(ii),CV_32F));
    }
  }

  // match 2-3
  DMatchVector matches23;
  mat_ptr->match(desc2,desc3,matches23);
  ASSERT_EQ(matches23.size(),190);

  // Filter weak matches (same FIX as for matches12 above).
  matches23.erase(std::remove_if(matches23.begin(), matches23.end(),
                                 [](const cv::DMatch& m) { return m.distance > 200; }),
                  matches23.end());

  // RANSAC homography 2-3
  trainMatches.clear();
  queryMatches.clear();
  for (size_t ii = 0; ii < matches23.size(); ++ii)
  {
    trainMatches.push_back( kpts3[matches23[ii].trainIdx].pt);
    queryMatches.push_back( kpts2[matches23[ii].queryIdx].pt);
  }
  status.clear();
  h = cv::findHomography(trainMatches,queryMatches,CV_RANSAC,1.0, status);
  DMatchVector best_matches23;
  KeyPointVector kpts_matched23_img2;
  KeyPointVector kpts_matched23_img3;
  for(size_t ii = 0; ii < queryMatches.size(); ++ii)
  {
    if(status.at(ii) != 0)
    {
      best_matches23.push_back(matches23[ii]);
      kpts_matched23_img2.push_back(cv::KeyPoint(queryMatches.at(ii),CV_32F));
      kpts_matched23_img3.push_back(cv::KeyPoint(trainMatches.at(ii),CV_32F));
    }
  }

  // Keep only the matches that exist across all 3 images
  KeyPointVector kpts_good_in1;
  KeyPointVector kpts_good_in2;
  KeyPointVector kpts_good_in3;
  std::vector<int> distances = vision_utils::getCommonMatches(best_matches12, best_matches23, kpts_matched12_img1, kpts_matched12_img2, kpts_matched23_img2, kpts_matched23_img3, kpts_good_in1, kpts_good_in2, kpts_good_in3);
  ASSERT_EQ(kpts_good_in1.size(), 43);
#ifdef _VU_DEBUG
  cv::Mat img_graphics2 = mat_ptr->drawMatches(image1, image2, image3, kpts_good_in1, kpts_good_in2, kpts_good_in3);
  cv::imshow("test1",img_graphics2);
#endif

  // Compute tensor from the common keypoints (as homogeneous Eigen matrices)
  Eigen::MatrixXd list1 = vision_utils::KeyPointVecToEigenMat(kpts_good_in1);
  Eigen::MatrixXd list2 = vision_utils::KeyPointVecToEigenMat(kpts_good_in2);
  Eigen::MatrixXd list3 = vision_utils::KeyPointVecToEigenMat(kpts_good_in3);

  // Tensor with parameters
  double max_error_reprojection = 1.0;
  vision_utils::TrifocalTensor tensor(1000,max_error_reprojection,0.75);
  // bool tensor_ok = tensor.computeTensorRansac(list1, list2, list3);
  bool tensor_ok = tensor.computeTensor(list1, list2, list3);
  // bool tensor_ok = tensor.computeTensorFactorized(list1, list2, list3);
  ASSERT_TRUE(tensor_ok);

#ifdef _VU_DEBUG
  cv::Mat img_tmp;
  cvtColor(image3, img_tmp, cv::COLOR_GRAY2RGB);
#endif
  // Transfer each point from views 1 & 2 into view 3 and count how many land
  // within the reprojection tolerance of the measured point.
  double count_good = 0.0;
  for (int ii=0;ii<list1.rows(); ++ii)
  {
    Eigen::Vector3d p1,p2,p3est;
    p1 << list1(ii,0),list1(ii,1),list1(ii,2);
    p2 << list2(ii,0),list2(ii,1),list2(ii,2);
    tensor.transfer(p1, p2, p3est);
    p3est = p3est/p3est(2); // normalize homogeneous coordinates
    Eigen::Vector2d err_tmp(p3est(0)-list3(ii,0),p3est(1)-list3(ii,1));
    // NOTE(review): this compares the error *norm* against the *squared*
    // threshold; with max_error_reprojection == 1.0 both are equal so the
    // test is unaffected, but one of the two is probably unintended — confirm.
    const bool reprojected_ok = err_tmp.norm() <= std::pow(max_error_reprojection,2);
    if (reprojected_ok)
      ++count_good;
#ifdef _VU_DEBUG
    if (reprojected_ok)
      cv::circle(img_tmp,cv::Point(p3est(0),p3est(1)),5,cv::Scalar(0,255,0),-1);
    else
    {
      cv::circle(img_tmp,cv::Point(list3(ii,0),list3(ii,1)),5,cv::Scalar(0,0,255),-1);
      cv::circle(img_tmp,cv::Point(p3est(0),p3est(1)),5,cv::Scalar(255,0,0),2);
    }
#endif
  }
  ASSERT_TRUE(count_good/list1.rows()>0.9);
#ifdef _VU_DEBUG
  cv::imshow("test2",img_tmp);
  cv::waitKey(10000);
#endif
}
int main(int argc, char **argv)
{
testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}